24#include "llvm/IR/IntrinsicsAMDGPU.h"
29#define DEBUG_TYPE "amdgpu-lower-kernel-attributes"
36enum DispatchPackedOffsets {
47enum ImplicitArgOffsets {
48 HIDDEN_BLOCK_COUNT_X = 0,
49 HIDDEN_BLOCK_COUNT_Y = 4,
50 HIDDEN_BLOCK_COUNT_Z = 8,
52 HIDDEN_GROUP_SIZE_X = 12,
53 HIDDEN_GROUP_SIZE_Y = 14,
54 HIDDEN_GROUP_SIZE_Z = 16,
56 HIDDEN_REMAINDER_X = 18,
57 HIDDEN_REMAINDER_Y = 20,
58 HIDDEN_REMAINDER_Z = 22,
61class AMDGPULowerKernelAttributes :
public ModulePass {
70 return "AMDGPU Kernel Attributes";
79 auto IntrinsicId = IsV5OrAbove ? Intrinsic::amdgcn_implicitarg_ptr
80 : Intrinsic::amdgcn_dispatch_ptr;
88 if (MaxNumGroups == 0 || MaxNumGroups == std::numeric_limits<uint32_t>::max())
91 if (!Load->getType()->isIntegerTy(32))
97 Load->setMetadata(LLVMContext::MD_range,
Range);
103 auto *MD =
F->getMetadata(
"reqd_work_group_size");
104 const bool HasReqdWorkGroupSize = MD && MD->getNumOperands() == 3;
106 const bool HasUniformWorkGroupSize =
107 F->getFnAttribute(
"uniform-work-group-size").getValueAsBool();
112 if (!HasReqdWorkGroupSize && !HasUniformWorkGroupSize &&
113 none_of(MaxNumWorkgroups, [](
unsigned X) {
return X != 0; }))
116 Value *BlockCounts[3] = {
nullptr,
nullptr,
nullptr};
117 Value *GroupSizes[3] = {
nullptr,
nullptr,
nullptr};
118 Value *Remainders[3] = {
nullptr,
nullptr,
nullptr};
119 Value *GridSizes[3] = {
nullptr,
nullptr,
nullptr};
130 auto *Load = dyn_cast<LoadInst>(U);
131 auto *BCI = dyn_cast<BitCastInst>(U);
135 Load = dyn_cast<LoadInst>(*U->user_begin());
136 BCI = dyn_cast<BitCastInst>(*U->user_begin());
140 if (!BCI->hasOneUse())
142 Load = dyn_cast<LoadInst>(*BCI->user_begin());
145 if (!Load || !Load->isSimple())
148 unsigned LoadSize =
DL.getTypeStoreSize(Load->getType());
153 case HIDDEN_BLOCK_COUNT_X:
155 BlockCounts[0] = Load;
159 case HIDDEN_BLOCK_COUNT_Y:
161 BlockCounts[1] = Load;
165 case HIDDEN_BLOCK_COUNT_Z:
167 BlockCounts[2] = Load;
171 case HIDDEN_GROUP_SIZE_X:
173 GroupSizes[0] = Load;
175 case HIDDEN_GROUP_SIZE_Y:
177 GroupSizes[1] = Load;
179 case HIDDEN_GROUP_SIZE_Z:
181 GroupSizes[2] = Load;
183 case HIDDEN_REMAINDER_X:
185 Remainders[0] = Load;
187 case HIDDEN_REMAINDER_Y:
189 Remainders[1] = Load;
191 case HIDDEN_REMAINDER_Z:
193 Remainders[2] = Load;
200 case WORKGROUP_SIZE_X:
202 GroupSizes[0] = Load;
204 case WORKGROUP_SIZE_Y:
206 GroupSizes[1] = Load;
208 case WORKGROUP_SIZE_Z:
210 GroupSizes[2] = Load;
230 bool MadeChange =
false;
231 if (IsV5OrAbove && HasUniformWorkGroupSize) {
239 for (
int I = 0;
I < 3; ++
I) {
240 Value *BlockCount = BlockCounts[
I];
246 I == 0 ? m_Intrinsic<Intrinsic::amdgcn_workgroup_id_x>()
247 : (
I == 1 ? m_Intrinsic<Intrinsic::amdgcn_workgroup_id_y>()
248 : m_Intrinsic<Intrinsic::amdgcn_workgroup_id_z>());
260 for (
Value *Remainder : Remainders) {
266 }
else if (HasUniformWorkGroupSize) {
286 for (
int I = 0;
I < 3; ++
I) {
287 Value *GroupSize = GroupSizes[
I];
288 Value *GridSize = GridSizes[
I];
289 if (!GroupSize || !GridSize)
294 I == 0 ? m_Intrinsic<Intrinsic::amdgcn_workgroup_id_x>()
295 : (
I == 1 ? m_Intrinsic<Intrinsic::amdgcn_workgroup_id_y>()
296 : m_Intrinsic<Intrinsic::amdgcn_workgroup_id_z>());
299 auto *ZextGroupSize = dyn_cast<ZExtInst>(U);
303 for (
User *
UMin : ZextGroupSize->users()) {
308 if (HasReqdWorkGroupSize) {
310 = mdconst::extract<ConstantInt>(MD->getOperand(
I));
312 KnownSize,
UMin->getType(),
false,
DL));
314 UMin->replaceAllUsesWith(ZextGroupSize);
325 if (!HasReqdWorkGroupSize)
328 for (
int I = 0;
I < 3;
I++) {
329 Value *GroupSize = GroupSizes[
I];
333 ConstantInt *KnownSize = mdconst::extract<ConstantInt>(MD->getOperand(
I));
345bool AMDGPULowerKernelAttributes::runOnModule(
Module &M) {
346 bool MadeChange =
false;
355 for (
auto *U :
BasePtr->users()) {
357 if (HandledUses.
insert(CI).second) {
368 "AMDGPU Kernel Attributes",
false,
false)
372char AMDGPULowerKernelAttributes::
ID = 0;
375 return new AMDGPULowerKernelAttributes();
382 Function *BasePtr = getBasePtrIntrinsic(*
F.getParent(), IsV5OrAbove);
388 if (
CallInst *CI = dyn_cast<CallInst>(&
I)) {
static void annotateGridSizeLoadWithRangeMD(LoadInst *Load, uint32_t MaxNumGroups)
static bool processUse(CallInst *CI, bool IsV5OrAbove)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
This file contains the declarations for the subclasses of Constant, which represent the different flavors of constant values that live in LLVM.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Class for arbitrary precision integers.
A container for analyses that lazily runs them and caches their results.
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signature does not match the call signature.
This class represents a function call, abstracting a target machine's calling convention.
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
An instruction for reading from memory.
MDNode * createRange(const APInt &Lo, const APInt &Hi)
Return metadata describing the range [Lo, Hi).
ModulePass class - This class is used to implement unstructured interprocedural optimizations and analyses.
virtual bool runOnModule(Module &M)=0
runOnModule - Virtual method overridden by subclasses to process the module being operated on.
A Module instance is used to store all the information related to an LLVM module.
virtual void getAnalysisUsage(AnalysisUsage &) const
getAnalysisUsage - This function should be overridden by passes that need analysis information to do their job.
virtual StringRef getPassName() const
getPassName - Return a nice clean name for a pass.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
const ParentTy * getParent() const
unsigned getAMDHSACodeObjectVersion(const Module &M)
SmallVector< unsigned > getIntegerVecAttribute(const Function &F, StringRef Name, unsigned Size, unsigned DefaultVal)
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Function * getDeclarationIfExists(Module *M, ID id, ArrayRef< Type * > Tys, FunctionType *FT=nullptr)
This version supports overloaded intrinsics.
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
This is an optimization pass for GlobalISel generic memory operations.
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
ModulePass * createAMDGPULowerKernelAttributesPass()
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrower than the source type.
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)