#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "amdgpu-lower-kernel-arguments"

using namespace llvm;

namespace {
class PreloadKernelArgInfo {
  Function &F;
  const GCNSubtarget &ST;
  unsigned NumFreeUserSGPRs;

public:
  PreloadKernelArgInfo(Function &F, const GCNSubtarget &ST) : F(F), ST(ST) {
    setInitialFreeUserSGPRsCount();
  }

  // Account for SGPRs already claimed by the ABI; whatever remains below the
  // subtarget maximum is available for argument preloading.
  void setInitialFreeUserSGPRsCount() {
    const unsigned MaxUserSGPRs = ST.getMaxNumUserSGPRs();
    NumFreeUserSGPRs =
        MaxUserSGPRs - GCNUserSGPRUsageInfo(F, ST).getNumUsedUserSGPRs();
  }
  bool tryAllocPreloadSGPRs(unsigned AllocSize, uint64_t ArgOffset,
                            uint64_t LastExplicitArgOffset) {
    // Pad with SGPRs for any alignment gap left after the previous argument.
    unsigned Padding = ArgOffset - LastExplicitArgOffset;
    unsigned PaddingSGPRs = alignTo(Padding, 4) / 4;
    unsigned NumPreloadSGPRs = alignTo(AllocSize, 4) / 4;
    if (NumPreloadSGPRs + PaddingSGPRs > NumFreeUserSGPRs)
      return false;

    NumFreeUserSGPRs -= (NumPreloadSGPRs + PaddingSGPRs);
    return true;
  }
};
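// Worked example (illustrative, not from the original source): an i64
// argument (AllocSize = 8) at ArgOffset = 12, when the previous argument
// ended at LastExplicitArgOffset = 8, needs Padding = 4, i.e. PaddingSGPRs =
// 1, plus NumPreloadSGPRs = 2; the attempt therefore succeeds only if at
// least three user SGPRs are still free.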
class AMDGPULowerKernelArguments : public FunctionPass {
public:
  static char ID;

  AMDGPULowerKernelArguments() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
    AU.setPreservesAll();
  }
};

} // end anonymous namespace

// Choose an insertion point in the entry block past any static allocas, so
// the kernarg loads are grouped after them.
static BasicBlock::iterator getInsertPt(BasicBlock &BB) {
  BasicBlock::iterator InsPt = BB.getFirstInsertionPt();
  for (BasicBlock::iterator E = BB.end(); InsPt != E; ++InsPt) {
    AllocaInst *AI = dyn_cast<AllocaInst>(&*InsPt);

    // A dynamic alloca may depend on the loaded kernargs, so loads must be
    // inserted before it.
    if (!AI || !AI->isStaticAlloca())
      break;
  }

  return InsPt;
}
static bool lowerKernelArguments(Function &F, const TargetMachine &TM) {
  CallingConv::ID CC = F.getCallingConv();
  if (CC != CallingConv::AMDGPU_KERNEL || F.arg_empty())
    return false;

  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
  LLVMContext &Ctx = F.getParent()->getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  IRBuilder<> Builder(&*getInsertPt(*F.begin()));

  const Align KernArgBaseAlign(16);
  const uint64_t BaseOffset = ST.getExplicitKernelArgOffset();

  Align MaxAlign;
  const uint64_t TotalKernArgSize = ST.getKernArgSegmentSize(F, MaxAlign);
  if (TotalKernArgSize == 0)
    return false;

  CallInst *KernArgSegment =
      Builder.CreateIntrinsic(Intrinsic::amdgcn_kernarg_segment_ptr, {}, {},
                              nullptr, F.getName() + ".kernarg.segment");
  KernArgSegment->addRetAttr(Attribute::NonNull);
  KernArgSegment->addRetAttr(
      Attribute::getWithDereferenceableBytes(Ctx, TotalKernArgSize));
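// For illustration, the call above produces IR of roughly this shape (the
// exact value name depends on F.getName()):
//   %k.kernarg.segment = call nonnull ptr addrspace(4)
//       @llvm.amdgcn.kernarg.segment.ptr()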
  uint64_t ExplicitArgOffset = 0;
  // Preloaded kernel arguments must be sequential.
  bool InPreloadSequence = true;
  PreloadKernelArgInfo PreloadInfo(F, ST);

  for (Argument &Arg : F.args()) {
    const bool IsByRef = Arg.hasByRefAttr();
    Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
    MaybeAlign ParamAlign = IsByRef ? Arg.getParamAlign() : std::nullopt;
    Align ABITypeAlign = DL.getValueOrABITypeAlignment(ParamAlign, ArgTy);

    uint64_t Size = DL.getTypeSizeInBits(ArgTy);
    uint64_t AllocSize = DL.getTypeAllocSize(ArgTy);

    uint64_t EltOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + BaseOffset;
    uint64_t LastExplicitArgOffset = ExplicitArgOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + AllocSize;
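    // Worked example (illustrative): for kernel arguments (i32 %a, i64 %b),
    // %a gets EltOffset = BaseOffset + 0 and leaves ExplicitArgOffset = 4;
    // %b is then aligned up to 8, giving EltOffset = BaseOffset + 8 and
    // ExplicitArgOffset = 16.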
    // Try to preload this argument into user SGPRs.
    if (Arg.hasInRegAttr() && InPreloadSequence && ST.hasKernargPreload() &&
        !ST.needsKernargPreloadBackwardsCompatibility() &&
        !Arg.getType()->isAggregateType())
      if (PreloadInfo.tryAllocPreloadSGPRs(AllocSize, EltOffset,
                                           LastExplicitArgOffset))
        continue;

    InPreloadSequence = false;
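    // Example (assumed IR): in
    //   define amdgpu_kernel void @k(i32 inreg %a, i32 %b, i32 inreg %c)
    // %a is a preload candidate, but %b lacks inreg, so InPreloadSequence
    // becomes false and %c is lowered to an ordinary kernarg load as well.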
    if (Arg.use_empty())
      continue;

    // For byref arguments the loads already exist in the IR; only the
    // pointer value itself needs to be rewritten into the kernarg segment.
    if (IsByRef) {
      Value *ArgOffsetPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, EltOffset,
          Arg.getName() + ".byval.kernarg.offset");

      Value *CastOffsetPtr =
          Builder.CreateAddrSpaceCast(ArgOffsetPtr, Arg.getType());
      Arg.replaceAllUsesWith(CastOffsetPtr);
      continue;
    }
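    // For illustration (assumed names), a byref argument %p becomes a
    // pointer straight into the kernarg segment:
    //   %p.byval.kernarg.offset = getelementptr inbounds i8, ptr addrspace(4)
    //       %k.kernarg.segment, i64 <EltOffset>
    // followed by an addrspacecast to the argument's original pointer type.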
    if (PointerType *PT = dyn_cast<PointerType>(ArgTy)) {
      // DS (local/region) pointers rely on known-zero high bits for
      // addressing; skip them on targets without a usable DS offset.
      if ((PT->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
           PT->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) &&
          !ST.hasUsableDSOffset())
        continue;

      // noalias would need equivalent alias.scope/noalias metadata to be
      // preserved on the load; skip for now.
      if (Arg.hasNoAliasAttr())
        continue;
    }
    auto *VT = dyn_cast<FixedVectorType>(ArgTy);
    bool IsV3 = VT && VT->getNumElements() == 3;
    bool DoShiftOpt = Size < 32 && !ArgTy->isAggregateType();

    VectorType *V4Ty = nullptr;

    int64_t AlignDownOffset = alignDown(EltOffset, 4);
    int64_t OffsetDiff = EltOffset - AlignDownOffset;
    Align AdjustedAlign = commonAlignment(
        KernArgBaseAlign, DoShiftOpt ? AlignDownOffset : EltOffset);
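    // Worked example (illustrative): an i16 argument at EltOffset = 38
    // yields AlignDownOffset = 36 and OffsetDiff = 2, so a dword is loaded
    // from offset 36 and the argument's bits are shifted out of it below.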
    Value *ArgPtr;
    Type *AdjustedArgTy;
    if (DoShiftOpt) {
      // There are no sub-dword scalar loads; load the containing dword from
      // a 4-byte-aligned address and extract the relevant bits afterwards.
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, AlignDownOffset,
          Arg.getName() + ".kernarg.offset.align.down");
      AdjustedArgTy = Builder.getInt32Ty();
    } else {
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, EltOffset,
          Arg.getName() + ".kernarg.offset");
      AdjustedArgTy = ArgTy;
    }
    if (IsV3 && Size >= 32) {
      V4Ty = FixedVectorType::get(VT->getElementType(), 4);
      // Load v3 as v4 to avoid SelectionDAG splitting the load badly.
      AdjustedArgTy = V4Ty;
    }
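    // Illustrative: a <3 x i32> argument is loaded as <4 x i32> here and
    // narrowed back to three lanes by the shufflevector emitted below.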
    LoadInst *Load =
        Builder.CreateAlignedLoad(AdjustedArgTy, ArgPtr, AdjustedAlign);
    Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));
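    // For illustration (assumed IR shape), for an i32 argument %n this emits:
    //   %n.kernarg.offset = getelementptr inbounds i8, ptr addrspace(4)
    //       %k.kernarg.segment, i64 <EltOffset>
    //   %n.load = load i32, ptr addrspace(4) %n.kernarg.offset, align 4,
    //       !invariant.load !0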
    MDBuilder MDB(Ctx);

    if (isa<PointerType>(ArgTy)) {
      if (Arg.hasNonNullAttr())
        Load->setMetadata(LLVMContext::MD_nonnull, MDNode::get(Ctx, {}));

      uint64_t DerefBytes = Arg.getDereferenceableBytes();
      if (DerefBytes != 0) {
        Load->setMetadata(
            LLVMContext::MD_dereferenceable,
            MDNode::get(Ctx, MDB.createConstant(ConstantInt::get(
                                 Builder.getInt64Ty(), DerefBytes))));
      }

      uint64_t DerefOrNullBytes = Arg.getDereferenceableOrNullBytes();
      if (DerefOrNullBytes != 0) {
        Load->setMetadata(
            LLVMContext::MD_dereferenceable_or_null,
            MDNode::get(Ctx, MDB.createConstant(ConstantInt::get(
                                 Builder.getInt64Ty(), DerefOrNullBytes))));
      }

      if (MaybeAlign ParamAlign = Arg.getParamAlign()) {
        Load->setMetadata(
            LLVMContext::MD_align,
            MDNode::get(Ctx, MDB.createConstant(ConstantInt::get(
                                 Builder.getInt64Ty(), ParamAlign->value()))));
      }
    }
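    // For illustration (assumed IR): a "ptr addrspace(1) dereferenceable(256)
    // %out" argument carries its attribute over to the load as metadata:
    //   %out.load = load ptr addrspace(1), ptr addrspace(4)
    //       %out.kernarg.offset, align 8, !invariant.load !0, !dereferenceable !1
    // with !1 = !{i64 256}.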
    if (DoShiftOpt) {
      Value *ExtractBits = OffsetDiff == 0 ?
        Load : Builder.CreateLShr(Load, OffsetDiff * 8);

      IntegerType *ArgIntTy = Builder.getIntNTy(Size);
      Value *Trunc = Builder.CreateTrunc(ExtractBits, ArgIntTy);
      Value *NewVal = Builder.CreateBitCast(Trunc, ArgTy,
                                            Arg.getName() + ".load");
      Arg.replaceAllUsesWith(NewVal);
    } else if (IsV3) {
      Value *Shuf = Builder.CreateShuffleVector(Load, ArrayRef<int>{0, 1, 2},
                                                Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Shuf);
    } else {
      Load->setName(Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Load);
    }
  }

  KernArgSegment->addRetAttr(
      Attribute::getWithAlignment(Ctx, std::max(KernArgBaseAlign, MaxAlign)));

  return true;
}
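// Net effect (illustrative): every use of a kernel argument is replaced by a
// ".load" value read from the kernarg segment, so the formal arguments end
// up dead and codegen only sees the explicit, invariant loads.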
bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
  auto &TPC = getAnalysis<TargetPassConfig>();
  const TargetMachine &TM = TPC.getTM<TargetMachine>();
  return lowerKernelArguments(F, TM);
}

INITIALIZE_PASS_BEGIN(AMDGPULowerKernelArguments, DEBUG_TYPE,
                      "AMDGPU Lower Kernel Arguments", false, false)
INITIALIZE_PASS_END(AMDGPULowerKernelArguments, DEBUG_TYPE,
                    "AMDGPU Lower Kernel Arguments", false, false)

char AMDGPULowerKernelArguments::ID = 0;

FunctionPass *llvm::createAMDGPULowerKernelArgumentsPass() {
  return new AMDGPULowerKernelArguments();
}
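// Usage sketch (assumption: the new-PM registration reuses the DEBUG_TYPE
// name above): the lowering can be inspected in isolation with, e.g.:
//   opt -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-lower-kernel-arguments \
//       -S kernel.ll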