#include "llvm/IR/IntrinsicsAMDGPU.h"

#define DEBUG_TYPE "amdgpu-lower-module-lds"

using namespace AMDGPU;
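// This pass lowers uses of LDS (addrspace(3)) variables from non-kernel
// functions. Direct uses from a kernel stay in a per-kernel struct; uses
// from functions that may be called from several kernels are resolved
// through a module-scope struct or a kernel-ID-indexed lookup table,
// chosen by the strategy flag below.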
static cl::opt<bool> SuperAlignLDSGlobals(
    "amdgpu-super-align-lds-globals",
    cl::desc("Increase alignment of LDS if it is not on align boundary"),
    cl::init(true), cl::Hidden);
enum class LoweringKind { module, table, kernel, hybrid };
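// module: indirectly accessed LDS lives in one struct allocated by every
// kernel that can reach it; table: per-kernel structs addressed through a
// kernel-ID-indexed lookup table; kernel: only variables reachable from
// exactly one kernel; hybrid: a per-variable mix of the above (see
// partitionVariablesIntoIndirectStrategies below).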
static cl::opt<LoweringKind> LoweringKindLoc(
    "amdgpu-lower-module-lds-strategy",
    cl::desc("Specify lowering strategy for function LDS access:"), cl::Hidden,
    cl::init(LoweringKind::hybrid),
    cl::values(
        clEnumValN(LoweringKind::table, "table", "Lower via table lookup"),
        clEnumValN(LoweringKind::module, "module", "Lower via module struct"),
        clEnumValN(
            LoweringKind::kernel, "kernel",
            "Lower variables reachable from one kernel, otherwise abort"),
        clEnumValN(LoweringKind::hybrid, "hybrid",
                   "Lower via mixture of above strategies")));
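// Iteration order over DenseMap/DenseSet is not deterministic, so containers
// are sorted by value name before any order-sensitive step.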
template <typename T> std::vector<T> sortByName(std::vector<T> &&V) {
  llvm::sort(V.begin(), V.end(), [](const auto *L, const auto *R) {
    return L->getName() < R->getName();
  });
  return {std::move(V)};
}
class AMDGPULowerModuleLDS {
  const AMDGPUTargetMachine &TM;

public:
  AMDGPULowerModuleLDS(const AMDGPUTargetMachine &TM_) : TM(TM_) {}

  static void removeLocalVarsFromUsedLists(
      Module &M, const DenseSet<GlobalVariable *> &LocalVars) {
    // llvm.used / llvm.compiler.used must not contain (casts of) the
    // variables being replaced, so strip them out first.
    SmallPtrSet<Constant *, 8> LocalVarsSet;
    for (GlobalVariable *LocalVar : LocalVars)
      LocalVarsSet.insert(cast<Constant>(LocalVar->stripPointerCasts()));
    removeFromUsedLists(
        M, [&LocalVarsSet](Constant *C) { return LocalVarsSet.count(C); });
    for (GlobalVariable *LocalVar : LocalVars)
      LocalVar->removeDeadConstantUsers();
  }
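  // Create a no-op use of the replacement struct in the kernel's entry block
  // so later passes keep the allocation alive: an llvm.donothing call
  // carrying a pointer into the struct in an "ExplicitUse" operand bundle.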
  static void markUsedByKernel(Function *Func, GlobalVariable *SGV) {
    BasicBlock *Entry = &Func->getEntryBlock();
    IRBuilder<> Builder(Entry, Entry->getFirstNonPHIIt());
    Function *Decl = Intrinsic::getOrInsertDeclaration(
        Func->getParent(), Intrinsic::donothing, {});
    Value *UseInstance[1] = {
        Builder.CreateConstInBoundsGEP1_32(SGV->getValueType(), SGV, 0)};
    // ... call Decl with an "ExplicitUse" operand bundle over UseInstance.
  }
  struct LDSVariableReplacement {
    GlobalVariable *SGV = nullptr;
    DenseMap<GlobalVariable *, Constant *> LDSVarsToConstantGEP;
  };

  // Build one lookup-table row: the i32 address of each variable within the
  // kernel's struct, or poison if this kernel does not allocate it.
  static Constant *getAddressesOfVariablesInKernel(
      LLVMContext &Ctx, ArrayRef<GlobalVariable *> Variables,
      const DenseMap<GlobalVariable *, Constant *> &LDSVarsToConstantGEP) {
    Type *I32 = Type::getInt32Ty(Ctx);
    ArrayType *KernelOffsetsType = ArrayType::get(I32, Variables.size());
    SmallVector<Constant *> Elements;
    for (GlobalVariable *GV : Variables) {
      auto ConstantGepIt = LDSVarsToConstantGEP.find(GV);
      if (ConstantGepIt != LDSVarsToConstantGEP.end()) {
        auto *elt = ConstantExpr::getPtrToInt(ConstantGepIt->second, I32);
        Elements.push_back(elt);
      } else
        Elements.push_back(PoisonValue::get(I32));
    }
    return ConstantArray::get(KernelOffsetsType, Elements);
  }
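  // buildLookupTable (name assumed; the listing elides the declaration):
  // emit a constant [NumberKernels x [NumberVariables x i32]] table of
  // addresses, indexed first by kernel ID, then by variable index.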
  static GlobalVariable *buildLookupTable(
      Module &M, ArrayRef<GlobalVariable *> Variables,
      ArrayRef<Function *> kernels,
      DenseMap<Function *, LDSVariableReplacement> &KernelToReplacement) {
    if (Variables.empty())
      return nullptr;
    LLVMContext &Ctx = M.getContext();

    const size_t NumberVariables = Variables.size();
    const size_t NumberKernels = kernels.size();

    ArrayType *KernelOffsetsType =
        ArrayType::get(Type::getInt32Ty(Ctx), NumberVariables);
    ArrayType *AllKernelsOffsetsType =
        ArrayType::get(KernelOffsetsType, NumberKernels);

    // One row per kernel, in kernel-ID order; kernels with no replacement
    // struct get a poison row.
    std::vector<Constant *> overallConstantExprElts(NumberKernels);
    for (size_t i = 0; i < NumberKernels; i++) {
      auto Replacement = KernelToReplacement.find(kernels[i]);
      overallConstantExprElts[i] =
          (Replacement == KernelToReplacement.end())
              ? PoisonValue::get(KernelOffsetsType)
              : getAddressesOfVariablesInKernel(
                    Ctx, Variables, Replacement->second.LDSVarsToConstantGEP);
    }

    Constant *init =
        ConstantArray::get(AllKernelsOffsetsType, overallConstantExprElts);
    return new GlobalVariable(
        M, AllKernelsOffsetsType, true, GlobalValue::InternalLinkage, init,
        "llvm.amdgcn.lds.offset.table", nullptr, GlobalValue::NotThreadLocal,
        AMDGPUAS::CONSTANT_ADDRESS);
  }
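  // replaceUseWithTableLookup: rewrite one use of an LDS global into
  // GEP(table, 0, kernel-id [, variable-index]) -> load i32 -> inttoptr.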
  static void replaceUseWithTableLookup(Module &M, IRBuilder<> &Builder,
                                        GlobalVariable *LookupTable,
                                        GlobalVariable *GV, Use &U,
                                        Value *OptionalIndex) {
    LLVMContext &Ctx = M.getContext();
    Type *I32 = Type::getInt32Ty(Ctx);
    auto *I = cast<Instruction>(U.getUser());

    Value *tableKernelIndex = getTableLookupKernelIndex(M, I->getFunction());

    // A use in a PHI must be materialized in the incoming block, not at the
    // PHI itself.
    if (auto *Phi = dyn_cast<PHINode>(I)) {
      BasicBlock *BB = Phi->getIncomingBlock(U);
      Builder.SetInsertPoint(&(*(BB->getFirstInsertionPt())));
    } else {
      Builder.SetInsertPoint(I);
    }

    SmallVector<Value *, 3> GEPIdx = {ConstantInt::get(I32, 0),
                                      tableKernelIndex};
    if (OptionalIndex)
      GEPIdx.push_back(OptionalIndex);

    Value *Address = Builder.CreateInBoundsGEP(
        LookupTable->getValueType(), LookupTable, GEPIdx, GV->getName());
    Value *loaded = Builder.CreateLoad(I32, Address);
    U.set(Builder.CreateIntToPtr(loaded, GV->getType(), GV->getName()));
  }
  static void replaceUsesInInstructionsWithTableLookup(
      Module &M, ArrayRef<GlobalVariable *> ModuleScopeVariables,
      GlobalVariable *LookupTable) {
    LLVMContext &Ctx = M.getContext();
    IRBuilder<> Builder(Ctx);
    Type *I32 = Type::getInt32Ty(Ctx);

    for (size_t Index = 0; Index < ModuleScopeVariables.size(); Index++) {
      auto *GV = ModuleScopeVariables[Index];
      for (Use &U : make_early_inc_range(GV->uses())) {
        auto *I = dyn_cast<Instruction>(U.getUser());
        if (!I)
          continue;
        replaceUseWithTableLookup(M, Builder, LookupTable, GV, U,
                                  ConstantInt::get(I32, Index));
      }
    }
  }
  static DenseSet<Function *> kernelsThatIndirectlyAccessAnyOfPassedVariables(
      Module &M, LDSUsesInfoTy &LDSUsesInfo,
      DenseSet<GlobalVariable *> const &VariableSet) {
    DenseSet<Function *> KernelSet;
    if (VariableSet.empty())
      return KernelSet;
    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;
      for (GlobalVariable *GV : LDSUsesInfo.indirect_access[&Func]) {
        if (VariableSet.contains(GV)) {
          KernelSet.insert(&Func);
          break;
        }
      }
    }
    return KernelSet;
  }
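  // Hybrid strategy root selection: greedily pick the variable reachable
  // from the most kernels (smaller allocation wins ties) so that lowering
  // it via the module struct removes the most lookup-table traffic.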
  static GlobalVariable *chooseBestVariableForModuleStrategy(
      const DataLayout &DL, VariableFunctionMap &LDSVars) {
    // Find the global variable with the most indirect uses from kernels.
    struct CandidateTy {
      GlobalVariable *GV = nullptr;
      size_t UserCount = 0;
      size_t Size = 0;

      CandidateTy() = default;
      CandidateTy(GlobalVariable *GV, uint64_t UserCount, uint64_t AllocSize)
          : GV(GV), UserCount(UserCount), Size(AllocSize) {}

      bool operator<(const CandidateTy &Other) const {
        // Fewer users makes a module scope variable less attractive.
        if (UserCount < Other.UserCount)
          return true;
        if (UserCount > Other.UserCount)
          return false;
        // Bigger size makes a module scope variable less attractive.
        if (Size < Other.Size)
          return false;
        if (Size > Other.Size)
          return true;
        // Arbitrary but consistent.
        return GV->getName() < Other.GV->getName();
      }
    };

    CandidateTy MostUsed;
    for (auto &K : LDSVars) {
      GlobalVariable *GV = K.first;
      if (K.second.size() <= 1) {
        // Reachable from one kernel: best lowered with the kernel strategy.
        continue;
      }
      CandidateTy Candidate(
          GV, K.second.size(),
          DL.getTypeAllocSize(GV->getValueType()).getFixedValue());
      if (MostUsed < Candidate)
        MostUsed = Candidate;
    }
    return MostUsed.GV;
  }
  // One kernel-index computation is materialized per function and reused.
  DenseMap<Function *, Value *> tableKernelIndexCache;

  Value *getTableLookupKernelIndex(Module &M, Function *F) {
    auto [It, Inserted] = tableKernelIndexCache.try_emplace(F);
    if (Inserted) {
      auto InsertAt = F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
      IRBuilder<> Builder(&*InsertAt);
      It->second =
          Builder.CreateIntrinsic(Intrinsic::amdgcn_lds_kernel_id, {}, {});
    }
    return It->second;
  }
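  // Kernels that allocate table or dynamic LDS are numbered (sorted by name)
  // and the ID is attached as !llvm.amdgcn.lds.kernel.id metadata, which the
  // amdgcn_lds_kernel_id intrinsic reads back.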
  static std::vector<Function *> assignLDSKernelIDToEachKernel(
      Module *M, DenseSet<Function *> const &KernelsThatAllocateTableLDS,
      DenseSet<Function *> const &KernelsThatIndirectlyAllocateDynamicLDS) {
    std::vector<Function *> OrderedKernels;
    if (!KernelsThatAllocateTableLDS.empty() ||
        !KernelsThatIndirectlyAllocateDynamicLDS.empty()) {
      for (Function &Func : M->functions()) {
        if (Func.isDeclaration())
          continue;
        if (!isKernelLDS(&Func))
          continue;
        if (KernelsThatAllocateTableLDS.contains(&Func) ||
            KernelsThatIndirectlyAllocateDynamicLDS.contains(&Func)) {
          OrderedKernels.push_back(&Func);
        }
      }

      // Arbitrary but reproducible order.
      OrderedKernels = sortByName(std::move(OrderedKernels));

      if (OrderedKernels.size() > UINT32_MAX) {
        // 32 bits keeps the ID in one SGPR.
        report_fatal_error("Unimplemented LDS lowering for > 2**32 kernels");
      }

      LLVMContext &Ctx = M->getContext();
      IRBuilder<> Builder(Ctx);
      for (size_t i = 0; i < OrderedKernels.size(); i++) {
        Metadata *AttrMDArgs[1] = {
            ConstantAsMetadata::get(Builder.getInt32(i))};
        OrderedKernels[i]->setMetadata("llvm.amdgcn.lds.kernel.id",
                                       MDNode::get(Ctx, AttrMDArgs));
      }
    }
    return OrderedKernels;
  }
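  // Decide, per indirectly accessed variable, which lowering strategy
  // applies. Dynamic LDS is split out first; under hybrid, the chosen
  // "module root" variable (plus variables reachable only from the same
  // kernels as the root) goes to the module struct.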
  static void partitionVariablesIntoIndirectStrategies(
      Module &M, LDSUsesInfoTy const &LDSUsesInfo,
      VariableFunctionMap &LDSToKernelsThatNeedToAccessItIndirectly,
      DenseSet<GlobalVariable *> &ModuleScopeVariables,
      DenseSet<GlobalVariable *> &TableLookupVariables,
      DenseSet<GlobalVariable *> &KernelAccessVariables,
      DenseSet<GlobalVariable *> &DynamicVariables) {
    GlobalVariable *HybridModuleRoot =
        LoweringKindLoc != LoweringKind::hybrid
            ? nullptr
            : chooseBestVariableForModuleStrategy(
                  M.getDataLayout(), LDSToKernelsThatNeedToAccessItIndirectly);

    DenseSet<Function *> const EmptySet;
    DenseSet<Function *> const &HybridModuleRootKernels =
        HybridModuleRoot
            ? LDSToKernelsThatNeedToAccessItIndirectly[HybridModuleRoot]
            : EmptySet;

    for (auto &K : LDSToKernelsThatNeedToAccessItIndirectly) {
      // Each iteration assigns exactly one variable to exactly one strategy.
      GlobalVariable *GV = K.first;
      assert(K.second.size() != 0);

      if (AMDGPU::isDynamicLDS(*GV)) {
        DynamicVariables.insert(GV);
        continue;
      }

      switch (LoweringKindLoc) {
      case LoweringKind::module:
        ModuleScopeVariables.insert(GV);
        break;
      case LoweringKind::table:
        TableLookupVariables.insert(GV);
        break;
      case LoweringKind::kernel:
        if (K.second.size() == 1) {
          KernelAccessVariables.insert(GV);
        } else {
          report_fatal_error(
              "cannot lower LDS '" + GV->getName() +
              "' to kernel access as it is reachable from multiple kernels");
        }
        break;
      case LoweringKind::hybrid: {
        if (GV == HybridModuleRoot) {
          assert(K.second.size() != 1);
          ModuleScopeVariables.insert(GV);
        } else if (K.second.size() == 1) {
          KernelAccessVariables.insert(GV);
        } else if (set_is_subset(K.second, HybridModuleRootKernels)) {
          // Only reachable from kernels that also allocate the root, so it
          // can share the module struct at no extra cost to other kernels.
          ModuleScopeVariables.insert(GV);
        } else {
          TableLookupVariables.insert(GV);
        }
        break;
      }
      }
    }

    assert(ModuleScopeVariables.size() + TableLookupVariables.size() +
               KernelAccessVariables.size() + DynamicVariables.size() ==
           LDSToKernelsThatNeedToAccessItIndirectly.size());
  }
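  // Create the single llvm.amdgcn.module.lds struct for the module strategy,
  // pin it to LDS offset zero, and rewrite the qualifying uses.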
  static GlobalVariable *lowerModuleScopeStructVariables(
      Module &M, DenseSet<GlobalVariable *> const &ModuleScopeVariables,
      DenseSet<Function *> const &KernelsThatAllocateModuleLDS) {
    if (ModuleScopeVariables.empty()) {
      return nullptr;
    }
    LLVMContext &Ctx = M.getContext();

    LDSVariableReplacement ModuleScopeReplacement =
        createLDSVariableReplacement(M, "llvm.amdgcn.module.lds",
                                     ModuleScopeVariables);

    appendToCompilerUsed(M, {static_cast<GlobalValue *>(
                            ConstantExpr::getPointerBitCastOrAddrSpaceCast(
                                cast<Constant>(ModuleScopeReplacement.SGV),
                                PointerType::getUnqual(Ctx)))});

    // module.lds is allocated at zero in every kernel that allocates it.
    recordLDSAbsoluteAddress(&M, ModuleScopeReplacement.SGV, 0);

    removeLocalVarsFromUsedLists(M, ModuleScopeVariables);

    // Rewrite all uses from non-kernel functions.
    replaceLDSVariablesWithStruct(
        M, ModuleScopeVariables, ModuleScopeReplacement, [&](Use &U) {
          auto *I = dyn_cast<Instruction>(U.getUser());
          return I && !isKernelLDS(I->getFunction());
        });

    // Rewrite uses from kernels that allocate the module struct; other
    // kernels keep the original variables.
    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;
      if (KernelsThatAllocateModuleLDS.contains(&Func)) {
        replaceLDSVariablesWithStruct(
            M, ModuleScopeVariables, ModuleScopeReplacement, [&](Use &U) {
              auto *I = dyn_cast<Instruction>(U.getUser());
              return I && I->getFunction() == &Func;
            });
        markUsedByKernel(&Func, ModuleScopeReplacement.SGV);
      }
    }
    return ModuleScopeReplacement.SGV;
  }
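  // Per kernel: collect the LDS variables it uses directly or indirectly
  // (minus those already in the module struct), pack them into an
  // llvm.amdgcn.kernel.<name>.lds struct, and rewrite the kernel's uses.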
  static DenseMap<Function *, LDSVariableReplacement>
  lowerKernelScopeStructVariables(
      Module &M, LDSUsesInfoTy &LDSUsesInfo,
      DenseSet<GlobalVariable *> const &ModuleScopeVariables,
      DenseSet<Function *> const &KernelsThatAllocateModuleLDS,
      GlobalVariable *MaybeModuleScopeStruct) {
    DenseMap<Function *, LDSVariableReplacement> KernelToReplacement;
    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;

      DenseSet<GlobalVariable *> KernelUsedVariables;
      for (auto &v : LDSUsesInfo.direct_access[&Func])
        if (!AMDGPU::isDynamicLDS(*v))
          KernelUsedVariables.insert(v);
      for (auto &v : LDSUsesInfo.indirect_access[&Func])
        if (!AMDGPU::isDynamicLDS(*v))
          KernelUsedVariables.insert(v);

      // Variables placed in the module struct must resolve to that struct,
      // not to per-kernel copies.
      if (KernelsThatAllocateModuleLDS.contains(&Func))
        for (GlobalVariable *v : ModuleScopeVariables)
          KernelUsedVariables.erase(v);

      if (KernelUsedVariables.empty())
        continue;

      // The kernel/struct association is by symbol name.
      if (!Func.hasName())
        report_fatal_error("Anonymous kernels cannot use LDS variables");

      std::string VarName =
          (Twine("llvm.amdgcn.kernel.") + Func.getName() + ".lds").str();
      auto Replacement =
          createLDSVariableReplacement(M, VarName, KernelUsedVariables);

      // If there are indirect uses, add a direct use so the struct is
      // definitely allocated.
      auto Accesses = LDSUsesInfo.indirect_access.find(&Func);
      if (Accesses != LDSUsesInfo.indirect_access.end() &&
          !Accesses->second.empty())
        markUsedByKernel(&Func, Replacement.SGV);

      removeLocalVarsFromUsedLists(M, KernelUsedVariables);
      KernelToReplacement[&Func] = Replacement;

      replaceLDSVariablesWithStruct(
          M, KernelUsedVariables, Replacement, [&Func](Use &U) {
            auto *I = dyn_cast<Instruction>(U.getUser());
            return I && I->getFunction() == &Func;
          });
    }
    return KernelToReplacement;
  }
  static GlobalVariable *buildRepresentativeDynamicLDSInstance(
      Module &M, LDSUsesInfoTy &LDSUsesInfo, Function *func) {
    // The representative variable takes the maximum alignment of any dynamic
    // LDS variable reachable from this kernel.
    const DataLayout &DL = M.getDataLayout();
    Align MaxDynamicAlignment(1);
    auto UpdateMaxAlignment = [&MaxDynamicAlignment, &DL](GlobalVariable *GV) {
      if (AMDGPU::isDynamicLDS(*GV))
        MaxDynamicAlignment =
            std::max(MaxDynamicAlignment, AMDGPU::getAlign(DL, GV));
    };
    for (GlobalVariable *GV : LDSUsesInfo.indirect_access[func])
      UpdateMaxAlignment(GV);
    for (GlobalVariable *GV : LDSUsesInfo.direct_access[func])
      UpdateMaxAlignment(GV);

    // A zero-length [0 x i8] array in LDS, named after the kernel, stands in
    // for all dynamic LDS the kernel can reach.
    LLVMContext &Ctx = M.getContext();
    ArrayType *emptyCharArray = ArrayType::get(Type::getInt8Ty(Ctx), 0);
    GlobalVariable *N = new GlobalVariable(
        M, emptyCharArray, false, GlobalValue::ExternalLinkage, nullptr,
        Twine("llvm.amdgcn." + func->getName() + ".dynlds"), nullptr,
        GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS, false);
    N->setAlignment(MaxDynamicAlignment);
    return N;
  }
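  // lowerDynamicLDSVariables: give each kernel that reaches dynamic LDS one
  // representative instance, then route every non-kernel dynamic-LDS use
  // through a kernel-ID-indexed offset table. There is no per-variable
  // index: all dynamic LDS in a kernel shares one base address.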
  DenseMap<Function *, GlobalVariable *> lowerDynamicLDSVariables(
      Module &M, LDSUsesInfoTy &LDSUsesInfo,
      DenseSet<Function *> const &KernelsThatIndirectlyAllocateDynamicLDS,
      DenseSet<GlobalVariable *> const &DynamicVariables,
      std::vector<Function *> const &OrderedKernels) {
    DenseMap<Function *, GlobalVariable *> KernelToCreatedDynamicLDS;
    if (!KernelsThatIndirectlyAllocateDynamicLDS.empty()) {
      LLVMContext &Ctx = M.getContext();
      IRBuilder<> Builder(Ctx);
      Type *I32 = Type::getInt32Ty(Ctx);

      // The table is built in the same order as OrderedKernels.
      std::vector<Constant *> newDynamicLDS;
      for (auto &func : OrderedKernels) {
        if (KernelsThatIndirectlyAllocateDynamicLDS.contains(func)) {
          if (!func->hasName()) {
            report_fatal_error("Anonymous kernels cannot use LDS variables");
          }
          GlobalVariable *N =
              buildRepresentativeDynamicLDSInstance(M, LDSUsesInfo, func);
          KernelToCreatedDynamicLDS[func] = N;
          markUsedByKernel(func, N);

          auto emptyCharArray = ArrayType::get(Type::getInt8Ty(Ctx), 0);
          auto GEP = ConstantExpr::getGetElementPtr(
              emptyCharArray, N, ConstantInt::get(I32, 0), true);
          newDynamicLDS.push_back(ConstantExpr::getPtrToInt(GEP, I32));
        } else {
          newDynamicLDS.push_back(PoisonValue::get(I32));
        }
      }
      assert(OrderedKernels.size() == newDynamicLDS.size());

      ArrayType *t = ArrayType::get(I32, newDynamicLDS.size());
      Constant *init = ConstantArray::get(t, newDynamicLDS);
      GlobalVariable *table = new GlobalVariable(
          M, t, true, GlobalValue::InternalLinkage, init,
          "llvm.amdgcn.dynlds.offset.table", nullptr,
          GlobalValue::NotThreadLocal, AMDGPUAS::CONSTANT_ADDRESS);

      // Non-kernel uses go through the table; kernels use their own
      // representative variable directly.
      for (GlobalVariable *gv : DynamicVariables) {
        for (Use &U : make_early_inc_range(gv->uses())) {
          auto *I = dyn_cast<Instruction>(U.getUser());
          if (!I)
            continue;
          if (isKernelLDS(I->getFunction()))
            continue;
          replaceUseWithTableLookup(M, Builder, table, gv, U, nullptr);
        }
      }
    }
    return KernelToCreatedDynamicLDS;
  }
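  // uniquifyGVPerKernel: clone a global so the given kernel (and the
  // functions it reaches) get their own copy, leaving other kernels on the
  // original; used below for named-barrier variables.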
  GlobalVariable *uniquifyGVPerKernel(Module &M, GlobalVariable *GV,
                                      Function *KF) {
    bool NeedsReplacement = false;
    for (Use &U : GV->uses()) {
      if (auto *I = dyn_cast<Instruction>(U.getUser())) {
        Function *F = I->getFunction();
        if (isKernelLDS(F) && F != KF) {
          NeedsReplacement = true;
          break;
        }
      }
    }
    if (!NeedsReplacement)
      return GV;
    // Create a new GV used only by this kernel and its functions.
    GlobalVariable *NewGV = new GlobalVariable(
        M, GV->getValueType(), GV->isConstant(), GV->getLinkage(),
        GV->getInitializer(), GV->getName() + "." + KF->getName(), nullptr,
        GV->getThreadLocalMode(), GV->getType()->getAddressSpace());
    NewGV->copyAttributesFrom(GV);
    for (Use &U : make_early_inc_range(GV->uses())) {
      if (auto *I = dyn_cast<Instruction>(U.getUser())) {
        Function *F = I->getFunction();
        if (!isKernelLDS(F) || F == KF) {
          U.getUser()->replaceUsesOfWith(GV, NewGV);
        }
      }
    }
    return NewGV;
  }
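  // lowerSpecialLDSVariables: named-barrier LDS gets an absolute address
  // encoding the barrier scope and ID rather than a struct slot. Barriers
  // reachable from several kernels share one module-absolute ID; the rest
  // get kernel-relative IDs after being uniquified per kernel.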
  bool lowerSpecialLDSVariables(
      Module &M, LDSUsesInfoTy &LDSUsesInfo,
      VariableFunctionMap &LDSToKernelsThatNeedToAccessItIndirectly) {
    bool Changed = false;
    // 1st round: module-absolute IDs for named barriers indirectly reachable
    // from more than one kernel.
    int NumAbsolutes = 0;
    std::vector<GlobalVariable *> OrderedGVs;
    for (auto &K : LDSToKernelsThatNeedToAccessItIndirectly) {
      GlobalVariable *GV = K.first;
      if (!isNamedBarrier(*GV))
        continue;
      if (LDSToKernelsThatNeedToAccessItIndirectly[GV].size() > 1) {
        OrderedGVs.push_back(GV);
      } else {
        // Handled in the 2nd round as if it were a direct access.
        LDSUsesInfo.direct_access[*K.second.begin()].insert(GV);
      }
      LDSToKernelsThatNeedToAccessItIndirectly.erase(GV);
    }
    OrderedGVs = sortByName(std::move(OrderedGVs));
    for (GlobalVariable *GV : OrderedGVs) {
      int BarId = ++NumAbsolutes;
      unsigned BarrierScope = AMDGPU::Barrier::BARRIER_SCOPE_WORKGROUP;
      // 4 bits of alignment, 5 bits of barrier number, 3 bits of scope.
      unsigned Offset = 0x802000u | BarrierScope << 9 | BarId << 4;
      recordLDSAbsoluteAddress(&M, GV, Offset);
    }
    OrderedGVs.clear();

    // 2nd round: kernel-relative IDs for directly accessed barriers,
    // uniquifying any barrier shared between kernels first.
    std::vector<Function *> OrderedKernels;
    for (auto &K : LDSUsesInfo.direct_access) {
      Function *F = K.first;
      OrderedKernels.push_back(F);
    }
    OrderedKernels = sortByName(std::move(OrderedKernels));

    DenseMap<Function *, uint32_t> Kernel2BarId;
    for (Function *F : OrderedKernels) {
      for (GlobalVariable *GV : LDSUsesInfo.direct_access[F]) {
        if (!isNamedBarrier(*GV))
          continue;
        if (GV->isAbsoluteSymbolRef())
          continue; // already assigned in the 1st round
        OrderedGVs.push_back(GV);
      }
      OrderedGVs = sortByName(std::move(OrderedGVs));
      for (GlobalVariable *GV : OrderedGVs) {
        // GV may also be used directly by other kernels; if so, give this
        // kernel its own copy.
        auto NewGV = uniquifyGVPerKernel(M, GV, F);
        Changed |= (NewGV != GV);
        int BarId = (NumAbsolutes + 1);
        if (Kernel2BarId.find(F) != Kernel2BarId.end()) {
          BarId = (Kernel2BarId[F] + 1);
        }
        Kernel2BarId[F] = BarId;
        unsigned BarrierScope = AMDGPU::Barrier::BARRIER_SCOPE_WORKGROUP;
        unsigned Offset = 0x802000u | BarrierScope << 9 | BarId << 4;
        recordLDSAbsoluteAddress(&M, NewGV, Offset);
      }
      OrderedGVs.clear();
    }
    return Changed;
  }
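  // runOnModule drives the lowering end to end: super-align LDS globals,
  // fold constant-expression uses into instructions, compute per-function
  // direct/indirect LDS use, partition variables across strategies, build
  // the structs and tables, then lay out each kernel's LDS frame.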
  bool runOnModule(Module &M) {
    CallGraph CG = CallGraph(M);
    bool Changed = superAlignLDSGlobals(M);
    Changed |= eliminateConstantExprUsesOfLDSFromAllInstructions(M);

    // For each kernel: which variables it accesses directly or via callees;
    // for each indirectly accessed variable: which kernels need it.
    LDSUsesInfoTy LDSUsesInfo = getTransitiveUsesOfLDS(CG, M);
    VariableFunctionMap LDSToKernelsThatNeedToAccessItIndirectly;
    for (auto &K : LDSUsesInfo.indirect_access) {
      Function *F = K.first;
      assert(isKernelLDS(F));
      for (GlobalVariable *GV : K.second)
        LDSToKernelsThatNeedToAccessItIndirectly[GV].insert(F);
    }

    // Named-barrier variables take absolute addresses, not struct slots.
    Changed |= lowerSpecialLDSVariables(
        M, LDSUsesInfo, LDSToKernelsThatNeedToAccessItIndirectly);

    DenseSet<GlobalVariable *> ModuleScopeVariables, TableLookupVariables,
        KernelAccessVariables, DynamicVariables;
    partitionVariablesIntoIndirectStrategies(
        M, LDSUsesInfo, LDSToKernelsThatNeedToAccessItIndirectly,
        ModuleScopeVariables, TableLookupVariables, KernelAccessVariables,
        DynamicVariables);

    // A kernel that reaches a variable through a call must allocate the
    // corresponding struct or table.
    const DenseSet<Function *> KernelsThatAllocateModuleLDS =
        kernelsThatIndirectlyAccessAnyOfPassedVariables(M, LDSUsesInfo,
                                                        ModuleScopeVariables);
    const DenseSet<Function *> KernelsThatAllocateTableLDS =
        kernelsThatIndirectlyAccessAnyOfPassedVariables(M, LDSUsesInfo,
                                                        TableLookupVariables);
    const DenseSet<Function *> KernelsThatIndirectlyAllocateDynamicLDS =
        kernelsThatIndirectlyAccessAnyOfPassedVariables(M, LDSUsesInfo,
                                                        DynamicVariables);

    GlobalVariable *MaybeModuleScopeStruct = lowerModuleScopeStructVariables(
        M, ModuleScopeVariables, KernelsThatAllocateModuleLDS);

    DenseMap<Function *, LDSVariableReplacement> KernelToReplacement =
        lowerKernelScopeStructVariables(M, LDSUsesInfo, ModuleScopeVariables,
                                        KernelsThatAllocateModuleLDS,
                                        MaybeModuleScopeStruct);

    // Lower zero-cost accesses: each of these is reachable from one kernel.
    for (auto &GV : KernelAccessVariables) {
      auto &funcs = LDSToKernelsThatNeedToAccessItIndirectly[GV];
      assert(funcs.size() == 1);
      LDSVariableReplacement Replacement =
          KernelToReplacement[*(funcs.begin())];
      DenseSet<GlobalVariable *> Vec;
      Vec.insert(GV);
      replaceLDSVariablesWithStruct(M, Vec, Replacement, [](Use &U) {
        return isa<Instruction>(U.getUser());
      });
    }

    // Kernel IDs must be assigned before table and dynamic-LDS lowering.
    std::vector<Function *> OrderedKernels =
        assignLDSKernelIDToEachKernel(&M, KernelsThatAllocateTableLDS,
                                      KernelsThatIndirectlyAllocateDynamicLDS);

    if (!KernelsThatAllocateTableLDS.empty()) {
      auto TableLookupVariablesOrdered =
          sortByName(std::vector<GlobalVariable *>(TableLookupVariables.begin(),
                                                   TableLookupVariables.end()));
      GlobalVariable *LookupTable = buildLookupTable(
          M, TableLookupVariablesOrdered, OrderedKernels, KernelToReplacement);
      replaceUsesInInstructionsWithTableLookup(M, TableLookupVariablesOrdered,
                                               LookupTable);
    }

    DenseMap<Function *, GlobalVariable *> KernelToCreatedDynamicLDS =
        lowerDynamicLDSVariables(M, LDSUsesInfo,
                                 KernelsThatIndirectlyAllocateDynamicLDS,
                                 DynamicVariables, OrderedKernels);

    // These kernels now read their ID, so neither they nor anything they
    // reach can keep the amdgpu-no-lds-kernel-id attribute.
    for (auto *KernelSet : {&KernelsThatIndirectlyAllocateDynamicLDS,
                            &KernelsThatAllocateTableLDS})
      for (Function *F : *KernelSet)
        removeFnAttrFromReachable(CG, F, {"amdgpu-no-lds-kernel-id"});

    // Lay out each kernel's LDS frame: module struct at zero, then the
    // kernel struct, then dynamic LDS, and record the total size.
    const DataLayout &DL = M.getDataLayout();
    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;

      const bool AllocateModuleScopeStruct =
          MaybeModuleScopeStruct &&
          KernelsThatAllocateModuleLDS.contains(&Func);

      auto Replacement = KernelToReplacement.find(&Func);
      const bool AllocateKernelScopeStruct =
          Replacement != KernelToReplacement.end();

      const bool AllocateDynamicVariable =
          KernelToCreatedDynamicLDS.contains(&Func);

      uint32_t Offset = 0;

      if (AllocateModuleScopeStruct) {
        // Allocated at zero; the address was recorded once on construction.
        Offset += DL.getTypeAllocSize(MaybeModuleScopeStruct->getValueType());
      }

      if (AllocateKernelScopeStruct) {
        GlobalVariable *KernelStruct = Replacement->second.SGV;
        Offset = alignTo(Offset, AMDGPU::getAlign(DL, KernelStruct));
        recordLDSAbsoluteAddress(&M, KernelStruct, Offset);
        Offset += DL.getTypeAllocSize(KernelStruct->getValueType());
      }

      if (AllocateDynamicVariable) {
        GlobalVariable *DynamicVariable = KernelToCreatedDynamicLDS[&Func];
        Offset = alignTo(Offset, AMDGPU::getAlign(DL, DynamicVariable));
        recordLDSAbsoluteAddress(&M, DynamicVariable, Offset);
      }

      if (Offset != 0) {
        std::string Buffer;
        raw_string_ostream SS{Buffer};
        SS << format("%u", Offset);
        // Dynamic LDS kernels are annotated min==max: no LDS remains beyond
        // the static frame.
        if (AllocateDynamicVariable)
          SS << format(",%u", Offset);
        Func.addFnAttr("amdgpu-lds-size", Buffer);
      }
    }

    // Fully replaced LDS variables are now dead; remove them.
    for (auto &GV : make_early_inc_range(M.globals()))
      if (AMDGPU::isLDSVariableToLower(GV)) {
        GV.removeDeadConstantUsers();
        if (GV.use_empty())
          GV.eraseFromParent();
      }

    return Changed;
  }
  // Increase the alignment of LDS globals where their size allows wider
  // loads and stores, gated by -amdgpu-super-align-lds-globals.
  static bool superAlignLDSGlobals(Module &M) {
    const DataLayout &DL = M.getDataLayout();
    bool Changed = false;
    if (!SuperAlignLDSGlobals) {
      return Changed;
    }

    for (auto &GV : M.globals()) {
      if (GV.getType()->getPointerAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
        continue; // only increase alignment of LDS variables

      Align Alignment = AMDGPU::getAlign(DL, &GV);
      uint64_t GVSize = DL.getTypeAllocSize(GV.getValueType());

      if (GVSize > 8) {
        // We might want to use a b96 or b128 load/store
        Alignment = std::max(Alignment, Align(16));
      } else if (GVSize > 4) {
        // We might want to use a b64 load/store
        Alignment = std::max(Alignment, Align(8));
      } else if (GVSize > 2) {
        // We might want to use a b32 load/store
        Alignment = std::max(Alignment, Align(4));
      } else if (GVSize > 1) {
        // We might want to use a b16 load/store
        Alignment = std::max(Alignment, Align(2));
      }

      if (Alignment != AMDGPU::getAlign(DL, &GV)) {
        Changed = true;
        GV.setAlignment(Alignment);
      }
    }
    return Changed;
  }
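  // createLDSVariableReplacement: lay the variables out as a struct
  // (optimized layout with explicit padding arrays), create the struct
  // instance, and map each original variable to a constant GEP of its field.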
  static LDSVariableReplacement createLDSVariableReplacement(
      Module &M, std::string VarName,
      DenseSet<GlobalVariable *> const &LDSVarsToTransform) {
    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();

    // Sort by name for a deterministic field order, then let the layout
    // pass choose offsets.
    auto Sorted = sortByName(std::vector<GlobalVariable *>(
        LDSVarsToTransform.begin(), LDSVarsToTransform.end()));

    SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
    LayoutFields.reserve(Sorted.size());
    for (GlobalVariable *GV : Sorted)
      LayoutFields.emplace_back(GV, DL.getTypeAllocSize(GV->getValueType()),
                                AMDGPU::getAlign(DL, GV));
    performOptimizedStructLayout(LayoutFields);

    std::vector<GlobalVariable *> LocalVars;
    BitVector IsPaddingField;
    LocalVars.reserve(LDSVarsToTransform.size());
    IsPaddingField.reserve(LDSVarsToTransform.size());

    // Materialize padding as [N x i8] fields so every variable lands at its
    // computed offset.
    uint64_t CurrentOffset = 0;
    for (auto &F : LayoutFields) {
      GlobalVariable *FGV =
          static_cast<GlobalVariable *>(const_cast<void *>(F.Id));
      Align DataAlign = F.Alignment;

      uint64_t DataAlignV = DataAlign.value();
      if (uint64_t Rem = CurrentOffset % DataAlignV) {
        uint64_t Padding = DataAlignV - Rem;
        // ... append an [i8 x Padding] global and mark it as padding.
        CurrentOffset += Padding;
      }

      LocalVars.push_back(FGV);
      IsPaddingField.push_back(false);
      CurrentOffset += F.Size;
    }

    std::vector<Type *> LocalVarTypes;
    LocalVarTypes.reserve(LocalVars.size());
    std::transform(
        LocalVars.cbegin(), LocalVars.cend(), std::back_inserter(LocalVarTypes),
        [](const GlobalVariable *V) -> Type * { return V->getValueType(); });

    StructType *LDSTy = StructType::create(Ctx, LocalVarTypes, VarName + ".t");
    GlobalVariable *SGV = new GlobalVariable(
        M, LDSTy, false, GlobalValue::InternalLinkage, PoisonValue::get(LDSTy),
        VarName, nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
        false);

    // Map each variable to its field; padding fields are deleted outright.
    DenseMap<GlobalVariable *, Constant *> Map;
    Type *I32 = Type::getInt32Ty(Ctx);
    for (size_t I = 0; I < LocalVars.size(); I++) {
      GlobalVariable *GV = LocalVars[I];
      Constant *GEPIdx[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, I)};
      Constant *GEP = ConstantExpr::getGetElementPtr(LDSTy, SGV, GEPIdx, true);
      if (IsPaddingField[I]) {
        assert(GV->use_empty());
        GV->eraseFromParent();
      } else {
        Map[GV] = GEP;
      }
    }
    assert(Map.size() == LDSVarsToTransform.size());
    return {SGV, std::move(Map)};
  }
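  // replaceLDSVariablesWithStruct: swap qualifying uses of each variable for
  // its struct GEP, and attach alias.scope/noalias metadata so packing the
  // fields into one struct does not pessimize alias analysis.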
  template <typename PredicateTy>
  static void replaceLDSVariablesWithStruct(
      Module &M, DenseSet<GlobalVariable *> const &LDSVarsToTransformArg,
      const LDSVariableReplacement &Replacement, PredicateTy Predicate) {
    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();

    auto LDSVarsToTransform = sortByName(std::vector<GlobalVariable *>(
        LDSVarsToTransformArg.begin(), LDSVarsToTransformArg.end()));

    // Create an alias scope per field: fields of the new struct do not alias
    // one another.
    SmallVector<MDNode *> AliasScopes;
    SmallVector<Metadata *> NoAliasList;
    const size_t NumberVars = LDSVarsToTransform.size();
    if (NumberVars > 1) {
      MDBuilder MDB(Ctx);
      AliasScopes.reserve(NumberVars);
      MDNode *Domain = MDB.createAnonymousAliasScopeDomain();
      for (size_t I = 0; I < NumberVars; I++) {
        MDNode *Scope = MDB.createAnonymousAliasScope(Domain);
        AliasScopes.push_back(Scope);
      }
      NoAliasList.append(&AliasScopes[1], AliasScopes.end());
    }

    // Replace uses of the I-th variable with the corresponding constant GEP.
    for (size_t I = 0; I < NumberVars; I++) {
      GlobalVariable *GV = LDSVarsToTransform[I];
      Constant *GEP = Replacement.LDSVarsToConstantGEP.at(GV);

      GV->replaceUsesWithIf(GEP, Predicate);

      APInt APOff(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
      GEP->stripAndAccumulateInBoundsConstantOffsets(DL, APOff);
      uint64_t Offset = APOff.getZExtValue();

      Align A =
          commonAlignment(Replacement.SGV->getAlign().valueOrOne(), Offset);

      if (I)
        NoAliasList[I - 1] = AliasScopes[I - 1];
      MDNode *NoAlias =
          NoAliasList.empty() ? nullptr : MDNode::get(Ctx, NoAliasList);
      MDNode *AliasScope =
          AliasScopes.empty() ? nullptr : MDNode::get(Ctx, {AliasScopes[I]});

      refineUsesAlignmentAndAA(GEP, A, DL, AliasScope, NoAlias);
    }
  }
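  // refineUsesAlignmentAndAA: walk GEP/cast chains from the struct field,
  // raising load/store/atomic alignment to what the field offset guarantees
  // and attaching the alias metadata, up to a fixed recursion depth.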
  static void refineUsesAlignmentAndAA(Value *Ptr, Align A,
                                       const DataLayout &DL, MDNode *AliasScope,
                                       MDNode *NoAlias, unsigned MaxDepth = 5) {
    if (!MaxDepth || (A == 1 && !AliasScope))
      return;

    for (User *U : Ptr->users()) {
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (AliasScope && I->mayReadOrWriteMemory()) {
          MDNode *AS = I->getMetadata(LLVMContext::MD_alias_scope);
          AS = (AS ? MDNode::getMostGenericAliasScope(AS, AliasScope)
                   : AliasScope);
          I->setMetadata(LLVMContext::MD_alias_scope, AS);

          MDNode *NA = I->getMetadata(LLVMContext::MD_noalias);
          NA = (NA ? MDNode::intersect(NA, NoAlias) : NoAlias);
          I->setMetadata(LLVMContext::MD_noalias, NA);
        }
      }

      if (auto *LI = dyn_cast<LoadInst>(U)) {
        LI->setAlignment(std::max(A, LI->getAlign()));
        continue;
      }
      if (auto *SI = dyn_cast<StoreInst>(U)) {
        if (SI->getPointerOperand() == Ptr)
          SI->setAlignment(std::max(A, SI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicRMWInst>(U)) {
        // atomicrmw does not operate on pointers, but check anyway in case
        // this is reached through a ConstantExpr.
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicCmpXchgInst>(U)) {
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt Off(BitWidth, 0);
        if (GEP->getPointerOperand() == Ptr) {
          Align GA;
          if (GEP->accumulateConstantOffset(DL, Off))
            GA = commonAlignment(A, Off.getZExtValue());
          refineUsesAlignmentAndAA(GEP, GA, DL, AliasScope, NoAlias,
                                   MaxDepth - 1);
        }
        continue;
      }
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (I->getOpcode() == Instruction::BitCast ||
            I->getOpcode() == Instruction::AddrSpaceCast)
          refineUsesAlignmentAndAA(I, A, DL, AliasScope, NoAlias,
                                   MaxDepth - 1);
      }
    }
  }
};
class AMDGPULowerModuleLDSLegacy : public ModulePass {
public:
  const AMDGPUTargetMachine *TM;
  static char ID;

  AMDGPULowerModuleLDSLegacy(const AMDGPUTargetMachine *TM_ = nullptr)
      : ModulePass(ID), TM(TM_) {
    initializeAMDGPULowerModuleLDSLegacyPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    if (!TM)
      AU.addRequired<TargetPassConfig>();
  }

  bool runOnModule(Module &M) override {
    if (!TM) {
      auto &TPC = getAnalysis<TargetPassConfig>();
      TM = &TPC.getTM<AMDGPUTargetMachine>();
    }
    return AMDGPULowerModuleLDS(*TM).runOnModule(M);
  }
};

char AMDGPULowerModuleLDSLegacy::ID = 0;

INITIALIZE_PASS_BEGIN(AMDGPULowerModuleLDSLegacy, DEBUG_TYPE,
                      "Lower uses of LDS variables from non-kernel functions",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(AMDGPULowerModuleLDSLegacy, DEBUG_TYPE,
                    "Lower uses of LDS variables from non-kernel functions",
                    false, false)

ModulePass *
llvm::createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM) {
  return new AMDGPULowerModuleLDSLegacy(TM);
}

PreservedAnalyses AMDGPULowerModuleLDSPass::run(Module &M,
                                                ModuleAnalysisManager &) {
  return AMDGPULowerModuleLDS(TM).runOnModule(M) ? PreservedAnalyses::none()
                                                 : PreservedAnalyses::all();
}