LLVM 17.0.0git
AMDGPUPromoteAlloca.cpp
//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// by migrating them to local address space.
//
//===----------------------------------------------------------------------===//

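// For illustration (hypothetical IR, not part of this file): a private array
// such as
//
//   %tmp = alloca [4 x i32], addrspace(5)
//
// is either rewritten so element accesses become extractelement/insertelement
// operations on a <4 x i32> value that can live in VGPRs, or replaced by a
// module-level LDS array with one slot per workitem, indexed by the linear
// workitem ID.
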
#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "amdgpu-promote-alloca"

using namespace llvm;

namespace {

static cl::opt<bool> DisablePromoteAllocaToVector(
    "disable-promote-alloca-to-vector",
    cl::desc("Disable promote alloca to vector"),
    cl::init(false));

static cl::opt<bool> DisablePromoteAllocaToLDS(
    "disable-promote-alloca-to-lds",
    cl::desc("Disable promote alloca to LDS"),
    cl::init(false));

static cl::opt<unsigned> PromoteAllocaToVectorLimit(
    "amdgpu-promote-alloca-to-vector-limit",
    cl::desc("Maximum byte size to consider promote alloca to vector"),
    cl::init(0));

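// Usage sketch (hypothetical command line, not from this file): these are
// ordinary cl::opt flags, so a run such as
//   llc -mtriple=amdgcn -disable-promote-alloca-to-vector foo.ll
// turns off only the vector path while leaving LDS promotion enabled.
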
// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
public:
  static char ID;

  AMDGPUPromoteAlloca() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

class AMDGPUPromoteAllocaImpl {
private:
  const TargetMachine &TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;
  unsigned MaxVGPRs;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca,
                               Value *Val,
                               std::vector<Value *> &WorkList) const;

  /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val
  /// should be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst,
                                       int OpIdx0, int OpIdx1) const;

  /// Check whether we have enough local memory for promotion.
  bool hasSufficientLocalMem(const Function &F);

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

public:
  AMDGPUPromoteAllocaImpl(TargetMachine &TM) : TM(TM) {}
  bool run(Function &F);
};

class AMDGPUPromoteAllocaToVector : public FunctionPass {
public:
  static char ID;

  AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override {
    return "AMDGPU Promote Alloca to vector";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;
char AMDGPUPromoteAllocaToVector::ID = 0;

INITIALIZE_PASS_BEGIN(AMDGPUPromoteAlloca, DEBUG_TYPE,
                      "AMDGPU promote alloca to vector or LDS", false, false)
// Move LDS uses from functions to kernels before promote alloca for accurate
// estimation of LDS available
INITIALIZE_PASS_DEPENDENCY(AMDGPULowerModuleLDS)
INITIALIZE_PASS_END(AMDGPUPromoteAlloca, DEBUG_TYPE,
                    "AMDGPU promote alloca to vector or LDS", false, false)

INITIALIZE_PASS(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
                "AMDGPU promote alloca to vector", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;

bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
    return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>()).run(F);
  }
  return false;
}

PreservedAnalyses AMDGPUPromoteAllocaPass::run(Function &F,
                                               FunctionAnalysisManager &AM) {
  bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}

bool AMDGPUPromoteAllocaImpl::run(Function &F) {
  Mod = F.getParent();
  DL = &Mod->getDataLayout();

  const Triple &TT = TM.getTargetTriple();
  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  if (IsAMDGCN) {
    const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
    MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
    // A non-entry function has only 32 caller-preserved registers.
    // Do not promote an alloca that would force spilling.
    if (!AMDGPU::isEntryFunctionCC(F.getCallingConv()))
      MaxVGPRs = std::min(MaxVGPRs, 32u);
  } else {
    MaxVGPRs = 128;
  }

  bool SufficientLDS = hasSufficientLocalMem(F);
  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();

  SmallVector<AllocaInst *, 16> Allocas;
  for (Instruction &I : EntryBB) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
      Allocas.push_back(AI);
  }

  for (AllocaInst *AI : Allocas) {
    if (handleAlloca(*AI, SufficientLDS))
      Changed = true;
  }

  return Changed;
}

std::pair<Value *, Value *>
AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
  Function &F = *Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);

  if (!IsAMDHSA) {
    Function *LocalSizeYFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  // typedef struct hsa_kernel_dispatch_packet_s {
  //   uint16_t header;
  //   uint16_t setup;
  //   uint16_t workgroup_size_x;
  //   uint16_t workgroup_size_y;
  //   uint16_t workgroup_size_z;
  //   uint16_t reserved0;
  //   uint32_t grid_size_x;
  //   uint32_t grid_size_y;
  //   uint32_t grid_size_z;
  //
  //   uint32_t private_segment_size;
  //   uint32_t group_segment_size;
  //   uint64_t kernel_object;
  //
  // #ifdef HSA_LARGE_MODEL
  //   void *kernarg_address;
  // #elif defined HSA_LITTLE_ENDIAN
  //   void *kernarg_address;
  //   uint32_t reserved1;
  // #else
  //   uint32_t reserved1;
  //   void *kernarg_address;
  // #endif
  //   uint64_t reserved2;
  //   hsa_signal_t completion_signal; // uint64_t wrapper
  // } hsa_kernel_dispatch_packet_t
  //
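  // Layout note for illustration (derived from the struct above, not a quote
  // from the source): header and setup occupy bytes 0..3, so workgroup_size_x
  // and workgroup_size_y share the i32 at byte offset 4, and workgroup_size_z
  // sits in the low half of the i32 at byte offset 8. This is why the loads
  // below use i32 GEP indices 1 and 2.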
  Function *DispatchPtrFn
    = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addRetAttr(Attribute::NoAlias);
  DispatchPtr->addRetAttr(Attribute::NonNull);
  F.removeFnAttr("amdgpu-no-dispatch-ptr");

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableRetAttr(64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
      DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));

  MDNode *MD = MDNode::get(Mod->getContext(), std::nullopt);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::pair(Y, LoadZU);
}

Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,
                                              unsigned N) {
  Function *F = Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, *F);
  Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
  StringRef AttrName;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
    AttrName = "amdgpu-no-workitem-id-x";
    break;
  case 1:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
    AttrName = "amdgpu-no-workitem-id-y";
    break;
  case 2:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
    AttrName = "amdgpu-no-workitem-id-z";
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);
  F->removeFnAttr(AttrName);

  return CI;
}

static FixedVectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
  return FixedVectorType::get(ArrayTy->getElementType(),
                              ArrayTy->getNumElements());
}

static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts());
  if (!GEP)
    return ConstantInt::getNullValue(Type::getInt32Ty(Ptr->getContext()));

  auto I = GEPIdx.find(GEP);
  assert(I != GEPIdx.end() && "Must have entry for GEP!");
  return I->second;
}

static Value *GEPToVectorIndex(GetElementPtrInst *GEP, AllocaInst *Alloca,
                               Type *VecElemTy, const DataLayout &DL) {
  // TODO: Extracting a "multiple of X" from a GEP might be a useful generic
  // helper.
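  //
  // Illustrative behavior (hypothetical IR, not from the source): for a
  // [4 x i32] alloca,
  //   getelementptr inbounds [4 x i32], ptr addrspace(5) %a, i64 0, i64 %i
  // has one variable offset whose stride (4 bytes) equals the element size
  // and no constant offset, so the returned index is %i. A constant-only
  // byte offset of 8 maps to the constant index 2.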
  unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
  MapVector<Value *, APInt> VarOffsets;
  APInt ConstOffset(BW, 0);
  if (GEP->getPointerOperand()->stripPointerCasts() != Alloca ||
      !GEP->collectOffset(DL, BW, VarOffsets, ConstOffset))
    return nullptr;

  unsigned VecElemSize = DL.getTypeAllocSize(VecElemTy);
  if (VarOffsets.size() > 1)
    return nullptr;

  if (VarOffsets.size() == 1) {
    // Only handle cases where we don't need to insert extra arithmetic
    // instructions.
    const auto &VarOffset = VarOffsets.front();
    if (!ConstOffset.isZero() || VarOffset.second != VecElemSize)
      return nullptr;
    return VarOffset.first;
  }

  APInt Quot;
  uint64_t Rem;
  APInt::udivrem(ConstOffset, VecElemSize, Quot, Rem);
  if (Rem != 0)
    return nullptr;

  return ConstantInt::get(GEP->getContext(), Quot);
}

struct MemTransferInfo {
  ConstantInt *SrcIndex = nullptr;
  ConstantInt *DestIndex = nullptr;
};

static bool tryPromoteAllocaToVector(AllocaInst *Alloca, const DataLayout &DL,
                                     unsigned MaxVGPRs) {

  if (DisablePromoteAllocaToVector) {
    LLVM_DEBUG(dbgs() << "  Promotion of alloca to vector is disabled\n");
    return false;
  }

  Type *AllocaTy = Alloca->getAllocatedType();
  auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
  if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
    if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
        ArrayTy->getNumElements() > 0)
      VectorTy = arrayTypeToVecType(ArrayTy);
  }

  // Use up to 1/4 of available register budget for vectorization.
  unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
                                              : (MaxVGPRs * 32);

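  // Worked numbers for illustration (not in the original): Limit is in bits,
  // so with MaxVGPRs = 256 the budget is 256 * 32 = 8192 bits, and the check
  // below rejects allocas larger than 2048 bits (256 bytes), i.e. a quarter
  // of the per-lane VGPR file.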
  if (DL.getTypeSizeInBits(AllocaTy) * 4 > Limit) {
    LLVM_DEBUG(dbgs() << "  Alloca too big for vectorization with "
                      << MaxVGPRs << " registers available\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays; we
  // are just being conservative for now.
  // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
  // equivalent. Potentially these could also be promoted, but we don't
  // currently handle this case.
  if (!VectorTy || VectorTy->getNumElements() > 16 ||
      VectorTy->getNumElements() < 2) {
    LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst *, Value *> GEPVectorIdx;
  SmallVector<Instruction *> WorkList;
  SmallVector<Instruction *> DeferredInsts;
  SmallVector<Use *, 8> Uses;
  DenseMap<MemTransferInst *, MemTransferInfo> TransferInfo;

  for (Use &U : Alloca->uses())
    Uses.push_back(&U);

  Type *VecEltTy = VectorTy->getElementType();
  unsigned ElementSize = DL.getTypeSizeInBits(VecEltTy) / 8;
  while (!Uses.empty()) {
    Use *U = Uses.pop_back_val();
    Instruction *Inst = cast<Instruction>(U->getUser());

    if (Value *Ptr = getLoadStorePointerOperand(Inst)) {
      // This is a store of the pointer, not to the pointer.
      if (isa<StoreInst>(Inst) &&
          U->getOperandNo() != StoreInst::getPointerOperandIndex())
        return false;

      Type *AccessTy = getLoadStoreType(Inst);
      Ptr = Ptr->stripPointerCasts();

      // Alloca already accessed as vector, leave alone.
      if (Ptr == Alloca && DL.getTypeStoreSize(Alloca->getAllocatedType()) ==
                               DL.getTypeStoreSize(AccessTy))
        continue;

      // Check that this is a simple access of a vector element.
      bool IsSimple = isa<LoadInst>(Inst) ? cast<LoadInst>(Inst)->isSimple()
                                          : cast<StoreInst>(Inst)->isSimple();
      if (!IsSimple ||
          !CastInst::isBitOrNoopPointerCastable(VecEltTy, AccessTy, DL))
        return false;

      WorkList.push_back(Inst);
      continue;
    }

    if (isa<BitCastInst>(Inst)) {
      // Look through bitcasts.
      for (Use &U : Inst->uses())
        Uses.push_back(&U);
      continue;
    }

    if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
      // If we can't compute a vector index from this GEP, then we can't
      // promote this alloca to vector.
      Value *Index = GEPToVectorIndex(GEP, Alloca, VecEltTy, DL);
      if (!Index) {
        LLVM_DEBUG(dbgs() << "  Cannot compute vector index for GEP " << *GEP
                          << '\n');
        return false;
      }

      GEPVectorIdx[GEP] = Index;
      for (Use &U : Inst->uses())
        Uses.push_back(&U);
      continue;
    }

    if (MemTransferInst *TransferInst = dyn_cast<MemTransferInst>(Inst)) {
      if (TransferInst->isVolatile())
        return false;

      ConstantInt *Len = dyn_cast<ConstantInt>(TransferInst->getLength());
      if (!Len || !!(Len->getZExtValue() % ElementSize))
        return false;

      if (!TransferInfo.count(TransferInst)) {
        DeferredInsts.push_back(Inst);
        WorkList.push_back(Inst);
        TransferInfo[TransferInst] = MemTransferInfo();
      }

      auto getPointerIndexOfAlloca = [&](Value *Ptr) -> ConstantInt * {
        GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
        if (Ptr != Alloca && !GEPVectorIdx.count(GEP))
          return nullptr;

        return dyn_cast<ConstantInt>(calculateVectorIndex(Ptr, GEPVectorIdx));
      };

      unsigned OpNum = U->getOperandNo();
      MemTransferInfo *TI = &TransferInfo[TransferInst];
      if (OpNum == 0) {
        Value *Dest = TransferInst->getDest();
        ConstantInt *Index = getPointerIndexOfAlloca(Dest);
        if (!Index)
          return false;
        TI->DestIndex = Index;
      } else {
        assert(OpNum == 1);
        Value *Src = TransferInst->getSource();
        ConstantInt *Index = getPointerIndexOfAlloca(Src);
        if (!Index)
          return false;
        TI->SrcIndex = Index;
      }
      continue;
    }

    // Ignore assume-like intrinsics and comparisons used in assumes.
    if (isAssumeLikeIntrinsic(Inst))
      continue;

    if (isa<ICmpInst>(Inst) && all_of(Inst->users(), [](User *U) {
          return isAssumeLikeIntrinsic(cast<Instruction>(U));
        }))
      continue;

    // Unknown user.
    return false;
  }

  while (!DeferredInsts.empty()) {
    Instruction *Inst = DeferredInsts.pop_back_val();
    MemTransferInst *TransferInst = cast<MemTransferInst>(Inst);
    // TODO: Support the case if the pointers are from different alloca or
    // from different address spaces.
    MemTransferInfo &Info = TransferInfo[TransferInst];
    if (!Info.SrcIndex || !Info.DestIndex)
      return false;
  }

  LLVM_DEBUG(dbgs() << "  Converting alloca to vector " << *AllocaTy << " -> "
                    << *VectorTy << '\n');

  for (Instruction *Inst : WorkList) {
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
      Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Type *VecPtrTy = VectorTy->getPointerTo(Alloca->getAddressSpace());
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue =
          Builder.CreateAlignedLoad(VectorTy, BitCast, Alloca->getAlign());
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      if (Inst->getType() != VecEltTy)
        ExtractElement =
            Builder.CreateBitOrPointerCast(ExtractElement, Inst->getType());
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
      StoreInst *SI = cast<StoreInst>(Inst);
      Value *Ptr = SI->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Type *VecPtrTy = VectorTy->getPointerTo(Alloca->getAddressSpace());
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue =
          Builder.CreateAlignedLoad(VectorTy, BitCast, Alloca->getAlign());
      Value *Elt = SI->getValueOperand();
      if (Elt->getType() != VecEltTy)
        Elt = Builder.CreateBitOrPointerCast(Elt, VecEltTy);
      Value *NewVecValue = Builder.CreateInsertElement(VecValue, Elt, Index);
      Builder.CreateAlignedStore(NewVecValue, BitCast, Alloca->getAlign());
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Call: {
      if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst)) {
        ConstantInt *Length = cast<ConstantInt>(MTI->getLength());
        unsigned NumCopied = Length->getZExtValue() / ElementSize;
        MemTransferInfo *TI = &TransferInfo[cast<MemTransferInst>(Inst)];
        unsigned SrcBegin = TI->SrcIndex->getZExtValue();
        unsigned DestBegin = TI->DestIndex->getZExtValue();

        SmallVector<int> Mask;
        for (unsigned Idx = 0; Idx < VectorTy->getNumElements(); ++Idx) {
          if (Idx >= DestBegin && Idx < DestBegin + NumCopied) {
            Mask.push_back(SrcBegin++);
          } else {
            Mask.push_back(Idx);
          }
        }
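        // Illustrative mask (not from the source): with 4 vector elements,
        // copying 2 elements from index 0 to index 2 yields Mask = <0, 1, 0, 1>;
        // lanes 2..3 take their values from lanes 0..1, while all other lanes
        // keep their previous values.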
        Type *VecPtrTy = VectorTy->getPointerTo(Alloca->getAddressSpace());
        Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
        Value *VecValue =
            Builder.CreateAlignedLoad(VectorTy, BitCast, Alloca->getAlign());
        Value *NewVecValue = Builder.CreateShuffleVector(VecValue, Mask);
        Builder.CreateAlignedStore(NewVecValue, BitCast, Alloca->getAlign());

        Inst->eraseFromParent();
      } else {
        llvm_unreachable("Unsupported call when promoting alloca to vector");
      }
      break;
    }

    default:
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }
  return true;
}
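
// Illustrative end-to-end result (hypothetical IR, not from the source):
//   %a   = alloca [4 x float], addrspace(5)
//   %gep = getelementptr inbounds [4 x float], ptr addrspace(5) %a, i64 0, i64 %i
//   store float %v, ptr addrspace(5) %gep
// becomes a whole-vector load of <4 x float>, an insertelement at index %i,
// and a whole-vector store back to the alloca; later passes can then promote
// the remaining full-vector accesses into a virtual register.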

static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}

bool AMDGPUPromoteAllocaImpl::binaryOpIsDerivedFromSameAlloca(
    Value *BaseAlloca, Value *Val, Instruction *Inst, int OpIdx0,
    int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (Val == OtherOp)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = getUnderlyingObject(OtherOp);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the end.
  if (OtherObj != BaseAlloca) {
    LLVM_DEBUG(
        dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

bool AMDGPUPromoteAllocaImpl::collectUsesWithPtrTypes(
    Value *BaseAlloca, Value *Val, std::vector<Value *> &WorkList) const {

  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if the stored value is not the pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote an icmp if we know that the other operand is derived
    // from a pointer that will also be promoted.
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    // Do not promote vector/aggregate type instructions. It is hard to track
    // their users.
    if (isa<InsertValueInst>(User) || isa<InsertElementInst>(User))
      return false;

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand is from
    // another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    // Repeat for phis.
    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}

bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {

  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                           "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getAddressableLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  SmallPtrSet<const GlobalVariable *, 8> UsedLDS;
  SmallPtrSet<const Constant *, 8> VisitedConstants;
  SmallVector<const Constant *, 8> Stack;

  auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
    for (const User *U : Val->users()) {
      if (const Instruction *Use = dyn_cast<Instruction>(U)) {
        if (Use->getParent()->getParent() == &F)
          return true;
      } else {
        const Constant *C = cast<Constant>(U);
        if (VisitedConstants.insert(C).second)
          Stack.push_back(C);
      }
    }

    return false;
  };

  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    if (visitUsers(&GV, &GV)) {
      UsedLDS.insert(&GV);
      Stack.clear();
      continue;
    }

    // For any ConstantExpr uses, we need to recursively search the users until
    // we see a function.
    while (!Stack.empty()) {
      const Constant *C = Stack.pop_back_val();
      if (visitUsers(&GV, C)) {
        UsedLDS.insert(&GV);
        Stack.clear();
        break;
      }
    }
  }

  const DataLayout &DL = Mod->getDataLayout();
  SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes;
  AllocatedSizes.reserve(UsedLDS.size());

  for (const GlobalVariable *GV : UsedLDS) {
    Align Alignment =
        DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
    uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());

    // HIP uses an extern unsized array in local address space for dynamically
    // allocated shared memory. In that case, we have to disable the promotion.
    if (GV->hasExternalLinkage() && AllocSize == 0) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has a reference to externally allocated "
                           "local memory. Promoting to local memory "
                           "disabled.\n");
      return false;
    }

    AllocatedSizes.emplace_back(AllocSize, Alignment);
  }

  // Sort to try to estimate the worst case alignment padding
  //
  // FIXME: We should really do something to fix the addresses to a more
  // optimal value instead
  llvm::sort(AllocatedSizes, llvm::less_second());
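
  // Worked example for illustration (not from the source): two LDS objects of
  // (size 1, align 1) and (size 8, align 8) cost 1 + 7 (padding) + 8 = 16
  // bytes in ascending-alignment order, but only 9 bytes in the reverse
  // order, so sorting by alignment approximates the worst case.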

  // Check how much local memory is being used by global objects
  CurrentLocalMemUsage = 0;

  // FIXME: Try to account for padding here. The real padding and address is
  // currently determined from the inverse order of uses in the function when
  // legalizing, which could also potentially change. We try to estimate the
  // worst case here, but we probably should fix the addresses earlier.
  for (auto Alloc : AllocatedSizes) {
    CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second);
    CurrentLocalMemUsage += Alloc.first;
  }

  unsigned MaxOccupancy =
      ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage, F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount =
      ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
                    << " bytes of LDS\n"
                    << "  Rounding size to " << MaxSizeWithWaveCount
                    << " with a maximum occupancy of " << MaxOccupancy << '\n'
                    << " and " << (LocalMemLimit - CurrentLocalMemUsage)
                    << " available for promotion\n");

  return true;
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAllocaImpl::handleAlloca(AllocaInst &I, bool SufficientLDS) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  const DataLayout &DL = Mod->getDataLayout();
  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector.
  Type *AllocaTy = I.getAllocatedType();

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I, DL, MaxVGPRs))
    return true; // Promoted to vector.

  if (DisablePromoteAllocaToLDS)
    return false;

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions, as the
  // workitem ID intrinsics are not supported for these calling conventions.
  // Furthermore, not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    LLVM_DEBUG(
        dbgs()
        << " promote alloca to LDS not supported with calling convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  Align Alignment =
      DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // usage order.
  //
  // FIXME: It is also possible that if we're allowed to use all of the
  // memory, we could end up using more than the maximum due to alignment
  // padding.

  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    LLVM_DEBUG(dbgs() << "  " << AllocSize
                      << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value *> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage, PoisonValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(), nullptr,
      GlobalVariable::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(I.getAlign());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

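  // Linearize the 3D workitem id (descriptive note, not in the original):
  //   TID = TIdX * (TCntY * TCntZ) + TIdY * TCntZ + TIdZ
  // so every workitem in the workgroup gets a distinct slot in the LDS array.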
  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

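  // Illustrative result (hypothetical IR, not from the source): each workitem
  // now addresses its private slot as
  //   getelementptr inbounds [N x T], ptr addrspace(3) @func.alloca, i32 0, i32 %tid
  // where N is the maximum flat workgroup size.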
  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  SmallVector<IntrinsicInst *> DeferredIntrs;

  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        PointerType *NewTy = PointerType::getWithSamePointeeType(
            cast<PointerType>(Src0->getType()), AMDGPUAS::LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value should be corrected on its own and we don't want
      // to touch the users.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      PointerType *NewTy = PointerType::getWithSamePointeeType(
          cast<PointerType>(V->getType()), AMDGPUAS::LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only.
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      // These have 2 pointer operands. In case the second pointer also needs
      // to be replaced, we defer processing of these intrinsics until all
      // other values are processed.
      DeferredIntrs.push_back(Intr);
      continue;
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getDestAlign(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Function *ObjectSize = Intrinsic::getDeclaration(
          Mod, Intrinsic::objectsize,
          {Intr->getType(),
           PointerType::getWithSamePointeeType(
               cast<PointerType>(Src->getType()), AMDGPUAS::LOCAL_ADDRESS)});

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize,
          {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->print(errs());
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }

  for (IntrinsicInst *Intr : DeferredIntrs) {
    Builder.SetInsertPoint(Intr);
    Intrinsic::ID ID = Intr->getIntrinsicID();
    assert(ID == Intrinsic::memcpy || ID == Intrinsic::memmove);

    MemTransferInst *MI = cast<MemTransferInst>(Intr);
    auto *B =
        Builder.CreateMemTransferInst(ID, MI->getRawDest(), MI->getDestAlign(),
                                      MI->getRawSource(), MI->getSourceAlign(),
                                      MI->getLength(), MI->isVolatile());

    for (unsigned I = 0; I != 2; ++I) {
      if (uint64_t Bytes = Intr->getParamDereferenceableBytes(I)) {
        B->addDereferenceableParamAttr(I, Bytes);
      }
    }

    Intr->eraseFromParent();
  }

  return true;
}

bool handlePromoteAllocaToVector(AllocaInst &I, unsigned MaxVGPRs) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  Module *Mod = I.getParent()->getParent()->getParent();
  return tryPromoteAllocaToVector(&I, Mod->getDataLayout(), MaxVGPRs);
}

bool promoteAllocasToVector(Function &F, TargetMachine &TM) {
  if (DisablePromoteAllocaToVector)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  unsigned MaxVGPRs;
  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
    MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
    // A non-entry function has only 32 caller-preserved registers.
    // Do not promote an alloca that would force spilling.
    if (!AMDGPU::isEntryFunctionCC(F.getCallingConv()))
      MaxVGPRs = std::min(MaxVGPRs, 32u);
  } else {
    MaxVGPRs = 128;
  }

  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();

  SmallVector<AllocaInst *, 16> Allocas;
  for (Instruction &I : EntryBB) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
      Allocas.push_back(AI);
  }

  for (AllocaInst *AI : Allocas) {
    if (handlePromoteAllocaToVector(*AI, MaxVGPRs))
      Changed = true;
  }

  return Changed;
}

bool AMDGPUPromoteAllocaToVector::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;
  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
    return promoteAllocasToVector(F, TPC->getTM<TargetMachine>());
  }
  return false;
}

PreservedAnalyses
AMDGPUPromoteAllocaToVectorPass::run(Function &F, FunctionAnalysisManager &AM) {
  bool Changed = promoteAllocasToVector(F, TM);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}

FunctionPass *llvm::createAMDGPUPromoteAlloca() {
  return new AMDGPUPromoteAlloca();
}

FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
  return new AMDGPUPromoteAllocaToVector();
}