Bug Summary

File: llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
Warning: line 371, column 11
Called C++ object pointer is null
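The path reported below: in tryPromoteAllocaToVector (report lines 458-465), Inst comes from dyn_cast<Instruction>(AllocaUser) and is null whenever the alloca user is not an instruction (for example a constant expression). If that user is also not a GetElementPtrInst, the null Inst is passed straight to canVectorizeInst, which dereferences it in switch (Inst->getOpcode()) at line 371, column 11. A minimal sketch of a guard follows; it only illustrates the null-check pattern and is an assumption about intent, not necessarily the fix applied upstream.

// Sketch only: excerpt of the user-walking loop in tryPromoteAllocaToVector,
// with a hypothetical guard added before the call the analyzer flags.
while (!Users.empty()) {
  User *AllocaUser = Users.pop_back_val();
  User *UseUser = UseUsers.pop_back_val();
  // dyn_cast yields nullptr when AllocaUser is not an Instruction,
  // e.g. when the alloca is used by a ConstantExpr.
  Instruction *Inst = dyn_cast<Instruction>(AllocaUser);

  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
  if (!GEP) {
    // Hypothetical guard (assumption): treat a non-instruction user as
    // non-vectorizable instead of handing a null pointer to canVectorizeInst.
    if (!Inst || !canVectorizeInst(Inst, UseUser, DL))
      return false;
    // ... remainder of the non-GEP handling unchanged ...
  }
  // ... GEP handling unchanged ...
}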

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AMDGPUPromoteAlloca.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Target/AMDGPU -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Target/AMDGPU -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-08-28-193554-24367-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp

/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp

1//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass eliminates allocas by either converting them into vectors or
10// by migrating them to local address space.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AMDGPU.h"
15#include "GCNSubtarget.h"
16#include "llvm/Analysis/CaptureTracking.h"
17#include "llvm/Analysis/ValueTracking.h"
18#include "llvm/CodeGen/TargetPassConfig.h"
19#include "llvm/IR/IRBuilder.h"
20#include "llvm/IR/IntrinsicsAMDGPU.h"
21#include "llvm/IR/IntrinsicsR600.h"
22#include "llvm/Pass.h"
23#include "llvm/Target/TargetMachine.h"
24
25#define DEBUG_TYPE "amdgpu-promote-alloca"
26
27using namespace llvm;
28
29namespace {
30
31static cl::opt<bool> DisablePromoteAllocaToVector(
32 "disable-promote-alloca-to-vector",
33 cl::desc("Disable promote alloca to vector"),
34 cl::init(false));
35
36static cl::opt<bool> DisablePromoteAllocaToLDS(
37 "disable-promote-alloca-to-lds",
38 cl::desc("Disable promote alloca to LDS"),
39 cl::init(false));
40
41static cl::opt<unsigned> PromoteAllocaToVectorLimit(
42 "amdgpu-promote-alloca-to-vector-limit",
43 cl::desc("Maximum byte size to consider promote alloca to vector"),
44 cl::init(0));
45
46// FIXME: This can create globals so should be a module pass.
47class AMDGPUPromoteAlloca : public FunctionPass {
48public:
49 static char ID;
50
51 AMDGPUPromoteAlloca() : FunctionPass(ID) {}
52
53 bool runOnFunction(Function &F) override;
54
55 StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }
56
57 bool handleAlloca(AllocaInst &I, bool SufficientLDS);
58
59 void getAnalysisUsage(AnalysisUsage &AU) const override {
60 AU.setPreservesCFG();
61 FunctionPass::getAnalysisUsage(AU);
62 }
63};
64
65class AMDGPUPromoteAllocaImpl {
66private:
67 const TargetMachine &TM;
68 Module *Mod = nullptr;
69 const DataLayout *DL = nullptr;
70
71 // FIXME: This should be per-kernel.
72 uint32_t LocalMemLimit = 0;
73 uint32_t CurrentLocalMemUsage = 0;
74 unsigned MaxVGPRs;
75
76 bool IsAMDGCN = false;
77 bool IsAMDHSA = false;
78
79 std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
80 Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);
81
82 /// BaseAlloca is the alloca root the search started from.
83 /// Val may be that alloca or a recursive user of it.
84 bool collectUsesWithPtrTypes(Value *BaseAlloca,
85 Value *Val,
86 std::vector<Value*> &WorkList) const;
87
88 /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
89 /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
90 /// Returns true if both operands are derived from the same alloca. Val should
91 /// be the same value as one of the input operands of UseInst.
92 bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
93 Instruction *UseInst,
94 int OpIdx0, int OpIdx1) const;
95
96 /// Check whether we have enough local memory for promotion.
97 bool hasSufficientLocalMem(const Function &F);
98
99 bool handleAlloca(AllocaInst &I, bool SufficientLDS);
100
101public:
102 AMDGPUPromoteAllocaImpl(TargetMachine &TM) : TM(TM) {}
103 bool run(Function &F);
104};
105
106class AMDGPUPromoteAllocaToVector : public FunctionPass {
107public:
108 static char ID;
109
110 AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}
111
112 bool runOnFunction(Function &F) override;
113
114 StringRef getPassName() const override {
115 return "AMDGPU Promote Alloca to vector";
116 }
117
118 void getAnalysisUsage(AnalysisUsage &AU) const override {
119 AU.setPreservesCFG();
120 FunctionPass::getAnalysisUsage(AU);
121 }
122};
123
124} // end anonymous namespace
125
126char AMDGPUPromoteAlloca::ID = 0;
127char AMDGPUPromoteAllocaToVector::ID = 0;
128
129INITIALIZE_PASS_BEGIN(AMDGPUPromoteAlloca, DEBUG_TYPE,
130 "AMDGPU promote alloca to vector or LDS", false, false)
131// Move LDS uses from functions to kernels before promote alloca for accurate
132// estimation of LDS available
133INITIALIZE_PASS_DEPENDENCY(AMDGPULowerModuleLDS)
134INITIALIZE_PASS_END(AMDGPUPromoteAlloca, DEBUG_TYPE,
135 "AMDGPU promote alloca to vector or LDS", false, false)
136
137INITIALIZE_PASS(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
138 "AMDGPU promote alloca to vector", false, false)
139
140char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
141char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;
142
143bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
144 if (skipFunction(F))
145 return false;
146
147 if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
148 return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>()).run(F);
149 }
150 return false;
151}
152
153PreservedAnalyses AMDGPUPromoteAllocaPass::run(Function &F,
154 FunctionAnalysisManager &AM) {
155 bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F);
156 if (Changed) {
157 PreservedAnalyses PA;
158 PA.preserveSet<CFGAnalyses>();
159 return PA;
160 }
161 return PreservedAnalyses::all();
162}
163
164bool AMDGPUPromoteAllocaImpl::run(Function &F) {
165 Mod = F.getParent();
166 DL = &Mod->getDataLayout();
167
168 const Triple &TT = TM.getTargetTriple();
169 IsAMDGCN = TT.getArch() == Triple::amdgcn;
170 IsAMDHSA = TT.getOS() == Triple::AMDHSA;
171
172 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
173 if (!ST.isPromoteAllocaEnabled())
174 return false;
175
176 if (IsAMDGCN) {
177 const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
178 MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
179 } else {
180 MaxVGPRs = 128;
181 }
182
183 bool SufficientLDS = hasSufficientLocalMem(F);
184 bool Changed = false;
185 BasicBlock &EntryBB = *F.begin();
186
187 SmallVector<AllocaInst *, 16> Allocas;
188 for (Instruction &I : EntryBB) {
189 if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
190 Allocas.push_back(AI);
191 }
192
193 for (AllocaInst *AI : Allocas) {
194 if (handleAlloca(*AI, SufficientLDS))
195 Changed = true;
196 }
197
198 return Changed;
199}
200
201std::pair<Value *, Value *>
202AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
203 Function &F = *Builder.GetInsertBlock()->getParent();
204 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
205
206 if (!IsAMDHSA) {
207 Function *LocalSizeYFn
208 = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
209 Function *LocalSizeZFn
210 = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);
211
212 CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
213 CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});
214
215 ST.makeLIDRangeMetadata(LocalSizeY);
216 ST.makeLIDRangeMetadata(LocalSizeZ);
217
218 return std::make_pair(LocalSizeY, LocalSizeZ);
219 }
220
221 // We must read the size out of the dispatch pointer.
222 assert(IsAMDGCN);
223
224 // We are indexing into this struct, and want to extract the workgroup_size_*
225 // fields.
226 //
227 // typedef struct hsa_kernel_dispatch_packet_s {
228 // uint16_t header;
229 // uint16_t setup;
230 // uint16_t workgroup_size_x ;
231 // uint16_t workgroup_size_y;
232 // uint16_t workgroup_size_z;
233 // uint16_t reserved0;
234 // uint32_t grid_size_x ;
235 // uint32_t grid_size_y ;
236 // uint32_t grid_size_z;
237 //
238 // uint32_t private_segment_size;
239 // uint32_t group_segment_size;
240 // uint64_t kernel_object;
241 //
242 // #ifdef HSA_LARGE_MODEL
243 // void *kernarg_address;
244 // #elif defined HSA_LITTLE_ENDIAN
245 // void *kernarg_address;
246 // uint32_t reserved1;
247 // #else
248 // uint32_t reserved1;
249 // void *kernarg_address;
250 // #endif
251 // uint64_t reserved2;
252 // hsa_signal_t completion_signal; // uint64_t wrapper
253 // } hsa_kernel_dispatch_packet_t
254 //
255 Function *DispatchPtrFn
256 = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);
257
258 CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
259 DispatchPtr->addRetAttr(Attribute::NoAlias);
260 DispatchPtr->addRetAttr(Attribute::NonNull);
261 DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
262 DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
263 F.removeFnAttr("amdgpu-no-dispatch-ptr");
264
265 // Size of the dispatch packet struct.
266 DispatchPtr->addDereferenceableRetAttr(64);
267
268 Type *I32Ty = Type::getInt32Ty(Mod->getContext());
269 Value *CastDispatchPtr = Builder.CreateBitCast(
270 DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));
271
272 // We could do a single 64-bit load here, but it's likely that the basic
273 // 32-bit and extract sequence is already present, and it is probably easier
274 // to CSE this. The loads should be mergable later anyway.
275 Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
276 LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));
277
278 Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
279 LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));
280
281 MDNode *MD = MDNode::get(Mod->getContext(), None);
282 LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
283 LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
284 ST.makeLIDRangeMetadata(LoadZU);
285
286 // Extract y component. Upper half of LoadZU should be zero already.
287 Value *Y = Builder.CreateLShr(LoadXY, 16);
288
289 return std::make_pair(Y, LoadZU);
290}
291
292Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,
293 unsigned N) {
294 Function *F = Builder.GetInsertBlock()->getParent();
295 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, *F);
296 Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
297 StringRef AttrName;
298
299 switch (N) {
300 case 0:
301 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
302 : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
303 AttrName = "amdgpu-no-workitem-id-x";
304 break;
305 case 1:
306 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
307 : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
308 AttrName = "amdgpu-no-workitem-id-y";
309 break;
310
311 case 2:
312 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
313 : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
314 AttrName = "amdgpu-no-workitem-id-z";
315 break;
316 default:
317 llvm_unreachable("invalid dimension");
318 }
319
320 Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
321 CallInst *CI = Builder.CreateCall(WorkitemIdFn);
322 ST.makeLIDRangeMetadata(CI);
323 F->removeFnAttr(AttrName);
324
325 return CI;
326}
327
328static FixedVectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
329 return FixedVectorType::get(ArrayTy->getElementType(),
330 ArrayTy->getNumElements());
331}
332
333static Value *stripBitcasts(Value *V) {
334 while (Instruction *I = dyn_cast<Instruction>(V)) {
335 if (I->getOpcode() != Instruction::BitCast)
336 break;
337 V = I->getOperand(0);
338 }
339 return V;
340}
341
342static Value *
343calculateVectorIndex(Value *Ptr,
344 const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
345 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(stripBitcasts(Ptr));
346 if (!GEP)
347 return nullptr;
348
349 auto I = GEPIdx.find(GEP);
350 return I == GEPIdx.end() ? nullptr : I->second;
351}
352
353static Value* GEPToVectorIndex(GetElementPtrInst *GEP) {
354 // FIXME we only support simple cases
355 if (GEP->getNumOperands() != 3)
356 return nullptr;
357
358 ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
359 if (!I0 || !I0->isZero())
360 return nullptr;
361
362 return GEP->getOperand(2);
363}
364
365// Not an instruction handled below to turn into a vector.
366//
367// TODO: Check isTriviallyVectorizable for calls and handle other
368// instructions.
369static bool canVectorizeInst(Instruction *Inst, User *User,
370 const DataLayout &DL) {
371 switch (Inst->getOpcode()) {
46
Called C++ object pointer is null
372 case Instruction::Load: {
373 // Currently only handle the case where the Pointer Operand is a GEP.
374 // Also we could not vectorize volatile or atomic loads.
375 LoadInst *LI = cast<LoadInst>(Inst);
376 if (isa<AllocaInst>(User) &&
377 LI->getPointerOperandType() == User->getType() &&
378 isa<VectorType>(LI->getType()))
379 return true;
380
381 Instruction *PtrInst = dyn_cast<Instruction>(LI->getPointerOperand());
382 if (!PtrInst)
383 return false;
384
385 return (PtrInst->getOpcode() == Instruction::GetElementPtr ||
386 PtrInst->getOpcode() == Instruction::BitCast) &&
387 LI->isSimple();
388 }
389 case Instruction::BitCast:
390 return true;
391 case Instruction::Store: {
392 // Must be the stored pointer operand, not a stored value, plus
393 // since it should be canonical form, the User should be a GEP.
394 // Also we could not vectorize volatile or atomic stores.
395 StoreInst *SI = cast<StoreInst>(Inst);
396 if (isa<AllocaInst>(User) &&
397 SI->getPointerOperandType() == User->getType() &&
398 isa<VectorType>(SI->getValueOperand()->getType()))
399 return true;
400
401 Instruction *UserInst = dyn_cast<Instruction>(User);
402 if (!UserInst)
403 return false;
404
405 return (SI->getPointerOperand() == User) &&
406 (UserInst->getOpcode() == Instruction::GetElementPtr ||
407 UserInst->getOpcode() == Instruction::BitCast) &&
408 SI->isSimple();
409 }
410 default:
411 return false;
412 }
413}
414
415static bool tryPromoteAllocaToVector(AllocaInst *Alloca, const DataLayout &DL,
416 unsigned MaxVGPRs) {
417
418 if (DisablePromoteAllocaToVector) {
19
Assuming the condition is false
20
Taking false branch
419 LLVM_DEBUG(dbgs() << " Promotion alloca to vector is disabled\n");
420 return false;
421 }
422
423 Type *AllocaTy = Alloca->getAllocatedType();
424 auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
21
Assuming 'AllocaTy' is not a 'FixedVectorType'
425 if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
22
Assuming 'AllocaTy' is a 'ArrayType'
22.1
'ArrayTy' is non-null
426 if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
23
Assuming the condition is true
25
Taking true branch
427 ArrayTy->getNumElements() > 0)
24
Assuming the condition is true
428 VectorTy = arrayTypeToVecType(ArrayTy);
429 }
430
431 // Use up to 1/4 of available register budget for vectorization.
432 unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
26
Assuming the condition is false
27
'?' condition is false
433 : (MaxVGPRs * 32);
434
435 if (DL.getTypeSizeInBits(AllocaTy) * 4 > Limit) {
28
Assuming the condition is false
436 LLVM_DEBUG(dbgs() << " Alloca too big for vectorization with "
437 << MaxVGPRs << " registers available\n");
438 return false;
439 }
440
441 LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");
29
Taking false branch
30
Assuming 'DebugFlag' is false
442
443 // FIXME: There is no reason why we can't support larger arrays, we
444 // are just being conservative for now.
445 // FIXME: We also reject alloca's of the form [ 2 x [ 2 x i32 ]] or equivalent. Potentially these
446 // could also be promoted but we don't currently handle this case
447 if (!VectorTy || VectorTy->getNumElements() > 16 ||
31
Assuming 'VectorTy' is non-null
32
Assuming the condition is false
34
Taking false branch
448 VectorTy->getNumElements() < 2) {
33
Assuming the condition is false
449 LLVM_DEBUG(dbgs() << " Cannot convert type to vector\n");
450 return false;
451 }
452
453 std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
454 std::vector<Value *> WorkList;
455 SmallVector<User *, 8> Users(Alloca->users());
456 SmallVector<User *, 8> UseUsers(Users.size(), Alloca);
457 Type *VecEltTy = VectorTy->getElementType();
458 while (!Users.empty()) {
35
Calling 'SmallVectorBase::empty'
38
Returning from 'SmallVectorBase::empty'
39
Loop condition is true. Entering loop body
459 User *AllocaUser = Users.pop_back_val();
460 User *UseUser = UseUsers.pop_back_val();
461 Instruction *Inst = dyn_cast<Instruction>(AllocaUser);
40
Assuming 'AllocaUser' is not a 'Instruction'
41
'Inst' initialized to a null pointer value
462
463 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
42
Assuming 'AllocaUser' is not a 'GetElementPtrInst'
464 if (!GEP) {
42.1
'GEP' is null
43
Taking true branch
465 if (!canVectorizeInst(Inst, UseUser, DL))
44
Passing null pointer value via 1st parameter 'Inst'
45
Calling 'canVectorizeInst'
466 return false;
467
468 if (Inst->getOpcode() == Instruction::BitCast) {
469 Type *FromTy = Inst->getOperand(0)->getType()->getPointerElementType();
470 Type *ToTy = Inst->getType()->getPointerElementType();
471 if (FromTy->isAggregateType() || ToTy->isAggregateType() ||
472 DL.getTypeSizeInBits(FromTy) != DL.getTypeSizeInBits(ToTy))
473 continue;
474
475 for (User *CastUser : Inst->users()) {
476 if (isAssumeLikeIntrinsic(cast<Instruction>(CastUser)))
477 continue;
478 Users.push_back(CastUser);
479 UseUsers.push_back(Inst);
480 }
481
482 continue;
483 }
484
485 WorkList.push_back(AllocaUser);
486 continue;
487 }
488
489 Value *Index = GEPToVectorIndex(GEP);
490
491 // If we can't compute a vector index from this GEP, then we can't
492 // promote this alloca to vector.
493 if (!Index) {
494 LLVM_DEBUG(dbgs() << " Cannot compute vector index for GEP " << *GEP
495 << '\n');
496 return false;
497 }
498
499 GEPVectorIdx[GEP] = Index;
500 Users.append(GEP->user_begin(), GEP->user_end());
501 UseUsers.append(GEP->getNumUses(), GEP);
502 }
503
504 LLVM_DEBUG(dbgs() << " Converting alloca to vector " << *AllocaTy << " -> "
505 << *VectorTy << '\n');
506
507 for (Value *V : WorkList) {
508 Instruction *Inst = cast<Instruction>(V);
509 IRBuilder<> Builder(Inst);
510 switch (Inst->getOpcode()) {
511 case Instruction::Load: {
512 if (Inst->getType() == AllocaTy || Inst->getType()->isVectorTy())
513 break;
514
515 Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
516 Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
517 if (!Index)
518 break;
519
520 Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
521 Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
522 Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
523 Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
524 if (Inst->getType() != VecEltTy)
525 ExtractElement = Builder.CreateBitOrPointerCast(ExtractElement, Inst->getType());
526 Inst->replaceAllUsesWith(ExtractElement);
527 Inst->eraseFromParent();
528 break;
529 }
530 case Instruction::Store: {
531 StoreInst *SI = cast<StoreInst>(Inst);
532 if (SI->getValueOperand()->getType() == AllocaTy ||
533 SI->getValueOperand()->getType()->isVectorTy())
534 break;
535
536 Value *Ptr = SI->getPointerOperand();
537 Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
538 if (!Index)
539 break;
540
541 Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
542 Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
543 Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
544 Value *Elt = SI->getValueOperand();
545 if (Elt->getType() != VecEltTy)
546 Elt = Builder.CreateBitOrPointerCast(Elt, VecEltTy);
547 Value *NewVecValue = Builder.CreateInsertElement(VecValue, Elt, Index);
548 Builder.CreateStore(NewVecValue, BitCast);
549 Inst->eraseFromParent();
550 break;
551 }
552
553 default:
554 llvm_unreachable("Inconsistency in instructions promotable to vector");
555 }
556 }
557 return true;
558}
559
560static bool isCallPromotable(CallInst *CI) {
561 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
562 if (!II)
563 return false;
564
565 switch (II->getIntrinsicID()) {
566 case Intrinsic::memcpy:
567 case Intrinsic::memmove:
568 case Intrinsic::memset:
569 case Intrinsic::lifetime_start:
570 case Intrinsic::lifetime_end:
571 case Intrinsic::invariant_start:
572 case Intrinsic::invariant_end:
573 case Intrinsic::launder_invariant_group:
574 case Intrinsic::strip_invariant_group:
575 case Intrinsic::objectsize:
576 return true;
577 default:
578 return false;
579 }
580}
581
582bool AMDGPUPromoteAllocaImpl::binaryOpIsDerivedFromSameAlloca(
583 Value *BaseAlloca, Value *Val, Instruction *Inst, int OpIdx0,
584 int OpIdx1) const {
585 // Figure out which operand is the one we might not be promoting.
586 Value *OtherOp = Inst->getOperand(OpIdx0);
587 if (Val == OtherOp)
588 OtherOp = Inst->getOperand(OpIdx1);
589
590 if (isa<ConstantPointerNull>(OtherOp))
591 return true;
592
593 Value *OtherObj = getUnderlyingObject(OtherOp);
594 if (!isa<AllocaInst>(OtherObj))
595 return false;
596
597 // TODO: We should be able to replace undefs with the right pointer type.
598
599 // TODO: If we know the other base object is another promotable
600 // alloca, not necessarily this alloca, we can do this. The
601 // important part is both must have the same address space at
602 // the end.
603 if (OtherObj != BaseAlloca) {
604 LLVM_DEBUG(
605 dbgs() << "Found a binary instruction with another alloca object\n");
606 return false;
607 }
608
609 return true;
610}
611
612bool AMDGPUPromoteAllocaImpl::collectUsesWithPtrTypes(
613 Value *BaseAlloca, Value *Val, std::vector<Value *> &WorkList) const {
614
615 for (User *User : Val->users()) {
616 if (is_contained(WorkList, User))
617 continue;
618
619 if (CallInst *CI = dyn_cast<CallInst>(User)) {
620 if (!isCallPromotable(CI))
621 return false;
622
623 WorkList.push_back(User);
624 continue;
625 }
626
627 Instruction *UseInst = cast<Instruction>(User);
628 if (UseInst->getOpcode() == Instruction::PtrToInt)
629 return false;
630
631 if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
632 if (LI->isVolatile())
633 return false;
634
635 continue;
636 }
637
638 if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
639 if (SI->isVolatile())
640 return false;
641
642 // Reject if the stored value is not the pointer operand.
643 if (SI->getPointerOperand() != Val)
644 return false;
645 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
646 if (RMW->isVolatile())
647 return false;
648 } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
649 if (CAS->isVolatile())
650 return false;
651 }
652
653 // Only promote a select if we know that the other select operand
654 // is from another pointer that will also be promoted.
655 if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
656 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
657 return false;
658
659 // May need to rewrite constant operands.
660 WorkList.push_back(ICmp);
661 }
662
663 if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
664 // Give up if the pointer may be captured.
665 if (PointerMayBeCaptured(UseInst, true, true))
666 return false;
667 // Don't collect the users of this.
668 WorkList.push_back(User);
669 continue;
670 }
671
672 // Do not promote vector/aggregate type instructions. It is hard to track
673 // their users.
674 if (isa<InsertValueInst>(User) || isa<InsertElementInst>(User))
675 return false;
676
677 if (!User->getType()->isPointerTy())
678 continue;
679
680 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
681 // Be conservative if an address could be computed outside the bounds of
682 // the alloca.
683 if (!GEP->isInBounds())
684 return false;
685 }
686
687 // Only promote a select if we know that the other select operand is from
688 // another pointer that will also be promoted.
689 if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
690 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
691 return false;
692 }
693
694 // Repeat for phis.
695 if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
696 // TODO: Handle more complex cases. We should be able to replace loops
697 // over arrays.
698 switch (Phi->getNumIncomingValues()) {
699 case 1:
700 break;
701 case 2:
702 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
703 return false;
704 break;
705 default:
706 return false;
707 }
708 }
709
710 WorkList.push_back(User);
711 if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
712 return false;
713 }
714
715 return true;
716}
717
718bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {
719
720 FunctionType *FTy = F.getFunctionType();
721 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
722
723 // If the function has any arguments in the local address space, then it's
724 // possible these arguments require the entire local memory space, so
725 // we cannot use local memory in the pass.
726 for (Type *ParamTy : FTy->params()) {
727 PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
728 if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
729 LocalMemLimit = 0;
730 LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
731 "local memory disabled.\n");
733 }
734 }
735
736 LocalMemLimit = ST.getLocalMemorySize();
737 if (LocalMemLimit == 0)
738 return false;
739
740 SmallVector<const Constant *, 16> Stack;
741 SmallPtrSet<const Constant *, 8> VisitedConstants;
742 SmallPtrSet<const GlobalVariable *, 8> UsedLDS;
743
744 auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
745 for (const User *U : Val->users()) {
746 if (const Instruction *Use = dyn_cast<Instruction>(U)) {
747 if (Use->getParent()->getParent() == &F)
748 return true;
749 } else {
750 const Constant *C = cast<Constant>(U);
751 if (VisitedConstants.insert(C).second)
752 Stack.push_back(C);
753 }
754 }
755
756 return false;
757 };
758
759 for (GlobalVariable &GV : Mod->globals()) {
760 if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
761 continue;
762
763 if (visitUsers(&GV, &GV)) {
764 UsedLDS.insert(&GV);
765 Stack.clear();
766 continue;
767 }
768
769 // For any ConstantExpr uses, we need to recursively search the users until
770 // we see a function.
771 while (!Stack.empty()) {
772 const Constant *C = Stack.pop_back_val();
773 if (visitUsers(&GV, C)) {
774 UsedLDS.insert(&GV);
775 Stack.clear();
776 break;
777 }
778 }
779 }
780
781 const DataLayout &DL = Mod->getDataLayout();
782 SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes;
783 AllocatedSizes.reserve(UsedLDS.size());
784
785 for (const GlobalVariable *GV : UsedLDS) {
786 Align Alignment =
787 DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
788 uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());
789 AllocatedSizes.emplace_back(AllocSize, Alignment);
790 }
791
792 // Sort to try to estimate the worst case alignment padding
793 //
794 // FIXME: We should really do something to fix the addresses to a more optimal
795 // value instead
796 llvm::sort(AllocatedSizes, [](std::pair<uint64_t, Align> LHS,
797 std::pair<uint64_t, Align> RHS) {
798 return LHS.second < RHS.second;
799 });
800
801 // Check how much local memory is being used by global objects
802 CurrentLocalMemUsage = 0;
803
804 // FIXME: Try to account for padding here. The real padding and address is
805 // currently determined from the inverse order of uses in the function when
806 // legalizing, which could also potentially change. We try to estimate the
807 // worst case here, but we probably should fix the addresses earlier.
808 for (auto Alloc : AllocatedSizes) {
809 CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second);
810 CurrentLocalMemUsage += Alloc.first;
811 }
812
813 unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
814 F);
815
816 // Restrict local memory usage so that we don't drastically reduce occupancy,
817 // unless it is already significantly reduced.
818
819 // TODO: Have some sort of hint or other heuristics to guess occupancy based
820 // on other factors..
821 unsigned OccupancyHint = ST.getWavesPerEU(F).second;
822 if (OccupancyHint == 0)
823 OccupancyHint = 7;
824
825 // Clamp to max value.
826 OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());
827
828 // Check the hint but ignore it if it's obviously wrong from the existing LDS
829 // usage.
830 MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);
831
832
833 // Round up to the next tier of usage.
834 unsigned MaxSizeWithWaveCount
835 = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);
836
837 // Program is possibly broken by using more local mem than available.
838 if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
839 return false;
840
841 LocalMemLimit = MaxSizeWithWaveCount;
842
843 LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
844 << " bytes of LDS\n"
845 << " Rounding size to " << MaxSizeWithWaveCount
846 << " with a maximum occupancy of " << MaxOccupancy << '\n'
847 << " and " << (LocalMemLimit - CurrentLocalMemUsage)
848 << " available for promotion\n");
849
850 return true;
851}
852
853// FIXME: Should try to pick the most likely to be profitable allocas first.
854bool AMDGPUPromoteAllocaImpl::handleAlloca(AllocaInst &I, bool SufficientLDS) {
855 // Array allocations are probably not worth handling, since an allocation of
856 // the array type is the canonical form.
857 if (!I.isStaticAlloca() || I.isArrayAllocation())
858 return false;
859
860 const DataLayout &DL = Mod->getDataLayout();
861 IRBuilder<> Builder(&I);
862
863 // First try to replace the alloca with a vector
864 Type *AllocaTy = I.getAllocatedType();
865
866 LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');
867
868 if (tryPromoteAllocaToVector(&I, DL, MaxVGPRs))
869 return true; // Promoted to vector.
870
871 if (DisablePromoteAllocaToLDS)
872 return false;
873
874 const Function &ContainingFunction = *I.getParent()->getParent();
875 CallingConv::ID CC = ContainingFunction.getCallingConv();
876
877 // Don't promote the alloca to LDS for shader calling conventions as the work
878 // item ID intrinsics are not supported for these calling conventions.
879 // Furthermore not all LDS is available for some of the stages.
880 switch (CC) {
881 case CallingConv::AMDGPU_KERNEL:
882 case CallingConv::SPIR_KERNEL:
883 break;
884 default:
885 LLVM_DEBUG(
886 dbgs()
887 << " promote alloca to LDS not supported with calling convention.\n");
888 return false;
889 }
890
891 // Not likely to have sufficient local memory for promotion.
892 if (!SufficientLDS)
893 return false;
894
895 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, ContainingFunction);
896 unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;
897
898 Align Alignment =
899 DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());
900
901 // FIXME: This computed padding is likely wrong since it depends on inverse
902 // usage order.
903 //
904 // FIXME: It is also possible that if we're allowed to use all of the memory
905 // we could end up using more than the maximum due to alignment padding.
906
907 uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
908 uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
909 NewSize += AllocSize;
910
911 if (NewSize > LocalMemLimit) {
912 LLVM_DEBUG(dbgs() << " " << AllocSize
913 << " bytes of local memory not available to promote\n");
914 return false;
915 }
916
917 CurrentLocalMemUsage = NewSize;
918
919 std::vector<Value*> WorkList;
920
921 if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
922 LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
923 return false;
924 }
925
926 LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");
927
928 Function *F = I.getParent()->getParent();
929
930 Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
931 GlobalVariable *GV = new GlobalVariable(
932 *Mod, GVTy, false, GlobalValue::InternalLinkage,
933 UndefValue::get(GVTy),
934 Twine(F->getName()) + Twine('.') + I.getName(),
935 nullptr,
936 GlobalVariable::NotThreadLocal,
937 AMDGPUAS::LOCAL_ADDRESS);
938 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
939 GV->setAlignment(MaybeAlign(I.getAlignment()));
940
941 Value *TCntY, *TCntZ;
942
943 std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
944 Value *TIdX = getWorkitemID(Builder, 0);
945 Value *TIdY = getWorkitemID(Builder, 1);
946 Value *TIdZ = getWorkitemID(Builder, 2);
947
948 Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
949 Tmp0 = Builder.CreateMul(Tmp0, TIdX);
950 Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
951 Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
952 TID = Builder.CreateAdd(TID, TIdZ);
953
954 Value *Indices[] = {
955 Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
956 TID
957 };
958
959 Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
960 I.mutateType(Offset->getType());
961 I.replaceAllUsesWith(Offset);
962 I.eraseFromParent();
963
964 SmallVector<IntrinsicInst *> DeferredIntrs;
965
966 for (Value *V : WorkList) {
967 CallInst *Call = dyn_cast<CallInst>(V);
968 if (!Call) {
969 if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
970 Value *Src0 = CI->getOperand(0);
971 PointerType *NewTy = PointerType::getWithSamePointeeType(
972 cast<PointerType>(Src0->getType()), AMDGPUAS::LOCAL_ADDRESS);
973
974 if (isa<ConstantPointerNull>(CI->getOperand(0)))
975 CI->setOperand(0, ConstantPointerNull::get(NewTy));
976
977 if (isa<ConstantPointerNull>(CI->getOperand(1)))
978 CI->setOperand(1, ConstantPointerNull::get(NewTy));
979
980 continue;
981 }
982
983 // The operand's value should be corrected on its own and we don't want to
984 // touch the users.
985 if (isa<AddrSpaceCastInst>(V))
986 continue;
987
988 PointerType *NewTy = PointerType::getWithSamePointeeType(
989 cast<PointerType>(V->getType()), AMDGPUAS::LOCAL_ADDRESS);
990
991 // FIXME: It doesn't really make sense to try to do this for all
992 // instructions.
993 V->mutateType(NewTy);
994
995 // Adjust the types of any constant operands.
996 if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
997 if (isa<ConstantPointerNull>(SI->getOperand(1)))
998 SI->setOperand(1, ConstantPointerNull::get(NewTy));
999
1000 if (isa<ConstantPointerNull>(SI->getOperand(2)))
1001 SI->setOperand(2, ConstantPointerNull::get(NewTy));
1002 } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
1003 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1004 if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
1005 Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
1006 }
1007 }
1008
1009 continue;
1010 }
1011
1012 IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
1013 Builder.SetInsertPoint(Intr);
1014 switch (Intr->getIntrinsicID()) {
1015 case Intrinsic::lifetime_start:
1016 case Intrinsic::lifetime_end:
1017 // These intrinsics are for address space 0 only
1018 Intr->eraseFromParent();
1019 continue;
1020 case Intrinsic::memcpy:
1021 case Intrinsic::memmove:
1022 // These have 2 pointer operands. In case if second pointer also needs
1023 // to be replaced we defer processing of these intrinsics until all
1024 // other values are processed.
1025 DeferredIntrs.push_back(Intr);
1026 continue;
1027 case Intrinsic::memset: {
1028 MemSetInst *MemSet = cast<MemSetInst>(Intr);
1029 Builder.CreateMemSet(
1030 MemSet->getRawDest(), MemSet->getValue(), MemSet->getLength(),
1031 MaybeAlign(MemSet->getDestAlignment()), MemSet->isVolatile());
1032 Intr->eraseFromParent();
1033 continue;
1034 }
1035 case Intrinsic::invariant_start:
1036 case Intrinsic::invariant_end:
1037 case Intrinsic::launder_invariant_group:
1038 case Intrinsic::strip_invariant_group:
1039 Intr->eraseFromParent();
1040 // FIXME: I think the invariant marker should still theoretically apply,
1041 // but the intrinsics need to be changed to accept pointers with any
1042 // address space.
1043 continue;
1044 case Intrinsic::objectsize: {
1045 Value *Src = Intr->getOperand(0);
1046 Function *ObjectSize = Intrinsic::getDeclaration(
1047 Mod, Intrinsic::objectsize,
1048 {Intr->getType(),
1049 PointerType::getWithSamePointeeType(
1050 cast<PointerType>(Src->getType()), AMDGPUAS::LOCAL_ADDRESS)});
1051
1052 CallInst *NewCall = Builder.CreateCall(
1053 ObjectSize,
1054 {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
1055 Intr->replaceAllUsesWith(NewCall);
1056 Intr->eraseFromParent();
1057 continue;
1058 }
1059 default:
1060 Intr->print(errs());
1061 llvm_unreachable("Don't know how to promote alloca intrinsic use.");
1062 }
1063 }
1064
1065 for (IntrinsicInst *Intr : DeferredIntrs) {
1066 Builder.SetInsertPoint(Intr);
1067 Intrinsic::ID ID = Intr->getIntrinsicID();
1068 assert(ID == Intrinsic::memcpy || ID == Intrinsic::memmove);
1069
1070 MemTransferInst *MI = cast<MemTransferInst>(Intr);
1071 auto *B =
1072 Builder.CreateMemTransferInst(ID, MI->getRawDest(), MI->getDestAlign(),
1073 MI->getRawSource(), MI->getSourceAlign(),
1074 MI->getLength(), MI->isVolatile());
1075
1076 for (unsigned I = 0; I != 2; ++I) {
1077 if (uint64_t Bytes = Intr->getParamDereferenceableBytes(I)) {
1078 B->addDereferenceableParamAttr(I, Bytes);
1079 }
1080 }
1081
1082 Intr->eraseFromParent();
1083 }
1084
1085 return true;
1086}
1087
1088bool handlePromoteAllocaToVector(AllocaInst &I, unsigned MaxVGPRs) {
1089 // Array allocations are probably not worth handling, since an allocation of
1090 // the array type is the canonical form.
1091 if (!I.isStaticAlloca() || I.isArrayAllocation())
13
Assuming the condition is false
14
Assuming the condition is false
1092 return false;
1093
1094 LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');
15
Taking false branch
16
Assuming 'DebugFlag' is false
17
Loop condition is false. Exiting loop
1095
1096 Module *Mod = I.getParent()->getParent()->getParent();
1097 return tryPromoteAllocaToVector(&I, Mod->getDataLayout(), MaxVGPRs);
18
Calling 'tryPromoteAllocaToVector'
1098}
1099
1100bool promoteAllocasToVector(Function &F, TargetMachine &TM) {
1101 if (DisablePromoteAllocaToVector)
2
Assuming the condition is false
3
Taking false branch
1102 return false;
1103
1104 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
1105 if (!ST.isPromoteAllocaEnabled())
4
Assuming the condition is false
5
Taking false branch
1106 return false;
1107
1108 unsigned MaxVGPRs;
1109 if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
6
Assuming the condition is false
7
Taking false branch
1110 const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
1111 MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
1112 } else {
1113 MaxVGPRs = 128;
1114 }
1115
1116 bool Changed = false;
1117 BasicBlock &EntryBB = *F.begin();
1118
1119 SmallVector<AllocaInst *, 16> Allocas;
1120 for (Instruction &I : EntryBB) {
1121 if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
1122 Allocas.push_back(AI);
1123 }
1124
1125 for (AllocaInst *AI : Allocas) {
8
Assuming '__begin1' is not equal to '__end1'
1126 if (handlePromoteAllocaToVector(*AI, MaxVGPRs))
9
Taking false branch
10
Taking false branch
11
Taking false branch
12
Calling 'handlePromoteAllocaToVector'
1127 Changed = true;
1128 }
1129
1130 return Changed;
1131}
1132
1133bool AMDGPUPromoteAllocaToVector::runOnFunction(Function &F) {
1134 if (skipFunction(F))
1135 return false;
1136 if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
1137 return promoteAllocasToVector(F, TPC->getTM<TargetMachine>());
1138 }
1139 return false;
1140}
1141
1142PreservedAnalyses
1143AMDGPUPromoteAllocaToVectorPass::run(Function &F, FunctionAnalysisManager &AM) {
1144 bool Changed = promoteAllocasToVector(F, TM);
1
Calling 'promoteAllocasToVector'
1145 if (Changed) {
1146 PreservedAnalyses PA;
1147 PA.preserveSet<CFGAnalyses>();
1148 return PA;
1149 }
1150 return PreservedAnalyses::all();
1151}
1152
1153FunctionPass *llvm::createAMDGPUPromoteAlloca() {
1154 return new AMDGPUPromoteAlloca();
1155}
1156
1157FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
1158 return new AMDGPUPromoteAllocaToVector();
1159}

/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/ADT/SmallVector.h

1//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the SmallVector class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_ADT_SMALLVECTOR_H
14#define LLVM_ADT_SMALLVECTOR_H
15
16#include "llvm/ADT/iterator_range.h"
17#include "llvm/Support/Compiler.h"
18#include "llvm/Support/ErrorHandling.h"
19#include "llvm/Support/MemAlloc.h"
20#include "llvm/Support/type_traits.h"
21#include <algorithm>
22#include <cassert>
23#include <cstddef>
24#include <cstdlib>
25#include <cstring>
26#include <functional>
27#include <initializer_list>
28#include <iterator>
29#include <limits>
30#include <memory>
31#include <new>
32#include <type_traits>
33#include <utility>
34
35namespace llvm {
36
37/// This is all the stuff common to all SmallVectors.
38///
39/// The template parameter specifies the type which should be used to hold the
40/// Size and Capacity of the SmallVector, so it can be adjusted.
41/// Using 32 bit size is desirable to shrink the size of the SmallVector.
42/// Using 64 bit size is desirable for cases like SmallVector<char>, where a
43/// 32 bit size would limit the vector to ~4GB. SmallVectors are used for
44/// buffering bitcode output - which can exceed 4GB.
45template <class Size_T> class SmallVectorBase {
46protected:
47 void *BeginX;
48 Size_T Size = 0, Capacity;
49
50 /// The maximum value of the Size_T used.
51 static constexpr size_t SizeTypeMax() {
52 return std::numeric_limits<Size_T>::max();
53 }
54
55 SmallVectorBase() = delete;
56 SmallVectorBase(void *FirstEl, size_t TotalCapacity)
57 : BeginX(FirstEl), Capacity(TotalCapacity) {}
58
59 /// This is a helper for \a grow() that's out of line to reduce code
60 /// duplication. This function will report a fatal error if it can't grow at
61 /// least to \p MinSize.
62 void *mallocForGrow(size_t MinSize, size_t TSize, size_t &NewCapacity);
63
64 /// This is an implementation of the grow() method which only works
65 /// on POD-like data types and is out of line to reduce code duplication.
66 /// This function will report a fatal error if it cannot increase capacity.
67 void grow_pod(void *FirstEl, size_t MinSize, size_t TSize);
68
69public:
70 size_t size() const { return Size; }
71 size_t capacity() const { return Capacity; }
72
73 LLVM_NODISCARD bool empty() const { return !Size; }
36
Assuming field 'Size' is not equal to 0
37
Returning zero, which participates in a condition later
74
75 /// Set the array size to \p N, which the current array must have enough
76 /// capacity for.
77 ///
78 /// This does not construct or destroy any elements in the vector.
79 ///
80 /// Clients can use this in conjunction with capacity() to write past the end
81 /// of the buffer when they know that more elements are available, and only
82 /// update the size later. This avoids the cost of value initializing elements
83 /// which will only be overwritten.
84 void set_size(size_t N) {
85 assert(N <= capacity());
86 Size = N;
87 }
88};
89
90template <class T>
91using SmallVectorSizeType =
92 typename std::conditional<sizeof(T) < 4 && sizeof(void *) >= 8, uint64_t,
93 uint32_t>::type;
94
95/// Figure out the offset of the first element.
96template <class T, typename = void> struct SmallVectorAlignmentAndSize {
97 alignas(SmallVectorBase<SmallVectorSizeType<T>>) char Base[sizeof(
98 SmallVectorBase<SmallVectorSizeType<T>>)];
99 alignas(T) char FirstEl[sizeof(T)];
100};
101
102/// This is the part of SmallVectorTemplateBase which does not depend on whether
103/// the type T is a POD. The extra dummy template argument is used by ArrayRef
104/// to avoid unnecessarily requiring T to be complete.
105template <typename T, typename = void>
106class SmallVectorTemplateCommon
107 : public SmallVectorBase<SmallVectorSizeType<T>> {
108 using Base = SmallVectorBase<SmallVectorSizeType<T>>;
109
110 /// Find the address of the first element. For this pointer math to be valid
111 /// with small-size of 0 for T with lots of alignment, it's important that
112 /// SmallVectorStorage is properly-aligned even for small-size of 0.
113 void *getFirstEl() const {
114 return const_cast<void *>(reinterpret_cast<const void *>(
115 reinterpret_cast<const char *>(this) +
116 offsetof(SmallVectorAlignmentAndSize<T>, FirstEl)));
117 }
118 // Space after 'FirstEl' is clobbered, do not add any instance vars after it.
119
120protected:
121 SmallVectorTemplateCommon(size_t Size) : Base(getFirstEl(), Size) {}
122
123 void grow_pod(size_t MinSize, size_t TSize) {
124 Base::grow_pod(getFirstEl(), MinSize, TSize);
125 }
126
127 /// Return true if this is a smallvector which has not had dynamic
128 /// memory allocated for it.
129 bool isSmall() const { return this->BeginX == getFirstEl(); }
130
131 /// Put this vector in a state of being small.
132 void resetToSmall() {
133 this->BeginX = getFirstEl();
134 this->Size = this->Capacity = 0; // FIXME: Setting Capacity to 0 is suspect.
135 }
136
137 /// Return true if V is an internal reference to the given range.
138 bool isReferenceToRange(const void *V, const void *First, const void *Last) const {
139 // Use std::less to avoid UB.
140 std::less<> LessThan;
141 return !LessThan(V, First) && LessThan(V, Last);
142 }
143
144 /// Return true if V is an internal reference to this vector.
145 bool isReferenceToStorage(const void *V) const {
146 return isReferenceToRange(V, this->begin(), this->end());
147 }
148
149 /// Return true if First and Last form a valid (possibly empty) range in this
150 /// vector's storage.
151 bool isRangeInStorage(const void *First, const void *Last) const {
152 // Use std::less to avoid UB.
153 std::less<> LessThan;
154 return !LessThan(First, this->begin()) && !LessThan(Last, First) &&
155 !LessThan(this->end(), Last);
156 }
157
158 /// Return true unless Elt will be invalidated by resizing the vector to
159 /// NewSize.
160 bool isSafeToReferenceAfterResize(const void *Elt, size_t NewSize) {
161 // Past the end.
162 if (LLVM_LIKELY(!isReferenceToStorage(Elt)))
163 return true;
164
165 // Return false if Elt will be destroyed by shrinking.
166 if (NewSize <= this->size())
167 return Elt < this->begin() + NewSize;
168
169 // Return false if we need to grow.
170 return NewSize <= this->capacity();
171 }
172
173 /// Check whether Elt will be invalidated by resizing the vector to NewSize.
174 void assertSafeToReferenceAfterResize(const void *Elt, size_t NewSize) {
175 assert(isSafeToReferenceAfterResize(Elt, NewSize) &&
176 "Attempting to reference an element of the vector in an operation "
177 "that invalidates it");
178 }
179
180 /// Check whether Elt will be invalidated by increasing the size of the
181 /// vector by N.
182 void assertSafeToAdd(const void *Elt, size_t N = 1) {
183 this->assertSafeToReferenceAfterResize(Elt, this->size() + N);
184 }
185
186 /// Check whether any part of the range will be invalidated by clearing.
187 void assertSafeToReferenceAfterClear(const T *From, const T *To) {
188 if (From == To)
189 return;
190 this->assertSafeToReferenceAfterResize(From, 0);
191 this->assertSafeToReferenceAfterResize(To - 1, 0);
192 }
193 template <
194 class ItTy,
195 std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value,
196 bool> = false>
197 void assertSafeToReferenceAfterClear(ItTy, ItTy) {}
198
199 /// Check whether any part of the range will be invalidated by growing.
200 void assertSafeToAddRange(const T *From, const T *To) {
201 if (From == To)
202 return;
203 this->assertSafeToAdd(From, To - From);
204 this->assertSafeToAdd(To - 1, To - From);
205 }
206 template <
207 class ItTy,
208 std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value,
209 bool> = false>
210 void assertSafeToAddRange(ItTy, ItTy) {}
211
212 /// Reserve enough space to add one element, and return the updated element
213 /// pointer in case it was a reference to the storage.
214 template <class U>
215 static const T *reserveForParamAndGetAddressImpl(U *This, const T &Elt,
216 size_t N) {
217 size_t NewSize = This->size() + N;
218 if (LLVM_LIKELY(NewSize <= This->capacity()))
219 return &Elt;
220
221 bool ReferencesStorage = false;
222 int64_t Index = -1;
223 if (!U::TakesParamByValue) {
224 if (LLVM_UNLIKELY(This->isReferenceToStorage(&Elt))) {
225 ReferencesStorage = true;
226 Index = &Elt - This->begin();
227 }
228 }
229 This->grow(NewSize);
230 return ReferencesStorage ? This->begin() + Index : &Elt;
231 }
232
233public:
234 using size_type = size_t;
235 using difference_type = ptrdiff_t;
236 using value_type = T;
237 using iterator = T *;
238 using const_iterator = const T *;
239
240 using const_reverse_iterator = std::reverse_iterator<const_iterator>;
241 using reverse_iterator = std::reverse_iterator<iterator>;
242
243 using reference = T &;
244 using const_reference = const T &;
245 using pointer = T *;
246 using const_pointer = const T *;
247
248 using Base::capacity;
249 using Base::empty;
250 using Base::size;
251
252 // forward iterator creation methods.
253 iterator begin() { return (iterator)this->BeginX; }
254 const_iterator begin() const { return (const_iterator)this->BeginX; }
255 iterator end() { return begin() + size(); }
256 const_iterator end() const { return begin() + size(); }
257
258 // reverse iterator creation methods.
259 reverse_iterator rbegin() { return reverse_iterator(end()); }
260 const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
261 reverse_iterator rend() { return reverse_iterator(begin()); }
262 const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
263
264 size_type size_in_bytes() const { return size() * sizeof(T); }
265 size_type max_size() const {
266 return std::min(this->SizeTypeMax(), size_type(-1) / sizeof(T));
267 }
268
269 size_t capacity_in_bytes() const { return capacity() * sizeof(T); }
270
271 /// Return a pointer to the vector's buffer, even if empty().
272 pointer data() { return pointer(begin()); }
273 /// Return a pointer to the vector's buffer, even if empty().
274 const_pointer data() const { return const_pointer(begin()); }
275
276 reference operator[](size_type idx) {
277 assert(idx < size());
278 return begin()[idx];
279 }
280 const_reference operator[](size_type idx) const {
281 assert(idx < size());
282 return begin()[idx];
283 }
284
285 reference front() {
286 assert(!empty());
287 return begin()[0];
288 }
289 const_reference front() const {
290 assert(!empty());
291 return begin()[0];
292 }
293
294 reference back() {
295 assert(!empty());
296 return end()[-1];
297 }
298 const_reference back() const {
299 assert(!empty());
300 return end()[-1];
301 }
302};
303
304/// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put
305/// method implementations that are designed to work with non-trivial T's.
306///
307/// We approximate is_trivially_copyable with trivial move/copy construction and
308/// trivial destruction. While the standard doesn't specify that you're allowed
309/// to copy these types with memcpy, there is no way for the type to observe this.
310/// This catches the important case of std::pair<POD, POD>, which is not
311/// trivially assignable.
312template <typename T, bool = (is_trivially_copy_constructible<T>::value) &&
313 (is_trivially_move_constructible<T>::value) &&
314 std::is_trivially_destructible<T>::value>
315class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
316 friend class SmallVectorTemplateCommon<T>;
317
318protected:
319 static constexpr bool TakesParamByValue = false;
320 using ValueParamT = const T &;
321
322 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
323
324 static void destroy_range(T *S, T *E) {
325 while (S != E) {
326 --E;
327 E->~T();
328 }
329 }
330
331 /// Move the range [I, E) into the uninitialized memory starting with "Dest",
332 /// constructing elements as needed.
333 template<typename It1, typename It2>
334 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
335 std::uninitialized_copy(std::make_move_iterator(I),
336 std::make_move_iterator(E), Dest);
337 }
338
339 /// Copy the range [I, E) onto the uninitialized memory starting with "Dest",
340 /// constructing elements as needed.
341 template<typename It1, typename It2>
342 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
343 std::uninitialized_copy(I, E, Dest);
344 }
345
346 /// Grow the allocated memory (without initializing new elements), doubling
347 /// the size of the allocated memory. Guarantees space for at least one more
348 /// element, or MinSize more elements if specified.
349 void grow(size_t MinSize = 0);
350
351 /// Create a new allocation big enough for \p MinSize and pass back its size
352 /// in \p NewCapacity. This is the first section of \a grow().
353 T *mallocForGrow(size_t MinSize, size_t &NewCapacity) {
354 return static_cast<T *>(
355 SmallVectorBase<SmallVectorSizeType<T>>::mallocForGrow(
356 MinSize, sizeof(T), NewCapacity));
357 }
358
359 /// Move existing elements over to the new allocation \p NewElts, the middle
360 /// section of \a grow().
361 void moveElementsForGrow(T *NewElts);
362
363 /// Transfer ownership of the allocation, finishing up \a grow().
364 void takeAllocationForGrow(T *NewElts, size_t NewCapacity);
365
366 /// Reserve enough space to add one element, and return the updated element
367 /// pointer in case it was a reference to the storage.
368 const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) {
369 return this->reserveForParamAndGetAddressImpl(this, Elt, N);
370 }
371
372 /// Reserve enough space to add one element, and return the updated element
373 /// pointer in case it was a reference to the storage.
374 T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) {
375 return const_cast<T *>(
376 this->reserveForParamAndGetAddressImpl(this, Elt, N));
377 }
378
379 static T &&forward_value_param(T &&V) { return std::move(V); }
380 static const T &forward_value_param(const T &V) { return V; }
381
382 void growAndAssign(size_t NumElts, const T &Elt) {
383 // Grow manually in case Elt is an internal reference.
384 size_t NewCapacity;
385 T *NewElts = mallocForGrow(NumElts, NewCapacity);
386 std::uninitialized_fill_n(NewElts, NumElts, Elt);
387 this->destroy_range(this->begin(), this->end());
388 takeAllocationForGrow(NewElts, NewCapacity);
389 this->set_size(NumElts);
390 }
391
392 template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) {
393 // Grow manually in case one of Args is an internal reference.
394 size_t NewCapacity;
395 T *NewElts = mallocForGrow(0, NewCapacity);
396 ::new ((void *)(NewElts + this->size())) T(std::forward<ArgTypes>(Args)...);
397 moveElementsForGrow(NewElts);
398 takeAllocationForGrow(NewElts, NewCapacity);
399 this->set_size(this->size() + 1);
400 return this->back();
401 }
402
403public:
404 void push_back(const T &Elt) {
405 const T *EltPtr = reserveForParamAndGetAddress(Elt);
406 ::new ((void *)this->end()) T(*EltPtr);
407 this->set_size(this->size() + 1);
408 }
409
410 void push_back(T &&Elt) {
411 T *EltPtr = reserveForParamAndGetAddress(Elt);
412 ::new ((void *)this->end()) T(::std::move(*EltPtr));
413 this->set_size(this->size() + 1);
414 }
415
416 void pop_back() {
417 this->set_size(this->size() - 1);
418 this->end()->~T();
419 }
420};
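
To show what the reserveForParamAndGetAddress() indirection buys, a small sketch (not part of the header); std::string is used only because it selects this non-trivially-copyable specialization, and pushBackAlias is a made-up helper:

#include "llvm/ADT/SmallVector.h"
#include <string>

void pushBackAlias() {
  llvm::SmallVector<std::string, 2> Names = {"alpha", "beta"};
  // The vector is full, so this push_back must grow; the argument aliases an
  // element of the vector itself, and its address is re-derived after grow().
  Names.push_back(Names[0]);  // Names == {"alpha", "beta", "alpha"}
}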
421
422// Define this out-of-line to dissuade the C++ compiler from inlining it.
423template <typename T, bool TriviallyCopyable>
424void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) {
425 size_t NewCapacity;
426 T *NewElts = mallocForGrow(MinSize, NewCapacity);
427 moveElementsForGrow(NewElts);
428 takeAllocationForGrow(NewElts, NewCapacity);
429}
430
431// Define this out-of-line to dissuade the C++ compiler from inlining it.
432template <typename T, bool TriviallyCopyable>
433void SmallVectorTemplateBase<T, TriviallyCopyable>::moveElementsForGrow(
434 T *NewElts) {
435 // Move the elements over.
436 this->uninitialized_move(this->begin(), this->end(), NewElts);
437
438 // Destroy the original elements.
439 destroy_range(this->begin(), this->end());
440}
441
442// Define this out-of-line to dissuade the C++ compiler from inlining it.
443template <typename T, bool TriviallyCopyable>
444void SmallVectorTemplateBase<T, TriviallyCopyable>::takeAllocationForGrow(
445 T *NewElts, size_t NewCapacity) {
446 // If this wasn't grown from the inline copy, deallocate the old space.
447 if (!this->isSmall())
448 free(this->begin());
449
450 this->BeginX = NewElts;
451 this->Capacity = NewCapacity;
452}
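
Since every grow() moves (and, for non-trivial types, re-constructs) all existing elements, reserving up front when the final size is known avoids repeated reallocations. A minimal sketch, not part of the header; buildLines is a made-up helper:

#include "llvm/ADT/SmallVector.h"
#include <string>

void buildLines(llvm::SmallVector<std::string, 4> &Lines, unsigned Count) {
  Lines.reserve(Count);                  // one grow() instead of ~log2(Count)
  for (unsigned I = 0; I != Count; ++I)
    Lines.push_back(std::to_string(I));  // no reallocation inside the loop
}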
453
454/// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put
455/// method implementations that are designed to work with trivially copyable
456/// T's. This allows using memcpy in place of copy/move construction and
457/// skipping destruction.
458template <typename T>
459class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
460 friend class SmallVectorTemplateCommon<T>;
461
462protected:
463 /// True if it's cheap enough to take parameters by value. Doing so avoids
464 /// overhead related to mitigations for reference invalidation.
465 static constexpr bool TakesParamByValue = sizeof(T) <= 2 * sizeof(void *);
466
467 /// Either const T& or T, depending on whether it's cheap enough to take
468 /// parameters by value.
469 using ValueParamT =
470 typename std::conditional<TakesParamByValue, T, const T &>::type;
471
472 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
473
474 // No need to do a destroy loop for POD's.
475 static void destroy_range(T *, T *) {}
476
477 /// Move the range [I, E) onto the uninitialized memory
478 /// starting with "Dest", constructing elements into it as needed.
479 template<typename It1, typename It2>
480 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
481 // Just do a copy.
482 uninitialized_copy(I, E, Dest);
483 }
484
485 /// Copy the range [I, E) onto the uninitialized memory
486 /// starting with "Dest", constructing elements into it as needed.
487 template<typename It1, typename It2>
488 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
489 // Arbitrary iterator types; just use the basic implementation.
490 std::uninitialized_copy(I, E, Dest);
491 }
492
493 /// Copy the range [I, E) onto the uninitialized memory
494 /// starting with "Dest", constructing elements into it as needed.
495 template <typename T1, typename T2>
496 static void uninitialized_copy(
497 T1 *I, T1 *E, T2 *Dest,
498 std::enable_if_t<std::is_same<typename std::remove_const<T1>::type,
499 T2>::value> * = nullptr) {
500 // Use memcpy for PODs iterated by pointers (which includes SmallVector
501 // iterators): std::uninitialized_copy optimizes to memmove, but we can
502 // use memcpy here. Note that I and E are iterators and thus might be
503 // invalid for memcpy if they are equal.
504 if (I != E)
505 memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T));
506 }
507
508 /// Double the size of the allocated memory, guaranteeing space for at
509 /// least one more element or MinSize if specified.
510 void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); }
511
512 /// Reserve enough space to add one element, and return the updated element
513 /// pointer in case it was a reference to the storage.
514 const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) {
515 return this->reserveForParamAndGetAddressImpl(this, Elt, N);
516 }
517
518 /// Reserve enough space to add one element, and return the updated element
519 /// pointer in case it was a reference to the storage.
520 T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) {
521 return const_cast<T *>(
522 this->reserveForParamAndGetAddressImpl(this, Elt, N));
523 }
524
525 /// Copy \p V or return a reference, depending on \a ValueParamT.
526 static ValueParamT forward_value_param(ValueParamT V) { return V; }
527
528 void growAndAssign(size_t NumElts, T Elt) {
529 // Elt has been copied in case it's an internal reference, side-stepping
530 // reference invalidation problems without losing the realloc optimization.
531 this->set_size(0);
532 this->grow(NumElts);
533 std::uninitialized_fill_n(this->begin(), NumElts, Elt);
534 this->set_size(NumElts);
535 }
536
537 template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) {
538 // Use push_back with a copy in case Args has an internal reference,
539 // side-stepping reference invalidation problems without losing the realloc
540 // optimization.
541 push_back(T(std::forward<ArgTypes>(Args)...));
542 return this->back();
543 }
544
545public:
546 void push_back(ValueParamT Elt) {
547 const T *EltPtr = reserveForParamAndGetAddress(Elt);
548 memcpy(reinterpret_cast<void *>(this->end()), EltPtr, sizeof(T));
549 this->set_size(this->size() + 1);
550 }
551
552 void pop_back() { this->set_size(this->size() - 1); }
553};
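
A small compile-time check (not part of the header) of the dispatch rule described above: std::pair<int, int> is not trivially assignable, yet it still selects this memcpy-based specialization because it is trivially copy/move constructible and trivially destructible:

#include "llvm/ADT/SmallVector.h"
#include <type_traits>
#include <utility>

static_assert(llvm::is_trivially_copy_constructible<std::pair<int, int>>::value &&
                  llvm::is_trivially_move_constructible<std::pair<int, int>>::value &&
                  std::is_trivially_destructible<std::pair<int, int>>::value,
              "std::pair<int, int> dispatches to SmallVectorTemplateBase<T, true>");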
554
555/// This class consists of common code factored out of the SmallVector class to
556/// reduce code duplication based on the SmallVector 'N' template parameter.
557template <typename T>
558class SmallVectorImpl : public SmallVectorTemplateBase<T> {
559 using SuperClass = SmallVectorTemplateBase<T>;
560
561public:
562 using iterator = typename SuperClass::iterator;
563 using const_iterator = typename SuperClass::const_iterator;
564 using reference = typename SuperClass::reference;
565 using size_type = typename SuperClass::size_type;
566
567protected:
568 using SmallVectorTemplateBase<T>::TakesParamByValue;
569 using ValueParamT = typename SuperClass::ValueParamT;
570
571 // Default ctor - Initialize to empty.
572 explicit SmallVectorImpl(unsigned N)
573 : SmallVectorTemplateBase<T>(N) {}
574
575public:
576 SmallVectorImpl(const SmallVectorImpl &) = delete;
577
578 ~SmallVectorImpl() {
579 // Subclass has already destructed this vector's elements.
580 // If this wasn't grown from the inline copy, deallocate the old space.
581 if (!this->isSmall())
582 free(this->begin());
583 }
584
585 void clear() {
586 this->destroy_range(this->begin(), this->end());
587 this->Size = 0;
588 }
589
590private:
591 template <bool ForOverwrite> void resizeImpl(size_type N) {
592 if (N < this->size()) {
593 this->pop_back_n(this->size() - N);
594 } else if (N > this->size()) {
595 this->reserve(N);
596 for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
597 if (ForOverwrite)
598 new (&*I) T;
599 else
600 new (&*I) T();
601 this->set_size(N);
602 }
603 }
604
605public:
606 void resize(size_type N) { resizeImpl<false>(N); }
607
608 /// Like resize, but \ref T is POD, the new values won't be initialized.
609 void resize_for_overwrite(size_type N) { resizeImpl<true>(N); }
610
611 void resize(size_type N, ValueParamT NV) {
612 if (N == this->size())
613 return;
614
615 if (N < this->size()) {
616 this->pop_back_n(this->size() - N);
617 return;
618 }
619
620 // N > this->size(). Defer to append.
621 this->append(N - this->size(), NV);
622 }
623
624 void reserve(size_type N) {
625 if (this->capacity() < N)
626 this->grow(N);
627 }
628
629 void pop_back_n(size_type NumItems) {
630 assert(this->size() >= NumItems);
631 this->destroy_range(this->end() - NumItems, this->end());
632 this->set_size(this->size() - NumItems);
633 }
634
635 LLVM_NODISCARD T pop_back_val() {
636 T Result = ::std::move(this->back());
637 this->pop_back();
638 return Result;
639 }
640
641 void swap(SmallVectorImpl &RHS);
642
643 /// Add the specified range to the end of the SmallVector.
644 template <typename in_iter,
645 typename = std::enable_if_t<std::is_convertible<
646 typename std::iterator_traits<in_iter>::iterator_category,
647 std::input_iterator_tag>::value>>
648 void append(in_iter in_start, in_iter in_end) {
649 this->assertSafeToAddRange(in_start, in_end);
650 size_type NumInputs = std::distance(in_start, in_end);
651 this->reserve(this->size() + NumInputs);
652 this->uninitialized_copy(in_start, in_end, this->end());
653 this->set_size(this->size() + NumInputs);
654 }
655
656 /// Append \p NumInputs copies of \p Elt to the end.
657 void append(size_type NumInputs, ValueParamT Elt) {
658 const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumInputs);
659 std::uninitialized_fill_n(this->end(), NumInputs, *EltPtr);
660 this->set_size(this->size() + NumInputs);
661 }
662
663 void append(std::initializer_list<T> IL) {
664 append(IL.begin(), IL.end());
665 }
666
667 void append(const SmallVectorImpl &RHS) { append(RHS.begin(), RHS.end()); }
668
669 void assign(size_type NumElts, ValueParamT Elt) {
670 // Note that Elt could be an internal reference.
671 if (NumElts > this->capacity()) {
672 this->growAndAssign(NumElts, Elt);
673 return;
674 }
675
676 // Assign over existing elements.
677 std::fill_n(this->begin(), std::min(NumElts, this->size()), Elt);
678 if (NumElts > this->size())
679 std::uninitialized_fill_n(this->end(), NumElts - this->size(), Elt);
680 else if (NumElts < this->size())
681 this->destroy_range(this->begin() + NumElts, this->end());
682 this->set_size(NumElts);
683 }
684
685 // FIXME: Consider assigning over existing elements, rather than clearing &
686 // re-initializing them - for all assign(...) variants.
687
688 template <typename in_iter,
689 typename = std::enable_if_t<std::is_convertible<
690 typename std::iterator_traits<in_iter>::iterator_category,
691 std::input_iterator_tag>::value>>
692 void assign(in_iter in_start, in_iter in_end) {
693 this->assertSafeToReferenceAfterClear(in_start, in_end);
694 clear();
695 append(in_start, in_end);
696 }
697
698 void assign(std::initializer_list<T> IL) {
699 clear();
700 append(IL);
701 }
702
703 void assign(const SmallVectorImpl &RHS) { assign(RHS.begin(), RHS.end()); }
704
705 iterator erase(const_iterator CI) {
706 // Just cast away constness because this is a non-const member function.
707 iterator I = const_cast<iterator>(CI);
708
709 assert(this->isReferenceToStorage(CI) && "Iterator to erase is out of bounds.");
710
711 iterator N = I;
712 // Shift all elts down one.
713 std::move(I+1, this->end(), I);
714 // Drop the last elt.
715 this->pop_back();
716 return(N);
717 }
718
719 iterator erase(const_iterator CS, const_iterator CE) {
720 // Just cast away constness because this is a non-const member function.
721 iterator S = const_cast<iterator>(CS);
722 iterator E = const_cast<iterator>(CE);
723
724 assert(this->isRangeInStorage(S, E) && "Range to erase is out of bounds.");
725
726 iterator N = S;
727 // Shift all elts down.
728 iterator I = std::move(E, this->end(), S);
729 // Drop the last elts.
730 this->destroy_range(I, this->end());
731 this->set_size(I - this->begin());
732 return(N);
733 }
734
735private:
736 template <class ArgType> iterator insert_one_impl(iterator I, ArgType &&Elt) {
737 // Callers ensure that ArgType is derived from T.
738 static_assert(
739 std::is_same<std::remove_const_t<std::remove_reference_t<ArgType>>,
740 T>::value,
741 "ArgType must be derived from T!");
742
743 if (I == this->end()) { // Important special case for empty vector.
744 this->push_back(::std::forward<ArgType>(Elt));
745 return this->end()-1;
746 }
747
748 assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
749
750 // Grow if necessary.
751 size_t Index = I - this->begin();
752 std::remove_reference_t<ArgType> *EltPtr =
753 this->reserveForParamAndGetAddress(Elt);
754 I = this->begin() + Index;
755
756 ::new ((void*) this->end()) T(::std::move(this->back()));
757 // Push everything else over.
758 std::move_backward(I, this->end()-1, this->end());
759 this->set_size(this->size() + 1);
760
761 // If we just moved the element we're inserting, be sure to update
762 // the reference (never happens if TakesParamByValue).
763 static_assert(!TakesParamByValue || std::is_same<ArgType, T>::value,
764 "ArgType must be 'T' when taking by value!");
765 if (!TakesParamByValue && this->isReferenceToRange(EltPtr, I, this->end()))
766 ++EltPtr;
767
768 *I = ::std::forward<ArgType>(*EltPtr);
769 return I;
770 }
771
772public:
773 iterator insert(iterator I, T &&Elt) {
774 return insert_one_impl(I, this->forward_value_param(std::move(Elt)));
775 }
776
777 iterator insert(iterator I, const T &Elt) {
778 return insert_one_impl(I, this->forward_value_param(Elt));
779 }
780
781 iterator insert(iterator I, size_type NumToInsert, ValueParamT Elt) {
782 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
783 size_t InsertElt = I - this->begin();
784
785 if (I == this->end()) { // Important special case for empty vector.
786 append(NumToInsert, Elt);
787 return this->begin()+InsertElt;
788 }
789
790 assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
791
792 // Ensure there is enough space, and get the (maybe updated) address of
793 // Elt.
794 const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumToInsert);
795
796 // Uninvalidate the iterator.
797 I = this->begin()+InsertElt;
798
799 // If there are more elements between the insertion point and the end of the
800 // range than there are being inserted, we can use a simple approach to
801 // insertion. Since we already reserved space, we know that this won't
802 // reallocate the vector.
803 if (size_t(this->end()-I) >= NumToInsert) {
804 T *OldEnd = this->end();
805 append(std::move_iterator<iterator>(this->end() - NumToInsert),
806 std::move_iterator<iterator>(this->end()));
807
808 // Copy the existing elements that get replaced.
809 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
810
811 // If we just moved the element we're inserting, be sure to update
812 // the reference (never happens if TakesParamByValue).
813 if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end())
814 EltPtr += NumToInsert;
815
816 std::fill_n(I, NumToInsert, *EltPtr);
817 return I;
818 }
819
820 // Otherwise, we're inserting more elements than exist already, and we're
821 // not inserting at the end.
822
823 // Move over the elements that we're about to overwrite.
824 T *OldEnd = this->end();
825 this->set_size(this->size() + NumToInsert);
826 size_t NumOverwritten = OldEnd-I;
827 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
828
829 // If we just moved the element we're inserting, be sure to update
830 // the reference (never happens if TakesParamByValue).
831 if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end())
832 EltPtr += NumToInsert;
833
834 // Replace the overwritten part.
835 std::fill_n(I, NumOverwritten, *EltPtr);
836
837 // Insert the non-overwritten middle part.
838 std::uninitialized_fill_n(OldEnd, NumToInsert - NumOverwritten, *EltPtr);
839 return I;
840 }
841
842 template <typename ItTy,
843 typename = std::enable_if_t<std::is_convertible<
844 typename std::iterator_traits<ItTy>::iterator_category,
845 std::input_iterator_tag>::value>>
846 iterator insert(iterator I, ItTy From, ItTy To) {
847 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
848 size_t InsertElt = I - this->begin();
849
850 if (I == this->end()) { // Important special case for empty vector.
851 append(From, To);
852 return this->begin()+InsertElt;
853 }
854
855 assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
856
857 // Check that the reserve that follows doesn't invalidate the iterators.
858 this->assertSafeToAddRange(From, To);
859
860 size_t NumToInsert = std::distance(From, To);
861
862 // Ensure there is enough space.
863 reserve(this->size() + NumToInsert);
864
865 // Uninvalidate the iterator.
866 I = this->begin()+InsertElt;
867
868 // If there are more elements between the insertion point and the end of the
869 // range than there are being inserted, we can use a simple approach to
870 // insertion. Since we already reserved space, we know that this won't
871 // reallocate the vector.
872 if (size_t(this->end()-I) >= NumToInsert) {
873 T *OldEnd = this->end();
874 append(std::move_iterator<iterator>(this->end() - NumToInsert),
875 std::move_iterator<iterator>(this->end()));
876
877 // Copy the existing elements that get replaced.
878 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
879
880 std::copy(From, To, I);
881 return I;
882 }
883
884 // Otherwise, we're inserting more elements than exist already, and we're
885 // not inserting at the end.
886
887 // Move over the elements that we're about to overwrite.
888 T *OldEnd = this->end();
889 this->set_size(this->size() + NumToInsert);
890 size_t NumOverwritten = OldEnd-I;
891 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
892
893 // Replace the overwritten part.
894 for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
895 *J = *From;
896 ++J; ++From;
897 }
898
899 // Insert the non-overwritten middle part.
900 this->uninitialized_copy(From, To, OldEnd);
901 return I;
902 }
903
904 void insert(iterator I, std::initializer_list<T> IL) {
905 insert(I, IL.begin(), IL.end());
906 }
907
908 template <typename... ArgTypes> reference emplace_back(ArgTypes &&... Args) {
909 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
910 return this->growAndEmplaceBack(std::forward<ArgTypes>(Args)...);
911
912 ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
913 this->set_size(this->size() + 1);
914 return this->back();
915 }
916
917 SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
918
919 SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
920
921 bool operator==(const SmallVectorImpl &RHS) const {
922 if (this->size() != RHS.size()) return false;
923 return std::equal(this->begin(), this->end(), RHS.begin());
924 }
925 bool operator!=(const SmallVectorImpl &RHS) const {
926 return !(*this == RHS);
927 }
928
929 bool operator<(const SmallVectorImpl &RHS) const {
930 return std::lexicographical_compare(this->begin(), this->end(),
931 RHS.begin(), RHS.end());
932 }
933};
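
A common usage pattern worth noting here (sketch only, not part of the header): APIs are usually written against SmallVectorImpl<T>& so that any SmallVector<T, N> can be passed regardless of its inline element count; collectSquares is a made-up helper:

#include "llvm/ADT/SmallVector.h"

// The SmallVectorImpl<unsigned>& parameter accepts any SmallVector<unsigned, N>.
void collectSquares(unsigned Count, llvm::SmallVectorImpl<unsigned> &Out) {
  Out.clear();
  for (unsigned I = 0; I != Count; ++I)
    Out.push_back(I * I);
}

void collectSquaresCaller() {
  llvm::SmallVector<unsigned, 8> Squares;
  collectSquares(5, Squares);  // Squares == {0, 1, 4, 9, 16}
}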
934
935template <typename T>
936void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
937 if (this == &RHS) return;
938
939 // We can only avoid copying elements if neither vector is small.
940 if (!this->isSmall() && !RHS.isSmall()) {
941 std::swap(this->BeginX, RHS.BeginX);
942 std::swap(this->Size, RHS.Size);
943 std::swap(this->Capacity, RHS.Capacity);
944 return;
945 }
946 this->reserve(RHS.size());
947 RHS.reserve(this->size());
948
949 // Swap the shared elements.
950 size_t NumShared = this->size();
951 if (NumShared > RHS.size()) NumShared = RHS.size();
952 for (size_type i = 0; i != NumShared; ++i)
953 std::swap((*this)[i], RHS[i]);
954
955 // Copy over the extra elts.
956 if (this->size() > RHS.size()) {
957 size_t EltDiff = this->size() - RHS.size();
958 this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
959 RHS.set_size(RHS.size() + EltDiff);
960 this->destroy_range(this->begin()+NumShared, this->end());
961 this->set_size(NumShared);
962 } else if (RHS.size() > this->size()) {
963 size_t EltDiff = RHS.size() - this->size();
964 this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
965 this->set_size(this->size() + EltDiff);
966 this->destroy_range(RHS.begin()+NumShared, RHS.end());
967 RHS.set_size(NumShared);
968 }
969}
970
971template <typename T>
972SmallVectorImpl<T> &SmallVectorImpl<T>::
973 operator=(const SmallVectorImpl<T> &RHS) {
974 // Avoid self-assignment.
975 if (this == &RHS) return *this;
976
977 // If we already have sufficient space, assign the common elements, then
978 // destroy any excess.
979 size_t RHSSize = RHS.size();
980 size_t CurSize = this->size();
981 if (CurSize >= RHSSize) {
982 // Assign common elements.
983 iterator NewEnd;
984 if (RHSSize)
985 NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
986 else
987 NewEnd = this->begin();
988
989 // Destroy excess elements.
990 this->destroy_range(NewEnd, this->end());
991
992 // Trim.
993 this->set_size(RHSSize);
994 return *this;
995 }
996
997 // If we have to grow to have enough elements, destroy the current elements.
998 // This allows us to avoid copying them during the grow.
999 // FIXME: don't do this if they're efficiently moveable.
1000 if (this->capacity() < RHSSize) {
1001 // Destroy current elements.
1002 this->clear();
1003 CurSize = 0;
1004 this->grow(RHSSize);
1005 } else if (CurSize) {
1006 // Otherwise, use assignment for the already-constructed elements.
1007 std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
1008 }
1009
1010 // Copy construct the new elements in place.
1011 this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
1012 this->begin()+CurSize);
1013
1014 // Set end.
1015 this->set_size(RHSSize);
1016 return *this;
1017}
1018
1019template <typename T>
1020SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
1021 // Avoid self-assignment.
1022 if (this == &RHS) return *this;
1023
1024 // If the RHS isn't small, clear this vector and then steal its buffer.
1025 if (!RHS.isSmall()) {
1026 this->destroy_range(this->begin(), this->end());
1027 if (!this->isSmall()) free(this->begin());
1028 this->BeginX = RHS.BeginX;
1029 this->Size = RHS.Size;
1030 this->Capacity = RHS.Capacity;
1031 RHS.resetToSmall();
1032 return *this;
1033 }
1034
1035 // If we already have sufficient space, assign the common elements, then
1036 // destroy any excess.
1037 size_t RHSSize = RHS.size();
1038 size_t CurSize = this->size();
1039 if (CurSize >= RHSSize) {
1040 // Assign common elements.
1041 iterator NewEnd = this->begin();
1042 if (RHSSize)
1043 NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd);
1044
1045 // Destroy excess elements and trim the bounds.
1046 this->destroy_range(NewEnd, this->end());
1047 this->set_size(RHSSize);
1048
1049 // Clear the RHS.
1050 RHS.clear();
1051
1052 return *this;
1053 }
1054
1055 // If we have to grow to have enough elements, destroy the current elements.
1056 // This allows us to avoid copying them during the grow.
1057 // FIXME: this may not actually make any sense if we can efficiently move
1058 // elements.
1059 if (this->capacity() < RHSSize) {
1060 // Destroy current elements.
1061 this->clear();
1062 CurSize = 0;
1063 this->grow(RHSSize);
1064 } else if (CurSize) {
1065 // Otherwise, use assignment for the already-constructed elements.
1066 std::move(RHS.begin(), RHS.begin()+CurSize, this->begin());
1067 }
1068
1069 // Move-construct the new elements in place.
1070 this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
1071 this->begin()+CurSize);
1072
1073 // Set end.
1074 this->set_size(RHSSize);
1075
1076 RHS.clear();
1077 return *this;
1078}
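
A usage sketch of the move-assignment fast path above (not part of the header): when the source has outgrown its inline storage, assignment steals the heap buffer in O(1) instead of moving elements; moveAssignExample is a made-up helper:

#include "llvm/ADT/SmallVector.h"
#include <string>
#include <utility>

void moveAssignExample() {
  llvm::SmallVector<std::string, 2> Src = {"a", "b", "c"};  // already on the heap
  llvm::SmallVector<std::string, 2> Dst;
  Dst = std::move(Src);  // RHS is not small: steal its buffer, reset Src to empty
}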
1079
1080/// Storage for the SmallVector elements. This is specialized for the N=0 case
1081/// to avoid allocating unnecessary storage.
1082template <typename T, unsigned N>
1083struct SmallVectorStorage {
1084 alignas(T) char InlineElts[N * sizeof(T)];
1085};
1086
1087/// We need the storage to be properly aligned even for small-size of 0 so that
1088/// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is
1089/// well-defined.
1090template <typename T> struct alignas(T) SmallVectorStorage<T, 0> {};
1091
1092/// Forward declaration of SmallVector so that
1093/// calculateSmallVectorDefaultInlinedElements can reference
1094/// `sizeof(SmallVector<T, 0>)`.
1095template <typename T, unsigned N> class LLVM_GSL_OWNER SmallVector;
1096
1097/// Helper class for calculating the default number of inline elements for
1098/// `SmallVector<T>`.
1099///
1100/// This should be migrated to a constexpr function when our minimum
1101/// compiler support is enough for multi-statement constexpr functions.
1102template <typename T> struct CalculateSmallVectorDefaultInlinedElements {
1103 // Parameter controlling the default number of inlined elements
1104 // for `SmallVector<T>`.
1105 //
1106 // The default number of inlined elements ensures that
1107 // 1. There is at least one inlined element.
1108 // 2. `sizeof(SmallVector<T>) <= kPreferredSmallVectorSizeof` unless
1109 // it contradicts 1.
1110 static constexpr size_t kPreferredSmallVectorSizeof = 64;
1111
1112 // static_assert that sizeof(T) is not "too big".
1113 //
1114 // Because our policy guarantees at least one inlined element, it is possible
1115 // for an arbitrarily large inlined element to allocate an arbitrarily large
1116 // amount of inline storage. We generally consider it an antipattern for a
1117 // SmallVector to allocate an excessive amount of inline storage, so we want
1118 // to call attention to these cases and make sure that users are making an
1119 // intentional decision if they request a lot of inline storage.
1120 //
1121 // We want this assertion to trigger in pathological cases, but otherwise
1122 // not be too easy to hit. To accomplish that, the cutoff is actually somewhat
1123 // larger than kPreferredSmallVectorSizeof (otherwise,
1124 // `SmallVector<SmallVector<T>>` would be one easy way to trip it, and that
1125 // pattern seems useful in practice).
1126 //
1127 // One wrinkle is that this assertion is in theory non-portable, since
1128 // sizeof(T) is in general platform-dependent. However, we don't expect this
1129 // to be much of an issue, because most LLVM development happens on 64-bit
1130 // hosts, and therefore sizeof(T) is expected to *decrease* when compiled for
1131 // 32-bit hosts, dodging the issue. The reverse situation, where development
1132 // happens on a 32-bit host and then fails due to sizeof(T) *increasing* on a
1133 // 64-bit host, is expected to be very rare.
1134 static_assert(
1135 sizeof(T) <= 256,
1136 "You are trying to use a default number of inlined elements for "
1137 "`SmallVector<T>` but `sizeof(T)` is really big! Please use an "
1138 "explicit number of inlined elements with `SmallVector<T, N>` to make "
1139 "sure you really want that much inline storage.");
1140
1141 // Discount the size of the header itself when calculating the maximum inline
1142 // bytes.
1143 static constexpr size_t PreferredInlineBytes =
1144 kPreferredSmallVectorSizeof - sizeof(SmallVector<T, 0>);
1145 static constexpr size_t NumElementsThatFit = PreferredInlineBytes / sizeof(T);
1146 static constexpr size_t value =
1147 NumElementsThatFit == 0 ? 1 : NumElementsThatFit;
1148};
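
A worked instance of this calculation (not part of the header), assuming a common 64-bit configuration where sizeof(SmallVector<int, 0>) is 16 bytes (one pointer plus 32-bit Size and Capacity fields):

#include "llvm/ADT/SmallVector.h"

// PreferredInlineBytes = 64 - 16 = 48
// NumElementsThatFit   = 48 / sizeof(int) = 12
// => SmallVector<int> defaults to 12 inline elements and stays 64 bytes total.
static_assert(llvm::CalculateSmallVectorDefaultInlinedElements<int>::value == 12,
              "expected on common 64-bit hosts; the exact value is platform-dependent");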
1149
1150/// This is a 'vector' (really, a variable-sized array), optimized
1151/// for the case when the array is small. It contains some number of elements
1152/// in-place, which allows it to avoid heap allocation when the actual number of
1153/// elements is below that threshold. This allows normal "small" cases to be
1154/// fast without losing generality for large inputs.
1155///
1156/// \note
1157/// In the absence of a well-motivated choice for the number of inlined
1158/// elements \p N, it is recommended to use \c SmallVector<T> (that is,
1159/// omitting the \p N). This will choose a default number of inlined elements
1160/// reasonable for allocation on the stack (for example, trying to keep \c
1161/// sizeof(SmallVector<T>) around 64 bytes).
1162///
1163/// \warning This does not attempt to be exception safe.
1164///
1165/// \see https://llvm.org/docs/ProgrammersManual.html#llvm-adt-smallvector-h
1166template <typename T,
1167 unsigned N = CalculateSmallVectorDefaultInlinedElements<T>::value>
1168class LLVM_GSL_OWNER SmallVector : public SmallVectorImpl<T>,
1169 SmallVectorStorage<T, N> {
1170public:
1171 SmallVector() : SmallVectorImpl<T>(N) {}
1172
1173 ~SmallVector() {
1174 // Destroy the constructed elements in the vector.
1175 this->destroy_range(this->begin(), this->end());
1176 }
1177
1178 explicit SmallVector(size_t Size, const T &Value = T())
1179 : SmallVectorImpl<T>(N) {
1180 this->assign(Size, Value);
1181 }
1182
1183 template <typename ItTy,
1184 typename = std::enable_if_t<std::is_convertible<
1185 typename std::iterator_traits<ItTy>::iterator_category,
1186 std::input_iterator_tag>::value>>
1187 SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
1188 this->append(S, E);
1189 }
1190
1191 template <typename RangeTy>
1192 explicit SmallVector(const iterator_range<RangeTy> &R)
1193 : SmallVectorImpl<T>(N) {
1194 this->append(R.begin(), R.end());
1195 }
1196
1197 SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
1198 this->assign(IL);
1199 }
1200
1201 SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) {
1202 if (!RHS.empty())
1203 SmallVectorImpl<T>::operator=(RHS);
1204 }
1205
1206 SmallVector &operator=(const SmallVector &RHS) {
1207 SmallVectorImpl<T>::operator=(RHS);
1208 return *this;
1209 }
1210
1211 SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) {
1212 if (!RHS.empty())
1213 SmallVectorImpl<T>::operator=(::std::move(RHS));
1214 }
1215
1216 SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) {
1217 if (!RHS.empty())
1218 SmallVectorImpl<T>::operator=(::std::move(RHS));
1219 }
1220
1221 SmallVector &operator=(SmallVector &&RHS) {
1222 SmallVectorImpl<T>::operator=(::std::move(RHS));
1223 return *this;
1224 }
1225
1226 SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
1227 SmallVectorImpl<T>::operator=(::std::move(RHS));
1228 return *this;
1229 }
1230
1231 SmallVector &operator=(std::initializer_list<T> IL) {
1232 this->assign(IL);
1233 return *this;
1234 }
1235};
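
A minimal end-to-end usage sketch (not part of the header); smallVectorBasics is a made-up helper:

#include "llvm/ADT/SmallVector.h"

void smallVectorBasics() {
  llvm::SmallVector<int, 4> V = {1, 2, 3};
  V.push_back(4);               // still within the inline storage (capacity 4)
  V.push_back(5);               // first push_back that spills to the heap
  V.insert(V.begin() + 1, 42);  // V == {1, 42, 2, 3, 4, 5}
  int Last = V.pop_back_val();  // Last == 5; V shrinks by one
  (void)Last;
}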
1236
1237template <typename T, unsigned N>
1238inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
1239 return X.capacity_in_bytes();
1240}
1241
1242/// Given a range of type R, iterate the entire range and return a
1243/// SmallVector with elements of the vector. This is useful, for example,
1244/// when you want to iterate a range and then sort the results.
1245template <unsigned Size, typename R>
1246SmallVector<typename std::remove_const<typename std::remove_reference<
1247 decltype(*std::begin(std::declval<R &>()))>::type>::type,
1248 Size>
1249to_vector(R &&Range) {
1250 return {std::begin(Range), std::end(Range)};
1251}
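
A usage sketch for to_vector (not part of the header): materializing a range whose elements cannot be mutated in place, so the copy can then be sorted; sortACopy is a made-up helper:

#include "llvm/ADT/SmallVector.h"
#include <algorithm>
#include <functional>
#include <set>

void sortACopy() {
  std::set<int> Unique = {3, 1, 2};
  // Materialize the set's (const) elements into a mutable SmallVector<int, 8>.
  auto Sorted = llvm::to_vector<8>(Unique);
  std::sort(Sorted.begin(), Sorted.end(), std::greater<int>());  // {3, 2, 1}
}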
1252
1253} // end namespace llvm
1254
1255namespace std {
1256
1257 /// Implement std::swap in terms of SmallVector swap.
1258 template<typename T>
1259 inline void
1260 swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) {
1261 LHS.swap(RHS);
1262 }
1263
1264 /// Implement std::swap in terms of SmallVector swap.
1265 template<typename T, unsigned N>
1266 inline void
1267 swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) {
1268 LHS.swap(RHS);
1269 }
1270
1271} // end namespace std
1272
1273#endif // LLVM_ADT_SMALLVECTOR_H