Bug Summary

File: llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
Warning: line 374, column 11
Called C++ object pointer is null
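
The path below shows how the null pointer reaches line 374: in tryPromoteAllocaToVector, a user popped from the worklist is cast with dyn_cast<Instruction>, which returns null when the user is not an Instruction (a constant expression, for example); that null Inst is then passed to canVectorizeInst, which immediately evaluates Inst->getOpcode(). The standalone C++ sketch below only mirrors the shape of that pattern with hypothetical stand-in types (User, Instruction, ConstantExpr, canVectorize); the added null check illustrates the kind of guard that is missing here, it is not the upstream fix.

    #include <iostream>
    #include <memory>
    #include <vector>

    // Stand-ins for the LLVM classes involved in the report; hypothetical,
    // only the cast/dereference relationship matters.
    struct User { virtual ~User() = default; };
    struct Instruction : User { int opcode() const { return 0; } };
    struct ConstantExpr : User {};   // a user that is not an Instruction

    // Mirrors canVectorizeInst: dereferences Inst without checking for null.
    bool canVectorize(const Instruction *Inst) {
      switch (Inst->opcode()) {      // crashes if Inst is null
      default:
        return false;
      }
    }

    int main() {
      std::vector<std::unique_ptr<User>> Users;
      Users.push_back(std::make_unique<ConstantExpr>());

      while (!Users.empty()) {
        std::unique_ptr<User> U = std::move(Users.back());
        Users.pop_back();
        // dynamic_cast plays the role of llvm::dyn_cast: null on mismatch.
        auto *Inst = dynamic_cast<Instruction *>(U.get());
        if (!Inst) {                 // guard absent on the flagged code path
          std::cout << "user is not an Instruction; skipping\n";
          continue;
        }
        canVectorize(Inst);
      }
    }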

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name AMDGPUPromoteAlloca.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/Target/AMDGPU -I include -I /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 
-o /tmp/scan-build-2022-01-16-232930-107970-1 -x c++ /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp

/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp

1//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass eliminates allocas by either converting them into vectors or
10// by migrating them to local address space.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AMDGPU.h"
15#include "GCNSubtarget.h"
16#include "llvm/Analysis/CaptureTracking.h"
17#include "llvm/Analysis/ValueTracking.h"
18#include "llvm/CodeGen/TargetPassConfig.h"
19#include "llvm/IR/IRBuilder.h"
20#include "llvm/IR/IntrinsicsAMDGPU.h"
21#include "llvm/IR/IntrinsicsR600.h"
22#include "llvm/Pass.h"
23#include "llvm/Target/TargetMachine.h"
24#include "Utils/AMDGPUBaseInfo.h"
25
26#define DEBUG_TYPE "amdgpu-promote-alloca"
27
28using namespace llvm;
29
30namespace {
31
32static cl::opt<bool> DisablePromoteAllocaToVector(
33 "disable-promote-alloca-to-vector",
34 cl::desc("Disable promote alloca to vector"),
35 cl::init(false));
36
37static cl::opt<bool> DisablePromoteAllocaToLDS(
38 "disable-promote-alloca-to-lds",
39 cl::desc("Disable promote alloca to LDS"),
40 cl::init(false));
41
42static cl::opt<unsigned> PromoteAllocaToVectorLimit(
43 "amdgpu-promote-alloca-to-vector-limit",
44 cl::desc("Maximum byte size to consider promote alloca to vector"),
45 cl::init(0));
46
47// FIXME: This can create globals so should be a module pass.
48class AMDGPUPromoteAlloca : public FunctionPass {
49public:
50 static char ID;
51
52 AMDGPUPromoteAlloca() : FunctionPass(ID) {}
53
54 bool runOnFunction(Function &F) override;
55
56 StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }
57
58 bool handleAlloca(AllocaInst &I, bool SufficientLDS);
59
60 void getAnalysisUsage(AnalysisUsage &AU) const override {
61 AU.setPreservesCFG();
62 FunctionPass::getAnalysisUsage(AU);
63 }
64};
65
66class AMDGPUPromoteAllocaImpl {
67private:
68 const TargetMachine &TM;
69 Module *Mod = nullptr;
70 const DataLayout *DL = nullptr;
71
72 // FIXME: This should be per-kernel.
73 uint32_t LocalMemLimit = 0;
74 uint32_t CurrentLocalMemUsage = 0;
75 unsigned MaxVGPRs;
76
77 bool IsAMDGCN = false;
78 bool IsAMDHSA = false;
79
80 std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
81 Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);
82
83 /// BaseAlloca is the alloca root the search started from.
84 /// Val may be that alloca or a recursive user of it.
85 bool collectUsesWithPtrTypes(Value *BaseAlloca,
86 Value *Val,
87 std::vector<Value*> &WorkList) const;
88
89 /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
90 /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
91 /// Returns true if both operands are derived from the same alloca. Val should
92 /// be the same value as one of the input operands of UseInst.
93 bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
94 Instruction *UseInst,
95 int OpIdx0, int OpIdx1) const;
96
97 /// Check whether we have enough local memory for promotion.
98 bool hasSufficientLocalMem(const Function &F);
99
100 bool handleAlloca(AllocaInst &I, bool SufficientLDS);
101
102public:
103 AMDGPUPromoteAllocaImpl(TargetMachine &TM) : TM(TM) {}
104 bool run(Function &F);
105};
106
107class AMDGPUPromoteAllocaToVector : public FunctionPass {
108public:
109 static char ID;
110
111 AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}
112
113 bool runOnFunction(Function &F) override;
114
115 StringRef getPassName() const override {
116 return "AMDGPU Promote Alloca to vector";
117 }
118
119 void getAnalysisUsage(AnalysisUsage &AU) const override {
120 AU.setPreservesCFG();
121 FunctionPass::getAnalysisUsage(AU);
122 }
123};
124
125} // end anonymous namespace
126
127char AMDGPUPromoteAlloca::ID = 0;
128char AMDGPUPromoteAllocaToVector::ID = 0;
129
130INITIALIZE_PASS_BEGIN(AMDGPUPromoteAlloca, DEBUG_TYPE,
131 "AMDGPU promote alloca to vector or LDS", false, false)
132// Move LDS uses from functions to kernels before promote alloca for accurate
133// estimation of LDS available
134INITIALIZE_PASS_DEPENDENCY(AMDGPULowerModuleLDS)
135INITIALIZE_PASS_END(AMDGPUPromoteAlloca, DEBUG_TYPE,
136 "AMDGPU promote alloca to vector or LDS", false, false)
137
138INITIALIZE_PASS(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
139 "AMDGPU promote alloca to vector", false, false)
140
141char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
142char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;
143
144bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
145 if (skipFunction(F))
146 return false;
147
148 if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
149 return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>()).run(F);
150 }
151 return false;
152}
153
154PreservedAnalyses AMDGPUPromoteAllocaPass::run(Function &F,
155 FunctionAnalysisManager &AM) {
156 bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F);
157 if (Changed) {
158 PreservedAnalyses PA;
159 PA.preserveSet<CFGAnalyses>();
160 return PA;
161 }
162 return PreservedAnalyses::all();
163}
164
165bool AMDGPUPromoteAllocaImpl::run(Function &F) {
166 Mod = F.getParent();
167 DL = &Mod->getDataLayout();
168
169 const Triple &TT = TM.getTargetTriple();
170 IsAMDGCN = TT.getArch() == Triple::amdgcn;
171 IsAMDHSA = TT.getOS() == Triple::AMDHSA;
172
173 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
174 if (!ST.isPromoteAllocaEnabled())
175 return false;
176
177 if (IsAMDGCN) {
178 const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
179 MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
180 // A non-entry function has only 32 caller preserved registers.
181 // Do not promote alloca which will force spilling.
182 if (!AMDGPU::isEntryFunctionCC(F.getCallingConv()))
183 MaxVGPRs = std::min(MaxVGPRs, 32u);
184 } else {
185 MaxVGPRs = 128;
186 }
187
188 bool SufficientLDS = hasSufficientLocalMem(F);
189 bool Changed = false;
190 BasicBlock &EntryBB = *F.begin();
191
192 SmallVector<AllocaInst *, 16> Allocas;
193 for (Instruction &I : EntryBB) {
194 if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
195 Allocas.push_back(AI);
196 }
197
198 for (AllocaInst *AI : Allocas) {
199 if (handleAlloca(*AI, SufficientLDS))
200 Changed = true;
201 }
202
203 return Changed;
204}
205
206std::pair<Value *, Value *>
207AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
208 Function &F = *Builder.GetInsertBlock()->getParent();
209 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
210
211 if (!IsAMDHSA) {
212 Function *LocalSizeYFn
213 = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
214 Function *LocalSizeZFn
215 = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);
216
217 CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
218 CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});
219
220 ST.makeLIDRangeMetadata(LocalSizeY);
221 ST.makeLIDRangeMetadata(LocalSizeZ);
222
223 return std::make_pair(LocalSizeY, LocalSizeZ);
224 }
225
226 // We must read the size out of the dispatch pointer.
227 assert(IsAMDGCN);
228
229 // We are indexing into this struct, and want to extract the workgroup_size_*
230 // fields.
231 //
232 // typedef struct hsa_kernel_dispatch_packet_s {
233 // uint16_t header;
234 // uint16_t setup;
235 // uint16_t workgroup_size_x ;
236 // uint16_t workgroup_size_y;
237 // uint16_t workgroup_size_z;
238 // uint16_t reserved0;
239 // uint32_t grid_size_x ;
240 // uint32_t grid_size_y ;
241 // uint32_t grid_size_z;
242 //
243 // uint32_t private_segment_size;
244 // uint32_t group_segment_size;
245 // uint64_t kernel_object;
246 //
247 // #ifdef HSA_LARGE_MODEL
248 // void *kernarg_address;
249 // #elif defined HSA_LITTLE_ENDIAN
250 // void *kernarg_address;
251 // uint32_t reserved1;
252 // #else
253 // uint32_t reserved1;
254 // void *kernarg_address;
255 // #endif
256 // uint64_t reserved2;
257 // hsa_signal_t completion_signal; // uint64_t wrapper
258 // } hsa_kernel_dispatch_packet_t
259 //
260 Function *DispatchPtrFn
261 = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);
262
263 CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
264 DispatchPtr->addRetAttr(Attribute::NoAlias);
265 DispatchPtr->addRetAttr(Attribute::NonNull);
266 F.removeFnAttr("amdgpu-no-dispatch-ptr");
267
268 // Size of the dispatch packet struct.
269 DispatchPtr->addDereferenceableRetAttr(64);
270
271 Type *I32Ty = Type::getInt32Ty(Mod->getContext());
272 Value *CastDispatchPtr = Builder.CreateBitCast(
273 DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));
274
275 // We could do a single 64-bit load here, but it's likely that the basic
276 // 32-bit and extract sequence is already present, and it is probably easier
277 // to CSE this. The loads should be mergeable later anyway.
278 Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
279 LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));
280
281 Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
282 LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));
283
284 MDNode *MD = MDNode::get(Mod->getContext(), None);
285 LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
286 LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
287 ST.makeLIDRangeMetadata(LoadZU);
288
289 // Extract y component. Upper half of LoadZU should be zero already.
290 Value *Y = Builder.CreateLShr(LoadXY, 16);
291
292 return std::make_pair(Y, LoadZU);
293}
294
295Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,
296 unsigned N) {
297 Function *F = Builder.GetInsertBlock()->getParent();
298 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, *F);
299 Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
300 StringRef AttrName;
301
302 switch (N) {
303 case 0:
304 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
305 : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
306 AttrName = "amdgpu-no-workitem-id-x";
307 break;
308 case 1:
309 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
310 : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
311 AttrName = "amdgpu-no-workitem-id-y";
312 break;
313
314 case 2:
315 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
316 : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
317 AttrName = "amdgpu-no-workitem-id-z";
318 break;
319 default:
320 llvm_unreachable("invalid dimension")::llvm::llvm_unreachable_internal("invalid dimension", "llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp"
, 320)
;
321 }
322
323 Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
324 CallInst *CI = Builder.CreateCall(WorkitemIdFn);
325 ST.makeLIDRangeMetadata(CI);
326 F->removeFnAttr(AttrName);
327
328 return CI;
329}
330
331static FixedVectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
332 return FixedVectorType::get(ArrayTy->getElementType(),
333 ArrayTy->getNumElements());
334}
335
336static Value *stripBitcasts(Value *V) {
337 while (Instruction *I = dyn_cast<Instruction>(V)) {
338 if (I->getOpcode() != Instruction::BitCast)
339 break;
340 V = I->getOperand(0);
341 }
342 return V;
343}
344
345static Value *
346calculateVectorIndex(Value *Ptr,
347 const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
348 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(stripBitcasts(Ptr));
349 if (!GEP)
350 return nullptr;
351
352 auto I = GEPIdx.find(GEP);
353 return I == GEPIdx.end() ? nullptr : I->second;
354}
355
356static Value* GEPToVectorIndex(GetElementPtrInst *GEP) {
357 // FIXME we only support simple cases
358 if (GEP->getNumOperands() != 3)
359 return nullptr;
360
361 ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
362 if (!I0 || !I0->isZero())
363 return nullptr;
364
365 return GEP->getOperand(2);
366}
367
368// Not an instruction handled below to turn into a vector.
369//
370// TODO: Check isTriviallyVectorizable for calls and handle other
371// instructions.
372static bool canVectorizeInst(Instruction *Inst, User *User,
373 const DataLayout &DL) {
374 switch (Inst->getOpcode()) {
46
Called C++ object pointer is null
375 case Instruction::Load: {
376 // Currently only handle the case where the Pointer Operand is a GEP.
377 // Also we could not vectorize volatile or atomic loads.
378 LoadInst *LI = cast<LoadInst>(Inst);
379 if (isa<AllocaInst>(User) &&
380 LI->getPointerOperandType() == User->getType() &&
381 isa<VectorType>(LI->getType()))
382 return true;
383
384 Instruction *PtrInst = dyn_cast<Instruction>(LI->getPointerOperand());
385 if (!PtrInst)
386 return false;
387
388 return (PtrInst->getOpcode() == Instruction::GetElementPtr ||
389 PtrInst->getOpcode() == Instruction::BitCast) &&
390 LI->isSimple();
391 }
392 case Instruction::BitCast:
393 return true;
394 case Instruction::Store: {
395 // Must be the stored pointer operand, not a stored value, plus
396 // since it should be canonical form, the User should be a GEP.
397 // Also we could not vectorize volatile or atomic stores.
398 StoreInst *SI = cast<StoreInst>(Inst);
399 if (isa<AllocaInst>(User) &&
400 SI->getPointerOperandType() == User->getType() &&
401 isa<VectorType>(SI->getValueOperand()->getType()))
402 return true;
403
404 Instruction *UserInst = dyn_cast<Instruction>(User);
405 if (!UserInst)
406 return false;
407
408 return (SI->getPointerOperand() == User) &&
409 (UserInst->getOpcode() == Instruction::GetElementPtr ||
410 UserInst->getOpcode() == Instruction::BitCast) &&
411 SI->isSimple();
412 }
413 default:
414 return false;
415 }
416}
417
418static bool tryPromoteAllocaToVector(AllocaInst *Alloca, const DataLayout &DL,
419 unsigned MaxVGPRs) {
420
421 if (DisablePromoteAllocaToVector) {
19
Assuming the condition is false
20
Taking false branch
422 LLVM_DEBUG(dbgs() << " Promotion alloca to vector is disabled\n");
423 return false;
424 }
425
426 Type *AllocaTy = Alloca->getAllocatedType();
427 auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
21
Assuming 'AllocaTy' is not a 'FixedVectorType'
428 if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
22
Assuming 'AllocaTy' is a 'ArrayType'
22.1
'ArrayTy' is non-null
429 if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
23
Assuming the condition is true
25
Taking true branch
430 ArrayTy->getNumElements() > 0)
24
Assuming the condition is true
431 VectorTy = arrayTypeToVecType(ArrayTy);
432 }
433
434 // Use up to 1/4 of available register budget for vectorization.
435 unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
26
Assuming the condition is false
27
'?' condition is false
436 : (MaxVGPRs * 32);
437
438 if (DL.getTypeSizeInBits(AllocaTy) * 4 > Limit) {
28
Assuming the condition is false
439 LLVM_DEBUG(dbgs() << " Alloca too big for vectorization with "
440 << MaxVGPRs << " registers available\n");
441 return false;
442 }
443
444 LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");
29
Taking false branch
30
Assuming 'DebugFlag' is false
445
446 // FIXME: There is no reason why we can't support larger arrays, we
447 // are just being conservative for now.
448 // FIXME: We also reject alloca's of the form [ 2 x [ 2 x i32 ]] or equivalent. Potentially these
449 // could also be promoted but we don't currently handle this case
450 if (!VectorTy || VectorTy->getNumElements() > 16 ||
31
Assuming 'VectorTy' is non-null
32
Assuming the condition is false
34
Taking false branch
451 VectorTy->getNumElements() < 2) {
33
Assuming the condition is false
452 LLVM_DEBUG(dbgs() << " Cannot convert type to vector\n");
453 return false;
454 }
455
456 std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
457 std::vector<Value *> WorkList;
458 SmallVector<User *, 8> Users(Alloca->users());
459 SmallVector<User *, 8> UseUsers(Users.size(), Alloca);
460 Type *VecEltTy = VectorTy->getElementType();
461 while (!Users.empty()) {
35
Calling 'SmallVectorBase::empty'
38
Returning from 'SmallVectorBase::empty'
39
Loop condition is true. Entering loop body
462 User *AllocaUser = Users.pop_back_val();
463 User *UseUser = UseUsers.pop_back_val();
464 Instruction *Inst = dyn_cast<Instruction>(AllocaUser);
40
Assuming 'AllocaUser' is not a 'Instruction'
41
'Inst' initialized to a null pointer value
465
466 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
42
Assuming 'AllocaUser' is not a 'GetElementPtrInst'
467 if (!GEP) {
42.1
'GEP' is null
43
Taking true branch
468 if (!canVectorizeInst(Inst, UseUser, DL))
44
Passing null pointer value via 1st parameter 'Inst'
45
Calling 'canVectorizeInst'
469 return false;
470
471 if (Inst->getOpcode() == Instruction::BitCast) {
472 Type *FromTy = Inst->getOperand(0)->getType()->getPointerElementType();
473 Type *ToTy = Inst->getType()->getPointerElementType();
474 if (FromTy->isAggregateType() || ToTy->isAggregateType() ||
475 DL.getTypeSizeInBits(FromTy) != DL.getTypeSizeInBits(ToTy))
476 continue;
477
478 for (User *CastUser : Inst->users()) {
479 if (isAssumeLikeIntrinsic(cast<Instruction>(CastUser)))
480 continue;
481 Users.push_back(CastUser);
482 UseUsers.push_back(Inst);
483 }
484
485 continue;
486 }
487
488 WorkList.push_back(AllocaUser);
489 continue;
490 }
491
492 Value *Index = GEPToVectorIndex(GEP);
493
494 // If we can't compute a vector index from this GEP, then we can't
495 // promote this alloca to vector.
496 if (!Index) {
497 LLVM_DEBUG(dbgs() << " Cannot compute vector index for GEP " << *GEP
498 << '\n');
499 return false;
500 }
501
502 GEPVectorIdx[GEP] = Index;
503 Users.append(GEP->user_begin(), GEP->user_end());
504 UseUsers.append(GEP->getNumUses(), GEP);
505 }
506
507 LLVM_DEBUG(dbgs() << " Converting alloca to vector " << *AllocaTy << " -> "
508 << *VectorTy << '\n');
509
510 for (Value *V : WorkList) {
511 Instruction *Inst = cast<Instruction>(V);
512 IRBuilder<> Builder(Inst);
513 switch (Inst->getOpcode()) {
514 case Instruction::Load: {
515 if (Inst->getType() == AllocaTy || Inst->getType()->isVectorTy())
516 break;
517
518 Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
519 Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
520 if (!Index)
521 break;
522
523 Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
524 Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
525 Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
526 Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
527 if (Inst->getType() != VecEltTy)
528 ExtractElement = Builder.CreateBitOrPointerCast(ExtractElement, Inst->getType());
529 Inst->replaceAllUsesWith(ExtractElement);
530 Inst->eraseFromParent();
531 break;
532 }
533 case Instruction::Store: {
534 StoreInst *SI = cast<StoreInst>(Inst);
535 if (SI->getValueOperand()->getType() == AllocaTy ||
536 SI->getValueOperand()->getType()->isVectorTy())
537 break;
538
539 Value *Ptr = SI->getPointerOperand();
540 Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
541 if (!Index)
542 break;
543
544 Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
545 Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
546 Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
547 Value *Elt = SI->getValueOperand();
548 if (Elt->getType() != VecEltTy)
549 Elt = Builder.CreateBitOrPointerCast(Elt, VecEltTy);
550 Value *NewVecValue = Builder.CreateInsertElement(VecValue, Elt, Index);
551 Builder.CreateStore(NewVecValue, BitCast);
552 Inst->eraseFromParent();
553 break;
554 }
555
556 default:
557 llvm_unreachable("Inconsistency in instructions promotable to vector")::llvm::llvm_unreachable_internal("Inconsistency in instructions promotable to vector"
, "llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp", 557)
;
558 }
559 }
560 return true;
561}
562
563static bool isCallPromotable(CallInst *CI) {
564 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
565 if (!II)
566 return false;
567
568 switch (II->getIntrinsicID()) {
569 case Intrinsic::memcpy:
570 case Intrinsic::memmove:
571 case Intrinsic::memset:
572 case Intrinsic::lifetime_start:
573 case Intrinsic::lifetime_end:
574 case Intrinsic::invariant_start:
575 case Intrinsic::invariant_end:
576 case Intrinsic::launder_invariant_group:
577 case Intrinsic::strip_invariant_group:
578 case Intrinsic::objectsize:
579 return true;
580 default:
581 return false;
582 }
583}
584
585bool AMDGPUPromoteAllocaImpl::binaryOpIsDerivedFromSameAlloca(
586 Value *BaseAlloca, Value *Val, Instruction *Inst, int OpIdx0,
587 int OpIdx1) const {
588 // Figure out which operand is the one we might not be promoting.
589 Value *OtherOp = Inst->getOperand(OpIdx0);
590 if (Val == OtherOp)
591 OtherOp = Inst->getOperand(OpIdx1);
592
593 if (isa<ConstantPointerNull>(OtherOp))
594 return true;
595
596 Value *OtherObj = getUnderlyingObject(OtherOp);
597 if (!isa<AllocaInst>(OtherObj))
598 return false;
599
600 // TODO: We should be able to replace undefs with the right pointer type.
601
602 // TODO: If we know the other base object is another promotable
603 // alloca, not necessarily this alloca, we can do this. The
604 // important part is both must have the same address space at
605 // the end.
606 if (OtherObj != BaseAlloca) {
607 LLVM_DEBUG(
608 dbgs() << "Found a binary instruction with another alloca object\n");
609 return false;
610 }
611
612 return true;
613}
614
615bool AMDGPUPromoteAllocaImpl::collectUsesWithPtrTypes(
616 Value *BaseAlloca, Value *Val, std::vector<Value *> &WorkList) const {
617
618 for (User *User : Val->users()) {
619 if (is_contained(WorkList, User))
620 continue;
621
622 if (CallInst *CI = dyn_cast<CallInst>(User)) {
623 if (!isCallPromotable(CI))
624 return false;
625
626 WorkList.push_back(User);
627 continue;
628 }
629
630 Instruction *UseInst = cast<Instruction>(User);
631 if (UseInst->getOpcode() == Instruction::PtrToInt)
632 return false;
633
634 if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
635 if (LI->isVolatile())
636 return false;
637
638 continue;
639 }
640
641 if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
642 if (SI->isVolatile())
643 return false;
644
645 // Reject if the stored value is not the pointer operand.
646 if (SI->getPointerOperand() != Val)
647 return false;
648 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
649 if (RMW->isVolatile())
650 return false;
651 } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
652 if (CAS->isVolatile())
653 return false;
654 }
655
656 // Only promote a select if we know that the other select operand
657 // is from another pointer that will also be promoted.
658 if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
659 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
660 return false;
661
662 // May need to rewrite constant operands.
663 WorkList.push_back(ICmp);
664 }
665
666 if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
667 // Give up if the pointer may be captured.
668 if (PointerMayBeCaptured(UseInst, true, true))
669 return false;
670 // Don't collect the users of this.
671 WorkList.push_back(User);
672 continue;
673 }
674
675 // Do not promote vector/aggregate type instructions. It is hard to track
676 // their users.
677 if (isa<InsertValueInst>(User) || isa<InsertElementInst>(User))
678 return false;
679
680 if (!User->getType()->isPointerTy())
681 continue;
682
683 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
684 // Be conservative if an address could be computed outside the bounds of
685 // the alloca.
686 if (!GEP->isInBounds())
687 return false;
688 }
689
690 // Only promote a select if we know that the other select operand is from
691 // another pointer that will also be promoted.
692 if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
693 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
694 return false;
695 }
696
697 // Repeat for phis.
698 if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
699 // TODO: Handle more complex cases. We should be able to replace loops
700 // over arrays.
701 switch (Phi->getNumIncomingValues()) {
702 case 1:
703 break;
704 case 2:
705 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
706 return false;
707 break;
708 default:
709 return false;
710 }
711 }
712
713 WorkList.push_back(User);
714 if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
715 return false;
716 }
717
718 return true;
719}
720
721bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {
722
723 FunctionType *FTy = F.getFunctionType();
724 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
725
726 // If the function has any arguments in the local address space, then it's
727 // possible these arguments require the entire local memory space, so
728 // we cannot use local memory in the pass.
729 for (Type *ParamTy : FTy->params()) {
730 PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
731 if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
732 LocalMemLimit = 0;
733 LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
734 "local memory disabled.\n");
735 return false;
736 }
737 }
738
739 LocalMemLimit = ST.getLocalMemorySize();
740 if (LocalMemLimit == 0)
741 return false;
742
743 SmallVector<const Constant *, 16> Stack;
744 SmallPtrSet<const Constant *, 8> VisitedConstants;
745 SmallPtrSet<const GlobalVariable *, 8> UsedLDS;
746
747 auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
748 for (const User *U : Val->users()) {
749 if (const Instruction *Use = dyn_cast<Instruction>(U)) {
750 if (Use->getParent()->getParent() == &F)
751 return true;
752 } else {
753 const Constant *C = cast<Constant>(U);
754 if (VisitedConstants.insert(C).second)
755 Stack.push_back(C);
756 }
757 }
758
759 return false;
760 };
761
762 for (GlobalVariable &GV : Mod->globals()) {
763 if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
764 continue;
765
766 if (visitUsers(&GV, &GV)) {
767 UsedLDS.insert(&GV);
768 Stack.clear();
769 continue;
770 }
771
772 // For any ConstantExpr uses, we need to recursively search the users until
773 // we see a function.
774 while (!Stack.empty()) {
775 const Constant *C = Stack.pop_back_val();
776 if (visitUsers(&GV, C)) {
777 UsedLDS.insert(&GV);
778 Stack.clear();
779 break;
780 }
781 }
782 }
783
784 const DataLayout &DL = Mod->getDataLayout();
785 SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes;
786 AllocatedSizes.reserve(UsedLDS.size());
787
788 for (const GlobalVariable *GV : UsedLDS) {
789 Align Alignment =
790 DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
791 uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());
792 AllocatedSizes.emplace_back(AllocSize, Alignment);
793 }
794
795 // Sort to try to estimate the worst case alignment padding
796 //
797 // FIXME: We should really do something to fix the addresses to a more optimal
798 // value instead
799 llvm::sort(AllocatedSizes, [](std::pair<uint64_t, Align> LHS,
800 std::pair<uint64_t, Align> RHS) {
801 return LHS.second < RHS.second;
802 });
803
804 // Check how much local memory is being used by global objects
805 CurrentLocalMemUsage = 0;
806
807 // FIXME: Try to account for padding here. The real padding and address is
808 // currently determined from the inverse order of uses in the function when
809 // legalizing, which could also potentially change. We try to estimate the
810 // worst case here, but we probably should fix the addresses earlier.
811 for (auto Alloc : AllocatedSizes) {
812 CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second);
813 CurrentLocalMemUsage += Alloc.first;
814 }
815
816 unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
817 F);
818
819 // Restrict local memory usage so that we don't drastically reduce occupancy,
820 // unless it is already significantly reduced.
821
822 // TODO: Have some sort of hint or other heuristics to guess occupancy based
823 // on other factors..
824 unsigned OccupancyHint = ST.getWavesPerEU(F).second;
825 if (OccupancyHint == 0)
826 OccupancyHint = 7;
827
828 // Clamp to max value.
829 OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());
830
831 // Check the hint but ignore it if it's obviously wrong from the existing LDS
832 // usage.
833 MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);
834
835
836 // Round up to the next tier of usage.
837 unsigned MaxSizeWithWaveCount
838 = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);
839
840 // Program is possibly broken by using more local mem than available.
841 if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
842 return false;
843
844 LocalMemLimit = MaxSizeWithWaveCount;
845
846 LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
847 << " bytes of LDS\n"
848 << " Rounding size to " << MaxSizeWithWaveCount
849 << " with a maximum occupancy of " << MaxOccupancy << '\n'
850 << " and " << (LocalMemLimit - CurrentLocalMemUsage)
851 << " available for promotion\n");
852
853 return true;
854}
855
856// FIXME: Should try to pick the most likely to be profitable allocas first.
857bool AMDGPUPromoteAllocaImpl::handleAlloca(AllocaInst &I, bool SufficientLDS) {
858 // Array allocations are probably not worth handling, since an allocation of
859 // the array type is the canonical form.
860 if (!I.isStaticAlloca() || I.isArrayAllocation())
861 return false;
862
863 const DataLayout &DL = Mod->getDataLayout();
864 IRBuilder<> Builder(&I);
865
866 // First try to replace the alloca with a vector
867 Type *AllocaTy = I.getAllocatedType();
868
869 LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');
870
871 if (tryPromoteAllocaToVector(&I, DL, MaxVGPRs))
872 return true; // Promoted to vector.
873
874 if (DisablePromoteAllocaToLDS)
875 return false;
876
877 const Function &ContainingFunction = *I.getParent()->getParent();
878 CallingConv::ID CC = ContainingFunction.getCallingConv();
879
880 // Don't promote the alloca to LDS for shader calling conventions as the work
881 // item ID intrinsics are not supported for these calling conventions.
882 // Furthermore not all LDS is available for some of the stages.
883 switch (CC) {
884 case CallingConv::AMDGPU_KERNEL:
885 case CallingConv::SPIR_KERNEL:
886 break;
887 default:
888 LLVM_DEBUG(
889 dbgs()
890 << " promote alloca to LDS not supported with calling convention.\n");
891 return false;
892 }
893
894 // Not likely to have sufficient local memory for promotion.
895 if (!SufficientLDS)
896 return false;
897
898 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, ContainingFunction);
899 unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;
900
901 Align Alignment =
902 DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());
903
904 // FIXME: This computed padding is likely wrong since it depends on inverse
905 // usage order.
906 //
907 // FIXME: It is also possible that if we're allowed to use all of the memory
908 // could could end up using more than the maximum due to alignment padding.
909
910 uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
911 uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
912 NewSize += AllocSize;
913
914 if (NewSize > LocalMemLimit) {
915 LLVM_DEBUG(dbgs() << " " << AllocSize
916 << " bytes of local memory not available to promote\n");
917 return false;
918 }
919
920 CurrentLocalMemUsage = NewSize;
921
922 std::vector<Value*> WorkList;
923
924 if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
925 LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
926 return false;
927 }
928
929 LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");
930
931 Function *F = I.getParent()->getParent();
932
933 Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
934 GlobalVariable *GV = new GlobalVariable(
935 *Mod, GVTy, false, GlobalValue::InternalLinkage,
936 UndefValue::get(GVTy),
937 Twine(F->getName()) + Twine('.') + I.getName(),
938 nullptr,
939 GlobalVariable::NotThreadLocal,
940 AMDGPUAS::LOCAL_ADDRESS);
941 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
942 GV->setAlignment(I.getAlign());
943
944 Value *TCntY, *TCntZ;
945
946 std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
947 Value *TIdX = getWorkitemID(Builder, 0);
948 Value *TIdY = getWorkitemID(Builder, 1);
949 Value *TIdZ = getWorkitemID(Builder, 2);
950
951 Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
952 Tmp0 = Builder.CreateMul(Tmp0, TIdX);
953 Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
954 Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
955 TID = Builder.CreateAdd(TID, TIdZ);
956
957 Value *Indices[] = {
958 Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
959 TID
960 };
961
962 Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
963 I.mutateType(Offset->getType());
964 I.replaceAllUsesWith(Offset);
965 I.eraseFromParent();
966
967 SmallVector<IntrinsicInst *> DeferredIntrs;
968
969 for (Value *V : WorkList) {
970 CallInst *Call = dyn_cast<CallInst>(V);
971 if (!Call) {
972 if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
973 Value *Src0 = CI->getOperand(0);
974 PointerType *NewTy = PointerType::getWithSamePointeeType(
975 cast<PointerType>(Src0->getType()), AMDGPUAS::LOCAL_ADDRESS);
976
977 if (isa<ConstantPointerNull>(CI->getOperand(0)))
978 CI->setOperand(0, ConstantPointerNull::get(NewTy));
979
980 if (isa<ConstantPointerNull>(CI->getOperand(1)))
981 CI->setOperand(1, ConstantPointerNull::get(NewTy));
982
983 continue;
984 }
985
986 // The operand's value should be corrected on its own and we don't want to
987 // touch the users.
988 if (isa<AddrSpaceCastInst>(V))
989 continue;
990
991 PointerType *NewTy = PointerType::getWithSamePointeeType(
992 cast<PointerType>(V->getType()), AMDGPUAS::LOCAL_ADDRESS);
993
994 // FIXME: It doesn't really make sense to try to do this for all
995 // instructions.
996 V->mutateType(NewTy);
997
998 // Adjust the types of any constant operands.
999 if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
1000 if (isa<ConstantPointerNull>(SI->getOperand(1)))
1001 SI->setOperand(1, ConstantPointerNull::get(NewTy));
1002
1003 if (isa<ConstantPointerNull>(SI->getOperand(2)))
1004 SI->setOperand(2, ConstantPointerNull::get(NewTy));
1005 } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
1006 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1007 if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
1008 Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
1009 }
1010 }
1011
1012 continue;
1013 }
1014
1015 IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
1016 Builder.SetInsertPoint(Intr);
1017 switch (Intr->getIntrinsicID()) {
1018 case Intrinsic::lifetime_start:
1019 case Intrinsic::lifetime_end:
1020 // These intrinsics are for address space 0 only
1021 Intr->eraseFromParent();
1022 continue;
1023 case Intrinsic::memcpy:
1024 case Intrinsic::memmove:
1025 // These have 2 pointer operands. In case if second pointer also needs
1026 // to be replaced we defer processing of these intrinsics until all
1027 // other values are processed.
1028 DeferredIntrs.push_back(Intr);
1029 continue;
1030 case Intrinsic::memset: {
1031 MemSetInst *MemSet = cast<MemSetInst>(Intr);
1032 Builder.CreateMemSet(
1033 MemSet->getRawDest(), MemSet->getValue(), MemSet->getLength(),
1034 MaybeAlign(MemSet->getDestAlignment()), MemSet->isVolatile());
1035 Intr->eraseFromParent();
1036 continue;
1037 }
1038 case Intrinsic::invariant_start:
1039 case Intrinsic::invariant_end:
1040 case Intrinsic::launder_invariant_group:
1041 case Intrinsic::strip_invariant_group:
1042 Intr->eraseFromParent();
1043 // FIXME: I think the invariant marker should still theoretically apply,
1044 // but the intrinsics need to be changed to accept pointers with any
1045 // address space.
1046 continue;
1047 case Intrinsic::objectsize: {
1048 Value *Src = Intr->getOperand(0);
1049 Function *ObjectSize = Intrinsic::getDeclaration(
1050 Mod, Intrinsic::objectsize,
1051 {Intr->getType(),
1052 PointerType::getWithSamePointeeType(
1053 cast<PointerType>(Src->getType()), AMDGPUAS::LOCAL_ADDRESS)});
1054
1055 CallInst *NewCall = Builder.CreateCall(
1056 ObjectSize,
1057 {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
1058 Intr->replaceAllUsesWith(NewCall);
1059 Intr->eraseFromParent();
1060 continue;
1061 }
1062 default:
1063 Intr->print(errs());
1064 llvm_unreachable("Don't know how to promote alloca intrinsic use.")::llvm::llvm_unreachable_internal("Don't know how to promote alloca intrinsic use."
, "llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp", 1064)
;
1065 }
1066 }
1067
1068 for (IntrinsicInst *Intr : DeferredIntrs) {
1069 Builder.SetInsertPoint(Intr);
1070 Intrinsic::ID ID = Intr->getIntrinsicID();
1071 assert(ID == Intrinsic::memcpy || ID == Intrinsic::memmove);
1072
1073 MemTransferInst *MI = cast<MemTransferInst>(Intr);
1074 auto *B =
1075 Builder.CreateMemTransferInst(ID, MI->getRawDest(), MI->getDestAlign(),
1076 MI->getRawSource(), MI->getSourceAlign(),
1077 MI->getLength(), MI->isVolatile());
1078
1079 for (unsigned I = 0; I != 2; ++I) {
1080 if (uint64_t Bytes = Intr->getParamDereferenceableBytes(I)) {
1081 B->addDereferenceableParamAttr(I, Bytes);
1082 }
1083 }
1084
1085 Intr->eraseFromParent();
1086 }
1087
1088 return true;
1089}
1090
1091bool handlePromoteAllocaToVector(AllocaInst &I, unsigned MaxVGPRs) {
1092 // Array allocations are probably not worth handling, since an allocation of
1093 // the array type is the canonical form.
1094 if (!I.isStaticAlloca() || I.isArrayAllocation())
13
Assuming the condition is false
14
Assuming the condition is false
1095 return false;
1096
1097 LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');
15
Taking false branch
16
Assuming 'DebugFlag' is false
17
Loop condition is false. Exiting loop
1098
1099 Module *Mod = I.getParent()->getParent()->getParent();
1100 return tryPromoteAllocaToVector(&I, Mod->getDataLayout(), MaxVGPRs);
18
Calling 'tryPromoteAllocaToVector'
1101}
1102
1103bool promoteAllocasToVector(Function &F, TargetMachine &TM) {
1104 if (DisablePromoteAllocaToVector)
2
Assuming the condition is false
3
Taking false branch
1105 return false;
1106
1107 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
1108 if (!ST.isPromoteAllocaEnabled())
4
Assuming the condition is false
5
Taking false branch
1109 return false;
1110
1111 unsigned MaxVGPRs;
1112 if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
6
Assuming the condition is false
7
Taking false branch
1113 const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
1114 MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
1115 // A non-entry function has only 32 caller preserved registers.
1116 // Do not promote alloca which will force spilling.
1117 if (!AMDGPU::isEntryFunctionCC(F.getCallingConv()))
1118 MaxVGPRs = std::min(MaxVGPRs, 32u);
1119 } else {
1120 MaxVGPRs = 128;
1121 }
1122
1123 bool Changed = false;
1124 BasicBlock &EntryBB = *F.begin();
1125
1126 SmallVector<AllocaInst *, 16> Allocas;
1127 for (Instruction &I : EntryBB) {
1128 if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
1129 Allocas.push_back(AI);
1130 }
1131
1132 for (AllocaInst *AI : Allocas) {
8
Assuming '__begin1' is not equal to '__end1'
1133 if (handlePromoteAllocaToVector(*AI, MaxVGPRs))
9
Taking false branch
10
Taking false branch
11
Taking false branch
12
Calling 'handlePromoteAllocaToVector'
1134 Changed = true;
1135 }
1136
1137 return Changed;
1138}
1139
1140bool AMDGPUPromoteAllocaToVector::runOnFunction(Function &F) {
1141 if (skipFunction(F))
1142 return false;
1143 if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
1144 return promoteAllocasToVector(F, TPC->getTM<TargetMachine>());
1145 }
1146 return false;
1147}
1148
1149PreservedAnalyses
1150AMDGPUPromoteAllocaToVectorPass::run(Function &F, FunctionAnalysisManager &AM) {
1151 bool Changed = promoteAllocasToVector(F, TM);
1
Calling 'promoteAllocasToVector'
1152 if (Changed) {
1153 PreservedAnalyses PA;
1154 PA.preserveSet<CFGAnalyses>();
1155 return PA;
1156 }
1157 return PreservedAnalyses::all();
1158}
1159
1160FunctionPass *llvm::createAMDGPUPromoteAlloca() {
1161 return new AMDGPUPromoteAlloca();
1162}
1163
1164FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
1165 return new AMDGPUPromoteAllocaToVector();
1166}

/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/include/llvm/ADT/SmallVector.h

1//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the SmallVector class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_ADT_SMALLVECTOR_H
14#define LLVM_ADT_SMALLVECTOR_H
15
16#include "llvm/ADT/iterator_range.h"
17#include "llvm/Support/Compiler.h"
18#include "llvm/Support/ErrorHandling.h"
19#include "llvm/Support/MemAlloc.h"
20#include "llvm/Support/type_traits.h"
21#include <algorithm>
22#include <cassert>
23#include <cstddef>
24#include <cstdlib>
25#include <cstring>
26#include <functional>
27#include <initializer_list>
28#include <iterator>
29#include <limits>
30#include <memory>
31#include <new>
32#include <type_traits>
33#include <utility>
34
35namespace llvm {
36
37/// This is all the stuff common to all SmallVectors.
38///
39/// The template parameter specifies the type which should be used to hold the
40/// Size and Capacity of the SmallVector, so it can be adjusted.
41/// Using 32 bit size is desirable to shrink the size of the SmallVector.
42/// Using 64 bit size is desirable for cases like SmallVector<char>, where a
43/// 32 bit size would limit the vector to ~4GB. SmallVectors are used for
44/// buffering bitcode output - which can exceed 4GB.
45template <class Size_T> class SmallVectorBase {
46protected:
47 void *BeginX;
48 Size_T Size = 0, Capacity;
49
50 /// The maximum value of the Size_T used.
51 static constexpr size_t SizeTypeMax() {
52 return std::numeric_limits<Size_T>::max();
53 }
54
55 SmallVectorBase() = delete;
56 SmallVectorBase(void *FirstEl, size_t TotalCapacity)
57 : BeginX(FirstEl), Capacity(TotalCapacity) {}
58
59 /// This is a helper for \a grow() that's out of line to reduce code
60 /// duplication. This function will report a fatal error if it can't grow at
61 /// least to \p MinSize.
62 void *mallocForGrow(size_t MinSize, size_t TSize, size_t &NewCapacity);
63
64 /// This is an implementation of the grow() method which only works
65 /// on POD-like data types and is out of line to reduce code duplication.
66 /// This function will report a fatal error if it cannot increase capacity.
67 void grow_pod(void *FirstEl, size_t MinSize, size_t TSize);
68
69public:
70 size_t size() const { return Size; }
71 size_t capacity() const { return Capacity; }
72
73 LLVM_NODISCARD bool empty() const { return !Size; }
36
Assuming field 'Size' is not equal to 0
37
Returning zero, which participates in a condition later
74
75protected:
76 /// Set the array size to \p N, which the current array must have enough
77 /// capacity for.
78 ///
79 /// This does not construct or destroy any elements in the vector.
80 void set_size(size_t N) {
81 assert(N <= capacity());
82 Size = N;
83 }
84};
85
86template <class T>
87using SmallVectorSizeType =
88 typename std::conditional<sizeof(T) < 4 && sizeof(void *) >= 8, uint64_t,
89 uint32_t>::type;
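
A minimal compile-time sketch of the size-type selection above. It assumes a 64-bit host (so that sizeof(void *) >= 8 holds); on such hosts small element types get a 64-bit Size/Capacity while larger ones keep a compact 32-bit header:

#include "llvm/ADT/SmallVector.h"
#include <cstdint>
#include <type_traits>

// Sketch only: the first assertion assumes a 64-bit host.
static_assert(std::is_same<llvm::SmallVectorSizeType<char>, uint64_t>::value,
              "elements smaller than 4 bytes use a 64-bit size type on 64-bit hosts");
static_assert(std::is_same<llvm::SmallVectorSizeType<uint64_t>, uint32_t>::value,
              "8-byte elements use a 32-bit size type");
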
90
91/// Figure out the offset of the first element.
92template <class T, typename = void> struct SmallVectorAlignmentAndSize {
93 alignas(SmallVectorBase<SmallVectorSizeType<T>>) char Base[sizeof(
94 SmallVectorBase<SmallVectorSizeType<T>>)];
95 alignas(T) char FirstEl[sizeof(T)];
96};
97
98/// This is the part of SmallVectorTemplateBase which does not depend on whether
99/// the type T is a POD. The extra dummy template argument is used by ArrayRef
100/// to avoid unnecessarily requiring T to be complete.
101template <typename T, typename = void>
102class SmallVectorTemplateCommon
103 : public SmallVectorBase<SmallVectorSizeType<T>> {
104 using Base = SmallVectorBase<SmallVectorSizeType<T>>;
105
106 /// Find the address of the first element. For this pointer math to be valid
107 /// with small-size of 0 for T with lots of alignment, it's important that
108 /// SmallVectorStorage is properly-aligned even for small-size of 0.
109 void *getFirstEl() const {
110 return const_cast<void *>(reinterpret_cast<const void *>(
111 reinterpret_cast<const char *>(this) +
112 offsetof(SmallVectorAlignmentAndSize<T>, FirstEl)));
113 }
114 // Space after 'FirstEl' is clobbered, do not add any instance vars after it.
115
116protected:
117 SmallVectorTemplateCommon(size_t Size) : Base(getFirstEl(), Size) {}
118
119 void grow_pod(size_t MinSize, size_t TSize) {
120 Base::grow_pod(getFirstEl(), MinSize, TSize);
121 }
122
123 /// Return true if this is a smallvector which has not had dynamic
124 /// memory allocated for it.
125 bool isSmall() const { return this->BeginX == getFirstEl(); }
126
127 /// Put this vector in a state of being small.
128 void resetToSmall() {
129 this->BeginX = getFirstEl();
130 this->Size = this->Capacity = 0; // FIXME: Setting Capacity to 0 is suspect.
131 }
132
133 /// Return true if V is an internal reference to the given range.
134 bool isReferenceToRange(const void *V, const void *First, const void *Last) const {
135 // Use std::less to avoid UB.
136 std::less<> LessThan;
137 return !LessThan(V, First) && LessThan(V, Last);
138 }
139
140 /// Return true if V is an internal reference to this vector.
141 bool isReferenceToStorage(const void *V) const {
142 return isReferenceToRange(V, this->begin(), this->end());
143 }
144
145 /// Return true if First and Last form a valid (possibly empty) range in this
146 /// vector's storage.
147 bool isRangeInStorage(const void *First, const void *Last) const {
148 // Use std::less to avoid UB.
149 std::less<> LessThan;
150 return !LessThan(First, this->begin()) && !LessThan(Last, First) &&
151 !LessThan(this->end(), Last);
152 }
153
154 /// Return true unless Elt will be invalidated by resizing the vector to
155 /// NewSize.
156 bool isSafeToReferenceAfterResize(const void *Elt, size_t NewSize) {
157 // Past the end.
158 if (LLVM_LIKELY(!isReferenceToStorage(Elt)))
159 return true;
160
161 // Return false if Elt will be destroyed by shrinking.
162 if (NewSize <= this->size())
163 return Elt < this->begin() + NewSize;
164
165 // Return false if we need to grow.
166 return NewSize <= this->capacity();
167 }
168
169 /// Check whether Elt will be invalidated by resizing the vector to NewSize.
170 void assertSafeToReferenceAfterResize(const void *Elt, size_t NewSize) {
171 assert(isSafeToReferenceAfterResize(Elt, NewSize) &&
172 "Attempting to reference an element of the vector in an operation "
173 "that invalidates it");
174 }
175
176 /// Check whether Elt will be invalidated by increasing the size of the
177 /// vector by N.
178 void assertSafeToAdd(const void *Elt, size_t N = 1) {
179 this->assertSafeToReferenceAfterResize(Elt, this->size() + N);
180 }
181
182 /// Check whether any part of the range will be invalidated by clearing.
183 void assertSafeToReferenceAfterClear(const T *From, const T *To) {
184 if (From == To)
185 return;
186 this->assertSafeToReferenceAfterResize(From, 0);
187 this->assertSafeToReferenceAfterResize(To - 1, 0);
188 }
189 template <
190 class ItTy,
191 std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value,
192 bool> = false>
193 void assertSafeToReferenceAfterClear(ItTy, ItTy) {}
194
195 /// Check whether any part of the range will be invalidated by growing.
196 void assertSafeToAddRange(const T *From, const T *To) {
197 if (From == To)
198 return;
199 this->assertSafeToAdd(From, To - From);
200 this->assertSafeToAdd(To - 1, To - From);
201 }
202 template <
203 class ItTy,
204 std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value,
205 bool> = false>
206 void assertSafeToAddRange(ItTy, ItTy) {}
207
208 /// Reserve enough space to add one element, and return the updated element
209 /// pointer in case it was a reference to the storage.
210 template <class U>
211 static const T *reserveForParamAndGetAddressImpl(U *This, const T &Elt,
212 size_t N) {
213 size_t NewSize = This->size() + N;
214 if (LLVM_LIKELY(NewSize <= This->capacity()))
215 return &Elt;
216
217 bool ReferencesStorage = false;
218 int64_t Index = -1;
219 if (!U::TakesParamByValue) {
220 if (LLVM_UNLIKELY(This->isReferenceToStorage(&Elt))) {
221 ReferencesStorage = true;
222 Index = &Elt - This->begin();
223 }
224 }
225 This->grow(NewSize);
226 return ReferencesStorage ? This->begin() + Index : &Elt;
227 }
228
229public:
230 using size_type = size_t;
231 using difference_type = ptrdiff_t;
232 using value_type = T;
233 using iterator = T *;
234 using const_iterator = const T *;
235
236 using const_reverse_iterator = std::reverse_iterator<const_iterator>;
237 using reverse_iterator = std::reverse_iterator<iterator>;
238
239 using reference = T &;
240 using const_reference = const T &;
241 using pointer = T *;
242 using const_pointer = const T *;
243
244 using Base::capacity;
245 using Base::empty;
246 using Base::size;
247
248 // forward iterator creation methods.
249 iterator begin() { return (iterator)this->BeginX; }
250 const_iterator begin() const { return (const_iterator)this->BeginX; }
251 iterator end() { return begin() + size(); }
252 const_iterator end() const { return begin() + size(); }
253
254 // reverse iterator creation methods.
255 reverse_iterator rbegin() { return reverse_iterator(end()); }
256 const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
257 reverse_iterator rend() { return reverse_iterator(begin()); }
258 const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
259
260 size_type size_in_bytes() const { return size() * sizeof(T); }
261 size_type max_size() const {
262 return std::min(this->SizeTypeMax(), size_type(-1) / sizeof(T));
263 }
264
265 size_t capacity_in_bytes() const { return capacity() * sizeof(T); }
266
267 /// Return a pointer to the vector's buffer, even if empty().
268 pointer data() { return pointer(begin()); }
269 /// Return a pointer to the vector's buffer, even if empty().
270 const_pointer data() const { return const_pointer(begin()); }
271
272 reference operator[](size_type idx) {
273 assert(idx < size());
274 return begin()[idx];
275 }
276 const_reference operator[](size_type idx) const {
277 assert(idx < size());
278 return begin()[idx];
279 }
280
281 reference front() {
282 assert(!empty());
283 return begin()[0];
284 }
285 const_reference front() const {
286 assert(!empty());
287 return begin()[0];
288 }
289
290 reference back() {
291 assert(!empty());
292 return end()[-1];
293 }
294 const_reference back() const {
295 assert(!empty());
296 return end()[-1];
297 }
298};
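
The iterator and element accessors defined above behave like their std::vector counterparts. A minimal usage sketch (the function name accessorSketch is hypothetical):

#include "llvm/ADT/SmallVector.h"
#include <cassert>

void accessorSketch() {
  llvm::SmallVector<int, 4> V{1, 2, 3};
  assert(V.front() == 1 && V.back() == 3); // front()/back() require !empty()
  assert(V.data()[1] == 2);                // data() points into the buffer
  int Sum = 0;
  for (auto It = V.rbegin(); It != V.rend(); ++It)
    Sum += *It;                            // reverse iteration visits 3, 2, 1
  assert(Sum == 6);
}
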
299
300/// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put
301/// method implementations that are designed to work with non-trivial T's.
302///
303/// We approximate is_trivially_copyable with trivial move/copy construction and
304/// trivial destruction. While the standard doesn't specify that you're allowed
305/// to copy these types with memcpy, there is no way for the type to observe this.
306/// This catches the important case of std::pair<POD, POD>, which is not
307/// trivially assignable.
308template <typename T, bool = (is_trivially_copy_constructible<T>::value) &&
309 (is_trivially_move_constructible<T>::value) &&
310 std::is_trivially_destructible<T>::value>
311class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
312 friend class SmallVectorTemplateCommon<T>;
313
314protected:
315 static constexpr bool TakesParamByValue = false;
316 using ValueParamT = const T &;
317
318 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
319
320 static void destroy_range(T *S, T *E) {
321 while (S != E) {
322 --E;
323 E->~T();
324 }
325 }
326
327 /// Move the range [I, E) into the uninitialized memory starting with "Dest",
328 /// constructing elements as needed.
329 template<typename It1, typename It2>
330 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
331 std::uninitialized_copy(std::make_move_iterator(I),
332 std::make_move_iterator(E), Dest);
333 }
334
335 /// Copy the range [I, E) onto the uninitialized memory starting with "Dest",
336 /// constructing elements as needed.
337 template<typename It1, typename It2>
338 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
339 std::uninitialized_copy(I, E, Dest);
340 }
341
342 /// Grow the allocated memory (without initializing new elements), doubling
343 /// the size of the allocated memory. Guarantees space for at least one more
344 /// element, or MinSize more elements if specified.
345 void grow(size_t MinSize = 0);
346
347 /// Create a new allocation big enough for \p MinSize and pass back its size
348 /// in \p NewCapacity. This is the first section of \a grow().
349 T *mallocForGrow(size_t MinSize, size_t &NewCapacity) {
350 return static_cast<T *>(
351 SmallVectorBase<SmallVectorSizeType<T>>::mallocForGrow(
352 MinSize, sizeof(T), NewCapacity));
353 }
354
355 /// Move existing elements over to the new allocation \p NewElts, the middle
356 /// section of \a grow().
357 void moveElementsForGrow(T *NewElts);
358
359 /// Transfer ownership of the allocation, finishing up \a grow().
360 void takeAllocationForGrow(T *NewElts, size_t NewCapacity);
361
362 /// Reserve enough space to add one element, and return the updated element
363 /// pointer in case it was a reference to the storage.
364 const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) {
365 return this->reserveForParamAndGetAddressImpl(this, Elt, N);
366 }
367
368 /// Reserve enough space to add one element, and return the updated element
369 /// pointer in case it was a reference to the storage.
370 T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) {
371 return const_cast<T *>(
372 this->reserveForParamAndGetAddressImpl(this, Elt, N));
373 }
374
375 static T &&forward_value_param(T &&V) { return std::move(V); }
376 static const T &forward_value_param(const T &V) { return V; }
377
378 void growAndAssign(size_t NumElts, const T &Elt) {
379 // Grow manually in case Elt is an internal reference.
380 size_t NewCapacity;
381 T *NewElts = mallocForGrow(NumElts, NewCapacity);
382 std::uninitialized_fill_n(NewElts, NumElts, Elt);
383 this->destroy_range(this->begin(), this->end());
384 takeAllocationForGrow(NewElts, NewCapacity);
385 this->set_size(NumElts);
386 }
387
388 template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) {
389 // Grow manually in case one of Args is an internal reference.
390 size_t NewCapacity;
391 T *NewElts = mallocForGrow(0, NewCapacity);
392 ::new ((void *)(NewElts + this->size())) T(std::forward<ArgTypes>(Args)...);
393 moveElementsForGrow(NewElts);
394 takeAllocationForGrow(NewElts, NewCapacity);
395 this->set_size(this->size() + 1);
396 return this->back();
397 }
398
399public:
400 void push_back(const T &Elt) {
401 const T *EltPtr = reserveForParamAndGetAddress(Elt);
402 ::new ((void *)this->end()) T(*EltPtr);
403 this->set_size(this->size() + 1);
404 }
405
406 void push_back(T &&Elt) {
407 T *EltPtr = reserveForParamAndGetAddress(Elt);
408 ::new ((void *)this->end()) T(::std::move(*EltPtr));
409 this->set_size(this->size() + 1);
410 }
411
412 void pop_back() {
413 this->set_size(this->size() - 1);
414 this->end()->~T();
415 }
416};
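
The reserveForParamAndGetAddress() dance above exists so that pushing back an element of the vector itself stays valid even when push_back has to grow the buffer. A minimal sketch of that guarantee (selfPushBackSketch is a hypothetical name):

#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <string>

void selfPushBackSketch() {
  llvm::SmallVector<std::string, 2> V;
  V.push_back("a");
  V.push_back("b");       // inline storage is now full
  V.push_back(V.front()); // next push_back grows; the internal reference is
                          // re-derived after the reallocation
  assert(V.back() == "a");
}
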
417
418// Define this out-of-line to dissuade the C++ compiler from inlining it.
419template <typename T, bool TriviallyCopyable>
420void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) {
421 size_t NewCapacity;
422 T *NewElts = mallocForGrow(MinSize, NewCapacity);
423 moveElementsForGrow(NewElts);
424 takeAllocationForGrow(NewElts, NewCapacity);
425}
426
427// Define this out-of-line to dissuade the C++ compiler from inlining it.
428template <typename T, bool TriviallyCopyable>
429void SmallVectorTemplateBase<T, TriviallyCopyable>::moveElementsForGrow(
430 T *NewElts) {
431 // Move the elements over.
432 this->uninitialized_move(this->begin(), this->end(), NewElts);
433
434 // Destroy the original elements.
435 destroy_range(this->begin(), this->end());
436}
437
438// Define this out-of-line to dissuade the C++ compiler from inlining it.
439template <typename T, bool TriviallyCopyable>
440void SmallVectorTemplateBase<T, TriviallyCopyable>::takeAllocationForGrow(
441 T *NewElts, size_t NewCapacity) {
442 // If this wasn't grown from the inline copy, deallocate the old space.
443 if (!this->isSmall())
444 free(this->begin());
445
446 this->BeginX = NewElts;
447 this->Capacity = NewCapacity;
448}
449
450/// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put
451/// method implementations that are designed to work with trivially copyable
452/// T's. This allows using memcpy in place of copy/move construction and
453/// skipping destruction.
454template <typename T>
455class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
456 friend class SmallVectorTemplateCommon<T>;
457
458protected:
459 /// True if it's cheap enough to take parameters by value. Doing so avoids
460 /// overhead related to mitigations for reference invalidation.
461 static constexpr bool TakesParamByValue = sizeof(T) <= 2 * sizeof(void *);
462
463 /// Either const T& or T, depending on whether it's cheap enough to take
464 /// parameters by value.
465 using ValueParamT =
466 typename std::conditional<TakesParamByValue, T, const T &>::type;
467
468 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
469
470 // No need to do a destroy loop for POD's.
471 static void destroy_range(T *, T *) {}
472
473 /// Move the range [I, E) onto the uninitialized memory
474 /// starting with "Dest", constructing elements into it as needed.
475 template<typename It1, typename It2>
476 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
477 // Just do a copy.
478 uninitialized_copy(I, E, Dest);
479 }
480
481 /// Copy the range [I, E) onto the uninitialized memory
482 /// starting with "Dest", constructing elements into it as needed.
483 template<typename It1, typename It2>
484 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
485 // Arbitrary iterator types; just use the basic implementation.
486 std::uninitialized_copy(I, E, Dest);
487 }
488
489 /// Copy the range [I, E) onto the uninitialized memory
490 /// starting with "Dest", constructing elements into it as needed.
491 template <typename T1, typename T2>
492 static void uninitialized_copy(
493 T1 *I, T1 *E, T2 *Dest,
494 std::enable_if_t<std::is_same<typename std::remove_const<T1>::type,
495 T2>::value> * = nullptr) {
496 // Use memcpy for PODs iterated by pointers (which includes SmallVector
497 // iterators): std::uninitialized_copy optimizes to memmove, but we can
498 // use memcpy here. Note that I and E are iterators and thus might be
499 // invalid for memcpy if they are equal.
500 if (I != E)
501 memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T));
502 }
503
504 /// Double the size of the allocated memory, guaranteeing space for at
505 /// least one more element or MinSize if specified.
506 void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); }
507
508 /// Reserve enough space to add one element, and return the updated element
509 /// pointer in case it was a reference to the storage.
510 const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) {
511 return this->reserveForParamAndGetAddressImpl(this, Elt, N);
512 }
513
514 /// Reserve enough space to add one element, and return the updated element
515 /// pointer in case it was a reference to the storage.
516 T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) {
517 return const_cast<T *>(
518 this->reserveForParamAndGetAddressImpl(this, Elt, N));
519 }
520
521 /// Copy \p V or return a reference, depending on \a ValueParamT.
522 static ValueParamT forward_value_param(ValueParamT V) { return V; }
523
524 void growAndAssign(size_t NumElts, T Elt) {
525 // Elt has been copied in case it's an internal reference, side-stepping
526 // reference invalidation problems without losing the realloc optimization.
527 this->set_size(0);
528 this->grow(NumElts);
529 std::uninitialized_fill_n(this->begin(), NumElts, Elt);
530 this->set_size(NumElts);
531 }
532
533 template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) {
534 // Use push_back with a copy in case Args has an internal reference,
535 // side-stepping reference invalidation problems without losing the realloc
536 // optimization.
537 push_back(T(std::forward<ArgTypes>(Args)...));
538 return this->back();
539 }
540
541public:
542 void push_back(ValueParamT Elt) {
543 const T *EltPtr = reserveForParamAndGetAddress(Elt);
544 memcpy(reinterpret_cast<void *>(this->end()), EltPtr, sizeof(T));
545 this->set_size(this->size() + 1);
546 }
547
548 void pop_back() { this->set_size(this->size() - 1); }
549};
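
For trivially copyable element types no wider than two pointers, TakesParamByValue above makes push_back take its argument by value, so the copy happens before any growth can invalidate it. A small sketch under that assumption (podPushBackSketch is a hypothetical name):

#include "llvm/ADT/SmallVector.h"
#include <cassert>

void podPushBackSketch() {
  llvm::SmallVector<int, 1> V;
  V.push_back(7);
  V.push_back(V[0]); // safe: the int was copied into the parameter before
                     // grow() could invalidate &V[0]
  assert(V[1] == 7 && V.size() == 2);
}
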
550
551/// This class consists of common code factored out of the SmallVector class to
552/// reduce code duplication based on the SmallVector 'N' template parameter.
553template <typename T>
554class SmallVectorImpl : public SmallVectorTemplateBase<T> {
555 using SuperClass = SmallVectorTemplateBase<T>;
556
557public:
558 using iterator = typename SuperClass::iterator;
559 using const_iterator = typename SuperClass::const_iterator;
560 using reference = typename SuperClass::reference;
561 using size_type = typename SuperClass::size_type;
562
563protected:
564 using SmallVectorTemplateBase<T>::TakesParamByValue;
565 using ValueParamT = typename SuperClass::ValueParamT;
566
567 // Default ctor - Initialize to empty.
568 explicit SmallVectorImpl(unsigned N)
569 : SmallVectorTemplateBase<T>(N) {}
570
571public:
572 SmallVectorImpl(const SmallVectorImpl &) = delete;
573
574 ~SmallVectorImpl() {
575 // Subclass has already destructed this vector's elements.
576 // If this wasn't grown from the inline copy, deallocate the old space.
577 if (!this->isSmall())
578 free(this->begin());
579 }
580
581 void clear() {
582 this->destroy_range(this->begin(), this->end());
583 this->Size = 0;
584 }
585
586private:
587 // Make set_size() private to avoid misuse in subclasses.
588 using SuperClass::set_size;
589
590 template <bool ForOverwrite> void resizeImpl(size_type N) {
591 if (N == this->size())
592 return;
593
594 if (N < this->size()) {
595 this->truncate(N);
596 return;
597 }
598
599 this->reserve(N);
600 for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
601 if (ForOverwrite)
602 new (&*I) T;
603 else
604 new (&*I) T();
605 this->set_size(N);
606 }
607
608public:
609 void resize(size_type N) { resizeImpl<false>(N); }
610
611 /// Like resize, but if \ref T is POD, the new values won't be initialized.
612 void resize_for_overwrite(size_type N) { resizeImpl<true>(N); }
613
614 /// Like resize, but requires that \p N is less than \a size().
615 void truncate(size_type N) {
616 assert(this->size() >= N && "Cannot increase size with truncate");
617 this->destroy_range(this->begin() + N, this->end());
618 this->set_size(N);
619 }
620
621 void resize(size_type N, ValueParamT NV) {
622 if (N == this->size())
623 return;
624
625 if (N < this->size()) {
626 this->truncate(N);
627 return;
628 }
629
630 // N > this->size(). Defer to append.
631 this->append(N - this->size(), NV);
632 }
633
634 void reserve(size_type N) {
635 if (this->capacity() < N)
636 this->grow(N);
637 }
638
639 void pop_back_n(size_type NumItems) {
640 assert(this->size() >= NumItems);
641 truncate(this->size() - NumItems);
642 }
643
644 LLVM_NODISCARD T pop_back_val() {
645 T Result = ::std::move(this->back());
646 this->pop_back();
647 return Result;
648 }
649
650 void swap(SmallVectorImpl &RHS);
651
652 /// Add the specified range to the end of the SmallVector.
653 template <typename in_iter,
654 typename = std::enable_if_t<std::is_convertible<
655 typename std::iterator_traits<in_iter>::iterator_category,
656 std::input_iterator_tag>::value>>
657 void append(in_iter in_start, in_iter in_end) {
658 this->assertSafeToAddRange(in_start, in_end);
659 size_type NumInputs = std::distance(in_start, in_end);
660 this->reserve(this->size() + NumInputs);
661 this->uninitialized_copy(in_start, in_end, this->end());
662 this->set_size(this->size() + NumInputs);
663 }
664
665 /// Append \p NumInputs copies of \p Elt to the end.
666 void append(size_type NumInputs, ValueParamT Elt) {
667 const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumInputs);
668 std::uninitialized_fill_n(this->end(), NumInputs, *EltPtr);
669 this->set_size(this->size() + NumInputs);
670 }
671
672 void append(std::initializer_list<T> IL) {
673 append(IL.begin(), IL.end());
674 }
675
676 void append(const SmallVectorImpl &RHS) { append(RHS.begin(), RHS.end()); }
677
678 void assign(size_type NumElts, ValueParamT Elt) {
679 // Note that Elt could be an internal reference.
680 if (NumElts > this->capacity()) {
681 this->growAndAssign(NumElts, Elt);
682 return;
683 }
684
685 // Assign over existing elements.
686 std::fill_n(this->begin(), std::min(NumElts, this->size()), Elt);
687 if (NumElts > this->size())
688 std::uninitialized_fill_n(this->end(), NumElts - this->size(), Elt);
689 else if (NumElts < this->size())
690 this->destroy_range(this->begin() + NumElts, this->end());
691 this->set_size(NumElts);
692 }
693
694 // FIXME: Consider assigning over existing elements, rather than clearing &
695 // re-initializing them - for all assign(...) variants.
696
697 template <typename in_iter,
698 typename = std::enable_if_t<std::is_convertible<
699 typename std::iterator_traits<in_iter>::iterator_category,
700 std::input_iterator_tag>::value>>
701 void assign(in_iter in_start, in_iter in_end) {
702 this->assertSafeToReferenceAfterClear(in_start, in_end);
703 clear();
704 append(in_start, in_end);
705 }
706
707 void assign(std::initializer_list<T> IL) {
708 clear();
709 append(IL);
710 }
711
712 void assign(const SmallVectorImpl &RHS) { assign(RHS.begin(), RHS.end()); }
713
714 iterator erase(const_iterator CI) {
715 // Just cast away constness because this is a non-const member function.
716 iterator I = const_cast<iterator>(CI);
717
718 assert(this->isReferenceToStorage(CI) && "Iterator to erase is out of bounds.");
719
720 iterator N = I;
721 // Shift all elts down one.
722 std::move(I+1, this->end(), I);
723 // Drop the last elt.
724 this->pop_back();
725 return(N);
726 }
727
728 iterator erase(const_iterator CS, const_iterator CE) {
729 // Just cast away constness because this is a non-const member function.
730 iterator S = const_cast<iterator>(CS);
731 iterator E = const_cast<iterator>(CE);
732
733 assert(this->isRangeInStorage(S, E) && "Range to erase is out of bounds.");
734
735 iterator N = S;
736 // Shift all elts down.
737 iterator I = std::move(E, this->end(), S);
738 // Drop the last elts.
739 this->destroy_range(I, this->end());
740 this->set_size(I - this->begin());
741 return(N);
742 }
743
744private:
745 template <class ArgType> iterator insert_one_impl(iterator I, ArgType &&Elt) {
746 // Callers ensure that ArgType is derived from T.
747 static_assert(
748 std::is_same<std::remove_const_t<std::remove_reference_t<ArgType>>,
749 T>::value,
750 "ArgType must be derived from T!");
751
752 if (I == this->end()) { // Important special case for empty vector.
753 this->push_back(::std::forward<ArgType>(Elt));
754 return this->end()-1;
755 }
756
757 assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
758
759 // Grow if necessary.
760 size_t Index = I - this->begin();
761 std::remove_reference_t<ArgType> *EltPtr =
762 this->reserveForParamAndGetAddress(Elt);
763 I = this->begin() + Index;
764
765 ::new ((void*) this->end()) T(::std::move(this->back()));
766 // Push everything else over.
767 std::move_backward(I, this->end()-1, this->end());
768 this->set_size(this->size() + 1);
769
770 // If we just moved the element we're inserting, be sure to update
771 // the reference (never happens if TakesParamByValue).
772 static_assert(!TakesParamByValue || std::is_same<ArgType, T>::value,
773 "ArgType must be 'T' when taking by value!");
774 if (!TakesParamByValue && this->isReferenceToRange(EltPtr, I, this->end()))
775 ++EltPtr;
776
777 *I = ::std::forward<ArgType>(*EltPtr);
778 return I;
779 }
780
781public:
782 iterator insert(iterator I, T &&Elt) {
783 return insert_one_impl(I, this->forward_value_param(std::move(Elt)));
784 }
785
786 iterator insert(iterator I, const T &Elt) {
787 return insert_one_impl(I, this->forward_value_param(Elt));
788 }
789
790 iterator insert(iterator I, size_type NumToInsert, ValueParamT Elt) {
791 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
792 size_t InsertElt = I - this->begin();
793
794 if (I == this->end()) { // Important special case for empty vector.
795 append(NumToInsert, Elt);
796 return this->begin()+InsertElt;
797 }
798
799 assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
800
801 // Ensure there is enough space, and get the (maybe updated) address of
802 // Elt.
803 const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumToInsert);
804
805 // Uninvalidate the iterator.
806 I = this->begin()+InsertElt;
807
808 // If there are more elements between the insertion point and the end of the
809 // range than there are being inserted, we can use a simple approach to
810 // insertion. Since we already reserved space, we know that this won't
811 // reallocate the vector.
812 if (size_t(this->end()-I) >= NumToInsert) {
813 T *OldEnd = this->end();
814 append(std::move_iterator<iterator>(this->end() - NumToInsert),
815 std::move_iterator<iterator>(this->end()));
816
817 // Copy the existing elements that get replaced.
818 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
819
820 // If we just moved the element we're inserting, be sure to update
821 // the reference (never happens if TakesParamByValue).
822 if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end())
823 EltPtr += NumToInsert;
824
825 std::fill_n(I, NumToInsert, *EltPtr);
826 return I;
827 }
828
829 // Otherwise, we're inserting more elements than exist already, and we're
830 // not inserting at the end.
831
832 // Move over the elements that we're about to overwrite.
833 T *OldEnd = this->end();
834 this->set_size(this->size() + NumToInsert);
835 size_t NumOverwritten = OldEnd-I;
836 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
837
838 // If we just moved the element we're inserting, be sure to update
839 // the reference (never happens if TakesParamByValue).
840 if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end())
841 EltPtr += NumToInsert;
842
843 // Replace the overwritten part.
844 std::fill_n(I, NumOverwritten, *EltPtr);
845
846 // Insert the non-overwritten middle part.
847 std::uninitialized_fill_n(OldEnd, NumToInsert - NumOverwritten, *EltPtr);
848 return I;
849 }
850
851 template <typename ItTy,
852 typename = std::enable_if_t<std::is_convertible<
853 typename std::iterator_traits<ItTy>::iterator_category,
854 std::input_iterator_tag>::value>>
855 iterator insert(iterator I, ItTy From, ItTy To) {
856 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
857 size_t InsertElt = I - this->begin();
858
859 if (I == this->end()) { // Important special case for empty vector.
860 append(From, To);
861 return this->begin()+InsertElt;
862 }
863
864 assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
865
866 // Check that the reserve that follows doesn't invalidate the iterators.
867 this->assertSafeToAddRange(From, To);
868
869 size_t NumToInsert = std::distance(From, To);
870
871 // Ensure there is enough space.
872 reserve(this->size() + NumToInsert);
873
874 // Uninvalidate the iterator.
875 I = this->begin()+InsertElt;
876
877 // If there are more elements between the insertion point and the end of the
878 // range than there are being inserted, we can use a simple approach to
879 // insertion. Since we already reserved space, we know that this won't
880 // reallocate the vector.
881 if (size_t(this->end()-I) >= NumToInsert) {
882 T *OldEnd = this->end();
883 append(std::move_iterator<iterator>(this->end() - NumToInsert),
884 std::move_iterator<iterator>(this->end()));
885
886 // Copy the existing elements that get replaced.
887 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
888
889 std::copy(From, To, I);
890 return I;
891 }
892
893 // Otherwise, we're inserting more elements than exist already, and we're
894 // not inserting at the end.
895
896 // Move over the elements that we're about to overwrite.
897 T *OldEnd = this->end();
898 this->set_size(this->size() + NumToInsert);
899 size_t NumOverwritten = OldEnd-I;
900 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
901
902 // Replace the overwritten part.
903 for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
904 *J = *From;
905 ++J; ++From;
906 }
907
908 // Insert the non-overwritten middle part.
909 this->uninitialized_copy(From, To, OldEnd);
910 return I;
911 }
912
913 void insert(iterator I, std::initializer_list<T> IL) {
914 insert(I, IL.begin(), IL.end());
915 }
916
917 template <typename... ArgTypes> reference emplace_back(ArgTypes &&... Args) {
918 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
919 return this->growAndEmplaceBack(std::forward<ArgTypes>(Args)...);
920
921 ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
922 this->set_size(this->size() + 1);
923 return this->back();
924 }
925
926 SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
927
928 SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
929
930 bool operator==(const SmallVectorImpl &RHS) const {
931 if (this->size() != RHS.size()) return false;
932 return std::equal(this->begin(), this->end(), RHS.begin());
933 }
934 bool operator!=(const SmallVectorImpl &RHS) const {
935 return !(*this == RHS);
936 }
937
938 bool operator<(const SmallVectorImpl &RHS) const {
939 return std::lexicographical_compare(this->begin(), this->end(),
940 RHS.begin(), RHS.end());
941 }
942};
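
A short sketch exercising the SmallVectorImpl interface above (implSketch is a hypothetical name; the comments show the expected contents after each call):

#include "llvm/ADT/SmallVector.h"
#include <cassert>

void implSketch() {
  llvm::SmallVector<int> V = {1, 2, 3, 4};
  V.insert(V.begin() + 1, 9); // {1, 9, 2, 3, 4}
  V.erase(V.begin());         // {9, 2, 3, 4}
  V.append(2, 7);             // {9, 2, 3, 4, 7, 7}
  int Last = V.pop_back_val();
  assert(Last == 7 && V.size() == 5);
}
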
943
944template <typename T>
945void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
946 if (this == &RHS) return;
947
948 // We can only avoid copying elements if neither vector is small.
949 if (!this->isSmall() && !RHS.isSmall()) {
950 std::swap(this->BeginX, RHS.BeginX);
951 std::swap(this->Size, RHS.Size);
952 std::swap(this->Capacity, RHS.Capacity);
953 return;
954 }
955 this->reserve(RHS.size());
956 RHS.reserve(this->size());
957
958 // Swap the shared elements.
959 size_t NumShared = this->size();
960 if (NumShared > RHS.size()) NumShared = RHS.size();
961 for (size_type i = 0; i != NumShared; ++i)
962 std::swap((*this)[i], RHS[i]);
963
964 // Copy over the extra elts.
965 if (this->size() > RHS.size()) {
966 size_t EltDiff = this->size() - RHS.size();
967 this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
968 RHS.set_size(RHS.size() + EltDiff);
969 this->destroy_range(this->begin()+NumShared, this->end());
970 this->set_size(NumShared);
971 } else if (RHS.size() > this->size()) {
972 size_t EltDiff = RHS.size() - this->size();
973 this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
974 this->set_size(this->size() + EltDiff);
975 this->destroy_range(RHS.begin()+NumShared, RHS.end());
976 RHS.set_size(NumShared);
977 }
978}
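
swap() above steals the heap buffers when neither vector is small and falls back to element-wise swapping otherwise. A usage sketch (swapSketch is a hypothetical name):

#include "llvm/ADT/SmallVector.h"
#include <cassert>

void swapSketch() {
  llvm::SmallVector<int, 2> A = {1, 2, 3}; // grown past its 2 inline slots
  llvm::SmallVector<int, 2> B = {4};       // still in inline storage
  A.swap(B);                               // element-wise path: one side is small
  assert(A.size() == 1 && B.size() == 3);
}
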
979
980template <typename T>
981SmallVectorImpl<T> &SmallVectorImpl<T>::
982 operator=(const SmallVectorImpl<T> &RHS) {
983 // Avoid self-assignment.
984 if (this == &RHS) return *this;
985
986 // If we already have sufficient space, assign the common elements, then
987 // destroy any excess.
988 size_t RHSSize = RHS.size();
989 size_t CurSize = this->size();
990 if (CurSize >= RHSSize) {
991 // Assign common elements.
992 iterator NewEnd;
993 if (RHSSize)
994 NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
995 else
996 NewEnd = this->begin();
997
998 // Destroy excess elements.
999 this->destroy_range(NewEnd, this->end());
1000
1001 // Trim.
1002 this->set_size(RHSSize);
1003 return *this;
1004 }
1005
1006 // If we have to grow to have enough elements, destroy the current elements.
1007 // This allows us to avoid copying them during the grow.
1008 // FIXME: don't do this if they're efficiently moveable.
1009 if (this->capacity() < RHSSize) {
1010 // Destroy current elements.
1011 this->clear();
1012 CurSize = 0;
1013 this->grow(RHSSize);
1014 } else if (CurSize) {
1015 // Otherwise, use assignment for the already-constructed elements.
1016 std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
1017 }
1018
1019 // Copy construct the new elements in place.
1020 this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
1021 this->begin()+CurSize);
1022
1023 // Set end.
1024 this->set_size(RHSSize);
1025 return *this;
1026}
1027
1028template <typename T>
1029SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
1030 // Avoid self-assignment.
1031 if (this == &RHS) return *this;
1032
1033 // If the RHS isn't small, clear this vector and then steal its buffer.
1034 if (!RHS.isSmall()) {
1035 this->destroy_range(this->begin(), this->end());
1036 if (!this->isSmall()) free(this->begin());
1037 this->BeginX = RHS.BeginX;
1038 this->Size = RHS.Size;
1039 this->Capacity = RHS.Capacity;
1040 RHS.resetToSmall();
1041 return *this;
1042 }
1043
1044 // If we already have sufficient space, assign the common elements, then
1045 // destroy any excess.
1046 size_t RHSSize = RHS.size();
1047 size_t CurSize = this->size();
1048 if (CurSize >= RHSSize) {
1049 // Assign common elements.
1050 iterator NewEnd = this->begin();
1051 if (RHSSize)
1052 NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd);
1053
1054 // Destroy excess elements and trim the bounds.
1055 this->destroy_range(NewEnd, this->end());
1056 this->set_size(RHSSize);
1057
1058 // Clear the RHS.
1059 RHS.clear();
1060
1061 return *this;
1062 }
1063
1064 // If we have to grow to have enough elements, destroy the current elements.
1065 // This allows us to avoid copying them during the grow.
1066 // FIXME: this may not actually make any sense if we can efficiently move
1067 // elements.
1068 if (this->capacity() < RHSSize) {
1069 // Destroy current elements.
1070 this->clear();
1071 CurSize = 0;
1072 this->grow(RHSSize);
1073 } else if (CurSize) {
1074 // Otherwise, use assignment for the already-constructed elements.
1075 std::move(RHS.begin(), RHS.begin()+CurSize, this->begin());
1076 }
1077
1078 // Move-construct the new elements in place.
1079 this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
1080 this->begin()+CurSize);
1081
1082 // Set end.
1083 this->set_size(RHSSize);
1084
1085 RHS.clear();
1086 return *this;
1087}
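
Move assignment from a heap-allocated RHS takes the !RHS.isSmall() fast path above and simply adopts its buffer. A sketch (moveAssignSketch is a hypothetical name):

#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <utility>

void moveAssignSketch() {
  llvm::SmallVector<int, 1> Src = {1, 2, 3}; // grown onto the heap
  llvm::SmallVector<int, 1> Dst;
  Dst = std::move(Src);                      // Dst adopts Src's buffer
  assert(Dst.size() == 3 && Src.empty());    // Src is reset to the small state
}
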
1088
1089/// Storage for the SmallVector elements. This is specialized for the N=0 case
1090/// to avoid allocating unnecessary storage.
1091template <typename T, unsigned N>
1092struct SmallVectorStorage {
1093 alignas(T) char InlineElts[N * sizeof(T)];
1094};
1095
1096/// We need the storage to be properly aligned even for small-size of 0 so that
1097/// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is
1098/// well-defined.
1099template <typename T> struct alignas(T) SmallVectorStorage<T, 0> {};
1100
1101/// Forward declaration of SmallVector so that
1102/// CalculateSmallVectorDefaultInlinedElements can reference
1103/// `sizeof(SmallVector<T, 0>)`.
1104template <typename T, unsigned N> class LLVM_GSL_OWNER SmallVector;
1105
1106/// Helper class for calculating the default number of inline elements for
1107/// `SmallVector<T>`.
1108///
1109/// This should be migrated to a constexpr function when our minimum
1110/// compiler support is enough for multi-statement constexpr functions.
1111template <typename T> struct CalculateSmallVectorDefaultInlinedElements {
1112 // Parameter controlling the default number of inlined elements
1113 // for `SmallVector<T>`.
1114 //
1115 // The default number of inlined elements ensures that
1116 // 1. There is at least one inlined element.
1117 // 2. `sizeof(SmallVector<T>) <= kPreferredSmallVectorSizeof` unless
1118 // it contradicts 1.
1119 static constexpr size_t kPreferredSmallVectorSizeof = 64;
1120
1121 // static_assert that sizeof(T) is not "too big".
1122 //
1123 // Because our policy guarantees at least one inlined element, it is possible
1124 // for an arbitrarily large inlined element to allocate an arbitrarily large
1125 // amount of inline storage. We generally consider it an antipattern for a
1126 // SmallVector to allocate an excessive amount of inline storage, so we want
1127 // to call attention to these cases and make sure that users are making an
1128 // intentional decision if they request a lot of inline storage.
1129 //
1130 // We want this assertion to trigger in pathological cases, but otherwise
1131 // not be too easy to hit. To accomplish that, the cutoff is actually somewhat
1132 // larger than kPreferredSmallVectorSizeof (otherwise,
1133 // `SmallVector<SmallVector<T>>` would be one easy way to trip it, and that
1134 // pattern seems useful in practice).
1135 //
1136 // One wrinkle is that this assertion is in theory non-portable, since
1137 // sizeof(T) is in general platform-dependent. However, we don't expect this
1138 // to be much of an issue, because most LLVM development happens on 64-bit
1139 // hosts, and therefore sizeof(T) is expected to *decrease* when compiled for
1140 // 32-bit hosts, dodging the issue. The reverse situation, where development
1141 // happens on a 32-bit host and then fails due to sizeof(T) *increasing* on a
1142 // 64-bit host, is expected to be very rare.
1143 static_assert(
1144 sizeof(T) <= 256,
1145 "You are trying to use a default number of inlined elements for "
1146 "`SmallVector<T>` but `sizeof(T)` is really big! Please use an "
1147 "explicit number of inlined elements with `SmallVector<T, N>` to make "
1148 "sure you really want that much inline storage.");
1149
1150 // Discount the size of the header itself when calculating the maximum inline
1151 // bytes.
1152 static constexpr size_t PreferredInlineBytes =
1153 kPreferredSmallVectorSizeof - sizeof(SmallVector<T, 0>);
1154 static constexpr size_t NumElementsThatFit = PreferredInlineBytes / sizeof(T);
1155 static constexpr size_t value =
1156 NumElementsThatFit == 0 ? 1 : NumElementsThatFit;
1157};
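
A hedged compile-time sketch of the policy described above, using int elements as an example:

#include "llvm/ADT/SmallVector.h"

// The default N always leaves at least one inline element ...
static_assert(llvm::CalculateSmallVectorDefaultInlinedElements<int>::value >= 1,
              "at least one inline element");
// ... and, unless that single element forces it over, keeps the whole object
// within the preferred 64-byte footprint.
static_assert(sizeof(llvm::SmallVector<int>) <= 64 ||
                  llvm::CalculateSmallVectorDefaultInlinedElements<int>::value == 1,
              "stays near kPreferredSmallVectorSizeof");
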
1158
1159/// This is a 'vector' (really, a variable-sized array), optimized
1160/// for the case when the array is small. It contains some number of elements
1161/// in-place, which allows it to avoid heap allocation when the actual number of
1162/// elements is below that threshold. This allows normal "small" cases to be
1163/// fast without losing generality for large inputs.
1164///
1165/// \note
1166/// In the absence of a well-motivated choice for the number of inlined
1167/// elements \p N, it is recommended to use \c SmallVector<T> (that is,
1168/// omitting the \p N). This will choose a default number of inlined elements
1169/// reasonable for allocation on the stack (for example, trying to keep \c
1170/// sizeof(SmallVector<T>) around 64 bytes).
1171///
1172/// \warning This does not attempt to be exception safe.
1173///
1174/// \see https://llvm.org/docs/ProgrammersManual.html#llvm-adt-smallvector-h
1175template <typename T,
1176 unsigned N = CalculateSmallVectorDefaultInlinedElements<T>::value>
1177class LLVM_GSL_OWNER SmallVector : public SmallVectorImpl<T>,
1178 SmallVectorStorage<T, N> {
1179public:
1180 SmallVector() : SmallVectorImpl<T>(N) {}
1181
1182 ~SmallVector() {
1183 // Destroy the constructed elements in the vector.
1184 this->destroy_range(this->begin(), this->end());
1185 }
1186
1187 explicit SmallVector(size_t Size, const T &Value = T())
1188 : SmallVectorImpl<T>(N) {
1189 this->assign(Size, Value);
1190 }
1191
1192 template <typename ItTy,
1193 typename = std::enable_if_t<std::is_convertible<
1194 typename std::iterator_traits<ItTy>::iterator_category,
1195 std::input_iterator_tag>::value>>
1196 SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
1197 this->append(S, E);
1198 }
1199
1200 template <typename RangeTy>
1201 explicit SmallVector(const iterator_range<RangeTy> &R)
1202 : SmallVectorImpl<T>(N) {
1203 this->append(R.begin(), R.end());
1204 }
1205
1206 SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
1207 this->assign(IL);
1208 }
1209
1210 SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) {
1211 if (!RHS.empty())
1212 SmallVectorImpl<T>::operator=(RHS);
1213 }
1214
1215 SmallVector &operator=(const SmallVector &RHS) {
1216 SmallVectorImpl<T>::operator=(RHS);
1217 return *this;
1218 }
1219
1220 SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) {
1221 if (!RHS.empty())
1222 SmallVectorImpl<T>::operator=(::std::move(RHS));
1223 }
1224
1225 SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) {
1226 if (!RHS.empty())
1227 SmallVectorImpl<T>::operator=(::std::move(RHS));
1228 }
1229
1230 SmallVector &operator=(SmallVector &&RHS) {
1231 SmallVectorImpl<T>::operator=(::std::move(RHS));
1232 return *this;
1233 }
1234
1235 SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
1236 SmallVectorImpl<T>::operator=(::std::move(RHS));
1237 return *this;
1238 }
1239
1240 SmallVector &operator=(std::initializer_list<T> IL) {
1241 this->assign(IL);
1242 return *this;
1243 }
1244};
1245
1246template <typename T, unsigned N>
1247inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
1248 return X.capacity_in_bytes();
1249}
1250
1251template <typename RangeType>
1252using ValueTypeFromRangeType =
1253 typename std::remove_const<typename std::remove_reference<
1254 decltype(*std::begin(std::declval<RangeType &>()))>::type>::type;
1255
1256/// Given a range of type R, iterate the entire range and return a
1257/// SmallVector with elements of the vector. This is useful, for example,
1258/// when you want to iterate a range and then sort the results.
1259template <unsigned Size, typename R>
1260SmallVector<ValueTypeFromRangeType<R>, Size> to_vector(R &&Range) {
1261 return {std::begin(Range), std::end(Range)};
1262}
1263template <typename R>
1264SmallVector<ValueTypeFromRangeType<R>,
1265 CalculateSmallVectorDefaultInlinedElements<
1266 ValueTypeFromRangeType<R>>::value>
1267to_vector(R &&Range) {
1268 return {std::begin(Range), std::end(Range)};
1269}
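
to_vector() above materializes any range into a SmallVector, which is handy when the result needs to be mutated, for example sorted. A sketch (sortedCopy is a hypothetical helper):

#include "llvm/ADT/SmallVector.h"
#include <algorithm>

llvm::SmallVector<int, 4> sortedCopy(const llvm::SmallVector<int, 4> &In) {
  auto Out = llvm::to_vector<4>(In); // copy the range into a fresh vector
  std::sort(Out.begin(), Out.end());
  return Out;
}
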
1270
1271} // end namespace llvm
1272
1273namespace std {
1274
1275 /// Implement std::swap in terms of SmallVector swap.
1276 template<typename T>
1277 inline void
1278 swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) {
1279 LHS.swap(RHS);
1280 }
1281
1282 /// Implement std::swap in terms of SmallVector swap.
1283 template<typename T, unsigned N>
1284 inline void
1285 swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) {
1286 LHS.swap(RHS);
1287 }
1288
1289} // end namespace std
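
The specializations above let swap calls in generic code resolve to SmallVectorImpl::swap instead of the generic move-based std::swap. A usage sketch (stdSwapSketch is a hypothetical name):

#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <utility>

void stdSwapSketch() {
  llvm::SmallVector<int, 4> A = {1, 2}, B = {3};
  using std::swap;
  swap(A, B); // picks the SmallVector overload defined above
  assert(A.size() == 1 && B.size() == 2);
}
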
1290
1291#endif // LLVM_ADT_SMALLVECTOR_H