Bug Summary

File: llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
Warning: line 384, column 11: Called C++ object pointer is null
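The path in the annotated source (steps 1 through 49) reaches this dereference from AMDGPUPromoteAllocaToVector::handleAlloca: inside tryPromoteAllocaToVector, the analyzer assumes an alloca user is neither an Instruction nor a GetElementPtrInst, so Inst is left null by the dyn_cast at line 474 and is then passed to canVectorizeInst at line 478, which calls Inst->getOpcode() at line 384. Below is a minimal self-contained sketch of that pattern with hypothetical helper names (canVectorizeInstSketch, visitAllocaUserSketch); it only illustrates the report and is not the actual LLVM code.

// Minimal sketch of the flagged pattern (hypothetical names, not the real code).
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

static bool canVectorizeInstSketch(Instruction *Inst) {
  // Mirrors line 384: Inst is dereferenced without a null check.
  return Inst->getOpcode() == Instruction::BitCast;
}

static bool visitAllocaUserSketch(User *AllocaUser) {
  // dyn_cast yields nullptr when AllocaUser is not an Instruction (step 44).
  Instruction *Inst = dyn_cast<Instruction>(AllocaUser);
  // Along the reported path GEP is null as well (step 45.1)...
  auto *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
  if (!GEP)
    // ...so the null Inst is forwarded into the callee (steps 47 and 48).
    return canVectorizeInstSketch(Inst);
  return true;
}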

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AMDGPUPromoteAlloca.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/build-llvm/lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/build-llvm/lib/Target/AMDGPU -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-11-29-190409-37574-1 -x c++ /build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp

/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp

1//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass eliminates allocas by either converting them into vectors or
10// by migrating them to local address space.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AMDGPU.h"
15#include "AMDGPUSubtarget.h"
16#include "Utils/AMDGPUBaseInfo.h"
17#include "llvm/ADT/APInt.h"
18#include "llvm/ADT/None.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/StringRef.h"
21#include "llvm/ADT/Triple.h"
22#include "llvm/ADT/Twine.h"
23#include "llvm/Analysis/CaptureTracking.h"
24#include "llvm/Analysis/ValueTracking.h"
25#include "llvm/CodeGen/TargetPassConfig.h"
26#include "llvm/IR/Attributes.h"
27#include "llvm/IR/BasicBlock.h"
28#include "llvm/IR/Constant.h"
29#include "llvm/IR/Constants.h"
30#include "llvm/IR/DataLayout.h"
31#include "llvm/IR/DerivedTypes.h"
32#include "llvm/IR/Function.h"
33#include "llvm/IR/GlobalValue.h"
34#include "llvm/IR/GlobalVariable.h"
35#include "llvm/IR/IRBuilder.h"
36#include "llvm/IR/Instruction.h"
37#include "llvm/IR/Instructions.h"
38#include "llvm/IR/IntrinsicInst.h"
39#include "llvm/IR/Intrinsics.h"
40#include "llvm/IR/IntrinsicsAMDGPU.h"
41#include "llvm/IR/IntrinsicsR600.h"
42#include "llvm/IR/LLVMContext.h"
43#include "llvm/IR/Metadata.h"
44#include "llvm/IR/Module.h"
45#include "llvm/IR/Type.h"
46#include "llvm/IR/User.h"
47#include "llvm/IR/Value.h"
48#include "llvm/Pass.h"
49#include "llvm/Support/Casting.h"
50#include "llvm/Support/Debug.h"
51#include "llvm/Support/ErrorHandling.h"
52#include "llvm/Support/MathExtras.h"
53#include "llvm/Support/raw_ostream.h"
54#include "llvm/Target/TargetMachine.h"
55#include <algorithm>
56#include <cassert>
57#include <cstdint>
58#include <map>
59#include <tuple>
60#include <utility>
61#include <vector>
62
63#define DEBUG_TYPE "amdgpu-promote-alloca"
64
65using namespace llvm;
66
67namespace {
68
69static cl::opt<bool> DisablePromoteAllocaToVector(
70 "disable-promote-alloca-to-vector",
71 cl::desc("Disable promote alloca to vector"),
72 cl::init(false));
73
74static cl::opt<bool> DisablePromoteAllocaToLDS(
75 "disable-promote-alloca-to-lds",
76 cl::desc("Disable promote alloca to LDS"),
77 cl::init(false));
78
79static cl::opt<unsigned> PromoteAllocaToVectorLimit(
80 "amdgpu-promote-alloca-to-vector-limit",
81 cl::desc("Maximum byte size to consider promote alloca to vector"),
82 cl::init(0));
83
84// FIXME: This can create globals so should be a module pass.
85class AMDGPUPromoteAlloca : public FunctionPass {
86private:
87 const TargetMachine *TM;
88 Module *Mod = nullptr;
89 const DataLayout *DL = nullptr;
90
91 // FIXME: This should be per-kernel.
92 uint32_t LocalMemLimit = 0;
93 uint32_t CurrentLocalMemUsage = 0;
94 unsigned MaxVGPRs;
95
96 bool IsAMDGCN = false;
97 bool IsAMDHSA = false;
98
99 std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
100 Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);
101
102 /// BaseAlloca is the alloca root the search started from.
103 /// Val may be that alloca or a recursive user of it.
104 bool collectUsesWithPtrTypes(Value *BaseAlloca,
105 Value *Val,
106 std::vector<Value*> &WorkList) const;
107
108 /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
109 /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
110 /// Returns true if both operands are derived from the same alloca. Val should
111 /// be the same value as one of the input operands of UseInst.
112 bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
113 Instruction *UseInst,
114 int OpIdx0, int OpIdx1) const;
115
116 /// Check whether we have enough local memory for promotion.
117 bool hasSufficientLocalMem(const Function &F);
118
119public:
120 static char ID;
121
122 AMDGPUPromoteAlloca() : FunctionPass(ID) {}
123
124 bool doInitialization(Module &M) override;
125 bool runOnFunction(Function &F) override;
126
127 StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }
128
129 bool handleAlloca(AllocaInst &I, bool SufficientLDS);
130
131 void getAnalysisUsage(AnalysisUsage &AU) const override {
132 AU.setPreservesCFG();
133 FunctionPass::getAnalysisUsage(AU);
134 }
135};
136
137class AMDGPUPromoteAllocaToVector : public FunctionPass {
138private:
139 unsigned MaxVGPRs;
140
141public:
142 static char ID;
143
144 AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}
145
146 bool runOnFunction(Function &F) override;
147
148 StringRef getPassName() const override {
149 return "AMDGPU Promote Alloca to vector";
150 }
151
152 bool handleAlloca(AllocaInst &I);
153
154 void getAnalysisUsage(AnalysisUsage &AU) const override {
155 AU.setPreservesCFG();
156 FunctionPass::getAnalysisUsage(AU);
157 }
158};
159
160} // end anonymous namespace
161
162char AMDGPUPromoteAlloca::ID = 0;
163char AMDGPUPromoteAllocaToVector::ID = 0;
164
165INITIALIZE_PASS(AMDGPUPromoteAlloca, DEBUG_TYPE,
166 "AMDGPU promote alloca to vector or LDS", false, false)
167
168INITIALIZE_PASS(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
169 "AMDGPU promote alloca to vector", false, false)
170
171char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
172char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;
173
174bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
175 Mod = &M;
176 DL = &Mod->getDataLayout();
177
178 return false;
179}
180
181bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
182 if (skipFunction(F))
183 return false;
184
185 if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
186 TM = &TPC->getTM<TargetMachine>();
187 else
188 return false;
189
190 const Triple &TT = TM->getTargetTriple();
191 IsAMDGCN = TT.getArch() == Triple::amdgcn;
192 IsAMDHSA = TT.getOS() == Triple::AMDHSA;
193
194 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);
195 if (!ST.isPromoteAllocaEnabled())
196 return false;
197
198 if (IsAMDGCN) {
199 const GCNSubtarget &ST = TM->getSubtarget<GCNSubtarget>(F);
200 MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
201 } else {
202 MaxVGPRs = 128;
203 }
204
205 bool SufficientLDS = hasSufficientLocalMem(F);
206 bool Changed = false;
207 BasicBlock &EntryBB = *F.begin();
208
209 SmallVector<AllocaInst *, 16> Allocas;
210 for (Instruction &I : EntryBB) {
211 if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
212 Allocas.push_back(AI);
213 }
214
215 for (AllocaInst *AI : Allocas) {
216 if (handleAlloca(*AI, SufficientLDS))
217 Changed = true;
218 }
219
220 return Changed;
221}
222
223std::pair<Value *, Value *>
224AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
225 const Function &F = *Builder.GetInsertBlock()->getParent();
226 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);
227
228 if (!IsAMDHSA) {
229 Function *LocalSizeYFn
230 = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
231 Function *LocalSizeZFn
232 = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);
233
234 CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
235 CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});
236
237 ST.makeLIDRangeMetadata(LocalSizeY);
238 ST.makeLIDRangeMetadata(LocalSizeZ);
239
240 return std::make_pair(LocalSizeY, LocalSizeZ);
241 }
242
243 // We must read the size out of the dispatch pointer.
244 assert(IsAMDGCN);
245
246 // We are indexing into this struct, and want to extract the workgroup_size_*
247 // fields.
248 //
249 // typedef struct hsa_kernel_dispatch_packet_s {
250 // uint16_t header;
251 // uint16_t setup;
252 // uint16_t workgroup_size_x ;
253 // uint16_t workgroup_size_y;
254 // uint16_t workgroup_size_z;
255 // uint16_t reserved0;
256 // uint32_t grid_size_x ;
257 // uint32_t grid_size_y ;
258 // uint32_t grid_size_z;
259 //
260 // uint32_t private_segment_size;
261 // uint32_t group_segment_size;
262 // uint64_t kernel_object;
263 //
264 // #ifdef HSA_LARGE_MODEL
265 // void *kernarg_address;
266 // #elif defined HSA_LITTLE_ENDIAN
267 // void *kernarg_address;
268 // uint32_t reserved1;
269 // #else
270 // uint32_t reserved1;
271 // void *kernarg_address;
272 // #endif
273 // uint64_t reserved2;
274 // hsa_signal_t completion_signal; // uint64_t wrapper
275 // } hsa_kernel_dispatch_packet_t
276 //
277 Function *DispatchPtrFn
278 = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);
279
280 CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
281 DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
282 DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
283
284 // Size of the dispatch packet struct.
285 DispatchPtr->addDereferenceableAttr(AttributeList::ReturnIndex, 64);
286
287 Type *I32Ty = Type::getInt32Ty(Mod->getContext());
288 Value *CastDispatchPtr = Builder.CreateBitCast(
289 DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));
290
291 // We could do a single 64-bit load here, but it's likely that the basic
292 // 32-bit and extract sequence is already present, and it is probably easier
293 // to CSE this. The loads should be mergable later anyway.
294 Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
295 LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));
296
297 Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
298 LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));
299
300 MDNode *MD = MDNode::get(Mod->getContext(), None);
301 LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
302 LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
303 ST.makeLIDRangeMetadata(LoadZU);
304
305 // Extract y component. Upper half of LoadZU should be zero already.
306 Value *Y = Builder.CreateLShr(LoadXY, 16);
307
308 return std::make_pair(Y, LoadZU);
309}
310
311Value *AMDGPUPromoteAlloca::getWorkitemID(IRBuilder<> &Builder, unsigned N) {
312 const AMDGPUSubtarget &ST =
313 AMDGPUSubtarget::get(*TM, *Builder.GetInsertBlock()->getParent());
314 Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
315
316 switch (N) {
317 case 0:
318 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
319 : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
320 break;
321 case 1:
322 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
323 : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
324 break;
325
326 case 2:
327 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
328 : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
329 break;
330 default:
331 llvm_unreachable("invalid dimension");
332 }
333
334 Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
335 CallInst *CI = Builder.CreateCall(WorkitemIdFn);
336 ST.makeLIDRangeMetadata(CI);
337
338 return CI;
339}
340
341static FixedVectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
342 return FixedVectorType::get(ArrayTy->getElementType(),
343 ArrayTy->getNumElements());
344}
345
346static Value *stripBitcasts(Value *V) {
347 while (Instruction *I = dyn_cast<Instruction>(V)) {
348 if (I->getOpcode() != Instruction::BitCast)
349 break;
350 V = I->getOperand(0);
351 }
352 return V;
353}
354
355static Value *
356calculateVectorIndex(Value *Ptr,
357 const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
358 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(stripBitcasts(Ptr));
359 if (!GEP)
360 return nullptr;
361
362 auto I = GEPIdx.find(GEP);
363 return I == GEPIdx.end() ? nullptr : I->second;
364}
365
366static Value* GEPToVectorIndex(GetElementPtrInst *GEP) {
367 // FIXME we only support simple cases
368 if (GEP->getNumOperands() != 3)
369 return nullptr;
370
371 ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
372 if (!I0 || !I0->isZero())
373 return nullptr;
374
375 return GEP->getOperand(2);
376}
377
378// Not an instruction handled below to turn into a vector.
379//
380// TODO: Check isTriviallyVectorizable for calls and handle other
381// instructions.
382static bool canVectorizeInst(Instruction *Inst, User *User,
383 const DataLayout &DL) {
384 switch (Inst->getOpcode()) {
49
Called C++ object pointer is null
385 case Instruction::Load: {
386 // Currently only handle the case where the Pointer Operand is a GEP.
387 // Also we could not vectorize volatile or atomic loads.
388 LoadInst *LI = cast<LoadInst>(Inst);
389 if (isa<AllocaInst>(User) &&
390 LI->getPointerOperandType() == User->getType() &&
391 isa<VectorType>(LI->getType()))
392 return true;
393
394 Instruction *PtrInst = dyn_cast<Instruction>(LI->getPointerOperand());
395 if (!PtrInst)
396 return false;
397
398 return (PtrInst->getOpcode() == Instruction::GetElementPtr ||
399 PtrInst->getOpcode() == Instruction::BitCast) &&
400 LI->isSimple();
401 }
402 case Instruction::BitCast:
403 return true;
404 case Instruction::Store: {
405 // Must be the stored pointer operand, not a stored value, plus
406 // since it should be canonical form, the User should be a GEP.
407 // Also we could not vectorize volatile or atomic stores.
408 StoreInst *SI = cast<StoreInst>(Inst);
409 if (isa<AllocaInst>(User) &&
410 SI->getPointerOperandType() == User->getType() &&
411 isa<VectorType>(SI->getValueOperand()->getType()))
412 return true;
413
414 Instruction *UserInst = dyn_cast<Instruction>(User);
415 if (!UserInst)
416 return false;
417
418 return (SI->getPointerOperand() == User) &&
419 (UserInst->getOpcode() == Instruction::GetElementPtr ||
420 UserInst->getOpcode() == Instruction::BitCast) &&
421 SI->isSimple();
422 }
423 default:
424 return false;
425 }
426}
427
428static bool tryPromoteAllocaToVector(AllocaInst *Alloca, const DataLayout &DL,
429 unsigned MaxVGPRs) {
430
431 if (DisablePromoteAllocaToVector) {
20
Assuming the condition is false
21
Taking false branch
432 LLVM_DEBUG(dbgs() << " Promotion alloca to vector is disabled\n");
433 return false;
434 }
435
436 Type *AllocaTy = Alloca->getAllocatedType();
437 auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
22
Assuming 'AllocaTy' is not a 'FixedVectorType'
438 if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
23.1
'ArrayTy' is non-null
23
Assuming 'AllocaTy' is a 'ArrayType'
24
Taking true branch
439 if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
25
Assuming the condition is true
27
Taking true branch
440 ArrayTy->getNumElements() > 0)
26
Assuming the condition is true
441 VectorTy = arrayTypeToVecType(ArrayTy);
442 }
443
444 // Use up to 1/4 of available register budget for vectorization.
445 unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
28
Assuming the condition is false
29
'?' condition is false
446 : (MaxVGPRs * 32);
447
448 if (DL.getTypeSizeInBits(AllocaTy) * 4 > Limit) {
30
Assuming the condition is false
31
Taking false branch
449 LLVM_DEBUG(dbgs() << " Alloca too big for vectorization with "
450 << MaxVGPRs << " registers available\n");
451 return false;
452 }
453
454 LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");
32
Assuming 'DebugFlag' is false
33
Loop condition is false. Exiting loop
455
456 // FIXME: There is no reason why we can't support larger arrays, we
457 // are just being conservative for now.
458 // FIXME: We also reject alloca's of the form [ 2 x [ 2 x i32 ]] or equivalent. Potentially these
459 // could also be promoted but we don't currently handle this case
460 if (!VectorTy || VectorTy->getNumElements() > 16 ||
34
Assuming 'VectorTy' is non-null
35
Assuming the condition is false
37
Taking false branch
461 VectorTy->getNumElements() < 2) {
36
Assuming the condition is false
462 LLVM_DEBUG(dbgs() << " Cannot convert type to vector\n");
463 return false;
464 }
465
466 std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
467 std::vector<Value *> WorkList;
468 SmallVector<User *, 8> Users(Alloca->users());
469 SmallVector<User *, 8> UseUsers(Users.size(), Alloca);
470 Type *VecEltTy = VectorTy->getElementType();
471 while (!Users.empty()) {
38
Calling 'SmallVectorBase::empty'
41
Returning from 'SmallVectorBase::empty'
42
Loop condition is true. Entering loop body
472 User *AllocaUser = Users.pop_back_val();
473 User *UseUser = UseUsers.pop_back_val();
474 Instruction *Inst = dyn_cast<Instruction>(AllocaUser);
43
Assuming 'AllocaUser' is not a 'Instruction'
44
'Inst' initialized to a null pointer value
475
476 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
45
Assuming 'AllocaUser' is not a 'GetElementPtrInst'
477 if (!GEP) {
45.1
'GEP' is null
46
Taking true branch
478 if (!canVectorizeInst(Inst, UseUser, DL))
47
Passing null pointer value via 1st parameter 'Inst'
48
Calling 'canVectorizeInst'
479 return false;
480
481 if (Inst->getOpcode() == Instruction::BitCast) {
482 Type *FromTy = Inst->getOperand(0)->getType()->getPointerElementType();
483 Type *ToTy = Inst->getType()->getPointerElementType();
484 if (FromTy->isAggregateType() || ToTy->isAggregateType() ||
485 DL.getTypeSizeInBits(FromTy) != DL.getTypeSizeInBits(ToTy))
486 continue;
487
488 for (User *CastUser : Inst->users()) {
489 if (isAssumeLikeIntrinsic(cast<Instruction>(CastUser)))
490 continue;
491 Users.push_back(CastUser);
492 UseUsers.push_back(Inst);
493 }
494
495 continue;
496 }
497
498 WorkList.push_back(AllocaUser);
499 continue;
500 }
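At this point the path has passed a null Inst into canVectorizeInst (steps 43 through 48 above), which dereferences it at line 384 (step 49). In practice every user of an AllocaInst is an Instruction, so the path may be a false positive, but the analyzer cannot prove that locally. A defensive check before the call at line 478 would cut the path; the snippet below is only an illustrative sketch of such a guard, not the change that was actually made upstream.

      // Hypothetical guard (sketch only, not the upstream fix): a user that is
      // not an Instruction cannot be vectorized, so give up instead of passing
      // a null Inst into canVectorizeInst (the dereference at line 384).
      if (!Inst)
        return false;
      if (!canVectorizeInst(Inst, UseUser, DL))
        return false;

An equivalent alternative would be for canVectorizeInst itself to return false when its Inst parameter is null.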
501
502 Value *Index = GEPToVectorIndex(GEP);
503
504 // If we can't compute a vector index from this GEP, then we can't
505 // promote this alloca to vector.
506 if (!Index) {
507 LLVM_DEBUG(dbgs() << " Cannot compute vector index for GEP " << *GEP
508 << '\n');
509 return false;
510 }
511
512 GEPVectorIdx[GEP] = Index;
513 Users.append(GEP->user_begin(), GEP->user_end());
514 UseUsers.append(GEP->getNumUses(), GEP);
515 }
516
517 LLVM_DEBUG(dbgs() << " Converting alloca to vector " << *AllocaTy << " -> "
518 << *VectorTy << '\n');
519
520 for (Value *V : WorkList) {
521 Instruction *Inst = cast<Instruction>(V);
522 IRBuilder<> Builder(Inst);
523 switch (Inst->getOpcode()) {
524 case Instruction::Load: {
525 if (Inst->getType() == AllocaTy || Inst->getType()->isVectorTy())
526 break;
527
528 Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
529 Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
530 if (!Index)
531 break;
532
533 Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
534 Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
535 Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
536 Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
537 if (Inst->getType() != VecEltTy)
538 ExtractElement = Builder.CreateBitOrPointerCast(ExtractElement, Inst->getType());
539 Inst->replaceAllUsesWith(ExtractElement);
540 Inst->eraseFromParent();
541 break;
542 }
543 case Instruction::Store: {
544 StoreInst *SI = cast<StoreInst>(Inst);
545 if (SI->getValueOperand()->getType() == AllocaTy ||
546 SI->getValueOperand()->getType()->isVectorTy())
547 break;
548
549 Value *Ptr = SI->getPointerOperand();
550 Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
551 if (!Index)
552 break;
553
554 Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
555 Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
556 Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
557 Value *Elt = SI->getValueOperand();
558 if (Elt->getType() != VecEltTy)
559 Elt = Builder.CreateBitOrPointerCast(Elt, VecEltTy);
560 Value *NewVecValue = Builder.CreateInsertElement(VecValue, Elt, Index);
561 Builder.CreateStore(NewVecValue, BitCast);
562 Inst->eraseFromParent();
563 break;
564 }
565
566 default:
567 llvm_unreachable("Inconsistency in instructions promotable to vector");
568 }
569 }
570 return true;
571}
572
573static bool isCallPromotable(CallInst *CI) {
574 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
575 if (!II)
576 return false;
577
578 switch (II->getIntrinsicID()) {
579 case Intrinsic::memcpy:
580 case Intrinsic::memmove:
581 case Intrinsic::memset:
582 case Intrinsic::lifetime_start:
583 case Intrinsic::lifetime_end:
584 case Intrinsic::invariant_start:
585 case Intrinsic::invariant_end:
586 case Intrinsic::launder_invariant_group:
587 case Intrinsic::strip_invariant_group:
588 case Intrinsic::objectsize:
589 return true;
590 default:
591 return false;
592 }
593}
594
595bool AMDGPUPromoteAlloca::binaryOpIsDerivedFromSameAlloca(Value *BaseAlloca,
596 Value *Val,
597 Instruction *Inst,
598 int OpIdx0,
599 int OpIdx1) const {
600 // Figure out which operand is the one we might not be promoting.
601 Value *OtherOp = Inst->getOperand(OpIdx0);
602 if (Val == OtherOp)
603 OtherOp = Inst->getOperand(OpIdx1);
604
605 if (isa<ConstantPointerNull>(OtherOp))
606 return true;
607
608 Value *OtherObj = getUnderlyingObject(OtherOp);
609 if (!isa<AllocaInst>(OtherObj))
610 return false;
611
612 // TODO: We should be able to replace undefs with the right pointer type.
613
614 // TODO: If we know the other base object is another promotable
615 // alloca, not necessarily this alloca, we can do this. The
616 // important part is both must have the same address space at
617 // the end.
618 if (OtherObj != BaseAlloca) {
619 LLVM_DEBUG(
620 dbgs() << "Found a binary instruction with another alloca object\n");
621 return false;
622 }
623
624 return true;
625}
626
627bool AMDGPUPromoteAlloca::collectUsesWithPtrTypes(
628 Value *BaseAlloca,
629 Value *Val,
630 std::vector<Value*> &WorkList) const {
631
632 for (User *User : Val->users()) {
633 if (is_contained(WorkList, User))
634 continue;
635
636 if (CallInst *CI = dyn_cast<CallInst>(User)) {
637 if (!isCallPromotable(CI))
638 return false;
639
640 WorkList.push_back(User);
641 continue;
642 }
643
644 Instruction *UseInst = cast<Instruction>(User);
645 if (UseInst->getOpcode() == Instruction::PtrToInt)
646 return false;
647
648 if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
649 if (LI->isVolatile())
650 return false;
651
652 continue;
653 }
654
655 if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
656 if (SI->isVolatile())
657 return false;
658
659 // Reject if the stored value is not the pointer operand.
660 if (SI->getPointerOperand() != Val)
661 return false;
662 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
663 if (RMW->isVolatile())
664 return false;
665 } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
666 if (CAS->isVolatile())
667 return false;
668 }
669
670 // Only promote a select if we know that the other select operand
671 // is from another pointer that will also be promoted.
672 if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
673 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
674 return false;
675
676 // May need to rewrite constant operands.
677 WorkList.push_back(ICmp);
678 }
679
680 if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
681 // Give up if the pointer may be captured.
682 if (PointerMayBeCaptured(UseInst, true, true))
683 return false;
684 // Don't collect the users of this.
685 WorkList.push_back(User);
686 continue;
687 }
688
689 if (!User->getType()->isPointerTy())
690 continue;
691
692 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
693 // Be conservative if an address could be computed outside the bounds of
694 // the alloca.
695 if (!GEP->isInBounds())
696 return false;
697 }
698
699 // Only promote a select if we know that the other select operand is from
700 // another pointer that will also be promoted.
701 if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
702 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
703 return false;
704 }
705
706 // Repeat for phis.
707 if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
708 // TODO: Handle more complex cases. We should be able to replace loops
709 // over arrays.
710 switch (Phi->getNumIncomingValues()) {
711 case 1:
712 break;
713 case 2:
714 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
715 return false;
716 break;
717 default:
718 return false;
719 }
720 }
721
722 WorkList.push_back(User);
723 if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
724 return false;
725 }
726
727 return true;
728}
729
730bool AMDGPUPromoteAlloca::hasSufficientLocalMem(const Function &F) {
731
732 FunctionType *FTy = F.getFunctionType();
733 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);
734
735 // If the function has any arguments in the local address space, then it's
736 // possible these arguments require the entire local memory space, so
737 // we cannot use local memory in the pass.
738 for (Type *ParamTy : FTy->params()) {
739 PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
740 if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
741 LocalMemLimit = 0;
742 LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
743 "local memory disabled.\n");
744 return false;
745 }
746 }
747
748 LocalMemLimit = ST.getLocalMemorySize();
749 if (LocalMemLimit == 0)
750 return false;
751
752 SmallVector<const Constant *, 16> Stack;
753 SmallPtrSet<const Constant *, 8> VisitedConstants;
754 SmallPtrSet<const GlobalVariable *, 8> UsedLDS;
755
756 auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
757 for (const User *U : Val->users()) {
758 if (const Instruction *Use = dyn_cast<Instruction>(U)) {
759 if (Use->getParent()->getParent() == &F)
760 return true;
761 } else {
762 const Constant *C = cast<Constant>(U);
763 if (VisitedConstants.insert(C).second)
764 Stack.push_back(C);
765 }
766 }
767
768 return false;
769 };
770
771 for (GlobalVariable &GV : Mod->globals()) {
772 if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
773 continue;
774
775 if (visitUsers(&GV, &GV)) {
776 UsedLDS.insert(&GV);
777 Stack.clear();
778 continue;
779 }
780
781 // For any ConstantExpr uses, we need to recursively search the users until
782 // we see a function.
783 while (!Stack.empty()) {
784 const Constant *C = Stack.pop_back_val();
785 if (visitUsers(&GV, C)) {
786 UsedLDS.insert(&GV);
787 Stack.clear();
788 break;
789 }
790 }
791 }
792
793 const DataLayout &DL = Mod->getDataLayout();
794 SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes;
795 AllocatedSizes.reserve(UsedLDS.size());
796
797 for (const GlobalVariable *GV : UsedLDS) {
798 Align Alignment =
799 DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
800 uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());
801 AllocatedSizes.emplace_back(AllocSize, Alignment);
802 }
803
804 // Sort to try to estimate the worst case alignment padding
805 //
806 // FIXME: We should really do something to fix the addresses to a more optimal
807 // value instead
808 llvm::sort(AllocatedSizes.begin(), AllocatedSizes.end(),
809 [](std::pair<uint64_t, Align> LHS, std::pair<uint64_t, Align> RHS) {
810 return LHS.second < RHS.second;
811 });
812
813 // Check how much local memory is being used by global objects
814 CurrentLocalMemUsage = 0;
815
816 // FIXME: Try to account for padding here. The real padding and address is
817 // currently determined from the inverse order of uses in the function when
818 // legalizing, which could also potentially change. We try to estimate the
819 // worst case here, but we probably should fix the addresses earlier.
820 for (auto Alloc : AllocatedSizes) {
821 CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second);
822 CurrentLocalMemUsage += Alloc.first;
823 }
824
825 unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
826 F);
827
828 // Restrict local memory usage so that we don't drastically reduce occupancy,
829 // unless it is already significantly reduced.
830
831 // TODO: Have some sort of hint or other heuristics to guess occupancy based
832 // on other factors..
833 unsigned OccupancyHint = ST.getWavesPerEU(F).second;
834 if (OccupancyHint == 0)
835 OccupancyHint = 7;
836
837 // Clamp to max value.
838 OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());
839
840 // Check the hint but ignore it if it's obviously wrong from the existing LDS
841 // usage.
842 MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);
843
844
845 // Round up to the next tier of usage.
846 unsigned MaxSizeWithWaveCount
847 = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);
848
849 // Program is possibly broken by using more local mem than available.
850 if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
851 return false;
852
853 LocalMemLimit = MaxSizeWithWaveCount;
854
855 LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
856 << " bytes of LDS\n"
857 << " Rounding size to " << MaxSizeWithWaveCount
858 << " with a maximum occupancy of " << MaxOccupancy << '\n'
859 << " and " << (LocalMemLimit - CurrentLocalMemUsage)
860 << " available for promotion\n");
861
862 return true;
863}
864
865// FIXME: Should try to pick the most likely to be profitable allocas first.
866bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
867 // Array allocations are probably not worth handling, since an allocation of
868 // the array type is the canonical form.
869 if (!I.isStaticAlloca() || I.isArrayAllocation())
870 return false;
871
872 const DataLayout &DL = Mod->getDataLayout();
873 IRBuilder<> Builder(&I);
874
875 // First try to replace the alloca with a vector
876 Type *AllocaTy = I.getAllocatedType();
877
878 LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');
879
880 if (tryPromoteAllocaToVector(&I, DL, MaxVGPRs))
881 return true; // Promoted to vector.
882
883 if (DisablePromoteAllocaToLDS)
884 return false;
885
886 const Function &ContainingFunction = *I.getParent()->getParent();
887 CallingConv::ID CC = ContainingFunction.getCallingConv();
888
889 // Don't promote the alloca to LDS for shader calling conventions as the work
890 // item ID intrinsics are not supported for these calling conventions.
891 // Furthermore not all LDS is available for some of the stages.
892 switch (CC) {
893 case CallingConv::AMDGPU_KERNEL:
894 case CallingConv::SPIR_KERNEL:
895 break;
896 default:
897 LLVM_DEBUG(
898 dbgs()
899 << " promote alloca to LDS not supported with calling convention.\n");
900 return false;
901 }
902
903 // Not likely to have sufficient local memory for promotion.
904 if (!SufficientLDS)
905 return false;
906
907 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, ContainingFunction);
908 unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;
909
910 Align Alignment =
911 DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());
912
913 // FIXME: This computed padding is likely wrong since it depends on inverse
914 // usage order.
915 //
916 // FIXME: It is also possible that if we're allowed to use all of the memory
917 // we could end up using more than the maximum due to alignment padding.
918
919 uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
920 uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
921 NewSize += AllocSize;
922
923 if (NewSize > LocalMemLimit) {
924 LLVM_DEBUG(dbgs() << " " << AllocSize
925 << " bytes of local memory not available to promote\n");
926 return false;
927 }
928
929 CurrentLocalMemUsage = NewSize;
930
931 std::vector<Value*> WorkList;
932
933 if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
934 LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
935 return false;
936 }
937
938 LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");
939
940 Function *F = I.getParent()->getParent();
941
942 Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
943 GlobalVariable *GV = new GlobalVariable(
944 *Mod, GVTy, false, GlobalValue::InternalLinkage,
945 UndefValue::get(GVTy),
946 Twine(F->getName()) + Twine('.') + I.getName(),
947 nullptr,
948 GlobalVariable::NotThreadLocal,
949 AMDGPUAS::LOCAL_ADDRESS);
950 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
951 GV->setAlignment(MaybeAlign(I.getAlignment()));
952
953 Value *TCntY, *TCntZ;
954
955 std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
956 Value *TIdX = getWorkitemID(Builder, 0);
957 Value *TIdY = getWorkitemID(Builder, 1);
958 Value *TIdZ = getWorkitemID(Builder, 2);
959
960 Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
961 Tmp0 = Builder.CreateMul(Tmp0, TIdX);
962 Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
963 Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
964 TID = Builder.CreateAdd(TID, TIdZ);
965
966 Value *Indices[] = {
967 Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
968 TID
969 };
970
971 Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
972 I.mutateType(Offset->getType());
973 I.replaceAllUsesWith(Offset);
974 I.eraseFromParent();
975
976 for (Value *V : WorkList) {
977 CallInst *Call = dyn_cast<CallInst>(V);
978 if (!Call) {
979 if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
980 Value *Src0 = CI->getOperand(0);
981 Type *EltTy = Src0->getType()->getPointerElementType();
982 PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);
983
984 if (isa<ConstantPointerNull>(CI->getOperand(0)))
985 CI->setOperand(0, ConstantPointerNull::get(NewTy));
986
987 if (isa<ConstantPointerNull>(CI->getOperand(1)))
988 CI->setOperand(1, ConstantPointerNull::get(NewTy));
989
990 continue;
991 }
992
993 // The operand's value should be corrected on its own and we don't want to
994 // touch the users.
995 if (isa<AddrSpaceCastInst>(V))
996 continue;
997
998 Type *EltTy = V->getType()->getPointerElementType();
999 PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);
1000
1001 // FIXME: It doesn't really make sense to try to do this for all
1002 // instructions.
1003 V->mutateType(NewTy);
1004
1005 // Adjust the types of any constant operands.
1006 if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
1007 if (isa<ConstantPointerNull>(SI->getOperand(1)))
1008 SI->setOperand(1, ConstantPointerNull::get(NewTy));
1009
1010 if (isa<ConstantPointerNull>(SI->getOperand(2)))
1011 SI->setOperand(2, ConstantPointerNull::get(NewTy));
1012 } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
1013 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1014 if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
1015 Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
1016 }
1017 }
1018
1019 continue;
1020 }
1021
1022 IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
1023 Builder.SetInsertPoint(Intr);
1024 switch (Intr->getIntrinsicID()) {
1025 case Intrinsic::lifetime_start:
1026 case Intrinsic::lifetime_end:
1027 // These intrinsics are for address space 0 only
1028 Intr->eraseFromParent();
1029 continue;
1030 case Intrinsic::memcpy: {
1031 MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
1032 Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getDestAlign(),
1033 MemCpy->getRawSource(), MemCpy->getSourceAlign(),
1034 MemCpy->getLength(), MemCpy->isVolatile());
1035 Intr->eraseFromParent();
1036 continue;
1037 }
1038 case Intrinsic::memmove: {
1039 MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
1040 Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getDestAlign(),
1041 MemMove->getRawSource(), MemMove->getSourceAlign(),
1042 MemMove->getLength(), MemMove->isVolatile());
1043 Intr->eraseFromParent();
1044 continue;
1045 }
1046 case Intrinsic::memset: {
1047 MemSetInst *MemSet = cast<MemSetInst>(Intr);
1048 Builder.CreateMemSet(
1049 MemSet->getRawDest(), MemSet->getValue(), MemSet->getLength(),
1050 MaybeAlign(MemSet->getDestAlignment()), MemSet->isVolatile());
1051 Intr->eraseFromParent();
1052 continue;
1053 }
1054 case Intrinsic::invariant_start:
1055 case Intrinsic::invariant_end:
1056 case Intrinsic::launder_invariant_group:
1057 case Intrinsic::strip_invariant_group:
1058 Intr->eraseFromParent();
1059 // FIXME: I think the invariant marker should still theoretically apply,
1060 // but the intrinsics need to be changed to accept pointers with any
1061 // address space.
1062 continue;
1063 case Intrinsic::objectsize: {
1064 Value *Src = Intr->getOperand(0);
1065 Type *SrcTy = Src->getType()->getPointerElementType();
1066 Function *ObjectSize = Intrinsic::getDeclaration(Mod,
1067 Intrinsic::objectsize,
1068 { Intr->getType(), PointerType::get(SrcTy, AMDGPUAS::LOCAL_ADDRESS) }
1069 );
1070
1071 CallInst *NewCall = Builder.CreateCall(
1072 ObjectSize,
1073 {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
1074 Intr->replaceAllUsesWith(NewCall);
1075 Intr->eraseFromParent();
1076 continue;
1077 }
1078 default:
1079 Intr->print(errs());
1080 llvm_unreachable("Don't know how to promote alloca intrinsic use.");
1081 }
1082 }
1083 return true;
1084}
1085
1086bool AMDGPUPromoteAllocaToVector::runOnFunction(Function &F) {
1087 if (skipFunction(F) || DisablePromoteAllocaToVector)
1
Assuming the condition is false
2
Assuming the condition is false
3
Taking false branch
1088 return false;
1089
1090 const TargetMachine *TM;
1091 if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
4
Assuming 'TPC' is non-null
5
Taking true branch
1092 TM = &TPC->getTM<TargetMachine>();
1093 else
1094 return false;
1095
1096 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);
1097 if (!ST.isPromoteAllocaEnabled())
6
Assuming the condition is false
7
Taking false branch
1098 return false;
1099
1100 if (TM->getTargetTriple().getArch() == Triple::amdgcn) {
8
Assuming the condition is false
9
Taking false branch
1101 const GCNSubtarget &ST = TM->getSubtarget<GCNSubtarget>(F);
1102 MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
1103 } else {
1104 MaxVGPRs = 128;
1105 }
1106
1107 bool Changed = false;
1108 BasicBlock &EntryBB = *F.begin();
1109
1110 SmallVector<AllocaInst *, 16> Allocas;
1111 for (Instruction &I : EntryBB) {
1112 if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
1113 Allocas.push_back(AI);
1114 }
1115
1116 for (AllocaInst *AI : Allocas) {
10
Assuming '__begin1' is not equal to '__end1'
1117 if (handleAlloca(*AI))
11
Taking false branch
12
Taking false branch
13
Calling 'AMDGPUPromoteAllocaToVector::handleAlloca'
1118 Changed = true;
1119 }
1120
1121 return Changed;
1122}
1123
1124bool AMDGPUPromoteAllocaToVector::handleAlloca(AllocaInst &I) {
1125 // Array allocations are probably not worth handling, since an allocation of
1126 // the array type is the canonical form.
1127 if (!I.isStaticAlloca() || I.isArrayAllocation())
14
Assuming the condition is false
15
Assuming the condition is false
16
Taking false branch
1128 return false;
1129
1130 LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');
17
Assuming 'DebugFlag' is false
18
Loop condition is false. Exiting loop
1131
1132 Module *Mod = I.getParent()->getParent()->getParent();
1133 return tryPromoteAllocaToVector(&I, Mod->getDataLayout(), MaxVGPRs);
19
Calling 'tryPromoteAllocaToVector'
1134}
1135
1136FunctionPass *llvm::createAMDGPUPromoteAlloca() {
1137 return new AMDGPUPromoteAlloca();
1138}
1139
1140FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
1141 return new AMDGPUPromoteAllocaToVector();
1142}

/build/llvm-toolchain-snapshot-12~++20201129111111+e987fbdd85d/llvm/include/llvm/ADT/SmallVector.h

1//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the SmallVector class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_ADT_SMALLVECTOR_H
14#define LLVM_ADT_SMALLVECTOR_H
15
16#include "llvm/ADT/iterator_range.h"
17#include "llvm/Support/Compiler.h"
18#include "llvm/Support/ErrorHandling.h"
19#include "llvm/Support/MathExtras.h"
20#include "llvm/Support/MemAlloc.h"
21#include "llvm/Support/type_traits.h"
22#include <algorithm>
23#include <cassert>
24#include <cstddef>
25#include <cstdlib>
26#include <cstring>
27#include <initializer_list>
28#include <iterator>
29#include <limits>
30#include <memory>
31#include <new>
32#include <type_traits>
33#include <utility>
34
35namespace llvm {
36
37/// This is all the stuff common to all SmallVectors.
38///
39/// The template parameter specifies the type which should be used to hold the
40/// Size and Capacity of the SmallVector, so it can be adjusted.
41/// Using 32 bit size is desirable to shrink the size of the SmallVector.
42/// Using 64 bit size is desirable for cases like SmallVector<char>, where a
43/// 32 bit size would limit the vector to ~4GB. SmallVectors are used for
44/// buffering bitcode output - which can exceed 4GB.
45template <class Size_T> class SmallVectorBase {
46protected:
47 void *BeginX;
48 Size_T Size = 0, Capacity;
49
50 /// The maximum value of the Size_T used.
51 static constexpr size_t SizeTypeMax() {
52 return std::numeric_limits<Size_T>::max();
53 }
54
55 SmallVectorBase() = delete;
56 SmallVectorBase(void *FirstEl, size_t TotalCapacity)
57 : BeginX(FirstEl), Capacity(TotalCapacity) {}
58
59 /// This is an implementation of the grow() method which only works
60 /// on POD-like data types and is out of line to reduce code duplication.
61 /// This function will report a fatal error if it cannot increase capacity.
62 void grow_pod(void *FirstEl, size_t MinSize, size_t TSize);
63
64 /// Report that MinSize doesn't fit into this vector's size type. Throws
65 /// std::length_error or calls report_fatal_error.
66 LLVM_ATTRIBUTE_NORETURN static void report_size_overflow(size_t MinSize);
67 /// Report that this vector is already at maximum capacity. Throws
68 /// std::length_error or calls report_fatal_error.
69 LLVM_ATTRIBUTE_NORETURN static void report_at_maximum_capacity();
70
71public:
72 size_t size() const { return Size; }
73 size_t capacity() const { return Capacity; }
74
75 LLVM_NODISCARD bool empty() const { return !Size; }
39
Assuming field 'Size' is not equal to 0
40
Returning zero, which participates in a condition later
76
77 /// Set the array size to \p N, which the current array must have enough
78 /// capacity for.
79 ///
80 /// This does not construct or destroy any elements in the vector.
81 ///
82 /// Clients can use this in conjunction with capacity() to write past the end
83 /// of the buffer when they know that more elements are available, and only
84 /// update the size later. This avoids the cost of value initializing elements
85 /// which will only be overwritten.
86 void set_size(size_t N) {
87 assert(N <= capacity());
88 Size = N;
89 }
90};
91
92template <class T>
93using SmallVectorSizeType =
94 typename std::conditional<sizeof(T) < 4 && sizeof(void *) >= 8, uint64_t,
95 uint32_t>::type;
96
97/// Figure out the offset of the first element.
98template <class T, typename = void> struct SmallVectorAlignmentAndSize {
99 alignas(SmallVectorBase<SmallVectorSizeType<T>>) char Base[sizeof(
100 SmallVectorBase<SmallVectorSizeType<T>>)];
101 alignas(T) char FirstEl[sizeof(T)];
102};
103
104/// This is the part of SmallVectorTemplateBase which does not depend on whether
105/// the type T is a POD. The extra dummy template argument is used by ArrayRef
106/// to avoid unnecessarily requiring T to be complete.
107template <typename T, typename = void>
108class SmallVectorTemplateCommon
109 : public SmallVectorBase<SmallVectorSizeType<T>> {
110 using Base = SmallVectorBase<SmallVectorSizeType<T>>;
111
112 /// Find the address of the first element. For this pointer math to be valid
113 /// with small-size of 0 for T with lots of alignment, it's important that
114 /// SmallVectorStorage is properly-aligned even for small-size of 0.
115 void *getFirstEl() const {
116 return const_cast<void *>(reinterpret_cast<const void *>(
117 reinterpret_cast<const char *>(this) +
118 offsetof(SmallVectorAlignmentAndSize<T>, FirstEl)));
119 }
120 // Space after 'FirstEl' is clobbered, do not add any instance vars after it.
121
122protected:
123 SmallVectorTemplateCommon(size_t Size) : Base(getFirstEl(), Size) {}
124
125 void grow_pod(size_t MinSize, size_t TSize) {
126 Base::grow_pod(getFirstEl(), MinSize, TSize);
127 }
128
129 /// Return true if this is a smallvector which has not had dynamic
130 /// memory allocated for it.
131 bool isSmall() const { return this->BeginX == getFirstEl(); }
132
133 /// Put this vector in a state of being small.
134 void resetToSmall() {
135 this->BeginX = getFirstEl();
136 this->Size = this->Capacity = 0; // FIXME: Setting Capacity to 0 is suspect.
137 }
138
139 /// Return true unless Elt will be invalidated by resizing the vector to
140 /// NewSize.
141 bool isSafeToReferenceAfterResize(const void *Elt, size_t NewSize) {
142 // Past the end.
143 if (LLVM_LIKELY(Elt < this->begin() || Elt >= this->end()))
144 return true;
145
146 // Return false if Elt will be destroyed by shrinking.
147 if (NewSize <= this->size())
148 return Elt < this->begin() + NewSize;
149
150 // Return false if we need to grow.
151 return NewSize <= this->capacity();
152 }
153
154 /// Check whether Elt will be invalidated by resizing the vector to NewSize.
155 void assertSafeToReferenceAfterResize(const void *Elt, size_t NewSize) {
156 assert(isSafeToReferenceAfterResize(Elt, NewSize) &&
157        "Attempting to reference an element of the vector in an operation "
158        "that invalidates it");
159 }
160
161 /// Check whether Elt will be invalidated by increasing the size of the
162 /// vector by N.
163 void assertSafeToAdd(const void *Elt, size_t N = 1) {
164 this->assertSafeToReferenceAfterResize(Elt, this->size() + N);
165 }
166
167 /// Check whether any part of the range will be invalidated by clearing.
168 void assertSafeToReferenceAfterClear(const T *From, const T *To) {
169 if (From == To)
170 return;
171 this->assertSafeToReferenceAfterResize(From, 0);
172 this->assertSafeToReferenceAfterResize(To - 1, 0);
173 }
174 template <
175 class ItTy,
176 std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value,
177 bool> = false>
178 void assertSafeToReferenceAfterClear(ItTy, ItTy) {}
179
180 /// Check whether any part of the range will be invalidated by growing.
181 void assertSafeToAddRange(const T *From, const T *To) {
182 if (From == To)
183 return;
184 this->assertSafeToAdd(From, To - From);
185 this->assertSafeToAdd(To - 1, To - From);
186 }
187 template <
188 class ItTy,
189 std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value,
190 bool> = false>
191 void assertSafeToAddRange(ItTy, ItTy) {}
192
193 /// Check whether any argument will be invalidated by growing for
194 /// emplace_back.
195 template <class ArgType1, class... ArgTypes>
196 void assertSafeToEmplace(ArgType1 &Arg1, ArgTypes &... Args) {
197 this->assertSafeToAdd(&Arg1);
198 this->assertSafeToEmplace(Args...);
199 }
200 void assertSafeToEmplace() {}
201
202public:
203 using size_type = size_t;
204 using difference_type = ptrdiff_t;
205 using value_type = T;
206 using iterator = T *;
207 using const_iterator = const T *;
208
209 using const_reverse_iterator = std::reverse_iterator<const_iterator>;
210 using reverse_iterator = std::reverse_iterator<iterator>;
211
212 using reference = T &;
213 using const_reference = const T &;
214 using pointer = T *;
215 using const_pointer = const T *;
216
217 using Base::capacity;
218 using Base::empty;
219 using Base::size;
220
221 // forward iterator creation methods.
222 iterator begin() { return (iterator)this->BeginX; }
223 const_iterator begin() const { return (const_iterator)this->BeginX; }
224 iterator end() { return begin() + size(); }
225 const_iterator end() const { return begin() + size(); }
226
227 // reverse iterator creation methods.
228 reverse_iterator rbegin() { return reverse_iterator(end()); }
229 const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
230 reverse_iterator rend() { return reverse_iterator(begin()); }
231 const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
232
233 size_type size_in_bytes() const { return size() * sizeof(T); }
234 size_type max_size() const {
235 return std::min(this->SizeTypeMax(), size_type(-1) / sizeof(T));
236 }
237
238 size_t capacity_in_bytes() const { return capacity() * sizeof(T); }
239
240 /// Return a pointer to the vector's buffer, even if empty().
241 pointer data() { return pointer(begin()); }
242 /// Return a pointer to the vector's buffer, even if empty().
243 const_pointer data() const { return const_pointer(begin()); }
244
245 reference operator[](size_type idx) {
246 assert(idx < size());
247 return begin()[idx];
248 }
249 const_reference operator[](size_type idx) const {
250 assert(idx < size());
251 return begin()[idx];
252 }
253
254 reference front() {
255 assert(!empty());
256 return begin()[0];
257 }
258 const_reference front() const {
259 assert(!empty());
260 return begin()[0];
261 }
262
263 reference back() {
264 assert(!empty());
265 return end()[-1];
266 }
267 const_reference back() const {
268 assert(!empty());
269 return end()[-1];
270 }
271};
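Illustrative sketch of the data()/size() accessors defined above, handing the contiguous buffer to a C-style API; dumpBytes and the choice of fwrite are hypothetical, not part of the header.

  #include "llvm/ADT/SmallVector.h"
  #include <cstdio>

  // The buffer is contiguous, so data()/size() can feed plain C interfaces.
  void dumpBytes(const llvm::SmallVectorImpl<char> &Buf, std::FILE *F) {
    if (!Buf.empty())
      std::fwrite(Buf.data(), 1, Buf.size(), F);
  }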
272
273/// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put
274/// method implementations that are designed to work with non-trivial T's.
275///
276/// We approximate is_trivially_copyable with trivial move/copy construction and
277/// trivial destruction. While the standard doesn't specify that you're allowed
278/// to copy these types with memcpy, there is no way for the type to observe this.
279/// This catches the important case of std::pair<POD, POD>, which is not
280/// trivially assignable.
281template <typename T, bool = (is_trivially_copy_constructible<T>::value) &&
282 (is_trivially_move_constructible<T>::value) &&
283 std::is_trivially_destructible<T>::value>
284class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
285protected:
286 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
287
288 static void destroy_range(T *S, T *E) {
289 while (S != E) {
290 --E;
291 E->~T();
292 }
293 }
294
295 /// Move the range [I, E) into the uninitialized memory starting with "Dest",
296 /// constructing elements as needed.
297 template<typename It1, typename It2>
298 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
299 std::uninitialized_copy(std::make_move_iterator(I),
300 std::make_move_iterator(E), Dest);
301 }
302
303 /// Copy the range [I, E) onto the uninitialized memory starting with "Dest",
304 /// constructing elements as needed.
305 template<typename It1, typename It2>
306 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
307 std::uninitialized_copy(I, E, Dest);
308 }
309
310 /// Grow the allocated memory (without initializing new elements), doubling
311 /// the size of the allocated memory. Guarantees space for at least one more
312 /// element, or MinSize more elements if specified.
313 void grow(size_t MinSize = 0);
314
315public:
316 void push_back(const T &Elt) {
317 this->assertSafeToAdd(&Elt);
318 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
319 this->grow();
320 ::new ((void*) this->end()) T(Elt);
321 this->set_size(this->size() + 1);
322 }
323
324 void push_back(T &&Elt) {
325 this->assertSafeToAdd(&Elt);
326 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
327 this->grow();
328 ::new ((void*) this->end()) T(::std::move(Elt));
329 this->set_size(this->size() + 1);
330 }
331
332 void pop_back() {
333 this->set_size(this->size() - 1);
334 this->end()->~T();
335 }
336};
337
338// Define this out-of-line to dissuade the C++ compiler from inlining it.
339template <typename T, bool TriviallyCopyable>
340void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) {
341 // Ensure we can fit the new capacity.
342 // This is only going to be applicable when the capacity is 32 bit.
343 if (MinSize > this->SizeTypeMax())
344 this->report_size_overflow(MinSize);
345
346 // Ensure we can meet the guarantee of space for at least one more element.
347 // The above check alone will not catch the case where grow is called with a
348 // default MinSize of 0, but the current capacity cannot be increased.
349 // This is only going to be applicable when the capacity is 32 bit.
350 if (this->capacity() == this->SizeTypeMax())
351 this->report_at_maximum_capacity();
352
353 // Always grow, even from zero.
354 size_t NewCapacity = size_t(NextPowerOf2(this->capacity() + 2));
355 NewCapacity = std::min(std::max(NewCapacity, MinSize), this->SizeTypeMax());
356 T *NewElts = static_cast<T*>(llvm::safe_malloc(NewCapacity*sizeof(T)));
357
358 // Move the elements over.
359 this->uninitialized_move(this->begin(), this->end(), NewElts);
360
361 // Destroy the original elements.
362 destroy_range(this->begin(), this->end());
363
364 // If this wasn't grown from the inline copy, deallocate the old space.
365 if (!this->isSmall())
366 free(this->begin());
367
368 this->BeginX = NewElts;
369 this->Capacity = NewCapacity;
370}
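A standalone sketch of the capacity arithmetic in grow() above, using a local stand-in for llvm::NextPowerOf2 (smallest power of two strictly greater than its argument); the names and the no-overflow assumption are mine, not the header's.

  #include <algorithm>
  #include <cstddef>
  #include <cstdint>

  // Local stand-in for llvm::NextPowerOf2: smallest power of two > A
  // (assumption: A + 2 does not overflow in this sketch).
  static uint64_t nextPowerOf2(uint64_t A) {
    A |= (A >> 1); A |= (A >> 2); A |= (A >> 4);
    A |= (A >> 8); A |= (A >> 16); A |= (A >> 32);
    return A + 1;
  }

  // Mirrors grow(): roughly double the capacity, but never return less than
  // MinSize and never exceed the size type's maximum.
  static size_t newCapacity(size_t OldCapacity, size_t MinSize, size_t SizeTypeMax) {
    size_t NewCapacity = size_t(nextPowerOf2(OldCapacity + 2));
    return std::min(std::max(NewCapacity, MinSize), SizeTypeMax);
  }
  // e.g. 0 -> 4, 4 -> 8, 8 -> 16 when MinSize is 0 and the maximum is not hit.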
371
372/// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put
373/// method implementations that are designed to work with trivially copyable
374/// T's. This allows using memcpy in place of copy/move construction and
375/// skipping destruction.
376template <typename T>
377class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
378protected:
379 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
380
381 // No need to do a destroy loop for POD's.
382 static void destroy_range(T *, T *) {}
383
384 /// Move the range [I, E) onto the uninitialized memory
385 /// starting with "Dest", constructing elements into it as needed.
386 template<typename It1, typename It2>
387 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
388 // Just do a copy.
389 uninitialized_copy(I, E, Dest);
390 }
391
392 /// Copy the range [I, E) onto the uninitialized memory
393 /// starting with "Dest", constructing elements into it as needed.
394 template<typename It1, typename It2>
395 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
396 // Arbitrary iterator types; just use the basic implementation.
397 std::uninitialized_copy(I, E, Dest);
398 }
399
400 /// Copy the range [I, E) onto the uninitialized memory
401 /// starting with "Dest", constructing elements into it as needed.
402 template <typename T1, typename T2>
403 static void uninitialized_copy(
404 T1 *I, T1 *E, T2 *Dest,
405 std::enable_if_t<std::is_same<typename std::remove_const<T1>::type,
406 T2>::value> * = nullptr) {
407 // Use memcpy for PODs iterated by pointers (which includes SmallVector
408 // iterators): std::uninitialized_copy optimizes to memmove, but we can
409 // use memcpy here. Note that I and E are iterators and thus might be
410 // invalid for memcpy if they are equal.
411 if (I != E)
412 memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T));
413 }
414
415 /// Double the size of the allocated memory, guaranteeing space for at
416 /// least one more element or MinSize if specified.
417 void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); }
418
419public:
420 void push_back(const T &Elt) {
421 this->assertSafeToAdd(&Elt);
422 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
423 this->grow();
424 memcpy(reinterpret_cast<void *>(this->end()), &Elt, sizeof(T));
425 this->set_size(this->size() + 1);
426 }
427
428 void pop_back() { this->set_size(this->size() - 1); }
429};
430
431/// This class consists of common code factored out of the SmallVector class to
432/// reduce code duplication based on the SmallVector 'N' template parameter.
433template <typename T>
434class SmallVectorImpl : public SmallVectorTemplateBase<T> {
435 using SuperClass = SmallVectorTemplateBase<T>;
436
437public:
438 using iterator = typename SuperClass::iterator;
439 using const_iterator = typename SuperClass::const_iterator;
440 using reference = typename SuperClass::reference;
441 using size_type = typename SuperClass::size_type;
442
443protected:
444 // Default ctor - Initialize to empty.
445 explicit SmallVectorImpl(unsigned N)
446 : SmallVectorTemplateBase<T>(N) {}
447
448public:
449 SmallVectorImpl(const SmallVectorImpl &) = delete;
450
451 ~SmallVectorImpl() {
452 // Subclass has already destructed this vector's elements.
453 // If this wasn't grown from the inline copy, deallocate the old space.
454 if (!this->isSmall())
455 free(this->begin());
456 }
457
458 void clear() {
459 this->destroy_range(this->begin(), this->end());
460 this->Size = 0;
461 }
462
463 void resize(size_type N) {
464 if (N < this->size()) {
465 this->destroy_range(this->begin()+N, this->end());
466 this->set_size(N);
467 } else if (N > this->size()) {
468 if (this->capacity() < N)
469 this->grow(N);
470 for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
471 new (&*I) T();
472 this->set_size(N);
473 }
474 }
475
476 void resize(size_type N, const T &NV) {
477 if (N == this->size())
478 return;
479
480 if (N < this->size()) {
481 this->destroy_range(this->begin()+N, this->end());
482 this->set_size(N);
483 return;
484 }
485
486 this->assertSafeToReferenceAfterResize(&NV, N);
487 if (this->capacity() < N)
488 this->grow(N);
489 std::uninitialized_fill(this->end(), this->begin() + N, NV);
490 this->set_size(N);
491 }
492
493 void reserve(size_type N) {
494 if (this->capacity() < N)
495 this->grow(N);
496 }
497
498 void pop_back_n(size_type NumItems) {
499 assert(this->size() >= NumItems);
500 this->destroy_range(this->end() - NumItems, this->end());
501 this->set_size(this->size() - NumItems);
502 }
503
504 LLVM_NODISCARD T pop_back_val() {
505 T Result = ::std::move(this->back());
506 this->pop_back();
507 return Result;
508 }
509
510 void swap(SmallVectorImpl &RHS);
511
512 /// Add the specified range to the end of the SmallVector.
513 template <typename in_iter,
514 typename = std::enable_if_t<std::is_convertible<
515 typename std::iterator_traits<in_iter>::iterator_category,
516 std::input_iterator_tag>::value>>
517 void append(in_iter in_start, in_iter in_end) {
518 this->assertSafeToAddRange(in_start, in_end);
519 size_type NumInputs = std::distance(in_start, in_end);
520 if (NumInputs > this->capacity() - this->size())
521 this->grow(this->size()+NumInputs);
522
523 this->uninitialized_copy(in_start, in_end, this->end());
524 this->set_size(this->size() + NumInputs);
525 }
526
527 /// Append \p NumInputs copies of \p Elt to the end.
528 void append(size_type NumInputs, const T &Elt) {
529 this->assertSafeToAdd(&Elt, NumInputs);
530 if (NumInputs > this->capacity() - this->size())
531 this->grow(this->size()+NumInputs);
532
533 std::uninitialized_fill_n(this->end(), NumInputs, Elt);
534 this->set_size(this->size() + NumInputs);
535 }
536
537 void append(std::initializer_list<T> IL) {
538 append(IL.begin(), IL.end());
539 }
540
541 // FIXME: Consider assigning over existing elements, rather than clearing &
542 // re-initializing them - for all assign(...) variants.
543
544 void assign(size_type NumElts, const T &Elt) {
545 this->assertSafeToReferenceAfterResize(&Elt, 0);
546 clear();
547 if (this->capacity() < NumElts)
548 this->grow(NumElts);
549 this->set_size(NumElts);
550 std::uninitialized_fill(this->begin(), this->end(), Elt);
551 }
552
553 template <typename in_iter,
554 typename = std::enable_if_t<std::is_convertible<
555 typename std::iterator_traits<in_iter>::iterator_category,
556 std::input_iterator_tag>::value>>
557 void assign(in_iter in_start, in_iter in_end) {
558 this->assertSafeToReferenceAfterClear(in_start, in_end);
559 clear();
560 append(in_start, in_end);
561 }
562
563 void assign(std::initializer_list<T> IL) {
564 clear();
565 append(IL);
566 }
567
568 iterator erase(const_iterator CI) {
569 // Just cast away constness because this is a non-const member function.
570 iterator I = const_cast<iterator>(CI);
571
572 assert(I >= this->begin() && "Iterator to erase is out of bounds.");
573 assert(I < this->end() && "Erasing at past-the-end iterator.");
574
575 iterator N = I;
576 // Shift all elts down one.
577 std::move(I+1, this->end(), I);
578 // Drop the last elt.
579 this->pop_back();
580 return(N);
581 }
582
583 iterator erase(const_iterator CS, const_iterator CE) {
584 // Just cast away constness because this is a non-const member function.
585 iterator S = const_cast<iterator>(CS);
586 iterator E = const_cast<iterator>(CE);
587
588 assert(S >= this->begin() && "Range to erase is out of bounds.");
589 assert(S <= E && "Trying to erase invalid range.");
590 assert(E <= this->end() && "Trying to erase past the end.");
591
592 iterator N = S;
593 // Shift all elts down.
594 iterator I = std::move(E, this->end(), S);
595 // Drop the last elts.
596 this->destroy_range(I, this->end());
597 this->set_size(I - this->begin());
598 return(N);
599 }
600
601private:
602 template <class ArgType> iterator insert_one_impl(iterator I, ArgType &&Elt) {
603 if (I == this->end()) { // Important special case for empty vector.
604 this->push_back(::std::forward<ArgType>(Elt));
605 return this->end()-1;
606 }
607
608 assert(I >= this->begin() && "Insertion iterator is out of bounds.");
609 assert(I <= this->end() && "Inserting past the end of the vector.");
610
611 // Check that adding an element won't invalidate Elt.
612 this->assertSafeToAdd(&Elt);
613
614 if (this->size() >= this->capacity()) {
615 size_t EltNo = I-this->begin();
616 this->grow();
617 I = this->begin()+EltNo;
618 }
619
620 ::new ((void*) this->end()) T(::std::move(this->back()));
621 // Push everything else over.
622 std::move_backward(I, this->end()-1, this->end());
623 this->set_size(this->size() + 1);
624
625 // If we just moved the element we're inserting, be sure to update
626 // the reference.
627 std::remove_reference_t<ArgType> *EltPtr = &Elt;
628 if (I <= EltPtr && EltPtr < this->end())
629 ++EltPtr;
630
631 *I = ::std::forward<ArgType>(*EltPtr);
632 return I;
633 }
634
635public:
636 iterator insert(iterator I, T &&Elt) {
637 return insert_one_impl(I, std::move(Elt));
638 }
639
640 iterator insert(iterator I, const T &Elt) { return insert_one_impl(I, Elt); }
641
642 iterator insert(iterator I, size_type NumToInsert, const T &Elt) {
643 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
644 size_t InsertElt = I - this->begin();
645
646 if (I == this->end()) { // Important special case for empty vector.
647 append(NumToInsert, Elt);
648 return this->begin()+InsertElt;
649 }
650
651 assert(I >= this->begin() && "Insertion iterator is out of bounds.");
652 assert(I <= this->end() && "Inserting past the end of the vector.");
653
654 // Check that adding NumToInsert elements won't invalidate Elt.
655 this->assertSafeToAdd(&Elt, NumToInsert);
656
657 // Ensure there is enough space.
658 reserve(this->size() + NumToInsert);
659
660 // Uninvalidate the iterator.
661 I = this->begin()+InsertElt;
662
663 // If there are more elements between the insertion point and the end of the
664 // range than there are being inserted, we can use a simple approach to
665 // insertion. Since we already reserved space, we know that this won't
666 // reallocate the vector.
667 if (size_t(this->end()-I) >= NumToInsert) {
668 T *OldEnd = this->end();
669 append(std::move_iterator<iterator>(this->end() - NumToInsert),
670 std::move_iterator<iterator>(this->end()));
671
672 // Copy the existing elements that get replaced.
673 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
674
675 std::fill_n(I, NumToInsert, Elt);
676 return I;
677 }
678
679 // Otherwise, we're inserting more elements than exist already, and we're
680 // not inserting at the end.
681
682 // Move over the elements that we're about to overwrite.
683 T *OldEnd = this->end();
684 this->set_size(this->size() + NumToInsert);
685 size_t NumOverwritten = OldEnd-I;
686 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
687
688 // Replace the overwritten part.
689 std::fill_n(I, NumOverwritten, Elt);
690
691 // Insert the non-overwritten middle part.
692 std::uninitialized_fill_n(OldEnd, NumToInsert-NumOverwritten, Elt);
693 return I;
694 }
695
696 template <typename ItTy,
697 typename = std::enable_if_t<std::is_convertible<
698 typename std::iterator_traits<ItTy>::iterator_category,
699 std::input_iterator_tag>::value>>
700 iterator insert(iterator I, ItTy From, ItTy To) {
701 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
702 size_t InsertElt = I - this->begin();
703
704 if (I == this->end()) { // Important special case for empty vector.
705 append(From, To);
706 return this->begin()+InsertElt;
707 }
708
709 assert(I >= this->begin() && "Insertion iterator is out of bounds.");
710 assert(I <= this->end() && "Inserting past the end of the vector.");
711
712 // Check that the reserve that follows doesn't invalidate the iterators.
713 this->assertSafeToAddRange(From, To);
714
715 size_t NumToInsert = std::distance(From, To);
716
717 // Ensure there is enough space.
718 reserve(this->size() + NumToInsert);
719
720 // Uninvalidate the iterator.
721 I = this->begin()+InsertElt;
722
723 // If there are more elements between the insertion point and the end of the
724 // range than there are being inserted, we can use a simple approach to
725 // insertion. Since we already reserved space, we know that this won't
726 // reallocate the vector.
727 if (size_t(this->end()-I) >= NumToInsert) {
728 T *OldEnd = this->end();
729 append(std::move_iterator<iterator>(this->end() - NumToInsert),
730 std::move_iterator<iterator>(this->end()));
731
732 // Copy the existing elements that get replaced.
733 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
734
735 std::copy(From, To, I);
736 return I;
737 }
738
739 // Otherwise, we're inserting more elements than exist already, and we're
740 // not inserting at the end.
741
742 // Move over the elements that we're about to overwrite.
743 T *OldEnd = this->end();
744 this->set_size(this->size() + NumToInsert);
745 size_t NumOverwritten = OldEnd-I;
746 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
747
748 // Replace the overwritten part.
749 for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
750 *J = *From;
751 ++J; ++From;
752 }
753
754 // Insert the non-overwritten middle part.
755 this->uninitialized_copy(From, To, OldEnd);
756 return I;
757 }
758
759 void insert(iterator I, std::initializer_list<T> IL) {
760 insert(I, IL.begin(), IL.end());
761 }
762
763 template <typename... ArgTypes> reference emplace_back(ArgTypes &&... Args) {
764 this->assertSafeToEmplace(Args...);
765 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
766 this->grow();
767 ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
768 this->set_size(this->size() + 1);
769 return this->back();
770 }
771
772 SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
773
774 SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
775
776 bool operator==(const SmallVectorImpl &RHS) const {
777 if (this->size() != RHS.size()) return false;
778 return std::equal(this->begin(), this->end(), RHS.begin());
779 }
780 bool operator!=(const SmallVectorImpl &RHS) const {
781 return !(*this == RHS);
782 }
783
784 bool operator<(const SmallVectorImpl &RHS) const {
785 return std::lexicographical_compare(this->begin(), this->end(),
786 RHS.begin(), RHS.end());
787 }
788};
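A short usage sketch (not part of the header): functions commonly take SmallVectorImpl<T>& so callers can pass a SmallVector of any inline size; appendSquares is a hypothetical helper.

  #include "llvm/ADT/SmallVector.h"

  // Taking SmallVectorImpl<int>& keeps the inline-size template parameter N
  // out of the interface.
  void appendSquares(llvm::SmallVectorImpl<int> &Out, int Count) {
    for (int I = 0; I < Count; ++I)
      Out.push_back(I * I);
  }

  // Usage:
  //   llvm::SmallVector<int, 8> Buf;
  //   appendSquares(Buf, 16); // spills to the heap only past 8 elements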
789
790template <typename T>
791void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
792 if (this == &RHS) return;
793
794 // We can only avoid copying elements if neither vector is small.
795 if (!this->isSmall() && !RHS.isSmall()) {
796 std::swap(this->BeginX, RHS.BeginX);
797 std::swap(this->Size, RHS.Size);
798 std::swap(this->Capacity, RHS.Capacity);
799 return;
800 }
801 if (RHS.size() > this->capacity())
802 this->grow(RHS.size());
803 if (this->size() > RHS.capacity())
804 RHS.grow(this->size());
805
806 // Swap the shared elements.
807 size_t NumShared = this->size();
808 if (NumShared > RHS.size()) NumShared = RHS.size();
809 for (size_type i = 0; i != NumShared; ++i)
810 std::swap((*this)[i], RHS[i]);
811
812 // Copy over the extra elts.
813 if (this->size() > RHS.size()) {
814 size_t EltDiff = this->size() - RHS.size();
815 this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
816 RHS.set_size(RHS.size() + EltDiff);
817 this->destroy_range(this->begin()+NumShared, this->end());
818 this->set_size(NumShared);
819 } else if (RHS.size() > this->size()) {
820 size_t EltDiff = RHS.size() - this->size();
821 this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
822 this->set_size(this->size() + EltDiff);
823 this->destroy_range(RHS.begin()+NumShared, RHS.end());
824 RHS.set_size(NumShared);
825 }
826}
827
828template <typename T>
829SmallVectorImpl<T> &SmallVectorImpl<T>::
830 operator=(const SmallVectorImpl<T> &RHS) {
831 // Avoid self-assignment.
832 if (this == &RHS) return *this;
833
834 // If we already have sufficient space, assign the common elements, then
835 // destroy any excess.
836 size_t RHSSize = RHS.size();
837 size_t CurSize = this->size();
838 if (CurSize >= RHSSize) {
839 // Assign common elements.
840 iterator NewEnd;
841 if (RHSSize)
842 NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
843 else
844 NewEnd = this->begin();
845
846 // Destroy excess elements.
847 this->destroy_range(NewEnd, this->end());
848
849 // Trim.
850 this->set_size(RHSSize);
851 return *this;
852 }
853
854 // If we have to grow to have enough elements, destroy the current elements.
855 // This allows us to avoid copying them during the grow.
856 // FIXME: don't do this if they're efficiently moveable.
857 if (this->capacity() < RHSSize) {
858 // Destroy current elements.
859 this->destroy_range(this->begin(), this->end());
860 this->set_size(0);
861 CurSize = 0;
862 this->grow(RHSSize);
863 } else if (CurSize) {
864 // Otherwise, use assignment for the already-constructed elements.
865 std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
866 }
867
868 // Copy construct the new elements in place.
869 this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
870 this->begin()+CurSize);
871
872 // Set end.
873 this->set_size(RHSSize);
874 return *this;
875}
876
877template <typename T>
878SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
879 // Avoid self-assignment.
880 if (this == &RHS) return *this;
881
882 // If the RHS isn't small, clear this vector and then steal its buffer.
883 if (!RHS.isSmall()) {
884 this->destroy_range(this->begin(), this->end());
885 if (!this->isSmall()) free(this->begin());
886 this->BeginX = RHS.BeginX;
887 this->Size = RHS.Size;
888 this->Capacity = RHS.Capacity;
889 RHS.resetToSmall();
890 return *this;
891 }
892
893 // If we already have sufficient space, assign the common elements, then
894 // destroy any excess.
895 size_t RHSSize = RHS.size();
896 size_t CurSize = this->size();
897 if (CurSize >= RHSSize) {
898 // Assign common elements.
899 iterator NewEnd = this->begin();
900 if (RHSSize)
901 NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd);
902
903 // Destroy excess elements and trim the bounds.
904 this->destroy_range(NewEnd, this->end());
905 this->set_size(RHSSize);
906
907 // Clear the RHS.
908 RHS.clear();
909
910 return *this;
911 }
912
913 // If we have to grow to have enough elements, destroy the current elements.
914 // This allows us to avoid copying them during the grow.
915 // FIXME: this may not actually make any sense if we can efficiently move
916 // elements.
917 if (this->capacity() < RHSSize) {
918 // Destroy current elements.
919 this->destroy_range(this->begin(), this->end());
920 this->set_size(0);
921 CurSize = 0;
922 this->grow(RHSSize);
923 } else if (CurSize) {
924 // Otherwise, use assignment for the already-constructed elements.
925 std::move(RHS.begin(), RHS.begin()+CurSize, this->begin());
926 }
927
928 // Move-construct the new elements in place.
929 this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
930 this->begin()+CurSize);
931
932 // Set end.
933 this->set_size(RHSSize);
934
935 RHS.clear();
936 return *this;
937}
938
939/// Storage for the SmallVector elements. This is specialized for the N=0 case
940/// to avoid allocating unnecessary storage.
941template <typename T, unsigned N>
942struct SmallVectorStorage {
943 alignas(T) char InlineElts[N * sizeof(T)];
944};
945
946/// We need the storage to be properly aligned even for small-size of 0 so that
947/// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is
948/// well-defined.
949template <typename T> struct alignas(T) SmallVectorStorage<T, 0> {};
950
951/// This is a 'vector' (really, a variable-sized array), optimized
952/// for the case when the array is small. It contains some number of elements
953/// in-place, which allows it to avoid heap allocation when the actual number of
954/// elements is below that threshold. This allows normal "small" cases to be
955/// fast without losing generality for large inputs.
956///
957/// Note that this does not attempt to be exception safe.
958///
959template <typename T, unsigned N>
960 class LLVM_GSL_OWNER SmallVector : public SmallVectorImpl<T>,
961 SmallVectorStorage<T, N> {
962public:
963 SmallVector() : SmallVectorImpl<T>(N) {}
964
965 ~SmallVector() {
966 // Destroy the constructed elements in the vector.
967 this->destroy_range(this->begin(), this->end());
968 }
969
970 explicit SmallVector(size_t Size, const T &Value = T())
971 : SmallVectorImpl<T>(N) {
972 this->assign(Size, Value);
973 }
974
975 template <typename ItTy,
976 typename = std::enable_if_t<std::is_convertible<
977 typename std::iterator_traits<ItTy>::iterator_category,
978 std::input_iterator_tag>::value>>
979 SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
980 this->append(S, E);
981 }
982
983 template <typename RangeTy>
984 explicit SmallVector(const iterator_range<RangeTy> &R)
985 : SmallVectorImpl<T>(N) {
986 this->append(R.begin(), R.end());
987 }
988
989 SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
990 this->assign(IL);
991 }
992
993 SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) {
994 if (!RHS.empty())
995 SmallVectorImpl<T>::operator=(RHS);
996 }
997
998 SmallVector &operator=(const SmallVector &RHS) {
999 SmallVectorImpl<T>::operator=(RHS);
1000 return *this;
1001 }
1002
1003 SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) {
1004 if (!RHS.empty())
1005 SmallVectorImpl<T>::operator=(::std::move(RHS));
1006 }
1007
1008 SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) {
1009 if (!RHS.empty())
1010 SmallVectorImpl<T>::operator=(::std::move(RHS));
1011 }
1012
1013 SmallVector &operator=(SmallVector &&RHS) {
1014 SmallVectorImpl<T>::operator=(::std::move(RHS));
1015 return *this;
1016 }
1017
1018 SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
1019 SmallVectorImpl<T>::operator=(::std::move(RHS));
1020 return *this;
1021 }
1022
1023 SmallVector &operator=(std::initializer_list<T> IL) {
1024 this->assign(IL);
1025 return *this;
1026 }
1027};
1028
1029template <typename T, unsigned N>
1030inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
1031 return X.capacity_in_bytes();
1032}
1033
1034/// Given a range of type R, iterate the entire range and return a
1035/// SmallVector with elements of the vector. This is useful, for example,
1036/// when you want to iterate a range and then sort the results.
1037template <unsigned Size, typename R>
1038SmallVector<typename std::remove_const<typename std::remove_reference<
1039 decltype(*std::begin(std::declval<R &>()))>::type>::type,
1040 Size>
1041to_vector(R &&Range) {
1042 return {std::begin(Range), std::end(Range)};
1043}
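Illustrative sketch of the use case described in the comment above (iterate a range once, then sort the copy); sortedCopy and the std::list input are hypothetical, not part of the header.

  #include "llvm/ADT/SmallVector.h"
  #include <algorithm>
  #include <list>

  // Materialize the range into a SmallVector<int, 8>, then sort that copy.
  llvm::SmallVector<int, 8> sortedCopy(const std::list<int> &Values) {
    auto Vec = llvm::to_vector<8>(Values);
    std::sort(Vec.begin(), Vec.end());
    return Vec;
  }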
1044
1045} // end namespace llvm
1046
1047namespace std {
1048
1049 /// Implement std::swap in terms of SmallVector swap.
1050 template<typename T>
1051 inline void
1052 swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) {
1053 LHS.swap(RHS);
1054 }
1055
1056 /// Implement std::swap in terms of SmallVector swap.
1057 template<typename T, unsigned N>
1058 inline void
1059 swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) {
1060 LHS.swap(RHS);
1061 }
1062
1063} // end namespace std
1064
1065#endif // LLVM_ADT_SMALLVECTOR_H
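A brief usage note on the std::swap overloads above (my observation, not text from the header): when both vectors have already spilled to the heap, the swap implementation shown earlier degenerates to exchanging the pointer, size and capacity fields. The example function below is hypothetical.

  #include "llvm/ADT/SmallVector.h"

  void exchange(llvm::SmallVector<int, 4> &A, llvm::SmallVector<int, 4> &B) {
    std::swap(A, B); // selects the SmallVector overload declared above
  }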