Bug Summary

File: llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
Warning: line 384, column 11
Called C++ object pointer is null
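
In short: 'Inst' comes from dyn_cast<Instruction>(AllocaUser) in tryPromoteAllocaToVector and is null whenever the alloca user is not an Instruction; when that user is not a GetElementPtrInst either, the null 'Inst' is passed straight into canVectorizeInst, which dereferences it at line 384. The example below distills the flagged shape into a standalone sketch; the types are simplified stand-ins, not the LLVM classes, and only the control flow mirrors the report:

// Distilled shape of the reported path (hypothetical stand-in types).
// In the real code, dyn_cast<Instruction>(AllocaUser) returns null for
// non-Instruction users of the alloca, e.g. a ConstantExpr.
struct Value { virtual ~Value() = default; };
struct Instruction : Value { unsigned getOpcode() const { return 0; } };

static bool canVectorizeInst(Instruction *Inst) {
  switch (Inst->getOpcode()) { // line 384: dereferences Inst unconditionally
  default:
    return false;
  }
}

static bool handleUser(Value *AllocaUser, bool IsGEP) {
  // Step 44: Inst is null when AllocaUser is not an Instruction.
  auto *Inst = dynamic_cast<Instruction *>(AllocaUser);
  if (!IsGEP)                      // Steps 45-46: not a GEP, take the branch
    return canVectorizeInst(Inst); // Steps 47-49: null passed and dereferenced
  return true;
}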

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AMDGPUPromoteAlloca.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/build-llvm/lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/build-llvm/lib/Target/AMDGPU -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-09-26-161721-17566-1 -x c++ /build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp

/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp

1//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass eliminates allocas by either converting them into vectors or
10// by migrating them to local address space.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AMDGPU.h"
15#include "AMDGPUSubtarget.h"
16#include "Utils/AMDGPUBaseInfo.h"
17#include "llvm/ADT/APInt.h"
18#include "llvm/ADT/None.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/StringRef.h"
21#include "llvm/ADT/Triple.h"
22#include "llvm/ADT/Twine.h"
23#include "llvm/Analysis/CaptureTracking.h"
24#include "llvm/Analysis/ValueTracking.h"
25#include "llvm/CodeGen/TargetPassConfig.h"
26#include "llvm/IR/Attributes.h"
27#include "llvm/IR/BasicBlock.h"
28#include "llvm/IR/Constant.h"
29#include "llvm/IR/Constants.h"
30#include "llvm/IR/DataLayout.h"
31#include "llvm/IR/DerivedTypes.h"
32#include "llvm/IR/Function.h"
33#include "llvm/IR/GlobalValue.h"
34#include "llvm/IR/GlobalVariable.h"
35#include "llvm/IR/IRBuilder.h"
36#include "llvm/IR/Instruction.h"
37#include "llvm/IR/Instructions.h"
38#include "llvm/IR/IntrinsicInst.h"
39#include "llvm/IR/Intrinsics.h"
40#include "llvm/IR/IntrinsicsAMDGPU.h"
41#include "llvm/IR/IntrinsicsR600.h"
42#include "llvm/IR/LLVMContext.h"
43#include "llvm/IR/Metadata.h"
44#include "llvm/IR/Module.h"
45#include "llvm/IR/Type.h"
46#include "llvm/IR/User.h"
47#include "llvm/IR/Value.h"
48#include "llvm/Pass.h"
49#include "llvm/Support/Casting.h"
50#include "llvm/Support/Debug.h"
51#include "llvm/Support/ErrorHandling.h"
52#include "llvm/Support/MathExtras.h"
53#include "llvm/Support/raw_ostream.h"
54#include "llvm/Target/TargetMachine.h"
55#include <algorithm>
56#include <cassert>
57#include <cstdint>
58#include <map>
59#include <tuple>
60#include <utility>
61#include <vector>
62
63#define DEBUG_TYPE "amdgpu-promote-alloca"
64
65using namespace llvm;
66
67namespace {
68
69static cl::opt<bool> DisablePromoteAllocaToVector(
70 "disable-promote-alloca-to-vector",
71 cl::desc("Disable promote alloca to vector"),
72 cl::init(false));
73
74static cl::opt<bool> DisablePromoteAllocaToLDS(
75 "disable-promote-alloca-to-lds",
76 cl::desc("Disable promote alloca to LDS"),
77 cl::init(false));
78
79static cl::opt<unsigned> PromoteAllocaToVectorLimit(
80 "amdgpu-promote-alloca-to-vector-limit",
81 cl::desc("Maximum byte size to consider promote alloca to vector"),
82 cl::init(0));
83
84// FIXME: This can create globals so should be a module pass.
85class AMDGPUPromoteAlloca : public FunctionPass {
86private:
87 const TargetMachine *TM;
88 Module *Mod = nullptr;
89 const DataLayout *DL = nullptr;
90
91 // FIXME: This should be per-kernel.
92 uint32_t LocalMemLimit = 0;
93 uint32_t CurrentLocalMemUsage = 0;
94 unsigned MaxVGPRs;
95
96 bool IsAMDGCN = false;
97 bool IsAMDHSA = false;
98
99 std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
100 Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);
101
102 /// BaseAlloca is the alloca root the search started from.
103 /// Val may be that alloca or a recursive user of it.
104 bool collectUsesWithPtrTypes(Value *BaseAlloca,
105 Value *Val,
106 std::vector<Value*> &WorkList) const;
107
108 /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
109 /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
110 /// Returns true if both operands are derived from the same alloca. Val should
111 /// be the same value as one of the input operands of UseInst.
112 bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
113 Instruction *UseInst,
114 int OpIdx0, int OpIdx1) const;
115
116 /// Check whether we have enough local memory for promotion.
117 bool hasSufficientLocalMem(const Function &F);
118
119public:
120 static char ID;
121
122 AMDGPUPromoteAlloca() : FunctionPass(ID) {}
123
124 bool doInitialization(Module &M) override;
125 bool runOnFunction(Function &F) override;
126
127 StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }
128
129 bool handleAlloca(AllocaInst &I, bool SufficientLDS);
130
131 void getAnalysisUsage(AnalysisUsage &AU) const override {
132 AU.setPreservesCFG();
133 FunctionPass::getAnalysisUsage(AU);
134 }
135};
136
137class AMDGPUPromoteAllocaToVector : public FunctionPass {
138private:
139 unsigned MaxVGPRs;
140
141public:
142 static char ID;
143
144 AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}
145
146 bool runOnFunction(Function &F) override;
147
148 StringRef getPassName() const override {
149 return "AMDGPU Promote Alloca to vector";
150 }
151
152 bool handleAlloca(AllocaInst &I);
153
154 void getAnalysisUsage(AnalysisUsage &AU) const override {
155 AU.setPreservesCFG();
156 FunctionPass::getAnalysisUsage(AU);
157 }
158};
159
160} // end anonymous namespace
161
162char AMDGPUPromoteAlloca::ID = 0;
163char AMDGPUPromoteAllocaToVector::ID = 0;
164
165INITIALIZE_PASS(AMDGPUPromoteAlloca, DEBUG_TYPE,
166                "AMDGPU promote alloca to vector or LDS", false, false)
167
168INITIALIZE_PASS(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
169                "AMDGPU promote alloca to vector", false, false)
170
171char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
172char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;
173
174bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
175 Mod = &M;
176 DL = &Mod->getDataLayout();
177
178 return false;
179}
180
181bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
182 if (skipFunction(F))
183 return false;
184
185 if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
186 TM = &TPC->getTM<TargetMachine>();
187 else
188 return false;
189
190 const Triple &TT = TM->getTargetTriple();
191 IsAMDGCN = TT.getArch() == Triple::amdgcn;
192 IsAMDHSA = TT.getOS() == Triple::AMDHSA;
193
194 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);
195 if (!ST.isPromoteAllocaEnabled())
196 return false;
197
198 if (IsAMDGCN) {
199 const GCNSubtarget &ST = TM->getSubtarget<GCNSubtarget>(F);
200 MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
201 } else {
202 MaxVGPRs = 128;
203 }
204
205 bool SufficientLDS = hasSufficientLocalMem(F);
206 bool Changed = false;
207 BasicBlock &EntryBB = *F.begin();
208
209 SmallVector<AllocaInst *, 16> Allocas;
210 for (Instruction &I : EntryBB) {
211 if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
212 Allocas.push_back(AI);
213 }
214
215 for (AllocaInst *AI : Allocas) {
216 if (handleAlloca(*AI, SufficientLDS))
217 Changed = true;
218 }
219
220 return Changed;
221}
222
223std::pair<Value *, Value *>
224AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
225 const Function &F = *Builder.GetInsertBlock()->getParent();
226 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);
227
228 if (!IsAMDHSA) {
229 Function *LocalSizeYFn
230 = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
231 Function *LocalSizeZFn
232 = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);
233
234 CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
235 CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});
236
237 ST.makeLIDRangeMetadata(LocalSizeY);
238 ST.makeLIDRangeMetadata(LocalSizeZ);
239
240 return std::make_pair(LocalSizeY, LocalSizeZ);
241 }
242
243 // We must read the size out of the dispatch pointer.
244 assert(IsAMDGCN);
245
246 // We are indexing into this struct, and want to extract the workgroup_size_*
247 // fields.
248 //
249 // typedef struct hsa_kernel_dispatch_packet_s {
250 // uint16_t header;
251 // uint16_t setup;
252 // uint16_t workgroup_size_x ;
253 // uint16_t workgroup_size_y;
254 // uint16_t workgroup_size_z;
255 // uint16_t reserved0;
256 // uint32_t grid_size_x ;
257 // uint32_t grid_size_y ;
258 // uint32_t grid_size_z;
259 //
260 // uint32_t private_segment_size;
261 // uint32_t group_segment_size;
262 // uint64_t kernel_object;
263 //
264 // #ifdef HSA_LARGE_MODEL
265 // void *kernarg_address;
266 // #elif defined HSA_LITTLE_ENDIAN
267 // void *kernarg_address;
268 // uint32_t reserved1;
269 // #else
270 // uint32_t reserved1;
271 // void *kernarg_address;
272 // #endif
273 // uint64_t reserved2;
274 // hsa_signal_t completion_signal; // uint64_t wrapper
275 // } hsa_kernel_dispatch_packet_t
276 //
277 Function *DispatchPtrFn
278 = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);
279
280 CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
281 DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
282 DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
283
284 // Size of the dispatch packet struct.
285 DispatchPtr->addDereferenceableAttr(AttributeList::ReturnIndex, 64);
286
287 Type *I32Ty = Type::getInt32Ty(Mod->getContext());
288 Value *CastDispatchPtr = Builder.CreateBitCast(
289 DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));
290
291 // We could do a single 64-bit load here, but it's likely that the basic
292 // 32-bit and extract sequence is already present, and it is probably easier
293 // to CSE this. The loads should be mergable later anyway.
294 Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
295 LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));
296
297 Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
298 LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));
299
300 MDNode *MD = MDNode::get(Mod->getContext(), None);
301 LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
302 LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
303 ST.makeLIDRangeMetadata(LoadZU);
304
305 // Extract y component. Upper half of LoadZU should be zero already.
306 Value *Y = Builder.CreateLShr(LoadXY, 16);
307
308 return std::make_pair(Y, LoadZU);
309}
310
311Value *AMDGPUPromoteAlloca::getWorkitemID(IRBuilder<> &Builder, unsigned N) {
312 const AMDGPUSubtarget &ST =
313 AMDGPUSubtarget::get(*TM, *Builder.GetInsertBlock()->getParent());
314 Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
315
316 switch (N) {
317 case 0:
318 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
319 : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
320 break;
321 case 1:
322 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
323 : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
324 break;
325
326 case 2:
327 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
328 : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
329 break;
330 default:
331 llvm_unreachable("invalid dimension");
332 }
333
334 Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
335 CallInst *CI = Builder.CreateCall(WorkitemIdFn);
336 ST.makeLIDRangeMetadata(CI);
337
338 return CI;
339}
340
341static FixedVectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
342 return FixedVectorType::get(ArrayTy->getElementType(),
343 ArrayTy->getNumElements());
344}
345
346static Value *stripBitcasts(Value *V) {
347 while (Instruction *I = dyn_cast<Instruction>(V)) {
348 if (I->getOpcode() != Instruction::BitCast)
349 break;
350 V = I->getOperand(0);
351 }
352 return V;
353}
354
355static Value *
356calculateVectorIndex(Value *Ptr,
357 const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
358 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(stripBitcasts(Ptr));
359 if (!GEP)
360 return nullptr;
361
362 auto I = GEPIdx.find(GEP);
363 return I == GEPIdx.end() ? nullptr : I->second;
364}
365
366static Value* GEPToVectorIndex(GetElementPtrInst *GEP) {
367 // FIXME we only support simple cases
368 if (GEP->getNumOperands() != 3)
369 return nullptr;
370
371 ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
372 if (!I0 || !I0->isZero())
373 return nullptr;
374
375 return GEP->getOperand(2);
376}
377
378// Not an instruction handled below to turn into a vector.
379//
380// TODO: Check isTriviallyVectorizable for calls and handle other
381// instructions.
382static bool canVectorizeInst(Instruction *Inst, User *User,
383 const DataLayout &DL) {
384 switch (Inst->getOpcode()) {
[49] Called C++ object pointer is null
385 case Instruction::Load: {
386 // Currently only handle the case where the Pointer Operand is a GEP.
387 // Also we could not vectorize volatile or atomic loads.
388 LoadInst *LI = cast<LoadInst>(Inst);
389 if (isa<AllocaInst>(User) &&
390 LI->getPointerOperandType() == User->getType() &&
391 isa<VectorType>(LI->getType()))
392 return true;
393
394 Instruction *PtrInst = dyn_cast<Instruction>(LI->getPointerOperand());
395 if (!PtrInst)
396 return false;
397
398 return (PtrInst->getOpcode() == Instruction::GetElementPtr ||
399 PtrInst->getOpcode() == Instruction::BitCast) &&
400 LI->isSimple();
401 }
402 case Instruction::BitCast:
403 return true;
404 case Instruction::Store: {
405 // Must be the stored pointer operand, not a stored value, plus
406 // since it should be canonical form, the User should be a GEP.
407 // Also we could not vectorize volatile or atomic stores.
408 StoreInst *SI = cast<StoreInst>(Inst);
409 if (isa<AllocaInst>(User) &&
410 SI->getPointerOperandType() == User->getType() &&
411 isa<VectorType>(SI->getValueOperand()->getType()))
412 return true;
413
414 Instruction *UserInst = dyn_cast<Instruction>(User);
415 if (!UserInst)
416 return false;
417
418 return (SI->getPointerOperand() == User) &&
419 (UserInst->getOpcode() == Instruction::GetElementPtr ||
420 UserInst->getOpcode() == Instruction::BitCast) &&
421 SI->isSimple();
422 }
423 default:
424 return false;
425 }
426}
427
428static bool tryPromoteAllocaToVector(AllocaInst *Alloca, const DataLayout &DL,
429 unsigned MaxVGPRs) {
430
431 if (DisablePromoteAllocaToVector) {
[20] Assuming the condition is false
[21] Taking false branch
432 LLVM_DEBUG(dbgs() << " Promotion alloca to vector is disabled\n");
433 return false;
434 }
435
436 Type *AllocaTy = Alloca->getAllocatedType();
437 auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
[22] Assuming 'AllocaTy' is not a 'FixedVectorType'
438 if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
[23] Assuming 'AllocaTy' is a 'ArrayType'
[23.1] 'ArrayTy' is non-null
[24] Taking true branch
439 if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
[25] Assuming the condition is true
440 ArrayTy->getNumElements() > 0)
[26] Assuming the condition is true
[27] Taking true branch
441 VectorTy = arrayTypeToVecType(ArrayTy);
442 }
443
444 // Use up to 1/4 of available register budget for vectorization.
445 unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
[28] Assuming the condition is false
[29] '?' condition is false
446 : (MaxVGPRs * 32);
447
448 if (DL.getTypeSizeInBits(AllocaTy) * 4 > Limit) {
[30] Assuming the condition is false
[31] Taking false branch
449 LLVM_DEBUG(dbgs() << " Alloca too big for vectorization with "
450 << MaxVGPRs << " registers available\n");
451 return false;
452 }
453
454 LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");
[32] Assuming 'DebugFlag' is false
[33] Loop condition is false. Exiting loop
455
456 // FIXME: There is no reason why we can't support larger arrays, we
457 // are just being conservative for now.
458 // FIXME: We also reject alloca's of the form [ 2 x [ 2 x i32 ]] or equivalent. Potentially these
459 // could also be promoted but we don't currently handle this case
460 if (!VectorTy || VectorTy->getNumElements() > 16 ||
[34] Assuming 'VectorTy' is non-null
[35] Assuming the condition is false
461 VectorTy->getNumElements() < 2) {
[36] Assuming the condition is false
[37] Taking false branch
462 LLVM_DEBUG(dbgs() << " Cannot convert type to vector\n");
463 return false;
464 }
465
466 std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
467 std::vector<Value *> WorkList;
468 SmallVector<User *, 8> Users(Alloca->users());
469 SmallVector<User *, 8> UseUsers(Users.size(), Alloca);
470 Type *VecEltTy = VectorTy->getElementType();
471 while (!Users.empty()) {
[38] Calling 'SmallVectorBase::empty'
[41] Returning from 'SmallVectorBase::empty'
[42] Loop condition is true. Entering loop body
472 User *AllocaUser = Users.pop_back_val();
473 User *UseUser = UseUsers.pop_back_val();
474 Instruction *Inst = dyn_cast<Instruction>(AllocaUser);
[43] Assuming 'AllocaUser' is not a 'Instruction'
[44] 'Inst' initialized to a null pointer value
475
476 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
[45] Assuming 'AllocaUser' is not a 'GetElementPtrInst'
477 if (!GEP) {
[45.1] 'GEP' is null
[46] Taking true branch
478 if (!canVectorizeInst(Inst, UseUser, DL))
[47] Passing null pointer value via 1st parameter 'Inst'
[48] Calling 'canVectorizeInst'
479 return false;
480
481 if (Inst->getOpcode() == Instruction::BitCast) {
482 Type *FromTy = Inst->getOperand(0)->getType()->getPointerElementType();
483 Type *ToTy = Inst->getType()->getPointerElementType();
484 if (FromTy->isAggregateType() || ToTy->isAggregateType() ||
485 DL.getTypeSizeInBits(FromTy) != DL.getTypeSizeInBits(ToTy))
486 continue;
487
488 for (User *CastUser : Inst->users()) {
489 if (isAssumeLikeIntrinsic(cast<Instruction>(CastUser)))
490 continue;
491 Users.push_back(CastUser);
492 UseUsers.push_back(Inst);
493 }
494
495 continue;
496 }
497
498 WorkList.push_back(AllocaUser);
499 continue;
500 }
501
502 Value *Index = GEPToVectorIndex(GEP);
503
504 // If we can't compute a vector index from this GEP, then we can't
505 // promote this alloca to vector.
506 if (!Index) {
507 LLVM_DEBUG(dbgs() << " Cannot compute vector index for GEP " << *GEP
508 << '\n');
509 return false;
510 }
511
512 GEPVectorIdx[GEP] = Index;
513 Users.append(GEP->user_begin(), GEP->user_end());
514 UseUsers.append(GEP->getNumUses(), GEP);
515 }
516
517 LLVM_DEBUG(dbgs() << " Converting alloca to vector " << *AllocaTy << " -> "
518 << *VectorTy << '\n');
519
520 for (Value *V : WorkList) {
521 Instruction *Inst = cast<Instruction>(V);
522 IRBuilder<> Builder(Inst);
523 switch (Inst->getOpcode()) {
524 case Instruction::Load: {
525 if (Inst->getType() == AllocaTy || Inst->getType()->isVectorTy())
526 break;
527
528 Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
529 Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
530 if (!Index)
531 break;
532
533 Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
534 Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
535 Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
536 Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
537 if (Inst->getType() != VecEltTy)
538 ExtractElement = Builder.CreateBitOrPointerCast(ExtractElement, Inst->getType());
539 Inst->replaceAllUsesWith(ExtractElement);
540 Inst->eraseFromParent();
541 break;
542 }
543 case Instruction::Store: {
544 StoreInst *SI = cast<StoreInst>(Inst);
545 if (SI->getValueOperand()->getType() == AllocaTy ||
546 SI->getValueOperand()->getType()->isVectorTy())
547 break;
548
549 Value *Ptr = SI->getPointerOperand();
550 Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
551 if (!Index)
552 break;
553
554 Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
555 Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
556 Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
557 Value *Elt = SI->getValueOperand();
558 if (Elt->getType() != VecEltTy)
559 Elt = Builder.CreateBitOrPointerCast(Elt, VecEltTy);
560 Value *NewVecValue = Builder.CreateInsertElement(VecValue, Elt, Index);
561 Builder.CreateStore(NewVecValue, BitCast);
562 Inst->eraseFromParent();
563 break;
564 }
565
566 default:
567 llvm_unreachable("Inconsistency in instructions promotable to vector");
568 }
569 }
570 return true;
571}
572
573static bool isCallPromotable(CallInst *CI) {
574 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
575 if (!II)
576 return false;
577
578 switch (II->getIntrinsicID()) {
579 case Intrinsic::memcpy:
580 case Intrinsic::memmove:
581 case Intrinsic::memset:
582 case Intrinsic::lifetime_start:
583 case Intrinsic::lifetime_end:
584 case Intrinsic::invariant_start:
585 case Intrinsic::invariant_end:
586 case Intrinsic::launder_invariant_group:
587 case Intrinsic::strip_invariant_group:
588 case Intrinsic::objectsize:
589 return true;
590 default:
591 return false;
592 }
593}
594
595bool AMDGPUPromoteAlloca::binaryOpIsDerivedFromSameAlloca(Value *BaseAlloca,
596 Value *Val,
597 Instruction *Inst,
598 int OpIdx0,
599 int OpIdx1) const {
600 // Figure out which operand is the one we might not be promoting.
601 Value *OtherOp = Inst->getOperand(OpIdx0);
602 if (Val == OtherOp)
603 OtherOp = Inst->getOperand(OpIdx1);
604
605 if (isa<ConstantPointerNull>(OtherOp))
606 return true;
607
608 Value *OtherObj = getUnderlyingObject(OtherOp);
609 if (!isa<AllocaInst>(OtherObj))
610 return false;
611
612 // TODO: We should be able to replace undefs with the right pointer type.
613
614 // TODO: If we know the other base object is another promotable
615 // alloca, not necessarily this alloca, we can do this. The
616 // important part is both must have the same address space at
617 // the end.
618 if (OtherObj != BaseAlloca) {
619 LLVM_DEBUG(
620 dbgs() << "Found a binary instruction with another alloca object\n");
621 return false;
622 }
623
624 return true;
625}
626
627bool AMDGPUPromoteAlloca::collectUsesWithPtrTypes(
628 Value *BaseAlloca,
629 Value *Val,
630 std::vector<Value*> &WorkList) const {
631
632 for (User *User : Val->users()) {
633 if (is_contained(WorkList, User))
634 continue;
635
636 if (CallInst *CI = dyn_cast<CallInst>(User)) {
637 if (!isCallPromotable(CI))
638 return false;
639
640 WorkList.push_back(User);
641 continue;
642 }
643
644 Instruction *UseInst = cast<Instruction>(User);
645 if (UseInst->getOpcode() == Instruction::PtrToInt)
646 return false;
647
648 if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
649 if (LI->isVolatile())
650 return false;
651
652 continue;
653 }
654
655 if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
656 if (SI->isVolatile())
657 return false;
658
659 // Reject if the stored value is not the pointer operand.
660 if (SI->getPointerOperand() != Val)
661 return false;
662 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
663 if (RMW->isVolatile())
664 return false;
665 } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
666 if (CAS->isVolatile())
667 return false;
668 }
669
670 // Only promote a select if we know that the other select operand
671 // is from another pointer that will also be promoted.
672 if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
673 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
674 return false;
675
676 // May need to rewrite constant operands.
677 WorkList.push_back(ICmp);
678 }
679
680 if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
681 // Give up if the pointer may be captured.
682 if (PointerMayBeCaptured(UseInst, true, true))
683 return false;
684 // Don't collect the users of this.
685 WorkList.push_back(User);
686 continue;
687 }
688
689 if (!User->getType()->isPointerTy())
690 continue;
691
692 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
693 // Be conservative if an address could be computed outside the bounds of
694 // the alloca.
695 if (!GEP->isInBounds())
696 return false;
697 }
698
699 // Only promote a select if we know that the other select operand is from
700 // another pointer that will also be promoted.
701 if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
702 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
703 return false;
704 }
705
706 // Repeat for phis.
707 if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
708 // TODO: Handle more complex cases. We should be able to replace loops
709 // over arrays.
710 switch (Phi->getNumIncomingValues()) {
711 case 1:
712 break;
713 case 2:
714 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
715 return false;
716 break;
717 default:
718 return false;
719 }
720 }
721
722 WorkList.push_back(User);
723 if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
724 return false;
725 }
726
727 return true;
728}
729
730bool AMDGPUPromoteAlloca::hasSufficientLocalMem(const Function &F) {
731
732 FunctionType *FTy = F.getFunctionType();
733 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);
734
735 // If the function has any arguments in the local address space, then it's
736 // possible these arguments require the entire local memory space, so
737 // we cannot use local memory in the pass.
738 for (Type *ParamTy : FTy->params()) {
739 PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
740 if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
741 LocalMemLimit = 0;
742 LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
743 "local memory disabled.\n");
744 return false;
745 }
746 }
747
748 LocalMemLimit = ST.getLocalMemorySize();
749 if (LocalMemLimit == 0)
750 return false;
751
752 SmallVector<const Constant *, 16> Stack;
753 SmallPtrSet<const Constant *, 8> VisitedConstants;
754 SmallPtrSet<const GlobalVariable *, 8> UsedLDS;
755
756 auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
757 for (const User *U : Val->users()) {
758 if (const Instruction *Use = dyn_cast<Instruction>(U)) {
759 if (Use->getParent()->getParent() == &F)
760 return true;
761 } else {
762 const Constant *C = cast<Constant>(U);
763 if (VisitedConstants.insert(C).second)
764 Stack.push_back(C);
765 }
766 }
767
768 return false;
769 };
770
771 for (GlobalVariable &GV : Mod->globals()) {
772 if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
773 continue;
774
775 if (visitUsers(&GV, &GV)) {
776 UsedLDS.insert(&GV);
777 Stack.clear();
778 continue;
779 }
780
781 // For any ConstantExpr uses, we need to recursively search the users until
782 // we see a function.
783 while (!Stack.empty()) {
784 const Constant *C = Stack.pop_back_val();
785 if (visitUsers(&GV, C)) {
786 UsedLDS.insert(&GV);
787 Stack.clear();
788 break;
789 }
790 }
791 }
792
793 const DataLayout &DL = Mod->getDataLayout();
794 SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes;
795 AllocatedSizes.reserve(UsedLDS.size());
796
797 for (const GlobalVariable *GV : UsedLDS) {
798 Align Alignment =
799 DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
800 uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());
801 AllocatedSizes.emplace_back(AllocSize, Alignment);
802 }
803
804 // Sort to try to estimate the worst case alignment padding
805 //
806 // FIXME: We should really do something to fix the addresses to a more optimal
807 // value instead
808 llvm::sort(AllocatedSizes.begin(), AllocatedSizes.end(),
809 [](std::pair<uint64_t, Align> LHS, std::pair<uint64_t, Align> RHS) {
810 return LHS.second < RHS.second;
811 });
812
813 // Check how much local memory is being used by global objects
814 CurrentLocalMemUsage = 0;
815
816 // FIXME: Try to account for padding here. The real padding and address is
817 // currently determined from the inverse order of uses in the function when
818 // legalizing, which could also potentially change. We try to estimate the
819 // worst case here, but we probably should fix the addresses earlier.
820 for (auto Alloc : AllocatedSizes) {
821 CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second);
822 CurrentLocalMemUsage += Alloc.first;
823 }
824
825 unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
826 F);
827
828 // Restrict local memory usage so that we don't drastically reduce occupancy,
829 // unless it is already significantly reduced.
830
831 // TODO: Have some sort of hint or other heuristics to guess occupancy based
832 // on other factors..
833 unsigned OccupancyHint = ST.getWavesPerEU(F).second;
834 if (OccupancyHint == 0)
835 OccupancyHint = 7;
836
837 // Clamp to max value.
838 OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());
839
840 // Check the hint but ignore it if it's obviously wrong from the existing LDS
841 // usage.
842 MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);
843
844
845 // Round up to the next tier of usage.
846 unsigned MaxSizeWithWaveCount
847 = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);
848
849 // Program is possibly broken by using more local mem than available.
850 if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
851 return false;
852
853 LocalMemLimit = MaxSizeWithWaveCount;
854
855 LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
856 << " bytes of LDS\n"
857 << " Rounding size to " << MaxSizeWithWaveCount
858 << " with a maximum occupancy of " << MaxOccupancy << '\n'
859 << " and " << (LocalMemLimit - CurrentLocalMemUsage)
860 << " available for promotion\n");
861
862 return true;
863}
864
865// FIXME: Should try to pick the most likely to be profitable allocas first.
866bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
867 // Array allocations are probably not worth handling, since an allocation of
868 // the array type is the canonical form.
869 if (!I.isStaticAlloca() || I.isArrayAllocation())
870 return false;
871
872 const DataLayout &DL = Mod->getDataLayout();
873 IRBuilder<> Builder(&I);
874
875 // First try to replace the alloca with a vector
876 Type *AllocaTy = I.getAllocatedType();
877
878 LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');
879
880 if (tryPromoteAllocaToVector(&I, DL, MaxVGPRs))
881 return true; // Promoted to vector.
882
883 if (DisablePromoteAllocaToLDS)
884 return false;
885
886 const Function &ContainingFunction = *I.getParent()->getParent();
887 CallingConv::ID CC = ContainingFunction.getCallingConv();
888
889 // Don't promote the alloca to LDS for shader calling conventions as the work
890 // item ID intrinsics are not supported for these calling conventions.
891 // Furthermore not all LDS is available for some of the stages.
892 switch (CC) {
893 case CallingConv::AMDGPU_KERNEL:
894 case CallingConv::SPIR_KERNEL:
895 break;
896 default:
897 LLVM_DEBUG(
898 dbgs()
899 << " promote alloca to LDS not supported with calling convention.\n");
900 return false;
901 }
902
903 // Not likely to have sufficient local memory for promotion.
904 if (!SufficientLDS)
905 return false;
906
907 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, ContainingFunction);
908 unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;
909
910 Align Alignment =
911 DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());
912
913 // FIXME: This computed padding is likely wrong since it depends on inverse
914 // usage order.
915 //
916 // FIXME: It is also possible that if we're allowed to use all of the memory
917 // we could end up using more than the maximum due to alignment padding.
918
919 uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
920 uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
921 NewSize += AllocSize;
922
923 if (NewSize > LocalMemLimit) {
924 LLVM_DEBUG(dbgs() << " " << AllocSize
925 << " bytes of local memory not available to promote\n");
926 return false;
927 }
928
929 CurrentLocalMemUsage = NewSize;
930
931 std::vector<Value*> WorkList;
932
933 if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
934 LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
935 return false;
936 }
937
938 LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");
939
940 Function *F = I.getParent()->getParent();
941
942 Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
943 GlobalVariable *GV = new GlobalVariable(
944 *Mod, GVTy, false, GlobalValue::InternalLinkage,
945 UndefValue::get(GVTy),
946 Twine(F->getName()) + Twine('.') + I.getName(),
947 nullptr,
948 GlobalVariable::NotThreadLocal,
949 AMDGPUAS::LOCAL_ADDRESS);
950 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
951 GV->setAlignment(MaybeAlign(I.getAlignment()));
952
953 Value *TCntY, *TCntZ;
954
955 std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
956 Value *TIdX = getWorkitemID(Builder, 0);
957 Value *TIdY = getWorkitemID(Builder, 1);
958 Value *TIdZ = getWorkitemID(Builder, 2);
959
960 Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
961 Tmp0 = Builder.CreateMul(Tmp0, TIdX);
962 Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
963 Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
964 TID = Builder.CreateAdd(TID, TIdZ);
965
966 Value *Indices[] = {
967 Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
968 TID
969 };
970
971 Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
972 I.mutateType(Offset->getType());
973 I.replaceAllUsesWith(Offset);
974 I.eraseFromParent();
975
976 for (Value *V : WorkList) {
977 CallInst *Call = dyn_cast<CallInst>(V);
978 if (!Call) {
979 if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
980 Value *Src0 = CI->getOperand(0);
981 Type *EltTy = Src0->getType()->getPointerElementType();
982 PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);
983
984 if (isa<ConstantPointerNull>(CI->getOperand(0)))
985 CI->setOperand(0, ConstantPointerNull::get(NewTy));
986
987 if (isa<ConstantPointerNull>(CI->getOperand(1)))
988 CI->setOperand(1, ConstantPointerNull::get(NewTy));
989
990 continue;
991 }
992
993 // The operand's value should be corrected on its own and we don't want to
994 // touch the users.
995 if (isa<AddrSpaceCastInst>(V))
996 continue;
997
998 Type *EltTy = V->getType()->getPointerElementType();
999 PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);
1000
1001 // FIXME: It doesn't really make sense to try to do this for all
1002 // instructions.
1003 V->mutateType(NewTy);
1004
1005 // Adjust the types of any constant operands.
1006 if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
1007 if (isa<ConstantPointerNull>(SI->getOperand(1)))
1008 SI->setOperand(1, ConstantPointerNull::get(NewTy));
1009
1010 if (isa<ConstantPointerNull>(SI->getOperand(2)))
1011 SI->setOperand(2, ConstantPointerNull::get(NewTy));
1012 } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
1013 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1014 if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
1015 Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
1016 }
1017 }
1018
1019 continue;
1020 }
1021
1022 IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
1023 Builder.SetInsertPoint(Intr);
1024 switch (Intr->getIntrinsicID()) {
1025 case Intrinsic::lifetime_start:
1026 case Intrinsic::lifetime_end:
1027 // These intrinsics are for address space 0 only
1028 Intr->eraseFromParent();
1029 continue;
1030 case Intrinsic::memcpy: {
1031 MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
1032 Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getDestAlign(),
1033 MemCpy->getRawSource(), MemCpy->getSourceAlign(),
1034 MemCpy->getLength(), MemCpy->isVolatile());
1035 Intr->eraseFromParent();
1036 continue;
1037 }
1038 case Intrinsic::memmove: {
1039 MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
1040 Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getDestAlign(),
1041 MemMove->getRawSource(), MemMove->getSourceAlign(),
1042 MemMove->getLength(), MemMove->isVolatile());
1043 Intr->eraseFromParent();
1044 continue;
1045 }
1046 case Intrinsic::memset: {
1047 MemSetInst *MemSet = cast<MemSetInst>(Intr);
1048 Builder.CreateMemSet(
1049 MemSet->getRawDest(), MemSet->getValue(), MemSet->getLength(),
1050 MaybeAlign(MemSet->getDestAlignment()), MemSet->isVolatile());
1051 Intr->eraseFromParent();
1052 continue;
1053 }
1054 case Intrinsic::invariant_start:
1055 case Intrinsic::invariant_end:
1056 case Intrinsic::launder_invariant_group:
1057 case Intrinsic::strip_invariant_group:
1058 Intr->eraseFromParent();
1059 // FIXME: I think the invariant marker should still theoretically apply,
1060 // but the intrinsics need to be changed to accept pointers with any
1061 // address space.
1062 continue;
1063 case Intrinsic::objectsize: {
1064 Value *Src = Intr->getOperand(0);
1065 Type *SrcTy = Src->getType()->getPointerElementType();
1066 Function *ObjectSize = Intrinsic::getDeclaration(Mod,
1067 Intrinsic::objectsize,
1068 { Intr->getType(), PointerType::get(SrcTy, AMDGPUAS::LOCAL_ADDRESS) }
1069 );
1070
1071 CallInst *NewCall = Builder.CreateCall(
1072 ObjectSize,
1073 {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
1074 Intr->replaceAllUsesWith(NewCall);
1075 Intr->eraseFromParent();
1076 continue;
1077 }
1078 default:
1079 Intr->print(errs());
1080 llvm_unreachable("Don't know how to promote alloca intrinsic use.");
1081 }
1082 }
1083 return true;
1084}
1085
1086bool AMDGPUPromoteAllocaToVector::runOnFunction(Function &F) {
1087 if (skipFunction(F) || DisablePromoteAllocaToVector)
[1] Assuming the condition is false
[2] Assuming the condition is false
[3] Taking false branch
1088 return false;
1089
1090 const TargetMachine *TM;
1091 if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
[4] Assuming 'TPC' is non-null
[5] Taking true branch
1092 TM = &TPC->getTM<TargetMachine>();
1093 else
1094 return false;
1095
1096 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);
1097 if (!ST.isPromoteAllocaEnabled())
[6] Assuming the condition is false
[7] Taking false branch
1098 return false;
1099
1100 if (TM->getTargetTriple().getArch() == Triple::amdgcn) {
[8] Assuming the condition is false
[9] Taking false branch
1101 const GCNSubtarget &ST = TM->getSubtarget<GCNSubtarget>(F);
1102 MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
1103 } else {
1104 MaxVGPRs = 128;
1105 }
1106
1107 bool Changed = false;
1108 BasicBlock &EntryBB = *F.begin();
1109
1110 SmallVector<AllocaInst *, 16> Allocas;
1111 for (Instruction &I : EntryBB) {
1112 if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
1113 Allocas.push_back(AI);
1114 }
1115
1116 for (AllocaInst *AI : Allocas) {
[10] Assuming '__begin1' is not equal to '__end1'
1117 if (handleAlloca(*AI))
[11] Taking false branch
[12] Taking false branch
[13] Calling 'AMDGPUPromoteAllocaToVector::handleAlloca'
1118 Changed = true;
1119 }
1120
1121 return Changed;
1122}
1123
1124bool AMDGPUPromoteAllocaToVector::handleAlloca(AllocaInst &I) {
1125 // Array allocations are probably not worth handling, since an allocation of
1126 // the array type is the canonical form.
1127 if (!I.isStaticAlloca() || I.isArrayAllocation())
[14] Assuming the condition is false
[15] Assuming the condition is false
[16] Taking false branch
1128 return false;
1129
1130 LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');
[17] Assuming 'DebugFlag' is false
[18] Loop condition is false. Exiting loop
1131
1132 Module *Mod = I.getParent()->getParent()->getParent();
1133 return tryPromoteAllocaToVector(&I, Mod->getDataLayout(), MaxVGPRs);
[19] Calling 'tryPromoteAllocaToVector'
1134}
1135
1136FunctionPass *llvm::createAMDGPUPromoteAlloca() {
1137 return new AMDGPUPromoteAlloca();
1138}
1139
1140FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
1141 return new AMDGPUPromoteAllocaToVector();
1142}
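
The path above is feasible only because the !GEP branch assumes 'Inst' is non-null: dyn_cast<Instruction>(AllocaUser) yields null for non-Instruction users (e.g. ConstantExpr uses of the alloca), and canVectorizeInst dereferences its first parameter unconditionally at line 384. A minimal sketch of one possible guard at the call site follows; it is illustrative only, assumes the LLVM 12 APIs used in this file, and is not necessarily the fix that landed upstream:

// Hypothetical guard in the user loop of tryPromoteAllocaToVector:
Instruction *Inst = dyn_cast<Instruction>(AllocaUser);

GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
if (!GEP) {
  // AllocaUser can be a non-Instruction User (e.g. a ConstantExpr), in which
  // case Inst is null; bail out before canVectorizeInst can dereference it.
  if (!Inst || !canVectorizeInst(Inst, UseUser, DL))
    return false;
  // ... remainder of the !GEP branch unchanged ...
}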

/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/ADT/SmallVector.h

1//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the SmallVector class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_ADT_SMALLVECTOR_H
14#define LLVM_ADT_SMALLVECTOR_H
15
16#include "llvm/ADT/iterator_range.h"
17#include "llvm/Support/AlignOf.h"
18#include "llvm/Support/Compiler.h"
19#include "llvm/Support/ErrorHandling.h"
20#include "llvm/Support/MathExtras.h"
21#include "llvm/Support/MemAlloc.h"
22#include "llvm/Support/type_traits.h"
23#include <algorithm>
24#include <cassert>
25#include <cstddef>
26#include <cstdlib>
27#include <cstring>
28#include <initializer_list>
29#include <iterator>
30#include <limits>
31#include <memory>
32#include <new>
33#include <type_traits>
34#include <utility>
35
36namespace llvm {
37
38/// This is all the stuff common to all SmallVectors.
39///
40/// The template parameter specifies the type which should be used to hold the
41/// Size and Capacity of the SmallVector, so it can be adjusted.
42/// Using 32 bit size is desirable to shrink the size of the SmallVector.
43/// Using 64 bit size is desirable for cases like SmallVector<char>, where a
44/// 32 bit size would limit the vector to ~4GB. SmallVectors are used for
45/// buffering bitcode output - which can exceed 4GB.
46template <class Size_T> class SmallVectorBase {
47protected:
48 void *BeginX;
49 Size_T Size = 0, Capacity;
50
51 /// The maximum value of the Size_T used.
52 static constexpr size_t SizeTypeMax() {
53 return std::numeric_limits<Size_T>::max();
54 }
55
56 SmallVectorBase() = delete;
57 SmallVectorBase(void *FirstEl, size_t TotalCapacity)
58 : BeginX(FirstEl), Capacity(TotalCapacity) {}
59
60 /// This is an implementation of the grow() method which only works
61 /// on POD-like data types and is out of line to reduce code duplication.
62 /// This function will report a fatal error if it cannot increase capacity.
63 void grow_pod(void *FirstEl, size_t MinSize, size_t TSize);
64
65 /// Report that MinSize doesn't fit into this vector's size type. Throws
66 /// std::length_error or calls report_fatal_error.
67 LLVM_ATTRIBUTE_NORETURN static void report_size_overflow(size_t MinSize);
68 /// Report that this vector is already at maximum capacity. Throws
69 /// std::length_error or calls report_fatal_error.
70 LLVM_ATTRIBUTE_NORETURN static void report_at_maximum_capacity();
71
72public:
73 size_t size() const { return Size; }
74 size_t capacity() const { return Capacity; }
75
76 LLVM_NODISCARD bool empty() const { return !Size; }
[39] Assuming field 'Size' is not equal to 0
[40] Returning zero, which participates in a condition later
77
78 /// Set the array size to \p N, which the current array must have enough
79 /// capacity for.
80 ///
81 /// This does not construct or destroy any elements in the vector.
82 ///
83 /// Clients can use this in conjunction with capacity() to write past the end
84 /// of the buffer when they know that more elements are available, and only
85 /// update the size later. This avoids the cost of value initializing elements
86 /// which will only be overwritten.
87 void set_size(size_t N) {
88 assert(N <= capacity());
89 Size = N;
90 }
91};
92
93template <class T>
94using SmallVectorSizeType =
95 typename std::conditional<sizeof(T) < 4 && sizeof(void *) >= 8, uint64_t,
96 uint32_t>::type;
97
98/// Figure out the offset of the first element.
99template <class T, typename = void> struct SmallVectorAlignmentAndSize {
100 AlignedCharArrayUnion<SmallVectorBase<SmallVectorSizeType<T>>> Base;
101 AlignedCharArrayUnion<T> FirstEl;
102};
103
104/// This is the part of SmallVectorTemplateBase which does not depend on whether
105/// the type T is a POD. The extra dummy template argument is used by ArrayRef
106/// to avoid unnecessarily requiring T to be complete.
107template <typename T, typename = void>
108class SmallVectorTemplateCommon
109 : public SmallVectorBase<SmallVectorSizeType<T>> {
110 using Base = SmallVectorBase<SmallVectorSizeType<T>>;
111
112 /// Find the address of the first element. For this pointer math to be valid
113 /// with small-size of 0 for T with lots of alignment, it's important that
114 /// SmallVectorStorage is properly-aligned even for small-size of 0.
115 void *getFirstEl() const {
116 return const_cast<void *>(reinterpret_cast<const void *>(
117 reinterpret_cast<const char *>(this) +
118 offsetof(SmallVectorAlignmentAndSize<T>, FirstEl)));
119 }
120 // Space after 'FirstEl' is clobbered, do not add any instance vars after it.
121
122protected:
123 SmallVectorTemplateCommon(size_t Size) : Base(getFirstEl(), Size) {}
124
125 void grow_pod(size_t MinSize, size_t TSize) {
126 Base::grow_pod(getFirstEl(), MinSize, TSize);
127 }
128
129 /// Return true if this is a SmallVector which has not had dynamic
130 /// memory allocated for it.
131 bool isSmall() const { return this->BeginX == getFirstEl(); }
132
133 /// Put this vector in a state of being small.
134 void resetToSmall() {
135 this->BeginX = getFirstEl();
136 this->Size = this->Capacity = 0; // FIXME: Setting Capacity to 0 is suspect.
137 }
138
139public:
140 using size_type = size_t;
141 using difference_type = ptrdiff_t;
142 using value_type = T;
143 using iterator = T *;
144 using const_iterator = const T *;
145
146 using const_reverse_iterator = std::reverse_iterator<const_iterator>;
147 using reverse_iterator = std::reverse_iterator<iterator>;
148
149 using reference = T &;
150 using const_reference = const T &;
151 using pointer = T *;
152 using const_pointer = const T *;
153
154 using Base::capacity;
155 using Base::empty;
156 using Base::size;
157
158 // forward iterator creation methods.
159 iterator begin() { return (iterator)this->BeginX; }
160 const_iterator begin() const { return (const_iterator)this->BeginX; }
161 iterator end() { return begin() + size(); }
162 const_iterator end() const { return begin() + size(); }
163
164 // reverse iterator creation methods.
165 reverse_iterator rbegin() { return reverse_iterator(end()); }
166 const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
167 reverse_iterator rend() { return reverse_iterator(begin()); }
168 const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
169
170 size_type size_in_bytes() const { return size() * sizeof(T); }
171 size_type max_size() const {
172 return std::min(this->SizeTypeMax(), size_type(-1) / sizeof(T));
173 }
174
175 size_t capacity_in_bytes() const { return capacity() * sizeof(T); }
176
177 /// Return a pointer to the vector's buffer, even if empty().
178 pointer data() { return pointer(begin()); }
179 /// Return a pointer to the vector's buffer, even if empty().
180 const_pointer data() const { return const_pointer(begin()); }
181
182 reference operator[](size_type idx) {
183 assert(idx < size());
184 return begin()[idx];
185 }
186 const_reference operator[](size_type idx) const {
187 assert(idx < size());
188 return begin()[idx];
189 }
190
191 reference front() {
192 assert(!empty());
193 return begin()[0];
194 }
195 const_reference front() const {
196 assert(!empty());
197 return begin()[0];
198 }
199
200 reference back() {
201 assert(!empty());
202 return end()[-1];
203 }
204 const_reference back() const {
205 assert(!empty());
206 return end()[-1];
207 }
208};
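Because the iterator typedefs above are raw pointers into contiguous storage, standard algorithms work on a SmallVector directly; a minimal sketch:

#include "llvm/ADT/SmallVector.h"
#include <algorithm>

void demoIterators() {
  llvm::SmallVector<int, 4> V = {3, 1, 2};
  std::sort(V.begin(), V.end()); // iterators are plain T*
  int *Raw = V.data();           // contiguous buffer, valid even when empty()
  (void)Raw;
}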
209
210/// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put
211/// method implementations that are designed to work with non-trivial T's.
212///
213/// We approximate is_trivially_copyable with trivial move/copy construction and
214/// trivial destruction. While the standard doesn't specify that you're allowed
215/// to copy these types with memcpy, there is no way for the type to observe this.
216/// This catches the important case of std::pair<POD, POD>, which is not
217/// trivially assignable.
218template <typename T, bool = (is_trivially_copy_constructible<T>::value) &&
219 (is_trivially_move_constructible<T>::value) &&
220 std::is_trivially_destructible<T>::value>
221class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
222protected:
223 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
224
225 static void destroy_range(T *S, T *E) {
226 while (S != E) {
227 --E;
228 E->~T();
229 }
230 }
231
232 /// Move the range [I, E) into the uninitialized memory starting with "Dest",
233 /// constructing elements as needed.
234 template<typename It1, typename It2>
235 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
236 std::uninitialized_copy(std::make_move_iterator(I),
237 std::make_move_iterator(E), Dest);
238 }
239
240 /// Copy the range [I, E) onto the uninitialized memory starting with "Dest",
241 /// constructing elements as needed.
242 template<typename It1, typename It2>
243 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
244 std::uninitialized_copy(I, E, Dest);
245 }
246
247 /// Grow the allocated memory (without initializing new elements), doubling
248 /// its size. Guarantees space for at least one more element, or MinSize
249 /// elements if specified.
250 void grow(size_t MinSize = 0);
251
252public:
253 void push_back(const T &Elt) {
254 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
255 this->grow();
256 ::new ((void*) this->end()) T(Elt);
257 this->set_size(this->size() + 1);
258 }
259
260 void push_back(T &&Elt) {
261 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
262 this->grow();
263 ::new ((void*) this->end()) T(::std::move(Elt));
264 this->set_size(this->size() + 1);
265 }
266
267 void pop_back() {
268 this->set_size(this->size() - 1);
269 this->end()->~T();
270 }
271};
272
273// Define this out-of-line to dissuade the C++ compiler from inlining it.
274template <typename T, bool TriviallyCopyable>
275void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) {
276 // Ensure we can fit the new capacity.
277 // This is only going to be applicable when the capacity is 32 bit.
278 if (MinSize > this->SizeTypeMax())
279 this->report_size_overflow(MinSize);
280
281 // Ensure we can meet the guarantee of space for at least one more element.
282 // The above check alone will not catch the case where grow is called with a
283 // default MinSize of 0, but the current capacity cannot be increased.
284 // This is only going to be applicable when the capacity is 32 bit.
285 if (this->capacity() == this->SizeTypeMax())
286 this->report_at_maximum_capacity();
287
288 // Always grow, even from zero.
289 size_t NewCapacity = size_t(NextPowerOf2(this->capacity() + 2));
290 NewCapacity = std::min(std::max(NewCapacity, MinSize), this->SizeTypeMax());
291 T *NewElts = static_cast<T*>(llvm::safe_malloc(NewCapacity*sizeof(T)));
292
293 // Move the elements over.
294 this->uninitialized_move(this->begin(), this->end(), NewElts);
295
296 // Destroy the original elements.
297 destroy_range(this->begin(), this->end());
298
299 // If this wasn't grown from the inline copy, deallocate the old space.
300 if (!this->isSmall())
301 free(this->begin());
302
303 this->BeginX = NewElts;
304 this->Capacity = NewCapacity;
305}
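One way to observe the growth policy above is to watch capacity() as elements are pushed; a sketch (int is trivially copyable, so it actually takes the grow_pod() path, which uses an analogous NextPowerOf2-based policy):

#include "llvm/ADT/SmallVector.h"
#include <cstdio>

void demoGrowth() {
  llvm::SmallVector<int, 4> V;
  size_t LastCap = V.capacity();
  for (int i = 0; i < 100; ++i) {
    V.push_back(i);
    if (V.capacity() != LastCap) { // report each reallocation
      std::printf("size %zu -> capacity %zu\n", V.size(), V.capacity());
      LastCap = V.capacity();
    }
  }
}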
306
307/// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put
308/// method implementations that are designed to work with trivially copyable
309/// T's. This allows using memcpy in place of copy/move construction and
310/// skipping destruction.
311template <typename T>
312class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
313protected:
314 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
315
316 // No need to do a destroy loop for PODs.
317 static void destroy_range(T *, T *) {}
318
319 /// Move the range [I, E) onto the uninitialized memory
320 /// starting with "Dest", constructing elements into it as needed.
321 template<typename It1, typename It2>
322 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
323 // Just do a copy.
324 uninitialized_copy(I, E, Dest);
325 }
326
327 /// Copy the range [I, E) onto the uninitialized memory
328 /// starting with "Dest", constructing elements into it as needed.
329 template<typename It1, typename It2>
330 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
331 // Arbitrary iterator types; just use the basic implementation.
332 std::uninitialized_copy(I, E, Dest);
333 }
334
335 /// Copy the range [I, E) onto the uninitialized memory
336 /// starting with "Dest", constructing elements into it as needed.
337 template <typename T1, typename T2>
338 static void uninitialized_copy(
339 T1 *I, T1 *E, T2 *Dest,
340 std::enable_if_t<std::is_same<typename std::remove_const<T1>::type,
341 T2>::value> * = nullptr) {
342 // Use memcpy for PODs iterated by pointers (which includes SmallVector
343 // iterators): std::uninitialized_copy optimizes to memmove, but we can
344 // use memcpy here. Note that I and E are iterators and thus might be
345 // invalid for memcpy if they are equal.
346 if (I != E)
347 memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T));
348 }
349
350 /// Double the size of the allocated memory, guaranteeing space for at
351 /// least one more element or MinSize if specified.
352 void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); }
353
354public:
355 void push_back(const T &Elt) {
356 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
357 this->grow();
358 memcpy(reinterpret_cast<void *>(this->end()), &Elt, sizeof(T));
359 this->set_size(this->size() + 1);
360 }
361
362 void pop_back() { this->set_size(this->size() - 1); }
363};
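A sketch of how the TriviallyCopyable template flag selects between the two specializations above, using the same traits as the template parameter; std::string forces the second sample type onto the element-wise path:

#include "llvm/ADT/SmallVector.h"
#include <string>
#include <type_traits>

struct PodPair { int A; int B; };
struct Heavy { std::string S; };

static_assert(llvm::is_trivially_copy_constructible<PodPair>::value &&
                  llvm::is_trivially_move_constructible<PodPair>::value &&
                  std::is_trivially_destructible<PodPair>::value,
              "PodPair takes the memcpy-based specialization");
static_assert(!std::is_trivially_destructible<Heavy>::value,
              "Heavy takes the non-trivial, element-wise specialization");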
364
365/// This class consists of common code factored out of the SmallVector class to
366/// reduce code duplication based on the SmallVector 'N' template parameter.
367template <typename T>
368class SmallVectorImpl : public SmallVectorTemplateBase<T> {
369 using SuperClass = SmallVectorTemplateBase<T>;
370
371public:
372 using iterator = typename SuperClass::iterator;
373 using const_iterator = typename SuperClass::const_iterator;
374 using reference = typename SuperClass::reference;
375 using size_type = typename SuperClass::size_type;
376
377protected:
378 // Default ctor - Initialize to empty.
379 explicit SmallVectorImpl(unsigned N)
380 : SmallVectorTemplateBase<T>(N) {}
381
382public:
383 SmallVectorImpl(const SmallVectorImpl &) = delete;
384
385 ~SmallVectorImpl() {
386 // Subclass has already destructed this vector's elements.
387 // If this wasn't grown from the inline copy, deallocate the old space.
388 if (!this->isSmall())
389 free(this->begin());
390 }
391
392 void clear() {
393 this->destroy_range(this->begin(), this->end());
394 this->Size = 0;
395 }
396
397 void resize(size_type N) {
398 if (N < this->size()) {
399 this->destroy_range(this->begin()+N, this->end());
400 this->set_size(N);
401 } else if (N > this->size()) {
402 if (this->capacity() < N)
403 this->grow(N);
404 for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
405 new (&*I) T();
406 this->set_size(N);
407 }
408 }
409
410 void resize(size_type N, const T &NV) {
411 if (N < this->size()) {
412 this->destroy_range(this->begin()+N, this->end());
413 this->set_size(N);
414 } else if (N > this->size()) {
415 if (this->capacity() < N)
416 this->grow(N);
417 std::uninitialized_fill(this->end(), this->begin()+N, NV);
418 this->set_size(N);
419 }
420 }
421
422 void reserve(size_type N) {
423 if (this->capacity() < N)
424 this->grow(N);
425 }
426
427 LLVM_NODISCARD T pop_back_val() {
428 T Result = ::std::move(this->back());
429 this->pop_back();
430 return Result;
431 }
432
433 void swap(SmallVectorImpl &RHS);
434
435 /// Add the specified range to the end of the SmallVector.
436 template <typename in_iter,
437 typename = std::enable_if_t<std::is_convertible<
438 typename std::iterator_traits<in_iter>::iterator_category,
439 std::input_iterator_tag>::value>>
440 void append(in_iter in_start, in_iter in_end) {
441 size_type NumInputs = std::distance(in_start, in_end);
442 if (NumInputs > this->capacity() - this->size())
443 this->grow(this->size()+NumInputs);
444
445 this->uninitialized_copy(in_start, in_end, this->end());
446 this->set_size(this->size() + NumInputs);
447 }
448
449 /// Append \p NumInputs copies of \p Elt to the end.
450 void append(size_type NumInputs, const T &Elt) {
451 if (NumInputs > this->capacity() - this->size())
452 this->grow(this->size()+NumInputs);
453
454 std::uninitialized_fill_n(this->end(), NumInputs, Elt);
455 this->set_size(this->size() + NumInputs);
456 }
457
458 void append(std::initializer_list<T> IL) {
459 append(IL.begin(), IL.end());
460 }
461
462 // FIXME: Consider assigning over existing elements, rather than clearing &
463 // re-initializing them - for all assign(...) variants.
464
465 void assign(size_type NumElts, const T &Elt) {
466 clear();
467 if (this->capacity() < NumElts)
468 this->grow(NumElts);
469 this->set_size(NumElts);
470 std::uninitialized_fill(this->begin(), this->end(), Elt);
471 }
472
473 template <typename in_iter,
474 typename = std::enable_if_t<std::is_convertible<
475 typename std::iterator_traits<in_iter>::iterator_category,
476 std::input_iterator_tag>::value>>
477 void assign(in_iter in_start, in_iter in_end) {
478 clear();
479 append(in_start, in_end);
480 }
481
482 void assign(std::initializer_list<T> IL) {
483 clear();
484 append(IL);
485 }
486
487 iterator erase(const_iterator CI) {
488 // Just cast away constness because this is a non-const member function.
489 iterator I = const_cast<iterator>(CI);
490
491 assert(I >= this->begin() && "Iterator to erase is out of bounds.");
492 assert(I < this->end() && "Erasing at past-the-end iterator.");
493
494 iterator N = I;
495 // Shift all elts down one.
496 std::move(I+1, this->end(), I);
497 // Drop the last elt.
498 this->pop_back();
499 return(N);
500 }
501
502 iterator erase(const_iterator CS, const_iterator CE) {
503 // Just cast away constness because this is a non-const member function.
504 iterator S = const_cast<iterator>(CS);
505 iterator E = const_cast<iterator>(CE);
506
507 assert(S >= this->begin() && "Range to erase is out of bounds.");
508 assert(S <= E && "Trying to erase invalid range.");
509 assert(E <= this->end() && "Trying to erase past the end.");
510
511 iterator N = S;
512 // Shift all elts down.
513 iterator I = std::move(E, this->end(), S);
514 // Drop the last elts.
515 this->destroy_range(I, this->end());
516 this->set_size(I - this->begin());
517 return(N);
518 }
519
520 iterator insert(iterator I, T &&Elt) {
521 if (I == this->end()) { // Important special case for empty vector.
522 this->push_back(::std::move(Elt));
523 return this->end()-1;
524 }
525
526 assert(I >= this->begin() && "Insertion iterator is out of bounds.");
527 assert(I <= this->end() && "Inserting past the end of the vector.");
528
529 if (this->size() >= this->capacity()) {
530 size_t EltNo = I-this->begin();
531 this->grow();
532 I = this->begin()+EltNo;
533 }
534
535 ::new ((void*) this->end()) T(::std::move(this->back()));
536 // Push everything else over.
537 std::move_backward(I, this->end()-1, this->end());
538 this->set_size(this->size() + 1);
539
540 // If we just moved the element we're inserting, be sure to update
541 // the reference.
542 T *EltPtr = &Elt;
543 if (I <= EltPtr && EltPtr < this->end())
544 ++EltPtr;
545
546 *I = ::std::move(*EltPtr);
547 return I;
548 }
549
550 iterator insert(iterator I, const T &Elt) {
551 if (I == this->end()) { // Important special case for empty vector.
552 this->push_back(Elt);
553 return this->end()-1;
554 }
555
556 assert(I >= this->begin() && "Insertion iterator is out of bounds.");
557 assert(I <= this->end() && "Inserting past the end of the vector.");
558
559 if (this->size() >= this->capacity()) {
560 size_t EltNo = I-this->begin();
561 this->grow();
562 I = this->begin()+EltNo;
563 }
564 ::new ((void*) this->end()) T(std::move(this->back()));
565 // Push everything else over.
566 std::move_backward(I, this->end()-1, this->end());
567 this->set_size(this->size() + 1);
568
569 // If we just moved the element we're inserting, be sure to update
570 // the reference.
571 const T *EltPtr = &Elt;
572 if (I <= EltPtr && EltPtr < this->end())
573 ++EltPtr;
574
575 *I = *EltPtr;
576 return I;
577 }
578
579 iterator insert(iterator I, size_type NumToInsert, const T &Elt) {
580 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
581 size_t InsertElt = I - this->begin();
582
583 if (I == this->end()) { // Important special case for empty vector.
584 append(NumToInsert, Elt);
585 return this->begin()+InsertElt;
586 }
587
588 assert(I >= this->begin() && "Insertion iterator is out of bounds.");
589 assert(I <= this->end() && "Inserting past the end of the vector.");
590
591 // Ensure there is enough space.
592 reserve(this->size() + NumToInsert);
593
594 // Uninvalidate the iterator.
595 I = this->begin()+InsertElt;
596
597 // If there are more elements between the insertion point and the end of the
598 // range than there are being inserted, we can use a simple approach to
599 // insertion. Since we already reserved space, we know that this won't
600 // reallocate the vector.
601 if (size_t(this->end()-I) >= NumToInsert) {
602 T *OldEnd = this->end();
603 append(std::move_iterator<iterator>(this->end() - NumToInsert),
604 std::move_iterator<iterator>(this->end()));
605
606 // Move the existing elements that get replaced.
607 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
608
609 std::fill_n(I, NumToInsert, Elt);
610 return I;
611 }
612
613 // Otherwise, we're inserting more elements than exist already, and we're
614 // not inserting at the end.
615
616 // Move over the elements that we're about to overwrite.
617 T *OldEnd = this->end();
618 this->set_size(this->size() + NumToInsert);
619 size_t NumOverwritten = OldEnd-I;
620 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
621
622 // Replace the overwritten part.
623 std::fill_n(I, NumOverwritten, Elt);
624
625 // Insert the non-overwritten middle part.
626 std::uninitialized_fill_n(OldEnd, NumToInsert-NumOverwritten, Elt);
627 return I;
628 }
629
630 template <typename ItTy,
631 typename = std::enable_if_t<std::is_convertible<
632 typename std::iterator_traits<ItTy>::iterator_category,
633 std::input_iterator_tag>::value>>
634 iterator insert(iterator I, ItTy From, ItTy To) {
635 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
636 size_t InsertElt = I - this->begin();
637
638 if (I == this->end()) { // Important special case for empty vector.
639 append(From, To);
640 return this->begin()+InsertElt;
641 }
642
643 assert(I >= this->begin() && "Insertion iterator is out of bounds.");
644 assert(I <= this->end() && "Inserting past the end of the vector.");
645
646 size_t NumToInsert = std::distance(From, To);
647
648 // Ensure there is enough space.
649 reserve(this->size() + NumToInsert);
650
651 // Uninvalidate the iterator.
652 I = this->begin()+InsertElt;
653
654 // If there are more elements between the insertion point and the end of the
655 // range than there are being inserted, we can use a simple approach to
656 // insertion. Since we already reserved space, we know that this won't
657 // reallocate the vector.
658 if (size_t(this->end()-I) >= NumToInsert) {
659 T *OldEnd = this->end();
660 append(std::move_iterator<iterator>(this->end() - NumToInsert),
661 std::move_iterator<iterator>(this->end()));
662
663 // Move the existing elements that get replaced.
664 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
665
666 std::copy(From, To, I);
667 return I;
668 }
669
670 // Otherwise, we're inserting more elements than exist already, and we're
671 // not inserting at the end.
672
673 // Move over the elements that we're about to overwrite.
674 T *OldEnd = this->end();
675 this->set_size(this->size() + NumToInsert);
676 size_t NumOverwritten = OldEnd-I;
677 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
678
679 // Replace the overwritten part.
680 for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
681 *J = *From;
682 ++J; ++From;
683 }
684
685 // Insert the non-overwritten middle part.
686 this->uninitialized_copy(From, To, OldEnd);
687 return I;
688 }
689
690 void insert(iterator I, std::initializer_list<T> IL) {
691 insert(I, IL.begin(), IL.end());
692 }
693
694 template <typename... ArgTypes> reference emplace_back(ArgTypes &&... Args) {
695 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
696 this->grow();
697 ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
698 this->set_size(this->size() + 1);
699 return this->back();
700 }
701
702 SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
703
704 SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
705
706 bool operator==(const SmallVectorImpl &RHS) const {
707 if (this->size() != RHS.size()) return false;
708 return std::equal(this->begin(), this->end(), RHS.begin());
709 }
710 bool operator!=(const SmallVectorImpl &RHS) const {
711 return !(*this == RHS);
712 }
713
714 bool operator<(const SmallVectorImpl &RHS) const {
715 return std::lexicographical_compare(this->begin(), this->end(),
716 RHS.begin(), RHS.end());
717 }
718};
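SmallVectorImpl<T> is also the type to accept in function signatures: since it is independent of the inline element count N, one function serves callers with any SmallVector<T, N>. A minimal sketch:

#include "llvm/ADT/SmallVector.h"

static void appendSquares(llvm::SmallVectorImpl<int> &Out, int Count) {
  Out.reserve(Out.size() + Count); // grow once up front
  for (int i = 0; i < Count; ++i)
    Out.push_back(i * i);
}

void demoImpl() {
  llvm::SmallVector<int, 8> A;
  llvm::SmallVector<int, 64> B;
  appendSquares(A, 4);  // the same function accepts
  appendSquares(B, 32); // both inline sizes
}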
719
720template <typename T>
721void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
722 if (this == &RHS) return;
723
724 // We can only avoid copying elements if neither vector is small.
725 if (!this->isSmall() && !RHS.isSmall()) {
726 std::swap(this->BeginX, RHS.BeginX);
727 std::swap(this->Size, RHS.Size);
728 std::swap(this->Capacity, RHS.Capacity);
729 return;
730 }
731 if (RHS.size() > this->capacity())
732 this->grow(RHS.size());
733 if (this->size() > RHS.capacity())
734 RHS.grow(this->size());
735
736 // Swap the shared elements.
737 size_t NumShared = this->size();
738 if (NumShared > RHS.size()) NumShared = RHS.size();
739 for (size_type i = 0; i != NumShared; ++i)
740 std::swap((*this)[i], RHS[i]);
741
742 // Copy over the extra elts.
743 if (this->size() > RHS.size()) {
744 size_t EltDiff = this->size() - RHS.size();
745 this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
746 RHS.set_size(RHS.size() + EltDiff);
747 this->destroy_range(this->begin()+NumShared, this->end());
748 this->set_size(NumShared);
749 } else if (RHS.size() > this->size()) {
750 size_t EltDiff = RHS.size() - this->size();
751 this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
752 this->set_size(this->size() + EltDiff);
753 this->destroy_range(RHS.begin()+NumShared, RHS.end());
754 RHS.set_size(NumShared);
755 }
756}
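As the branches above show, swap() falls back to element-wise swapping whenever either side is still in small mode; a sketch where the inline size of 2 forces both vectors onto the heap, so only pointers are exchanged:

#include "llvm/ADT/SmallVector.h"

void demoSwap() {
  llvm::SmallVector<int, 2> A = {1, 2, 3};    // 3 > 2: spilled to the heap
  llvm::SmallVector<int, 2> B = {4, 5, 6, 7}; // 4 > 2: spilled to the heap
  A.swap(B); // neither is small, so this swaps BeginX/Size/Capacity only
}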
757
758template <typename T>
759SmallVectorImpl<T> &SmallVectorImpl<T>::
760 operator=(const SmallVectorImpl<T> &RHS) {
761 // Avoid self-assignment.
762 if (this == &RHS) return *this;
763
764 // If we already have sufficient space, assign the common elements, then
765 // destroy any excess.
766 size_t RHSSize = RHS.size();
767 size_t CurSize = this->size();
768 if (CurSize >= RHSSize) {
769 // Assign common elements.
770 iterator NewEnd;
771 if (RHSSize)
772 NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
773 else
774 NewEnd = this->begin();
775
776 // Destroy excess elements.
777 this->destroy_range(NewEnd, this->end());
778
779 // Trim.
780 this->set_size(RHSSize);
781 return *this;
782 }
783
784 // If we have to grow to have enough elements, destroy the current elements.
785 // This allows us to avoid copying them during the grow.
786 // FIXME: don't do this if they're efficiently moveable.
787 if (this->capacity() < RHSSize) {
788 // Destroy current elements.
789 this->destroy_range(this->begin(), this->end());
790 this->set_size(0);
791 CurSize = 0;
792 this->grow(RHSSize);
793 } else if (CurSize) {
794 // Otherwise, use assignment for the already-constructed elements.
795 std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
796 }
797
798 // Copy construct the new elements in place.
799 this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
800 this->begin()+CurSize);
801
802 // Set end.
803 this->set_size(RHSSize);
804 return *this;
805}
806
807template <typename T>
808SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
809 // Avoid self-assignment.
810 if (this == &RHS) return *this;
811
812 // If the RHS isn't small, clear this vector and then steal its buffer.
813 if (!RHS.isSmall()) {
814 this->destroy_range(this->begin(), this->end());
815 if (!this->isSmall()) free(this->begin());
816 this->BeginX = RHS.BeginX;
817 this->Size = RHS.Size;
818 this->Capacity = RHS.Capacity;
819 RHS.resetToSmall();
820 return *this;
821 }
822
823 // If we already have sufficient space, assign the common elements, then
824 // destroy any excess.
825 size_t RHSSize = RHS.size();
826 size_t CurSize = this->size();
827 if (CurSize >= RHSSize) {
828 // Assign common elements.
829 iterator NewEnd = this->begin();
830 if (RHSSize)
831 NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd);
832
833 // Destroy excess elements and trim the bounds.
834 this->destroy_range(NewEnd, this->end());
835 this->set_size(RHSSize);
836
837 // Clear the RHS.
838 RHS.clear();
839
840 return *this;
841 }
842
843 // If we have to grow to have enough elements, destroy the current elements.
844 // This allows us to avoid copying them during the grow.
845 // FIXME: this may not actually make any sense if we can efficiently move
846 // elements.
847 if (this->capacity() < RHSSize) {
848 // Destroy current elements.
849 this->destroy_range(this->begin(), this->end());
850 this->set_size(0);
851 CurSize = 0;
852 this->grow(RHSSize);
853 } else if (CurSize) {
854 // Otherwise, use assignment for the already-constructed elements.
855 std::move(RHS.begin(), RHS.begin()+CurSize, this->begin());
856 }
857
858 // Move-construct the new elements in place.
859 this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
860 this->begin()+CurSize);
861
862 // Set end.
863 this->set_size(RHSSize);
864
865 RHS.clear();
866 return *this;
867}
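The fast path of the move assignment above only fires when the source has left its inline storage; a sketch:

#include "llvm/ADT/SmallVector.h"
#include <utility>

void demoMoveAssign() {
  llvm::SmallVector<int, 2> Src = {1, 2, 3, 4}; // 4 > 2: heap-allocated
  llvm::SmallVector<int, 2> Dst;
  Dst = std::move(Src); // steals Src's buffer; Src is reset to small/empty
}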
868
869/// Storage for the SmallVector elements. This is specialized for the N=0 case
870/// to avoid allocating unnecessary storage.
871template <typename T, unsigned N>
872struct SmallVectorStorage {
873 AlignedCharArrayUnion<T> InlineElts[N];
874};
875
876/// We need the storage to be properly aligned even for small-size of 0 so that
877/// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is
878/// well-defined.
879template <typename T> struct alignas(alignof(T)) SmallVectorStorage<T, 0> {};
880
881/// This is a 'vector' (really, a variable-sized array), optimized
882/// for the case when the array is small. It contains some number of elements
883/// in-place, which allows it to avoid heap allocation when the actual number of
884/// elements is below that threshold. This allows normal "small" cases to be
885/// fast without losing generality for large inputs.
886///
887/// Note that this does not attempt to be exception safe.
888///
889template <typename T, unsigned N>
890 class LLVM_GSL_OWNER SmallVector : public SmallVectorImpl<T>,
891 SmallVectorStorage<T, N> {
892public:
893 SmallVector() : SmallVectorImpl<T>(N) {}
894
895 ~SmallVector() {
896 // Destroy the constructed elements in the vector.
897 this->destroy_range(this->begin(), this->end());
898 }
899
900 explicit SmallVector(size_t Size, const T &Value = T())
901 : SmallVectorImpl<T>(N) {
902 this->assign(Size, Value);
903 }
904
905 template <typename ItTy,
906 typename = std::enable_if_t<std::is_convertible<
907 typename std::iterator_traits<ItTy>::iterator_category,
908 std::input_iterator_tag>::value>>
909 SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
910 this->append(S, E);
911 }
912
913 template <typename RangeTy>
914 explicit SmallVector(const iterator_range<RangeTy> &R)
915 : SmallVectorImpl<T>(N) {
916 this->append(R.begin(), R.end());
917 }
918
919 SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
920 this->assign(IL);
921 }
922
923 SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) {
924 if (!RHS.empty())
925 SmallVectorImpl<T>::operator=(RHS);
926 }
927
928 const SmallVector &operator=(const SmallVector &RHS) {
929 SmallVectorImpl<T>::operator=(RHS);
930 return *this;
931 }
932
933 SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) {
934 if (!RHS.empty())
935 SmallVectorImpl<T>::operator=(::std::move(RHS));
936 }
937
938 SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) {
939 if (!RHS.empty())
940 SmallVectorImpl<T>::operator=(::std::move(RHS));
941 }
942
943 const SmallVector &operator=(SmallVector &&RHS) {
944 SmallVectorImpl<T>::operator=(::std::move(RHS));
945 return *this;
946 }
947
948 const SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
949 SmallVectorImpl<T>::operator=(::std::move(RHS));
950 return *this;
951 }
952
953 const SmallVector &operator=(std::initializer_list<T> IL) {
954 this->assign(IL);
955 return *this;
956 }
957};
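A few of the constructors above in use; a minimal sketch:

#include "llvm/ADT/SmallVector.h"
#include <utility>

void demoCtors() {
  llvm::SmallVector<int, 4> A = {1, 2, 3};         // initializer_list
  llvm::SmallVector<int, 4> B(A.begin(), A.end()); // iterator range
  llvm::SmallVector<int, 4> C(5, 42);              // five copies of 42
  llvm::SmallVector<int, 4> D(std::move(B));       // move construction
}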
958
959template <typename T, unsigned N>
960inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
961 return X.capacity_in_bytes();
962}
963
964/// Given a range of type R, iterate the entire range and return a
965/// SmallVector with the elements of the range. This is useful, for example,
966/// when you want to iterate a range and then sort the results.
967template <unsigned Size, typename R>
968SmallVector<typename std::remove_const<typename std::remove_reference<
969 decltype(*std::begin(std::declval<R &>()))>::type>::type,
970 Size>
971to_vector(R &&Range) {
972 return {std::begin(Range), std::end(Range)};
973}
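For example, to_vector() can materialize a std::set so its elements can be re-sorted under a different order; a sketch:

#include "llvm/ADT/SmallVector.h"
#include <algorithm>
#include <functional>
#include <set>

void demoToVector() {
  std::set<int> S = {3, 1, 2};
  auto V = llvm::to_vector<8>(S); // SmallVector<int, 8> holding {1, 2, 3}
  std::sort(V.begin(), V.end(), std::greater<int>()); // now {3, 2, 1}
}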
974
975} // end namespace llvm
976
977namespace std {
978
979 /// Implement std::swap in terms of SmallVector swap.
980 template<typename T>
981 inline void
982 swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) {
983 LHS.swap(RHS);
984 }
985
986 /// Implement std::swap in terms of SmallVector swap.
987 template<typename T, unsigned N>
988 inline void
989 swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) {
990 LHS.swap(RHS);
991 }
992
993} // end namespace std
994
995#endif // LLVM_ADT_SMALLVECTOR_H