Bug Summary

File: build/llvm-toolchain-snapshot-15~++20220310101044+47f652d69517/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
Warning: line 375, column 11
Called C++ object pointer is null
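For orientation, a minimal sketch (not taken from the file; the names are illustrative only) of the pattern this checker flags: dyn_cast<> returns nullptr when the cast fails, and dereferencing that result without a check is the reported defect.

#include "llvm/IR/Instruction.h"
using namespace llvm;

// Hypothetical helper mirroring the flagged pattern: if V is not an
// Instruction, dyn_cast returns nullptr and the first variant dereferences it.
unsigned opcodeUnchecked(Value *V) {
  Instruction *Inst = dyn_cast<Instruction>(V); // may be nullptr
  return Inst->getOpcode();                     // null dereference if the cast failed
}

// Defensive variant: test the pointer before use.
unsigned opcodeOrZero(Value *V) {
  if (Instruction *Inst = dyn_cast<Instruction>(V))
    return Inst->getOpcode();
  return 0;
}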

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name AMDGPUPromoteAlloca.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220310101044+47f652d69517/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-15/lib/clang/15.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-15~++20220310101044+47f652d69517/llvm/lib/Target/AMDGPU -I include -I /build/llvm-toolchain-snapshot-15~++20220310101044+47f652d69517/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-15/lib/clang/15.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220310101044+47f652d69517/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220310101044+47f652d69517/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220310101044+47f652d69517/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220310101044+47f652d69517/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220310101044+47f652d69517/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220310101044+47f652d69517/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220310101044+47f652d69517/= -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 
-o /tmp/scan-build-2022-03-11-015528-30204-1 -x c++ /build/llvm-toolchain-snapshot-15~++20220310101044+47f652d69517/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp

/build/llvm-toolchain-snapshot-15~++20220310101044+47f652d69517/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp

1//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass eliminates allocas by either converting them into vectors or
10// by migrating them to local address space.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AMDGPU.h"
15#include "GCNSubtarget.h"
16#include "Utils/AMDGPUBaseInfo.h"
17#include "llvm/Analysis/CaptureTracking.h"
18#include "llvm/Analysis/ValueTracking.h"
19#include "llvm/CodeGen/TargetPassConfig.h"
20#include "llvm/IR/IRBuilder.h"
21#include "llvm/IR/IntrinsicInst.h"
22#include "llvm/IR/IntrinsicsAMDGPU.h"
23#include "llvm/IR/IntrinsicsR600.h"
24#include "llvm/Pass.h"
25#include "llvm/Target/TargetMachine.h"
26
27#define DEBUG_TYPE"amdgpu-promote-alloca" "amdgpu-promote-alloca"
28
29using namespace llvm;
30
31namespace {
32
33static cl::opt<bool> DisablePromoteAllocaToVector(
34 "disable-promote-alloca-to-vector",
35 cl::desc("Disable promote alloca to vector"),
36 cl::init(false));
37
38static cl::opt<bool> DisablePromoteAllocaToLDS(
39 "disable-promote-alloca-to-lds",
40 cl::desc("Disable promote alloca to LDS"),
41 cl::init(false));
42
43static cl::opt<unsigned> PromoteAllocaToVectorLimit(
44 "amdgpu-promote-alloca-to-vector-limit",
45 cl::desc("Maximum byte size to consider promote alloca to vector"),
46 cl::init(0));
47
48// FIXME: This can create globals so should be a module pass.
49class AMDGPUPromoteAlloca : public FunctionPass {
50public:
51 static char ID;
52
53 AMDGPUPromoteAlloca() : FunctionPass(ID) {}
54
55 bool runOnFunction(Function &F) override;
56
57 StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }
58
59 bool handleAlloca(AllocaInst &I, bool SufficientLDS);
60
61 void getAnalysisUsage(AnalysisUsage &AU) const override {
62 AU.setPreservesCFG();
63 FunctionPass::getAnalysisUsage(AU);
64 }
65};
66
67class AMDGPUPromoteAllocaImpl {
68private:
69 const TargetMachine &TM;
70 Module *Mod = nullptr;
71 const DataLayout *DL = nullptr;
72
73 // FIXME: This should be per-kernel.
74 uint32_t LocalMemLimit = 0;
75 uint32_t CurrentLocalMemUsage = 0;
76 unsigned MaxVGPRs;
77
78 bool IsAMDGCN = false;
79 bool IsAMDHSA = false;
80
81 std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
82 Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);
83
84 /// BaseAlloca is the alloca root the search started from.
85 /// Val may be that alloca or a recursive user of it.
86 bool collectUsesWithPtrTypes(Value *BaseAlloca,
87 Value *Val,
88 std::vector<Value*> &WorkList) const;
89
90 /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
91 /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
92 /// Returns true if both operands are derived from the same alloca. Val should
93 /// be the same value as one of the input operands of UseInst.
94 bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
95 Instruction *UseInst,
96 int OpIdx0, int OpIdx1) const;
97
98 /// Check whether we have enough local memory for promotion.
99 bool hasSufficientLocalMem(const Function &F);
100
101 bool handleAlloca(AllocaInst &I, bool SufficientLDS);
102
103public:
104 AMDGPUPromoteAllocaImpl(TargetMachine &TM) : TM(TM) {}
105 bool run(Function &F);
106};
107
108class AMDGPUPromoteAllocaToVector : public FunctionPass {
109public:
110 static char ID;
111
112 AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}
113
114 bool runOnFunction(Function &F) override;
115
116 StringRef getPassName() const override {
117 return "AMDGPU Promote Alloca to vector";
118 }
119
120 void getAnalysisUsage(AnalysisUsage &AU) const override {
121 AU.setPreservesCFG();
122 FunctionPass::getAnalysisUsage(AU);
123 }
124};
125
126} // end anonymous namespace
127
128char AMDGPUPromoteAlloca::ID = 0;
129char AMDGPUPromoteAllocaToVector::ID = 0;
130
131INITIALIZE_PASS_BEGIN(AMDGPUPromoteAlloca, DEBUG_TYPE,
132 "AMDGPU promote alloca to vector or LDS", false, false)
133// Move LDS uses from functions to kernels before promote alloca for accurate
134// estimation of LDS available
135INITIALIZE_PASS_DEPENDENCY(AMDGPULowerModuleLDS)
136INITIALIZE_PASS_END(AMDGPUPromoteAlloca, DEBUG_TYPE,
137 "AMDGPU promote alloca to vector or LDS", false, false)
138
139INITIALIZE_PASS(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
140 "AMDGPU promote alloca to vector", false, false)
141
142char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
143char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;
144
145bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
146 if (skipFunction(F))
147 return false;
148
149 if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
150 return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>()).run(F);
151 }
152 return false;
153}
154
155PreservedAnalyses AMDGPUPromoteAllocaPass::run(Function &F,
156 FunctionAnalysisManager &AM) {
157 bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F);
158 if (Changed) {
159 PreservedAnalyses PA;
160 PA.preserveSet<CFGAnalyses>();
161 return PA;
162 }
163 return PreservedAnalyses::all();
164}
165
166bool AMDGPUPromoteAllocaImpl::run(Function &F) {
167 Mod = F.getParent();
168 DL = &Mod->getDataLayout();
169
170 const Triple &TT = TM.getTargetTriple();
171 IsAMDGCN = TT.getArch() == Triple::amdgcn;
172 IsAMDHSA = TT.getOS() == Triple::AMDHSA;
173
174 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
175 if (!ST.isPromoteAllocaEnabled())
176 return false;
177
178 if (IsAMDGCN) {
179 const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
180 MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
181 // A non-entry function has only 32 caller preserved registers.
182 // Do not promote alloca which will force spilling.
183 if (!AMDGPU::isEntryFunctionCC(F.getCallingConv()))
184 MaxVGPRs = std::min(MaxVGPRs, 32u);
185 } else {
186 MaxVGPRs = 128;
187 }
188
189 bool SufficientLDS = hasSufficientLocalMem(F);
190 bool Changed = false;
191 BasicBlock &EntryBB = *F.begin();
192
193 SmallVector<AllocaInst *, 16> Allocas;
194 for (Instruction &I : EntryBB) {
195 if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
196 Allocas.push_back(AI);
197 }
198
199 for (AllocaInst *AI : Allocas) {
200 if (handleAlloca(*AI, SufficientLDS))
201 Changed = true;
202 }
203
204 return Changed;
205}
206
207std::pair<Value *, Value *>
208AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
209 Function &F = *Builder.GetInsertBlock()->getParent();
210 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
211
212 if (!IsAMDHSA) {
213 Function *LocalSizeYFn
214 = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
215 Function *LocalSizeZFn
216 = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);
217
218 CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
219 CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});
220
221 ST.makeLIDRangeMetadata(LocalSizeY);
222 ST.makeLIDRangeMetadata(LocalSizeZ);
223
224 return std::make_pair(LocalSizeY, LocalSizeZ);
225 }
226
227 // We must read the size out of the dispatch pointer.
228 assert(IsAMDGCN);
229
230 // We are indexing into this struct, and want to extract the workgroup_size_*
231 // fields.
232 //
233 // typedef struct hsa_kernel_dispatch_packet_s {
234 // uint16_t header;
235 // uint16_t setup;
236 // uint16_t workgroup_size_x ;
237 // uint16_t workgroup_size_y;
238 // uint16_t workgroup_size_z;
239 // uint16_t reserved0;
240 // uint32_t grid_size_x ;
241 // uint32_t grid_size_y ;
242 // uint32_t grid_size_z;
243 //
244 // uint32_t private_segment_size;
245 // uint32_t group_segment_size;
246 // uint64_t kernel_object;
247 //
248 // #ifdef HSA_LARGE_MODEL
249 // void *kernarg_address;
250 // #elif defined HSA_LITTLE_ENDIAN
251 // void *kernarg_address;
252 // uint32_t reserved1;
253 // #else
254 // uint32_t reserved1;
255 // void *kernarg_address;
256 // #endif
257 // uint64_t reserved2;
258 // hsa_signal_t completion_signal; // uint64_t wrapper
259 // } hsa_kernel_dispatch_packet_t
260 //
261 Function *DispatchPtrFn
262 = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);
263
264 CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
265 DispatchPtr->addRetAttr(Attribute::NoAlias);
266 DispatchPtr->addRetAttr(Attribute::NonNull);
267 F.removeFnAttr("amdgpu-no-dispatch-ptr");
268
269 // Size of the dispatch packet struct.
270 DispatchPtr->addDereferenceableRetAttr(64);
271
272 Type *I32Ty = Type::getInt32Ty(Mod->getContext());
273 Value *CastDispatchPtr = Builder.CreateBitCast(
274 DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));
275
276 // We could do a single 64-bit load here, but it's likely that the basic
277 // 32-bit and extract sequence is already present, and it is probably easier
278 // to CSE this. The loads should be mergeable later anyway.
279 Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
280 LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));
281
282 Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
283 LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));
284
285 MDNode *MD = MDNode::get(Mod->getContext(), None);
286 LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
287 LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
288 ST.makeLIDRangeMetadata(LoadZU);
289
290 // Extract y component. Upper half of LoadZU should be zero already.
291 Value *Y = Builder.CreateLShr(LoadXY, 16);
292
293 return std::make_pair(Y, LoadZU);
294}
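A brief reading aid for the two loads above, derived from the struct comment (byte offsets assumed from the field order shown there, not restated in the file):

#include <cstdint>

// Hypothetical host-side illustration of the same extraction: the i32 at byte
// offset 4 of the dispatch packet packs workgroup_size_x (low 16 bits) with
// workgroup_size_y (high 16 bits); the i32 at byte offset 8 packs
// workgroup_size_z in its low 16 bits, with the reserved upper half zero.
struct WorkgroupSizeYZ { uint32_t Y, Z; };

WorkgroupSizeYZ extractYZ(const uint32_t *DispatchWords) {
  uint32_t XY = DispatchWords[1]; // workgroup_size_x | (workgroup_size_y << 16)
  uint32_t ZU = DispatchWords[2]; // workgroup_size_z | (reserved0 << 16)
  return {XY >> 16, ZU};          // mirrors CreateLShr(LoadXY, 16) in the IR above
}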
295
296Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,
297 unsigned N) {
298 Function *F = Builder.GetInsertBlock()->getParent();
299 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, *F);
300 Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
301 StringRef AttrName;
302
303 switch (N) {
304 case 0:
305 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
306 : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
307 AttrName = "amdgpu-no-workitem-id-x";
308 break;
309 case 1:
310 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
311 : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
312 AttrName = "amdgpu-no-workitem-id-y";
313 break;
314
315 case 2:
316 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
317 : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
318 AttrName = "amdgpu-no-workitem-id-z";
319 break;
320 default:
321 llvm_unreachable("invalid dimension");
322 }
323
324 Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
325 CallInst *CI = Builder.CreateCall(WorkitemIdFn);
326 ST.makeLIDRangeMetadata(CI);
327 F->removeFnAttr(AttrName);
328
329 return CI;
330}
331
332static FixedVectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
333 return FixedVectorType::get(ArrayTy->getElementType(),
334 ArrayTy->getNumElements());
335}
336
337static Value *stripBitcasts(Value *V) {
338 while (Instruction *I = dyn_cast<Instruction>(V)) {
339 if (I->getOpcode() != Instruction::BitCast)
340 break;
341 V = I->getOperand(0);
342 }
343 return V;
344}
345
346static Value *
347calculateVectorIndex(Value *Ptr,
348 const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
349 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(stripBitcasts(Ptr));
350 if (!GEP)
351 return nullptr;
352
353 auto I = GEPIdx.find(GEP);
354 return I == GEPIdx.end() ? nullptr : I->second;
355}
356
357static Value* GEPToVectorIndex(GetElementPtrInst *GEP) {
358 // FIXME we only support simple cases
359 if (GEP->getNumOperands() != 3)
360 return nullptr;
361
362 ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
363 if (!I0 || !I0->isZero())
364 return nullptr;
365
366 return GEP->getOperand(2);
367}
368
369// Not an instruction handled below to turn into a vector.
370//
371// TODO: Check isTriviallyVectorizable for calls and handle other
372// instructions.
373static bool canVectorizeInst(Instruction *Inst, User *User,
374 const DataLayout &DL) {
375 switch (Inst->getOpcode()) {
  [46] Called C++ object pointer is null
376 case Instruction::Load: {
377 // Currently only handle the case where the Pointer Operand is a GEP.
378 // Also we could not vectorize volatile or atomic loads.
379 LoadInst *LI = cast<LoadInst>(Inst);
380 if (isa<AllocaInst>(User) &&
381 LI->getPointerOperandType() == User->getType() &&
382 isa<VectorType>(LI->getType()))
383 return true;
384
385 Instruction *PtrInst = dyn_cast<Instruction>(LI->getPointerOperand());
386 if (!PtrInst)
387 return false;
388
389 return (PtrInst->getOpcode() == Instruction::GetElementPtr ||
390 PtrInst->getOpcode() == Instruction::BitCast) &&
391 LI->isSimple();
392 }
393 case Instruction::BitCast:
394 return true;
395 case Instruction::Store: {
396 // Must be the stored pointer operand, not a stored value, plus
397 // since it should be canonical form, the User should be a GEP.
398 // Also we could not vectorize volatile or atomic stores.
399 StoreInst *SI = cast<StoreInst>(Inst);
400 if (isa<AllocaInst>(User) &&
401 SI->getPointerOperandType() == User->getType() &&
402 isa<VectorType>(SI->getValueOperand()->getType()))
403 return true;
404
405 Instruction *UserInst = dyn_cast<Instruction>(User);
406 if (!UserInst)
407 return false;
408
409 return (SI->getPointerOperand() == User) &&
410 (UserInst->getOpcode() == Instruction::GetElementPtr ||
411 UserInst->getOpcode() == Instruction::BitCast) &&
412 SI->isSimple();
413 }
414 default:
415 return false;
416 }
417}
418
419static bool tryPromoteAllocaToVector(AllocaInst *Alloca, const DataLayout &DL,
420 unsigned MaxVGPRs) {
421
422 if (DisablePromoteAllocaToVector) {
  [19] Assuming the condition is false
  [20] Taking false branch
423 LLVM_DEBUG(dbgs() << " Promotion alloca to vector is disabled\n");
424 return false;
425 }
426
427 Type *AllocaTy = Alloca->getAllocatedType();
428 auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
  [21] Assuming 'AllocaTy' is not a 'FixedVectorType'
429 if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
  [22] Assuming 'AllocaTy' is a 'ArrayType'
  [22.1] 'ArrayTy' is non-null
430 if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
431 ArrayTy->getNumElements() > 0)
  [23] Assuming the condition is true
  [24] Assuming the condition is true
  [25] Taking true branch
432 VectorTy = arrayTypeToVecType(ArrayTy);
433 }
434
435 // Use up to 1/4 of available register budget for vectorization.
436 unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
437 : (MaxVGPRs * 32);
  [26] Assuming the condition is false
  [27] '?' condition is false
438
439 if (DL.getTypeSizeInBits(AllocaTy) * 4 > Limit) {
  [28] Assuming the condition is false
440 LLVM_DEBUG(dbgs() << " Alloca too big for vectorization with "
441 << MaxVGPRs << " registers available\n");
442 return false;
443 }
444
445 LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");
  [29] Taking false branch
  [30] Assuming 'DebugFlag' is false
446
447 // FIXME: There is no reason why we can't support larger arrays, we
448 // are just being conservative for now.
449 // FIXME: We also reject alloca's of the form [ 2 x [ 2 x i32 ]] or equivalent. Potentially these
450 // could also be promoted but we don't currently handle this case
451 if (!VectorTy || VectorTy->getNumElements() > 16 ||
452 VectorTy->getNumElements() < 2) {
  [31] Assuming 'VectorTy' is non-null
  [32] Assuming the condition is false
  [33] Assuming the condition is false
  [34] Taking false branch
453 LLVM_DEBUG(dbgs() << " Cannot convert type to vector\n");
454 return false;
455 }
456
457 std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
458 std::vector<Value *> WorkList;
459 SmallVector<User *, 8> Users(Alloca->users());
460 SmallVector<User *, 8> UseUsers(Users.size(), Alloca);
461 Type *VecEltTy = VectorTy->getElementType();
462 while (!Users.empty()) {
  [35] Calling 'SmallVectorBase::empty'
  [38] Returning from 'SmallVectorBase::empty'
  [39] Loop condition is true. Entering loop body
463 User *AllocaUser = Users.pop_back_val();
464 User *UseUser = UseUsers.pop_back_val();
465 Instruction *Inst = dyn_cast<Instruction>(AllocaUser);
  [40] Assuming 'AllocaUser' is not a 'Instruction'
  [41] 'Inst' initialized to a null pointer value
466
467 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
  [42] Assuming 'AllocaUser' is not a 'GetElementPtrInst'
468 if (!GEP) {
  [42.1] 'GEP' is null
  [43] Taking true branch
469 if (!canVectorizeInst(Inst, UseUser, DL))
  [44] Passing null pointer value via 1st parameter 'Inst'
  [45] Calling 'canVectorizeInst'
470 return false;
471
472 if (Inst->getOpcode() == Instruction::BitCast) {
473 Type *FromTy = Inst->getOperand(0)->getType()->getPointerElementType();
474 Type *ToTy = Inst->getType()->getPointerElementType();
475 if (FromTy->isAggregateType() || ToTy->isAggregateType() ||
476 DL.getTypeSizeInBits(FromTy) != DL.getTypeSizeInBits(ToTy))
477 continue;
478
479 for (User *CastUser : Inst->users()) {
480 if (isAssumeLikeIntrinsic(cast<Instruction>(CastUser)))
481 continue;
482 Users.push_back(CastUser);
483 UseUsers.push_back(Inst);
484 }
485
486 continue;
487 }
488
489 WorkList.push_back(AllocaUser);
490 continue;
491 }
492
493 Value *Index = GEPToVectorIndex(GEP);
494
495 // If we can't compute a vector index from this GEP, then we can't
496 // promote this alloca to vector.
497 if (!Index) {
498 LLVM_DEBUG(dbgs() << " Cannot compute vector index for GEP " << *GEP
499 << '\n');
500 return false;
501 }
502
503 GEPVectorIdx[GEP] = Index;
504 Users.append(GEP->user_begin(), GEP->user_end());
505 UseUsers.append(GEP->getNumUses(), GEP);
506 }
507
508 LLVM_DEBUG(dbgs() << " Converting alloca to vector " << *AllocaTy << " -> "
509 << *VectorTy << '\n');
510
511 for (Value *V : WorkList) {
512 Instruction *Inst = cast<Instruction>(V);
513 IRBuilder<> Builder(Inst);
514 switch (Inst->getOpcode()) {
515 case Instruction::Load: {
516 if (Inst->getType() == AllocaTy || Inst->getType()->isVectorTy())
517 break;
518
519 Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
520 Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
521 if (!Index)
522 break;
523
524 Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
525 Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
526 Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
527 Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
528 if (Inst->getType() != VecEltTy)
529 ExtractElement = Builder.CreateBitOrPointerCast(ExtractElement, Inst->getType());
530 Inst->replaceAllUsesWith(ExtractElement);
531 Inst->eraseFromParent();
532 break;
533 }
534 case Instruction::Store: {
535 StoreInst *SI = cast<StoreInst>(Inst);
536 if (SI->getValueOperand()->getType() == AllocaTy ||
537 SI->getValueOperand()->getType()->isVectorTy())
538 break;
539
540 Value *Ptr = SI->getPointerOperand();
541 Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
542 if (!Index)
543 break;
544
545 Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
546 Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
547 Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
548 Value *Elt = SI->getValueOperand();
549 if (Elt->getType() != VecEltTy)
550 Elt = Builder.CreateBitOrPointerCast(Elt, VecEltTy);
551 Value *NewVecValue = Builder.CreateInsertElement(VecValue, Elt, Index);
552 Builder.CreateStore(NewVecValue, BitCast);
553 Inst->eraseFromParent();
554 break;
555 }
556
557 default:
558 llvm_unreachable("Inconsistency in instructions promotable to vector");
559 }
560 }
561 return true;
562}
563
564static bool isCallPromotable(CallInst *CI) {
565 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
566 if (!II)
567 return false;
568
569 switch (II->getIntrinsicID()) {
570 case Intrinsic::memcpy:
571 case Intrinsic::memmove:
572 case Intrinsic::memset:
573 case Intrinsic::lifetime_start:
574 case Intrinsic::lifetime_end:
575 case Intrinsic::invariant_start:
576 case Intrinsic::invariant_end:
577 case Intrinsic::launder_invariant_group:
578 case Intrinsic::strip_invariant_group:
579 case Intrinsic::objectsize:
580 return true;
581 default:
582 return false;
583 }
584}
585
586bool AMDGPUPromoteAllocaImpl::binaryOpIsDerivedFromSameAlloca(
587 Value *BaseAlloca, Value *Val, Instruction *Inst, int OpIdx0,
588 int OpIdx1) const {
589 // Figure out which operand is the one we might not be promoting.
590 Value *OtherOp = Inst->getOperand(OpIdx0);
591 if (Val == OtherOp)
592 OtherOp = Inst->getOperand(OpIdx1);
593
594 if (isa<ConstantPointerNull>(OtherOp))
595 return true;
596
597 Value *OtherObj = getUnderlyingObject(OtherOp);
598 if (!isa<AllocaInst>(OtherObj))
599 return false;
600
601 // TODO: We should be able to replace undefs with the right pointer type.
602
603 // TODO: If we know the other base object is another promotable
604 // alloca, not necessarily this alloca, we can do this. The
605 // important part is both must have the same address space at
606 // the end.
607 if (OtherObj != BaseAlloca) {
608 LLVM_DEBUG(
609 dbgs() << "Found a binary instruction with another alloca object\n");
610 return false;
611 }
612
613 return true;
614}
615
616bool AMDGPUPromoteAllocaImpl::collectUsesWithPtrTypes(
617 Value *BaseAlloca, Value *Val, std::vector<Value *> &WorkList) const {
618
619 for (User *User : Val->users()) {
620 if (is_contained(WorkList, User))
621 continue;
622
623 if (CallInst *CI = dyn_cast<CallInst>(User)) {
624 if (!isCallPromotable(CI))
625 return false;
626
627 WorkList.push_back(User);
628 continue;
629 }
630
631 Instruction *UseInst = cast<Instruction>(User);
632 if (UseInst->getOpcode() == Instruction::PtrToInt)
633 return false;
634
635 if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
636 if (LI->isVolatile())
637 return false;
638
639 continue;
640 }
641
642 if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
643 if (SI->isVolatile())
644 return false;
645
646 // Reject if the stored value is not the pointer operand.
647 if (SI->getPointerOperand() != Val)
648 return false;
649 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
650 if (RMW->isVolatile())
651 return false;
652 } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
653 if (CAS->isVolatile())
654 return false;
655 }
656
657 // Only promote a select if we know that the other select operand
658 // is from another pointer that will also be promoted.
659 if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
660 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
661 return false;
662
663 // May need to rewrite constant operands.
664 WorkList.push_back(ICmp);
665 }
666
667 if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
668 // Give up if the pointer may be captured.
669 if (PointerMayBeCaptured(UseInst, true, true))
670 return false;
671 // Don't collect the users of this.
672 WorkList.push_back(User);
673 continue;
674 }
675
676 // Do not promote vector/aggregate type instructions. It is hard to track
677 // their users.
678 if (isa<InsertValueInst>(User) || isa<InsertElementInst>(User))
679 return false;
680
681 if (!User->getType()->isPointerTy())
682 continue;
683
684 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
685 // Be conservative if an address could be computed outside the bounds of
686 // the alloca.
687 if (!GEP->isInBounds())
688 return false;
689 }
690
691 // Only promote a select if we know that the other select operand is from
692 // another pointer that will also be promoted.
693 if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
694 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
695 return false;
696 }
697
698 // Repeat for phis.
699 if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
700 // TODO: Handle more complex cases. We should be able to replace loops
701 // over arrays.
702 switch (Phi->getNumIncomingValues()) {
703 case 1:
704 break;
705 case 2:
706 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
707 return false;
708 break;
709 default:
710 return false;
711 }
712 }
713
714 WorkList.push_back(User);
715 if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
716 return false;
717 }
718
719 return true;
720}
721
722bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {
723
724 FunctionType *FTy = F.getFunctionType();
725 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
726
727 // If the function has any arguments in the local address space, then it's
728 // possible these arguments require the entire local memory space, so
729 // we cannot use local memory in the pass.
730 for (Type *ParamTy : FTy->params()) {
731 PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
732 if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
733 LocalMemLimit = 0;
734 LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
735 "local memory disabled.\n");
736 return false;
737 }
738 }
739
740 LocalMemLimit = ST.getLocalMemorySize();
741 if (LocalMemLimit == 0)
742 return false;
743
744 SmallVector<const Constant *, 16> Stack;
745 SmallPtrSet<const Constant *, 8> VisitedConstants;
746 SmallPtrSet<const GlobalVariable *, 8> UsedLDS;
747
748 auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
749 for (const User *U : Val->users()) {
750 if (const Instruction *Use = dyn_cast<Instruction>(U)) {
751 if (Use->getParent()->getParent() == &F)
752 return true;
753 } else {
754 const Constant *C = cast<Constant>(U);
755 if (VisitedConstants.insert(C).second)
756 Stack.push_back(C);
757 }
758 }
759
760 return false;
761 };
762
763 for (GlobalVariable &GV : Mod->globals()) {
764 if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
765 continue;
766
767 if (visitUsers(&GV, &GV)) {
768 UsedLDS.insert(&GV);
769 Stack.clear();
770 continue;
771 }
772
773 // For any ConstantExpr uses, we need to recursively search the users until
774 // we see a function.
775 while (!Stack.empty()) {
776 const Constant *C = Stack.pop_back_val();
777 if (visitUsers(&GV, C)) {
778 UsedLDS.insert(&GV);
779 Stack.clear();
780 break;
781 }
782 }
783 }
784
785 const DataLayout &DL = Mod->getDataLayout();
786 SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes;
787 AllocatedSizes.reserve(UsedLDS.size());
788
789 for (const GlobalVariable *GV : UsedLDS) {
790 Align Alignment =
791 DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
792 uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());
793
794 // HIP uses an extern unsized array in local address space for dynamically
795 // allocated shared memory. In that case, we have to disable the promotion.
796 if (GV->hasExternalLinkage() && AllocSize == 0) {
797 LocalMemLimit = 0;
798 LLVM_DEBUG(dbgs() << "Function has a reference to externally allocated "
799 "local memory. Promoting to local memory "
800 "disabled.\n");
801 return false;
802 }
803
804 AllocatedSizes.emplace_back(AllocSize, Alignment);
805 }
806
807 // Sort to try to estimate the worst case alignment padding
808 //
809 // FIXME: We should really do something to fix the addresses to a more optimal
810 // value instead
811 llvm::sort(AllocatedSizes, [](std::pair<uint64_t, Align> LHS,
812 std::pair<uint64_t, Align> RHS) {
813 return LHS.second < RHS.second;
814 });
815
816 // Check how much local memory is being used by global objects
817 CurrentLocalMemUsage = 0;
818
819 // FIXME: Try to account for padding here. The real padding and address is
820 // currently determined from the inverse order of uses in the function when
821 // legalizing, which could also potentially change. We try to estimate the
822 // worst case here, but we probably should fix the addresses earlier.
823 for (auto Alloc : AllocatedSizes) {
824 CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second);
825 CurrentLocalMemUsage += Alloc.first;
826 }
827
828 unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
829 F);
830
831 // Restrict local memory usage so that we don't drastically reduce occupancy,
832 // unless it is already significantly reduced.
833
834 // TODO: Have some sort of hint or other heuristics to guess occupancy based
835 // on other factors..
836 unsigned OccupancyHint = ST.getWavesPerEU(F).second;
837 if (OccupancyHint == 0)
838 OccupancyHint = 7;
839
840 // Clamp to max value.
841 OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());
842
843 // Check the hint but ignore it if it's obviously wrong from the existing LDS
844 // usage.
845 MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);
846
847
848 // Round up to the next tier of usage.
849 unsigned MaxSizeWithWaveCount
850 = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);
851
852 // Program is possibly broken by using more local mem than available.
853 if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
854 return false;
855
856 LocalMemLimit = MaxSizeWithWaveCount;
857
858 LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
859 << " bytes of LDS\n"
860 << " Rounding size to " << MaxSizeWithWaveCount
861 << " with a maximum occupancy of " << MaxOccupancy << '\n'
862 << " and " << (LocalMemLimit - CurrentLocalMemUsage)
863 << " available for promotion\n");
864
865 return true;
866}
867
868// FIXME: Should try to pick the most likely to be profitable allocas first.
869bool AMDGPUPromoteAllocaImpl::handleAlloca(AllocaInst &I, bool SufficientLDS) {
870 // Array allocations are probably not worth handling, since an allocation of
871 // the array type is the canonical form.
872 if (!I.isStaticAlloca() || I.isArrayAllocation())
873 return false;
874
875 const DataLayout &DL = Mod->getDataLayout();
876 IRBuilder<> Builder(&I);
877
878 // First try to replace the alloca with a vector
879 Type *AllocaTy = I.getAllocatedType();
880
881 LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');
882
883 if (tryPromoteAllocaToVector(&I, DL, MaxVGPRs))
884 return true; // Promoted to vector.
885
886 if (DisablePromoteAllocaToLDS)
887 return false;
888
889 const Function &ContainingFunction = *I.getParent()->getParent();
890 CallingConv::ID CC = ContainingFunction.getCallingConv();
891
892 // Don't promote the alloca to LDS for shader calling conventions as the work
893 // item ID intrinsics are not supported for these calling conventions.
894 // Furthermore not all LDS is available for some of the stages.
895 switch (CC) {
896 case CallingConv::AMDGPU_KERNEL:
897 case CallingConv::SPIR_KERNEL:
898 break;
899 default:
900 LLVM_DEBUG(
901 dbgs()
902 << " promote alloca to LDS not supported with calling convention.\n");
903 return false;
904 }
905
906 // Not likely to have sufficient local memory for promotion.
907 if (!SufficientLDS)
908 return false;
909
910 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, ContainingFunction);
911 unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;
912
913 Align Alignment =
914 DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());
915
916 // FIXME: This computed padding is likely wrong since it depends on inverse
917 // usage order.
918 //
919 // FIXME: It is also possible that if we're allowed to use all of the memory
920 // could end up using more than the maximum due to alignment padding.
921
922 uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
923 uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
924 NewSize += AllocSize;
925
926 if (NewSize > LocalMemLimit) {
927 LLVM_DEBUG(dbgs() << " " << AllocSize
928 << " bytes of local memory not available to promote\n");
929 return false;
930 }
931
932 CurrentLocalMemUsage = NewSize;
933
934 std::vector<Value*> WorkList;
935
936 if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
937 LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
938 return false;
939 }
940
941 LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");
942
943 Function *F = I.getParent()->getParent();
944
945 Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
946 GlobalVariable *GV = new GlobalVariable(
947 *Mod, GVTy, false, GlobalValue::InternalLinkage,
948 UndefValue::get(GVTy),
949 Twine(F->getName()) + Twine('.') + I.getName(),
950 nullptr,
951 GlobalVariable::NotThreadLocal,
952 AMDGPUAS::LOCAL_ADDRESS);
953 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
954 GV->setAlignment(I.getAlign());
955
956 Value *TCntY, *TCntZ;
957
958 std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
959 Value *TIdX = getWorkitemID(Builder, 0);
960 Value *TIdY = getWorkitemID(Builder, 1);
961 Value *TIdZ = getWorkitemID(Builder, 2);
962
963 Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
964 Tmp0 = Builder.CreateMul(Tmp0, TIdX);
965 Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
966 Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
967 TID = Builder.CreateAdd(TID, TIdZ);
968
969 Value *Indices[] = {
970 Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
971 TID
972 };
973
974 Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
975 I.mutateType(Offset->getType());
976 I.replaceAllUsesWith(Offset);
977 I.eraseFromParent();
978
979 SmallVector<IntrinsicInst *> DeferredIntrs;
980
981 for (Value *V : WorkList) {
982 CallInst *Call = dyn_cast<CallInst>(V);
983 if (!Call) {
984 if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
985 Value *Src0 = CI->getOperand(0);
986 PointerType *NewTy = PointerType::getWithSamePointeeType(
987 cast<PointerType>(Src0->getType()), AMDGPUAS::LOCAL_ADDRESS);
988
989 if (isa<ConstantPointerNull>(CI->getOperand(0)))
990 CI->setOperand(0, ConstantPointerNull::get(NewTy));
991
992 if (isa<ConstantPointerNull>(CI->getOperand(1)))
993 CI->setOperand(1, ConstantPointerNull::get(NewTy));
994
995 continue;
996 }
997
998 // The operand's value should be corrected on its own and we don't want to
999 // touch the users.
1000 if (isa<AddrSpaceCastInst>(V))
1001 continue;
1002
1003 PointerType *NewTy = PointerType::getWithSamePointeeType(
1004 cast<PointerType>(V->getType()), AMDGPUAS::LOCAL_ADDRESS);
1005
1006 // FIXME: It doesn't really make sense to try to do this for all
1007 // instructions.
1008 V->mutateType(NewTy);
1009
1010 // Adjust the types of any constant operands.
1011 if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
1012 if (isa<ConstantPointerNull>(SI->getOperand(1)))
1013 SI->setOperand(1, ConstantPointerNull::get(NewTy));
1014
1015 if (isa<ConstantPointerNull>(SI->getOperand(2)))
1016 SI->setOperand(2, ConstantPointerNull::get(NewTy));
1017 } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
1018 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1019 if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
1020 Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
1021 }
1022 }
1023
1024 continue;
1025 }
1026
1027 IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
1028 Builder.SetInsertPoint(Intr);
1029 switch (Intr->getIntrinsicID()) {
1030 case Intrinsic::lifetime_start:
1031 case Intrinsic::lifetime_end:
1032 // These intrinsics are for address space 0 only
1033 Intr->eraseFromParent();
1034 continue;
1035 case Intrinsic::memcpy:
1036 case Intrinsic::memmove:
1037 // These have 2 pointer operands. In case if second pointer also needs
1038 // to be replaced we defer processing of these intrinsics until all
1039 // other values are processed.
1040 DeferredIntrs.push_back(Intr);
1041 continue;
1042 case Intrinsic::memset: {
1043 MemSetInst *MemSet = cast<MemSetInst>(Intr);
1044 Builder.CreateMemSet(
1045 MemSet->getRawDest(), MemSet->getValue(), MemSet->getLength(),
1046 MaybeAlign(MemSet->getDestAlignment()), MemSet->isVolatile());
1047 Intr->eraseFromParent();
1048 continue;
1049 }
1050 case Intrinsic::invariant_start:
1051 case Intrinsic::invariant_end:
1052 case Intrinsic::launder_invariant_group:
1053 case Intrinsic::strip_invariant_group:
1054 Intr->eraseFromParent();
1055 // FIXME: I think the invariant marker should still theoretically apply,
1056 // but the intrinsics need to be changed to accept pointers with any
1057 // address space.
1058 continue;
1059 case Intrinsic::objectsize: {
1060 Value *Src = Intr->getOperand(0);
1061 Function *ObjectSize = Intrinsic::getDeclaration(
1062 Mod, Intrinsic::objectsize,
1063 {Intr->getType(),
1064 PointerType::getWithSamePointeeType(
1065 cast<PointerType>(Src->getType()), AMDGPUAS::LOCAL_ADDRESS)});
1066
1067 CallInst *NewCall = Builder.CreateCall(
1068 ObjectSize,
1069 {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
1070 Intr->replaceAllUsesWith(NewCall);
1071 Intr->eraseFromParent();
1072 continue;
1073 }
1074 default:
1075 Intr->print(errs());
1076 llvm_unreachable("Don't know how to promote alloca intrinsic use.");
1077 }
1078 }
1079
1080 for (IntrinsicInst *Intr : DeferredIntrs) {
1081 Builder.SetInsertPoint(Intr);
1082 Intrinsic::ID ID = Intr->getIntrinsicID();
1083 assert(ID == Intrinsic::memcpy || ID == Intrinsic::memmove);
1084
1085 MemTransferInst *MI = cast<MemTransferInst>(Intr);
1086 auto *B =
1087 Builder.CreateMemTransferInst(ID, MI->getRawDest(), MI->getDestAlign(),
1088 MI->getRawSource(), MI->getSourceAlign(),
1089 MI->getLength(), MI->isVolatile());
1090
1091 for (unsigned I = 0; I != 2; ++I) {
1092 if (uint64_t Bytes = Intr->getParamDereferenceableBytes(I)) {
1093 B->addDereferenceableParamAttr(I, Bytes);
1094 }
1095 }
1096
1097 Intr->eraseFromParent();
1098 }
1099
1100 return true;
1101}
1102
1103bool handlePromoteAllocaToVector(AllocaInst &I, unsigned MaxVGPRs) {
1104 // Array allocations are probably not worth handling, since an allocation of
1105 // the array type is the canonical form.
1106 if (!I.isStaticAlloca() || I.isArrayAllocation())
  [13] Assuming the condition is false
  [14] Assuming the condition is false
1107 return false;
1108
1109 LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');
  [15] Taking false branch
  [16] Assuming 'DebugFlag' is false
  [17] Loop condition is false. Exiting loop
1110
1111 Module *Mod = I.getParent()->getParent()->getParent();
1112 return tryPromoteAllocaToVector(&I, Mod->getDataLayout(), MaxVGPRs);
  [18] Calling 'tryPromoteAllocaToVector'
1113}
1114
1115bool promoteAllocasToVector(Function &F, TargetMachine &TM) {
1116 if (DisablePromoteAllocaToVector)
  [2] Assuming the condition is false
  [3] Taking false branch
1117 return false;
1118
1119 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
1120 if (!ST.isPromoteAllocaEnabled())
  [4] Assuming the condition is false
  [5] Taking false branch
1121 return false;
1122
1123 unsigned MaxVGPRs;
1124 if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
  [6] Assuming the condition is false
  [7] Taking false branch
1125 const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
1126 MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
1127 // A non-entry function has only 32 caller preserved registers.
1128 // Do not promote alloca which will force spilling.
1129 if (!AMDGPU::isEntryFunctionCC(F.getCallingConv()))
1130 MaxVGPRs = std::min(MaxVGPRs, 32u);
1131 } else {
1132 MaxVGPRs = 128;
1133 }
1134
1135 bool Changed = false;
1136 BasicBlock &EntryBB = *F.begin();
1137
1138 SmallVector<AllocaInst *, 16> Allocas;
1139 for (Instruction &I : EntryBB) {
1140 if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
1141 Allocas.push_back(AI);
1142 }
1143
1144 for (AllocaInst *AI : Allocas) {
  [8] Assuming '__begin1' is not equal to '__end1'
1145 if (handlePromoteAllocaToVector(*AI, MaxVGPRs))
  [9] Taking false branch
  [10] Taking false branch
  [11] Taking false branch
  [12] Calling 'handlePromoteAllocaToVector'
1146 Changed = true;
1147 }
1148
1149 return Changed;
1150}
1151
1152bool AMDGPUPromoteAllocaToVector::runOnFunction(Function &F) {
1153 if (skipFunction(F))
1154 return false;
1155 if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
1156 return promoteAllocasToVector(F, TPC->getTM<TargetMachine>());
1157 }
1158 return false;
1159}
1160
1161PreservedAnalyses
1162AMDGPUPromoteAllocaToVectorPass::run(Function &F, FunctionAnalysisManager &AM) {
1163 bool Changed = promoteAllocasToVector(F, TM);
  [1] Calling 'promoteAllocasToVector'
1164 if (Changed) {
1165 PreservedAnalyses PA;
1166 PA.preserveSet<CFGAnalyses>();
1167 return PA;
1168 }
1169 return PreservedAnalyses::all();
1170}
1171
1172FunctionPass *llvm::createAMDGPUPromoteAlloca() {
1173 return new AMDGPUPromoteAlloca();
1174}
1175
1176FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
1177 return new AMDGPUPromoteAllocaToVector();
1178}
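The path above ([40]-[46]) ends with canVectorizeInst dereferencing a null Inst at line 375: when AllocaUser is neither an Instruction nor a GetElementPtrInst, the loop in tryPromoteAllocaToVector still calls canVectorizeInst with the null result of dyn_cast. Below is a minimal, hypothetical guard illustrating one way to avoid that dereference; the function name and structure are illustrative only, not the upstream fix.

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch of a guarded classification of an alloca user: reject users that are
// not Instructions before any opcode-based dispatch, so a null pointer never
// reaches getOpcode().
static bool classifyAllocaUser(User *AllocaUser) {
  if (isa<GetElementPtrInst>(AllocaUser))
    return true; // GEPs are handled separately (GEPToVectorIndex) in the pass
  Instruction *Inst = dyn_cast<Instruction>(AllocaUser);
  if (!Inst)
    return false; // the analyzer's path: a null Inst must not be dispatched on
  switch (Inst->getOpcode()) { // safe here: Inst is known to be non-null
  case Instruction::Load:
  case Instruction::Store:
  case Instruction::BitCast:
    return true;
  default:
    return false;
  }
}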

/build/llvm-toolchain-snapshot-15~++20220310101044+47f652d69517/llvm/include/llvm/ADT/SmallVector.h

1//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the SmallVector class.
11///
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ADT_SMALLVECTOR_H
15#define LLVM_ADT_SMALLVECTOR_H
16
17#include "llvm/Support/Compiler.h"
18#include "llvm/Support/type_traits.h"
19#include <algorithm>
20#include <cassert>
21#include <cstddef>
22#include <cstdlib>
23#include <cstring>
24#include <functional>
25#include <initializer_list>
26#include <iterator>
27#include <limits>
28#include <memory>
29#include <new>
30#include <type_traits>
31#include <utility>
32
33namespace llvm {
34
35template <typename IteratorT> class iterator_range;
36
37/// This is all the stuff common to all SmallVectors.
38///
39/// The template parameter specifies the type which should be used to hold the
40/// Size and Capacity of the SmallVector, so it can be adjusted.
41/// Using 32 bit size is desirable to shrink the size of the SmallVector.
42/// Using 64 bit size is desirable for cases like SmallVector<char>, where a
43/// 32 bit size would limit the vector to ~4GB. SmallVectors are used for
44/// buffering bitcode output - which can exceed 4GB.
45template <class Size_T> class SmallVectorBase {
46protected:
47 void *BeginX;
48 Size_T Size = 0, Capacity;
49
50 /// The maximum value of the Size_T used.
51 static constexpr size_t SizeTypeMax() {
52 return std::numeric_limits<Size_T>::max();
53 }
54
55 SmallVectorBase() = delete;
56 SmallVectorBase(void *FirstEl, size_t TotalCapacity)
57 : BeginX(FirstEl), Capacity(TotalCapacity) {}
58
59 /// This is a helper for \a grow() that's out of line to reduce code
60 /// duplication. This function will report a fatal error if it can't grow at
61 /// least to \p MinSize.
62 void *mallocForGrow(size_t MinSize, size_t TSize, size_t &NewCapacity);
63
64 /// This is an implementation of the grow() method which only works
65 /// on POD-like data types and is out of line to reduce code duplication.
66 /// This function will report a fatal error if it cannot increase capacity.
67 void grow_pod(void *FirstEl, size_t MinSize, size_t TSize);
68
69public:
70 size_t size() const { return Size; }
71 size_t capacity() const { return Capacity; }
72
73 LLVM_NODISCARD bool empty() const { return !Size; }
36
Assuming field 'Size' is not equal to 0
37
Returning zero, which participates in a condition later
74
75protected:
76 /// Set the array size to \p N, which the current array must have enough
77 /// capacity for.
78 ///
79 /// This does not construct or destroy any elements in the vector.
80 void set_size(size_t N) {
81 assert(N <= capacity());
82 Size = N;
83 }
84};
85
86template <class T>
87using SmallVectorSizeType =
88 typename std::conditional<sizeof(T) < 4 && sizeof(void *) >= 8, uint64_t,
89 uint32_t>::type;
90
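The SmallVectorSizeType alias above is what makes the size-type comment on SmallVectorBase concrete: byte-sized element types get a 64-bit Size/Capacity, everything else stays at 32 bits. A minimal sketch of the resulting types, assuming a 64-bit host (sizeof(void *) == 8); these static_asserts are illustrative and not part of the header:

    static_assert(std::is_same<llvm::SmallVectorSizeType<char>, uint64_t>::value,
                  "sizeof(char) < 4, so a 64-bit size avoids the ~4GB cap");
    static_assert(std::is_same<llvm::SmallVectorSizeType<int>, uint32_t>::value,
                  "sizeof(int) >= 4, so a 32-bit size keeps the header small");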
91/// Figure out the offset of the first element.
92template <class T, typename = void> struct SmallVectorAlignmentAndSize {
93 alignas(SmallVectorBase<SmallVectorSizeType<T>>) char Base[sizeof(
94 SmallVectorBase<SmallVectorSizeType<T>>)];
95 alignas(T) char FirstEl[sizeof(T)];
96};
97
98/// This is the part of SmallVectorTemplateBase which does not depend on whether
99/// the type T is a POD. The extra dummy template argument is used by ArrayRef
100/// to avoid unnecessarily requiring T to be complete.
101template <typename T, typename = void>
102class SmallVectorTemplateCommon
103 : public SmallVectorBase<SmallVectorSizeType<T>> {
104 using Base = SmallVectorBase<SmallVectorSizeType<T>>;
105
106 /// Find the address of the first element. For this pointer math to be valid
107 /// with small-size of 0 for T with lots of alignment, it's important that
108 /// SmallVectorStorage is properly-aligned even for small-size of 0.
109 void *getFirstEl() const {
110 return const_cast<void *>(reinterpret_cast<const void *>(
111 reinterpret_cast<const char *>(this) +
112 offsetof(SmallVectorAlignmentAndSize<T>, FirstEl)));
113 }
114 // Space after 'FirstEl' is clobbered, do not add any instance vars after it.
115
116protected:
117 SmallVectorTemplateCommon(size_t Size) : Base(getFirstEl(), Size) {}
118
119 void grow_pod(size_t MinSize, size_t TSize) {
120 Base::grow_pod(getFirstEl(), MinSize, TSize);
121 }
122
123 /// Return true if this is a smallvector which has not had dynamic
124 /// memory allocated for it.
125 bool isSmall() const { return this->BeginX == getFirstEl(); }
126
127 /// Put this vector in a state of being small.
128 void resetToSmall() {
129 this->BeginX = getFirstEl();
130 this->Size = this->Capacity = 0; // FIXME: Setting Capacity to 0 is suspect.
131 }
132
133 /// Return true if V is an internal reference to the given range.
134 bool isReferenceToRange(const void *V, const void *First, const void *Last) const {
135 // Use std::less to avoid UB.
136 std::less<> LessThan;
137 return !LessThan(V, First) && LessThan(V, Last);
138 }
139
140 /// Return true if V is an internal reference to this vector.
141 bool isReferenceToStorage(const void *V) const {
142 return isReferenceToRange(V, this->begin(), this->end());
143 }
144
145 /// Return true if First and Last form a valid (possibly empty) range in this
146 /// vector's storage.
147 bool isRangeInStorage(const void *First, const void *Last) const {
148 // Use std::less to avoid UB.
149 std::less<> LessThan;
150 return !LessThan(First, this->begin()) && !LessThan(Last, First) &&
151 !LessThan(this->end(), Last);
152 }
153
154 /// Return true unless Elt will be invalidated by resizing the vector to
155 /// NewSize.
156 bool isSafeToReferenceAfterResize(const void *Elt, size_t NewSize) {
157 // Past the end.
158 if (LLVM_LIKELY(!isReferenceToStorage(Elt)))
159 return true;
160
161 // Return false if Elt will be destroyed by shrinking.
162 if (NewSize <= this->size())
163 return Elt < this->begin() + NewSize;
164
165 // Return false if we need to grow.
166 return NewSize <= this->capacity();
167 }
168
169 /// Check whether Elt will be invalidated by resizing the vector to NewSize.
170 void assertSafeToReferenceAfterResize(const void *Elt, size_t NewSize) {
171 assert(isSafeToReferenceAfterResize(Elt, NewSize) &&
172 "Attempting to reference an element of the vector in an operation "
173 "that invalidates it");
174 }
175
176 /// Check whether Elt will be invalidated by increasing the size of the
177 /// vector by N.
178 void assertSafeToAdd(const void *Elt, size_t N = 1) {
179 this->assertSafeToReferenceAfterResize(Elt, this->size() + N);
180 }
181
182 /// Check whether any part of the range will be invalidated by clearing.
183 void assertSafeToReferenceAfterClear(const T *From, const T *To) {
184 if (From == To)
185 return;
186 this->assertSafeToReferenceAfterResize(From, 0);
187 this->assertSafeToReferenceAfterResize(To - 1, 0);
188 }
189 template <
190 class ItTy,
191 std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value,
192 bool> = false>
193 void assertSafeToReferenceAfterClear(ItTy, ItTy) {}
194
195 /// Check whether any part of the range will be invalidated by growing.
196 void assertSafeToAddRange(const T *From, const T *To) {
197 if (From == To)
198 return;
199 this->assertSafeToAdd(From, To - From);
200 this->assertSafeToAdd(To - 1, To - From);
201 }
202 template <
203 class ItTy,
204 std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value,
205 bool> = false>
206 void assertSafeToAddRange(ItTy, ItTy) {}
207
208 /// Reserve enough space to add one element, and return the updated element
209 /// pointer in case it was a reference to the storage.
210 template <class U>
211 static const T *reserveForParamAndGetAddressImpl(U *This, const T &Elt,
212 size_t N) {
213 size_t NewSize = This->size() + N;
214 if (LLVM_LIKELY(NewSize <= This->capacity()))
215 return &Elt;
216
217 bool ReferencesStorage = false;
218 int64_t Index = -1;
219 if (!U::TakesParamByValue) {
220 if (LLVM_UNLIKELY(This->isReferenceToStorage(&Elt))) {
221 ReferencesStorage = true;
222 Index = &Elt - This->begin();
223 }
224 }
225 This->grow(NewSize);
226 return ReferencesStorage ? This->begin() + Index : &Elt;
227 }
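reserveForParamAndGetAddressImpl is the reference-invalidation guard used by the push_back and insert paths: if the element being added lives inside the vector's own storage and a grow is required, the returned pointer is recomputed against the new buffer. A minimal usage sketch of the behaviour this enables (illustrative, not taken from the report):

    llvm::SmallVector<int, 2> V = {1, 2};
    // The inline buffer is full, so this push_back reallocates; the internal
    // reference to V[0] is re-resolved (or copied by value) before the grow.
    V.push_back(V[0]);   // well-defined; V == {1, 2, 1}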
228
229public:
230 using size_type = size_t;
231 using difference_type = ptrdiff_t;
232 using value_type = T;
233 using iterator = T *;
234 using const_iterator = const T *;
235
236 using const_reverse_iterator = std::reverse_iterator<const_iterator>;
237 using reverse_iterator = std::reverse_iterator<iterator>;
238
239 using reference = T &;
240 using const_reference = const T &;
241 using pointer = T *;
242 using const_pointer = const T *;
243
244 using Base::capacity;
245 using Base::empty;
246 using Base::size;
247
248 // forward iterator creation methods.
249 iterator begin() { return (iterator)this->BeginX; }
250 const_iterator begin() const { return (const_iterator)this->BeginX; }
251 iterator end() { return begin() + size(); }
252 const_iterator end() const { return begin() + size(); }
253
254 // reverse iterator creation methods.
255 reverse_iterator rbegin() { return reverse_iterator(end()); }
256 const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
257 reverse_iterator rend() { return reverse_iterator(begin()); }
258 const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
259
260 size_type size_in_bytes() const { return size() * sizeof(T); }
261 size_type max_size() const {
262 return std::min(this->SizeTypeMax(), size_type(-1) / sizeof(T));
263 }
264
265 size_t capacity_in_bytes() const { return capacity() * sizeof(T); }
266
267 /// Return a pointer to the vector's buffer, even if empty().
268 pointer data() { return pointer(begin()); }
269 /// Return a pointer to the vector's buffer, even if empty().
270 const_pointer data() const { return const_pointer(begin()); }
271
272 reference operator[](size_type idx) {
273 assert(idx < size());
274 return begin()[idx];
275 }
276 const_reference operator[](size_type idx) const {
277 assert(idx < size());
278 return begin()[idx];
279 }
280
281 reference front() {
282 assert(!empty());
283 return begin()[0];
284 }
285 const_reference front() const {
286 assert(!empty());
287 return begin()[0];
288 }
289
290 reference back() {
291 assert(!empty());
292 return end()[-1];
293 }
294 const_reference back() const {
295 assert(!empty());
296 return end()[-1];
297 }
298};
299
300/// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put
301/// method implementations that are designed to work with non-trivial T's.
302///
303/// We approximate is_trivially_copyable with trivial move/copy construction and
304/// trivial destruction. While the standard doesn't specify that you're allowed
305/// to copy these types with memcpy, there is no way for the type to observe this.
306/// This catches the important case of std::pair<POD, POD>, which is not
307/// trivially assignable.
308template <typename T, bool = (is_trivially_copy_constructible<T>::value) &&
309 (is_trivially_move_constructible<T>::value) &&
310 std::is_trivially_destructible<T>::value>
311class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
312 friend class SmallVectorTemplateCommon<T>;
313
314protected:
315 static constexpr bool TakesParamByValue = false;
316 using ValueParamT = const T &;
317
318 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
319
320 static void destroy_range(T *S, T *E) {
321 while (S != E) {
322 --E;
323 E->~T();
324 }
325 }
326
327 /// Move the range [I, E) into the uninitialized memory starting with "Dest",
328 /// constructing elements as needed.
329 template<typename It1, typename It2>
330 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
331 std::uninitialized_copy(std::make_move_iterator(I),
332 std::make_move_iterator(E), Dest);
333 }
334
335 /// Copy the range [I, E) onto the uninitialized memory starting with "Dest",
336 /// constructing elements as needed.
337 template<typename It1, typename It2>
338 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
339 std::uninitialized_copy(I, E, Dest);
340 }
341
342 /// Grow the allocated memory (without initializing new elements), doubling
343 /// the size of the allocated memory. Guarantees space for at least one more
344 /// element, or MinSize more elements if specified.
345 void grow(size_t MinSize = 0);
346
347 /// Create a new allocation big enough for \p MinSize and pass back its size
348 /// in \p NewCapacity. This is the first section of \a grow().
349 T *mallocForGrow(size_t MinSize, size_t &NewCapacity) {
350 return static_cast<T *>(
351 SmallVectorBase<SmallVectorSizeType<T>>::mallocForGrow(
352 MinSize, sizeof(T), NewCapacity));
353 }
354
355 /// Move existing elements over to the new allocation \p NewElts, the middle
356 /// section of \a grow().
357 void moveElementsForGrow(T *NewElts);
358
359 /// Transfer ownership of the allocation, finishing up \a grow().
360 void takeAllocationForGrow(T *NewElts, size_t NewCapacity);
361
362 /// Reserve enough space to add one element, and return the updated element
363 /// pointer in case it was a reference to the storage.
364 const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) {
365 return this->reserveForParamAndGetAddressImpl(this, Elt, N);
366 }
367
368 /// Reserve enough space to add one element, and return the updated element
369 /// pointer in case it was a reference to the storage.
370 T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) {
371 return const_cast<T *>(
372 this->reserveForParamAndGetAddressImpl(this, Elt, N));
373 }
374
375 static T &&forward_value_param(T &&V) { return std::move(V); }
376 static const T &forward_value_param(const T &V) { return V; }
377
378 void growAndAssign(size_t NumElts, const T &Elt) {
379 // Grow manually in case Elt is an internal reference.
380 size_t NewCapacity;
381 T *NewElts = mallocForGrow(NumElts, NewCapacity);
382 std::uninitialized_fill_n(NewElts, NumElts, Elt);
383 this->destroy_range(this->begin(), this->end());
384 takeAllocationForGrow(NewElts, NewCapacity);
385 this->set_size(NumElts);
386 }
387
388 template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) {
389 // Grow manually in case one of Args is an internal reference.
390 size_t NewCapacity;
391 T *NewElts = mallocForGrow(0, NewCapacity);
392 ::new ((void *)(NewElts + this->size())) T(std::forward<ArgTypes>(Args)...);
393 moveElementsForGrow(NewElts);
394 takeAllocationForGrow(NewElts, NewCapacity);
395 this->set_size(this->size() + 1);
396 return this->back();
397 }
398
399public:
400 void push_back(const T &Elt) {
401 const T *EltPtr = reserveForParamAndGetAddress(Elt);
402 ::new ((void *)this->end()) T(*EltPtr);
403 this->set_size(this->size() + 1);
404 }
405
406 void push_back(T &&Elt) {
407 T *EltPtr = reserveForParamAndGetAddress(Elt);
408 ::new ((void *)this->end()) T(::std::move(*EltPtr));
409 this->set_size(this->size() + 1);
410 }
411
412 void pop_back() {
413 this->set_size(this->size() - 1);
414 this->end()->~T();
415 }
416};
417
418// Define this out-of-line to dissuade the C++ compiler from inlining it.
419template <typename T, bool TriviallyCopyable>
420void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) {
421 size_t NewCapacity;
422 T *NewElts = mallocForGrow(MinSize, NewCapacity);
423 moveElementsForGrow(NewElts);
424 takeAllocationForGrow(NewElts, NewCapacity);
425}
426
427// Define this out-of-line to dissuade the C++ compiler from inlining it.
428template <typename T, bool TriviallyCopyable>
429void SmallVectorTemplateBase<T, TriviallyCopyable>::moveElementsForGrow(
430 T *NewElts) {
431 // Move the elements over.
432 this->uninitialized_move(this->begin(), this->end(), NewElts);
433
434 // Destroy the original elements.
435 destroy_range(this->begin(), this->end());
436}
437
438// Define this out-of-line to dissuade the C++ compiler from inlining it.
439template <typename T, bool TriviallyCopyable>
440void SmallVectorTemplateBase<T, TriviallyCopyable>::takeAllocationForGrow(
441 T *NewElts, size_t NewCapacity) {
442 // If this wasn't grown from the inline copy, deallocate the old space.
443 if (!this->isSmall())
444 free(this->begin());
445
446 this->BeginX = NewElts;
447 this->Capacity = NewCapacity;
448}
449
450/// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put
451/// method implementations that are designed to work with trivially copyable
452/// T's. This allows using memcpy in place of copy/move construction and
453/// skipping destruction.
454template <typename T>
455class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
456 friend class SmallVectorTemplateCommon<T>;
457
458protected:
459 /// True if it's cheap enough to take parameters by value. Doing so avoids
460 /// overhead related to mitigations for reference invalidation.
461 static constexpr bool TakesParamByValue = sizeof(T) <= 2 * sizeof(void *);
462
463 /// Either const T& or T, depending on whether it's cheap enough to take
464 /// parameters by value.
465 using ValueParamT =
466 typename std::conditional<TakesParamByValue, T, const T &>::type;
467
468 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
469
470 // No need to do a destroy loop for POD's.
471 static void destroy_range(T *, T *) {}
472
473 /// Move the range [I, E) onto the uninitialized memory
474 /// starting with "Dest", constructing elements into it as needed.
475 template<typename It1, typename It2>
476 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
477 // Just do a copy.
478 uninitialized_copy(I, E, Dest);
479 }
480
481 /// Copy the range [I, E) onto the uninitialized memory
482 /// starting with "Dest", constructing elements into it as needed.
483 template<typename It1, typename It2>
484 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
485 // Arbitrary iterator types; just use the basic implementation.
486 std::uninitialized_copy(I, E, Dest);
487 }
488
489 /// Copy the range [I, E) onto the uninitialized memory
490 /// starting with "Dest", constructing elements into it as needed.
491 template <typename T1, typename T2>
492 static void uninitialized_copy(
493 T1 *I, T1 *E, T2 *Dest,
494 std::enable_if_t<std::is_same<typename std::remove_const<T1>::type,
495 T2>::value> * = nullptr) {
496 // Use memcpy for PODs iterated by pointers (which includes SmallVector
497 // iterators): std::uninitialized_copy optimizes to memmove, but we can
498 // use memcpy here. Note that I and E are iterators and thus might be
499 // invalid for memcpy if they are equal.
500 if (I != E)
501 memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T));
502 }
503
504 /// Double the size of the allocated memory, guaranteeing space for at
505 /// least one more element or MinSize if specified.
506 void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); }
507
508 /// Reserve enough space to add one element, and return the updated element
509 /// pointer in case it was a reference to the storage.
510 const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) {
511 return this->reserveForParamAndGetAddressImpl(this, Elt, N);
512 }
513
514 /// Reserve enough space to add one element, and return the updated element
515 /// pointer in case it was a reference to the storage.
516 T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) {
517 return const_cast<T *>(
518 this->reserveForParamAndGetAddressImpl(this, Elt, N));
519 }
520
521 /// Copy \p V or return a reference, depending on \a ValueParamT.
522 static ValueParamT forward_value_param(ValueParamT V) { return V; }
523
524 void growAndAssign(size_t NumElts, T Elt) {
525 // Elt has been copied in case it's an internal reference, side-stepping
526 // reference invalidation problems without losing the realloc optimization.
527 this->set_size(0);
528 this->grow(NumElts);
529 std::uninitialized_fill_n(this->begin(), NumElts, Elt);
530 this->set_size(NumElts);
531 }
532
533 template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) {
534 // Use push_back with a copy in case Args has an internal reference,
535 // side-stepping reference invalidation problems without losing the realloc
536 // optimization.
537 push_back(T(std::forward<ArgTypes>(Args)...));
538 return this->back();
539 }
540
541public:
542 void push_back(ValueParamT Elt) {
543 const T *EltPtr = reserveForParamAndGetAddress(Elt);
544 memcpy(reinterpret_cast<void *>(this->end()), EltPtr, sizeof(T));
545 this->set_size(this->size() + 1);
546 }
547
548 void pop_back() { this->set_size(this->size() - 1); }
549};
550
551/// This class consists of common code factored out of the SmallVector class to
552/// reduce code duplication based on the SmallVector 'N' template parameter.
553template <typename T>
554class SmallVectorImpl : public SmallVectorTemplateBase<T> {
555 using SuperClass = SmallVectorTemplateBase<T>;
556
557public:
558 using iterator = typename SuperClass::iterator;
559 using const_iterator = typename SuperClass::const_iterator;
560 using reference = typename SuperClass::reference;
561 using size_type = typename SuperClass::size_type;
562
563protected:
564 using SmallVectorTemplateBase<T>::TakesParamByValue;
565 using ValueParamT = typename SuperClass::ValueParamT;
566
567 // Default ctor - Initialize to empty.
568 explicit SmallVectorImpl(unsigned N)
569 : SmallVectorTemplateBase<T>(N) {}
570
571 void assignRemote(SmallVectorImpl &&RHS) {
572 this->destroy_range(this->begin(), this->end());
573 if (!this->isSmall())
574 free(this->begin());
575 this->BeginX = RHS.BeginX;
576 this->Size = RHS.Size;
577 this->Capacity = RHS.Capacity;
578 RHS.resetToSmall();
579 }
580
581public:
582 SmallVectorImpl(const SmallVectorImpl &) = delete;
583
584 ~SmallVectorImpl() {
585 // Subclass has already destructed this vector's elements.
586 // If this wasn't grown from the inline copy, deallocate the old space.
587 if (!this->isSmall())
588 free(this->begin());
589 }
590
591 void clear() {
592 this->destroy_range(this->begin(), this->end());
593 this->Size = 0;
594 }
595
596private:
597 // Make set_size() private to avoid misuse in subclasses.
598 using SuperClass::set_size;
599
600 template <bool ForOverwrite> void resizeImpl(size_type N) {
601 if (N == this->size())
602 return;
603
604 if (N < this->size()) {
605 this->truncate(N);
606 return;
607 }
608
609 this->reserve(N);
610 for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
611 if (ForOverwrite)
612 new (&*I) T;
613 else
614 new (&*I) T();
615 this->set_size(N);
616 }
617
618public:
619 void resize(size_type N) { resizeImpl<false>(N); }
620
621 /// Like resize, but \ref T is POD, the new values won't be initialized.
622 void resize_for_overwrite(size_type N) { resizeImpl<true>(N); }
623
624 /// Like resize, but requires that \p N is less than \a size().
625 void truncate(size_type N) {
626 assert(this->size() >= N && "Cannot increase size with truncate");
627 this->destroy_range(this->begin() + N, this->end());
628 this->set_size(N);
629 }
630
631 void resize(size_type N, ValueParamT NV) {
632 if (N == this->size())
633 return;
634
635 if (N < this->size()) {
636 this->truncate(N);
637 return;
638 }
639
640 // N > this->size(). Defer to append.
641 this->append(N - this->size(), NV);
642 }
643
644 void reserve(size_type N) {
645 if (this->capacity() < N)
646 this->grow(N);
647 }
648
649 void pop_back_n(size_type NumItems) {
650 assert(this->size() >= NumItems);
651 truncate(this->size() - NumItems);
652 }
653
654 LLVM_NODISCARD T pop_back_val() {
655 T Result = ::std::move(this->back());
656 this->pop_back();
657 return Result;
658 }
659
660 void swap(SmallVectorImpl &RHS);
661
662 /// Add the specified range to the end of the SmallVector.
663 template <typename in_iter,
664 typename = std::enable_if_t<std::is_convertible<
665 typename std::iterator_traits<in_iter>::iterator_category,
666 std::input_iterator_tag>::value>>
667 void append(in_iter in_start, in_iter in_end) {
668 this->assertSafeToAddRange(in_start, in_end);
669 size_type NumInputs = std::distance(in_start, in_end);
670 this->reserve(this->size() + NumInputs);
671 this->uninitialized_copy(in_start, in_end, this->end());
672 this->set_size(this->size() + NumInputs);
673 }
674
675 /// Append \p NumInputs copies of \p Elt to the end.
676 void append(size_type NumInputs, ValueParamT Elt) {
677 const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumInputs);
678 std::uninitialized_fill_n(this->end(), NumInputs, *EltPtr);
679 this->set_size(this->size() + NumInputs);
680 }
681
682 void append(std::initializer_list<T> IL) {
683 append(IL.begin(), IL.end());
684 }
685
686 void append(const SmallVectorImpl &RHS) { append(RHS.begin(), RHS.end()); }
687
688 void assign(size_type NumElts, ValueParamT Elt) {
689 // Note that Elt could be an internal reference.
690 if (NumElts > this->capacity()) {
691 this->growAndAssign(NumElts, Elt);
692 return;
693 }
694
695 // Assign over existing elements.
696 std::fill_n(this->begin(), std::min(NumElts, this->size()), Elt);
697 if (NumElts > this->size())
698 std::uninitialized_fill_n(this->end(), NumElts - this->size(), Elt);
699 else if (NumElts < this->size())
700 this->destroy_range(this->begin() + NumElts, this->end());
701 this->set_size(NumElts);
702 }
703
704 // FIXME: Consider assigning over existing elements, rather than clearing &
705 // re-initializing them - for all assign(...) variants.
706
707 template <typename in_iter,
708 typename = std::enable_if_t<std::is_convertible<
709 typename std::iterator_traits<in_iter>::iterator_category,
710 std::input_iterator_tag>::value>>
711 void assign(in_iter in_start, in_iter in_end) {
712 this->assertSafeToReferenceAfterClear(in_start, in_end);
713 clear();
714 append(in_start, in_end);
715 }
716
717 void assign(std::initializer_list<T> IL) {
718 clear();
719 append(IL);
720 }
721
722 void assign(const SmallVectorImpl &RHS) { assign(RHS.begin(), RHS.end()); }
723
724 iterator erase(const_iterator CI) {
725 // Just cast away constness because this is a non-const member function.
726 iterator I = const_cast<iterator>(CI);
727
728 assert(this->isReferenceToStorage(CI) && "Iterator to erase is out of bounds.");
729
730 iterator N = I;
731 // Shift all elts down one.
732 std::move(I+1, this->end(), I);
733 // Drop the last elt.
734 this->pop_back();
735 return(N);
736 }
737
738 iterator erase(const_iterator CS, const_iterator CE) {
739 // Just cast away constness because this is a non-const member function.
740 iterator S = const_cast<iterator>(CS);
741 iterator E = const_cast<iterator>(CE);
742
743 assert(this->isRangeInStorage(S, E) && "Range to erase is out of bounds.");
744
745 iterator N = S;
746 // Shift all elts down.
747 iterator I = std::move(E, this->end(), S);
748 // Drop the last elts.
749 this->destroy_range(I, this->end());
750 this->set_size(I - this->begin());
751 return(N);
752 }
753
754private:
755 template <class ArgType> iterator insert_one_impl(iterator I, ArgType &&Elt) {
756 // Callers ensure that ArgType is derived from T.
757 static_assert(
758 std::is_same<std::remove_const_t<std::remove_reference_t<ArgType>>,
759 T>::value,
760 "ArgType must be derived from T!");
761
762 if (I == this->end()) { // Important special case for empty vector.
763 this->push_back(::std::forward<ArgType>(Elt));
764 return this->end()-1;
765 }
766
767 assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
768
769 // Grow if necessary.
770 size_t Index = I - this->begin();
771 std::remove_reference_t<ArgType> *EltPtr =
772 this->reserveForParamAndGetAddress(Elt);
773 I = this->begin() + Index;
774
775 ::new ((void*) this->end()) T(::std::move(this->back()));
776 // Push everything else over.
777 std::move_backward(I, this->end()-1, this->end());
778 this->set_size(this->size() + 1);
779
780 // If we just moved the element we're inserting, be sure to update
781 // the reference (never happens if TakesParamByValue).
782 static_assert(!TakesParamByValue || std::is_same<ArgType, T>::value,
783 "ArgType must be 'T' when taking by value!");
784 if (!TakesParamByValue && this->isReferenceToRange(EltPtr, I, this->end()))
785 ++EltPtr;
786
787 *I = ::std::forward<ArgType>(*EltPtr);
788 return I;
789 }
790
791public:
792 iterator insert(iterator I, T &&Elt) {
793 return insert_one_impl(I, this->forward_value_param(std::move(Elt)));
794 }
795
796 iterator insert(iterator I, const T &Elt) {
797 return insert_one_impl(I, this->forward_value_param(Elt));
798 }
799
800 iterator insert(iterator I, size_type NumToInsert, ValueParamT Elt) {
801 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
802 size_t InsertElt = I - this->begin();
803
804 if (I == this->end()) { // Important special case for empty vector.
805 append(NumToInsert, Elt);
806 return this->begin()+InsertElt;
807 }
808
809 assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
810
811 // Ensure there is enough space, and get the (maybe updated) address of
812 // Elt.
813 const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumToInsert);
814
815 // Uninvalidate the iterator.
816 I = this->begin()+InsertElt;
817
818 // If there are more elements between the insertion point and the end of the
819 // range than there are being inserted, we can use a simple approach to
820 // insertion. Since we already reserved space, we know that this won't
821 // reallocate the vector.
822 if (size_t(this->end()-I) >= NumToInsert) {
823 T *OldEnd = this->end();
824 append(std::move_iterator<iterator>(this->end() - NumToInsert),
825 std::move_iterator<iterator>(this->end()));
826
827 // Copy the existing elements that get replaced.
828 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
829
830 // If we just moved the element we're inserting, be sure to update
831 // the reference (never happens if TakesParamByValue).
832 if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end())
833 EltPtr += NumToInsert;
834
835 std::fill_n(I, NumToInsert, *EltPtr);
836 return I;
837 }
838
839 // Otherwise, we're inserting more elements than exist already, and we're
840 // not inserting at the end.
841
842 // Move over the elements that we're about to overwrite.
843 T *OldEnd = this->end();
844 this->set_size(this->size() + NumToInsert);
845 size_t NumOverwritten = OldEnd-I;
846 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
847
848 // If we just moved the element we're inserting, be sure to update
849 // the reference (never happens if TakesParamByValue).
850 if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end())
851 EltPtr += NumToInsert;
852
853 // Replace the overwritten part.
854 std::fill_n(I, NumOverwritten, *EltPtr);
855
856 // Insert the non-overwritten middle part.
857 std::uninitialized_fill_n(OldEnd, NumToInsert - NumOverwritten, *EltPtr);
858 return I;
859 }
860
861 template <typename ItTy,
862 typename = std::enable_if_t<std::is_convertible<
863 typename std::iterator_traits<ItTy>::iterator_category,
864 std::input_iterator_tag>::value>>
865 iterator insert(iterator I, ItTy From, ItTy To) {
866 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
867 size_t InsertElt = I - this->begin();
868
869 if (I == this->end()) { // Important special case for empty vector.
870 append(From, To);
871 return this->begin()+InsertElt;
872 }
873
874 assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
875
876 // Check that the reserve that follows doesn't invalidate the iterators.
877 this->assertSafeToAddRange(From, To);
878
879 size_t NumToInsert = std::distance(From, To);
880
881 // Ensure there is enough space.
882 reserve(this->size() + NumToInsert);
883
884 // Uninvalidate the iterator.
885 I = this->begin()+InsertElt;
886
887 // If there are more elements between the insertion point and the end of the
888 // range than there are being inserted, we can use a simple approach to
889 // insertion. Since we already reserved space, we know that this won't
890 // reallocate the vector.
891 if (size_t(this->end()-I) >= NumToInsert) {
892 T *OldEnd = this->end();
893 append(std::move_iterator<iterator>(this->end() - NumToInsert),
894 std::move_iterator<iterator>(this->end()));
895
896 // Copy the existing elements that get replaced.
897 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
898
899 std::copy(From, To, I);
900 return I;
901 }
902
903 // Otherwise, we're inserting more elements than exist already, and we're
904 // not inserting at the end.
905
906 // Move over the elements that we're about to overwrite.
907 T *OldEnd = this->end();
908 this->set_size(this->size() + NumToInsert);
909 size_t NumOverwritten = OldEnd-I;
910 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
911
912 // Replace the overwritten part.
913 for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
914 *J = *From;
915 ++J; ++From;
916 }
917
918 // Insert the non-overwritten middle part.
919 this->uninitialized_copy(From, To, OldEnd);
920 return I;
921 }
922
923 void insert(iterator I, std::initializer_list<T> IL) {
924 insert(I, IL.begin(), IL.end());
925 }
926
927 template <typename... ArgTypes> reference emplace_back(ArgTypes &&... Args) {
928 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
929 return this->growAndEmplaceBack(std::forward<ArgTypes>(Args)...);
930
931 ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
932 this->set_size(this->size() + 1);
933 return this->back();
934 }
935
936 SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
937
938 SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
939
940 bool operator==(const SmallVectorImpl &RHS) const {
941 if (this->size() != RHS.size()) return false;
942 return std::equal(this->begin(), this->end(), RHS.begin());
943 }
944 bool operator!=(const SmallVectorImpl &RHS) const {
945 return !(*this == RHS);
946 }
947
948 bool operator<(const SmallVectorImpl &RHS) const {
949 return std::lexicographical_compare(this->begin(), this->end(),
950 RHS.begin(), RHS.end());
951 }
952};
953
954template <typename T>
955void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
956 if (this == &RHS) return;
957
958 // We can only avoid copying elements if neither vector is small.
959 if (!this->isSmall() && !RHS.isSmall()) {
960 std::swap(this->BeginX, RHS.BeginX);
961 std::swap(this->Size, RHS.Size);
962 std::swap(this->Capacity, RHS.Capacity);
963 return;
964 }
965 this->reserve(RHS.size());
966 RHS.reserve(this->size());
967
968 // Swap the shared elements.
969 size_t NumShared = this->size();
970 if (NumShared > RHS.size()) NumShared = RHS.size();
971 for (size_type i = 0; i != NumShared; ++i)
972 std::swap((*this)[i], RHS[i]);
973
974 // Copy over the extra elts.
975 if (this->size() > RHS.size()) {
976 size_t EltDiff = this->size() - RHS.size();
977 this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
978 RHS.set_size(RHS.size() + EltDiff);
979 this->destroy_range(this->begin()+NumShared, this->end());
980 this->set_size(NumShared);
981 } else if (RHS.size() > this->size()) {
982 size_t EltDiff = RHS.size() - this->size();
983 this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
984 this->set_size(this->size() + EltDiff);
985 this->destroy_range(RHS.begin()+NumShared, RHS.end());
986 RHS.set_size(NumShared);
987 }
988}
989
990template <typename T>
991SmallVectorImpl<T> &SmallVectorImpl<T>::
992 operator=(const SmallVectorImpl<T> &RHS) {
993 // Avoid self-assignment.
994 if (this == &RHS) return *this;
995
996 // If we already have sufficient space, assign the common elements, then
997 // destroy any excess.
998 size_t RHSSize = RHS.size();
999 size_t CurSize = this->size();
1000 if (CurSize >= RHSSize) {
1001 // Assign common elements.
1002 iterator NewEnd;
1003 if (RHSSize)
1004 NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
1005 else
1006 NewEnd = this->begin();
1007
1008 // Destroy excess elements.
1009 this->destroy_range(NewEnd, this->end());
1010
1011 // Trim.
1012 this->set_size(RHSSize);
1013 return *this;
1014 }
1015
1016 // If we have to grow to have enough elements, destroy the current elements.
1017 // This allows us to avoid copying them during the grow.
1018 // FIXME: don't do this if they're efficiently moveable.
1019 if (this->capacity() < RHSSize) {
1020 // Destroy current elements.
1021 this->clear();
1022 CurSize = 0;
1023 this->grow(RHSSize);
1024 } else if (CurSize) {
1025 // Otherwise, use assignment for the already-constructed elements.
1026 std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
1027 }
1028
1029 // Copy construct the new elements in place.
1030 this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
1031 this->begin()+CurSize);
1032
1033 // Set end.
1034 this->set_size(RHSSize);
1035 return *this;
1036}
1037
1038template <typename T>
1039SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
1040 // Avoid self-assignment.
1041 if (this == &RHS) return *this;
1042
1043 // If the RHS isn't small, clear this vector and then steal its buffer.
1044 if (!RHS.isSmall()) {
1045 this->assignRemote(std::move(RHS));
1046 return *this;
1047 }
1048
1049 // If we already have sufficient space, assign the common elements, then
1050 // destroy any excess.
1051 size_t RHSSize = RHS.size();
1052 size_t CurSize = this->size();
1053 if (CurSize >= RHSSize) {
1054 // Assign common elements.
1055 iterator NewEnd = this->begin();
1056 if (RHSSize)
1057 NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd);
1058
1059 // Destroy excess elements and trim the bounds.
1060 this->destroy_range(NewEnd, this->end());
1061 this->set_size(RHSSize);
1062
1063 // Clear the RHS.
1064 RHS.clear();
1065
1066 return *this;
1067 }
1068
1069 // If we have to grow to have enough elements, destroy the current elements.
1070 // This allows us to avoid copying them during the grow.
1071 // FIXME: this may not actually make any sense if we can efficiently move
1072 // elements.
1073 if (this->capacity() < RHSSize) {
1074 // Destroy current elements.
1075 this->clear();
1076 CurSize = 0;
1077 this->grow(RHSSize);
1078 } else if (CurSize) {
1079 // Otherwise, use assignment for the already-constructed elements.
1080 std::move(RHS.begin(), RHS.begin()+CurSize, this->begin());
1081 }
1082
1083 // Move-construct the new elements in place.
1084 this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
1085 this->begin()+CurSize);
1086
1087 // Set end.
1088 this->set_size(RHSSize);
1089
1090 RHS.clear();
1091 return *this;
1092}
1093
1094/// Storage for the SmallVector elements. This is specialized for the N=0 case
1095/// to avoid allocating unnecessary storage.
1096template <typename T, unsigned N>
1097struct SmallVectorStorage {
1098 alignas(T) char InlineElts[N * sizeof(T)];
1099};
1100
1101/// We need the storage to be properly aligned even for small-size of 0 so that
1102/// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is
1103/// well-defined.
1104template <typename T> struct alignas(T) SmallVectorStorage<T, 0> {};
1105
1106/// Forward declaration of SmallVector so that
1107/// calculateSmallVectorDefaultInlinedElements can reference
1108/// `sizeof(SmallVector<T, 0>)`.
1109template <typename T, unsigned N> class LLVM_GSL_OWNER SmallVector;
1110
1111/// Helper class for calculating the default number of inline elements for
1112/// `SmallVector<T>`.
1113///
1114/// This should be migrated to a constexpr function when our minimum
1115/// compiler support is enough for multi-statement constexpr functions.
1116template <typename T> struct CalculateSmallVectorDefaultInlinedElements {
1117 // Parameter controlling the default number of inlined elements
1118 // for `SmallVector<T>`.
1119 //
1120 // The default number of inlined elements ensures that
1121 // 1. There is at least one inlined element.
1122 // 2. `sizeof(SmallVector<T>) <= kPreferredSmallVectorSizeof` unless
1123 // it contradicts 1.
1124 static constexpr size_t kPreferredSmallVectorSizeof = 64;
1125
1126 // static_assert that sizeof(T) is not "too big".
1127 //
1128 // Because our policy guarantees at least one inlined element, it is possible
1129 // for an arbitrarily large inlined element to allocate an arbitrarily large
1130 // amount of inline storage. We generally consider it an antipattern for a
1131 // SmallVector to allocate an excessive amount of inline storage, so we want
1132 // to call attention to these cases and make sure that users are making an
1133 // intentional decision if they request a lot of inline storage.
1134 //
1135 // We want this assertion to trigger in pathological cases, but otherwise
1136 // not be too easy to hit. To accomplish that, the cutoff is actually somewhat
1137 // larger than kPreferredSmallVectorSizeof (otherwise,
1138 // `SmallVector<SmallVector<T>>` would be one easy way to trip it, and that
1139 // pattern seems useful in practice).
1140 //
1141 // One wrinkle is that this assertion is in theory non-portable, since
1142 // sizeof(T) is in general platform-dependent. However, we don't expect this
1143 // to be much of an issue, because most LLVM development happens on 64-bit
1144 // hosts, and therefore sizeof(T) is expected to *decrease* when compiled for
1145 // 32-bit hosts, dodging the issue. The reverse situation, where development
1146 // happens on a 32-bit host and then fails due to sizeof(T) *increasing* on a
1147 // 64-bit host, is expected to be very rare.
1148 static_assert(
1149 sizeof(T) <= 256,
1150 "You are trying to use a default number of inlined elements for "
1151 "`SmallVector<T>` but `sizeof(T)` is really big! Please use an "
1152 "explicit number of inlined elements with `SmallVector<T, N>` to make "
1153 "sure you really want that much inline storage.");
1154
1155 // Discount the size of the header itself when calculating the maximum inline
1156 // bytes.
1157 static constexpr size_t PreferredInlineBytes =
1158 kPreferredSmallVectorSizeof - sizeof(SmallVector<T, 0>);
1159 static constexpr size_t NumElementsThatFit = PreferredInlineBytes / sizeof(T);
1160 static constexpr size_t value =
1161 NumElementsThatFit == 0 ? 1 : NumElementsThatFit;
1162};
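A worked instance of the calculation above, assuming a typical 64-bit host where sizeof(SmallVector<int, 0>) is 16 bytes (one pointer plus two 32-bit size fields):

    // PreferredInlineBytes = 64 - 16 = 48
    // NumElementsThatFit   = 48 / sizeof(int) = 12
    llvm::SmallVector<int> Defaulted;   // behaves like SmallVector<int, 12> on such a host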
1163
1164/// This is a 'vector' (really, a variable-sized array), optimized
1165/// for the case when the array is small. It contains some number of elements
1166/// in-place, which allows it to avoid heap allocation when the actual number of
1167/// elements is below that threshold. This allows normal "small" cases to be
1168/// fast without losing generality for large inputs.
1169///
1170/// \note
1171/// In the absence of a well-motivated choice for the number of inlined
1172/// elements \p N, it is recommended to use \c SmallVector<T> (that is,
1173/// omitting the \p N). This will choose a default number of inlined elements
1174/// reasonable for allocation on the stack (for example, trying to keep \c
1175/// sizeof(SmallVector<T>) around 64 bytes).
1176///
1177/// \warning This does not attempt to be exception safe.
1178///
1179/// \see https://llvm.org/docs/ProgrammersManual.html#llvm-adt-smallvector-h
1180template <typename T,
1181 unsigned N = CalculateSmallVectorDefaultInlinedElements<T>::value>
1182class LLVM_GSL_OWNER SmallVector : public SmallVectorImpl<T>,
1183 SmallVectorStorage<T, N> {
1184public:
1185 SmallVector() : SmallVectorImpl<T>(N) {}
1186
1187 ~SmallVector() {
1188 // Destroy the constructed elements in the vector.
1189 this->destroy_range(this->begin(), this->end());
1190 }
1191
1192 explicit SmallVector(size_t Size, const T &Value = T())
1193 : SmallVectorImpl<T>(N) {
1194 this->assign(Size, Value);
1195 }
1196
1197 template <typename ItTy,
1198 typename = std::enable_if_t<std::is_convertible<
1199 typename std::iterator_traits<ItTy>::iterator_category,
1200 std::input_iterator_tag>::value>>
1201 SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
1202 this->append(S, E);
1203 }
1204
1205 template <typename RangeTy>
1206 explicit SmallVector(const iterator_range<RangeTy> &R)
1207 : SmallVectorImpl<T>(N) {
1208 this->append(R.begin(), R.end());
1209 }
1210
1211 SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
1212 this->assign(IL);
1213 }
1214
1215 SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) {
1216 if (!RHS.empty())
1217 SmallVectorImpl<T>::operator=(RHS);
1218 }
1219
1220 SmallVector &operator=(const SmallVector &RHS) {
1221 SmallVectorImpl<T>::operator=(RHS);
1222 return *this;
1223 }
1224
1225 SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) {
1226 if (!RHS.empty())
1227 SmallVectorImpl<T>::operator=(::std::move(RHS));
1228 }
1229
1230 SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) {
1231 if (!RHS.empty())
1232 SmallVectorImpl<T>::operator=(::std::move(RHS));
1233 }
1234
1235 SmallVector &operator=(SmallVector &&RHS) {
1236 if (N) {
1237 SmallVectorImpl<T>::operator=(::std::move(RHS));
1238 return *this;
1239 }
1240 // SmallVectorImpl<T>::operator= does not leverage N==0. Optimize the
1241 // case.
1242 if (this == &RHS)
1243 return *this;
1244 if (RHS.empty()) {
1245 this->destroy_range(this->begin(), this->end());
1246 this->Size = 0;
1247 } else {
1248 this->assignRemote(std::move(RHS));
1249 }
1250 return *this;
1251 }
1252
1253 SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
1254 SmallVectorImpl<T>::operator=(::std::move(RHS));
1255 return *this;
1256 }
1257
1258 SmallVector &operator=(std::initializer_list<T> IL) {
1259 this->assign(IL);
1260 return *this;
1261 }
1262};
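A short usage sketch of the container defined above (values and names are illustrative):

    llvm::SmallVector<int, 4> Vals;   // up to 4 elements live in the inline buffer
    Vals.push_back(1);
    Vals.append({2, 3, 4});
    Vals.insert(Vals.begin(), 0);     // fifth element: storage spills to the heap
    int Last = Vals.pop_back_val();   // Last == 4, Vals == {0, 1, 2, 3}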
1263
1264template <typename T, unsigned N>
1265inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
1266 return X.capacity_in_bytes();
1267}
1268
1269template <typename RangeType>
1270using ValueTypeFromRangeType =
1271 typename std::remove_const<typename std::remove_reference<
1272 decltype(*std::begin(std::declval<RangeType &>()))>::type>::type;
1273
1274/// Given a range of type R, iterate the entire range and return a
1275/// SmallVector with elements of the vector. This is useful, for example,
1276/// when you want to iterate a range and then sort the results.
1277template <unsigned Size, typename R>
1278SmallVector<ValueTypeFromRangeType<R>, Size> to_vector(R &&Range) {
1279 return {std::begin(Range), std::end(Range)};
1280}
1281template <typename R>
1282SmallVector<ValueTypeFromRangeType<R>,
1283 CalculateSmallVectorDefaultInlinedElements<
1284 ValueTypeFromRangeType<R>>::value>
1285to_vector(R &&Range) {
1286 return {std::begin(Range), std::end(Range)};
1287}
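A small sketch of the to_vector helpers above: materialize any range into a SmallVector so the copy can be sorted or mutated independently (Src/Copy are illustrative names):

    std::vector<int> Src = {3, 1, 2};
    auto Copy = llvm::to_vector<8>(Src);    // SmallVector<int, 8> holding {3, 1, 2}
    std::sort(Copy.begin(), Copy.end());    // Copy == {1, 2, 3}; Src is untouched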
1288
1289} // end namespace llvm
1290
1291namespace std {
1292
1293 /// Implement std::swap in terms of SmallVector swap.
1294 template<typename T>
1295 inline void
1296 swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) {
1297 LHS.swap(RHS);
1298 }
1299
1300 /// Implement std::swap in terms of SmallVector swap.
1301 template<typename T, unsigned N>
1302 inline void
1303 swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) {
1304 LHS.swap(RHS);
1305 }
1306
1307} // end namespace std
1308
1309#endif // LLVM_ADT_SMALLVECTOR_H