Bug Summary

File: llvm/lib/Target/AArch64/AArch64StackTagging.cpp
Warning: line 656, column 11
Called C++ object pointer is null
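
The flagged expression dereferences the result of dyn_cast<> unconditionally:
dyn_cast<ConstantInt> returns null when the operand is not a ConstantInt, and
that is exactly the path the analyzer follows (step 38 below). A minimal
sketch of a null-safe rewrite; the guard and the fallback are illustrative
assumptions, not the upstream fix:

// Flagged pattern (line 656): a null dyn_cast<> result is dereferenced.
uint64_t Size =
    dyn_cast<ConstantInt>(Start->getArgOperand(0))->getZExtValue();

// Sketch: guard the cast, falling back to the non-lifetime path otherwise.
if (auto *SizeArg = dyn_cast<ConstantInt>(Start->getArgOperand(0))) {
  uint64_t Size = SizeArg->getZExtValue();
  // ... tag at lifetime.start / untag at lifetime.end using Size ...
}
// Since the size operand of llvm.lifetime.start is an i64 immediate in
// well-formed IR, cast<ConstantInt> (which asserts rather than returning
// null) would also express the invariant.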

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AArch64StackTagging.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/build-llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/build-llvm/lib/Target/AArch64 -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-08-06-171148-17323-1 -x c++ /build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/llvm/lib/Target/AArch64/AArch64StackTagging.cpp

/build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/llvm/lib/Target/AArch64/AArch64StackTagging.cpp

1//===- AArch64StackTagging.cpp - Stack tagging in IR --===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//===----------------------------------------------------------------------===//
10
11#include "AArch64.h"
12#include "AArch64InstrInfo.h"
13#include "AArch64Subtarget.h"
14#include "AArch64TargetMachine.h"
15#include "llvm/ADT/DenseMap.h"
16#include "llvm/ADT/DepthFirstIterator.h"
17#include "llvm/ADT/MapVector.h"
18#include "llvm/ADT/None.h"
19#include "llvm/ADT/Optional.h"
20#include "llvm/ADT/SmallVector.h"
21#include "llvm/ADT/Statistic.h"
22#include "llvm/Analysis/AliasAnalysis.h"
23#include "llvm/Analysis/CFG.h"
24#include "llvm/Analysis/LoopInfo.h"
25#include "llvm/Analysis/PostDominators.h"
26#include "llvm/Analysis/ScalarEvolution.h"
27#include "llvm/Analysis/ScalarEvolutionExpressions.h"
28#include "llvm/Analysis/StackSafetyAnalysis.h"
29#include "llvm/Analysis/ValueTracking.h"
30#include "llvm/CodeGen/LiveRegUnits.h"
31#include "llvm/CodeGen/MachineBasicBlock.h"
32#include "llvm/CodeGen/MachineFunction.h"
33#include "llvm/CodeGen/MachineFunctionPass.h"
34#include "llvm/CodeGen/MachineInstr.h"
35#include "llvm/CodeGen/MachineInstrBuilder.h"
36#include "llvm/CodeGen/MachineLoopInfo.h"
37#include "llvm/CodeGen/MachineOperand.h"
38#include "llvm/CodeGen/MachineRegisterInfo.h"
39#include "llvm/CodeGen/TargetPassConfig.h"
40#include "llvm/CodeGen/TargetRegisterInfo.h"
41#include "llvm/IR/DebugLoc.h"
42#include "llvm/IR/Dominators.h"
43#include "llvm/IR/Function.h"
44#include "llvm/IR/GetElementPtrTypeIterator.h"
45#include "llvm/IR/Instruction.h"
46#include "llvm/IR/Instructions.h"
47#include "llvm/IR/IntrinsicInst.h"
48#include "llvm/IR/IntrinsicsAArch64.h"
49#include "llvm/IR/Metadata.h"
50#include "llvm/InitializePasses.h"
51#include "llvm/Pass.h"
52#include "llvm/Support/Casting.h"
53#include "llvm/Support/Debug.h"
54#include "llvm/Support/raw_ostream.h"
55#include "llvm/Transforms/Utils/Local.h"
56#include <cassert>
57#include <iterator>
58#include <utility>
59
60using namespace llvm;
61
62#define DEBUG_TYPE "aarch64-stack-tagging"
63
64static cl::opt<bool> ClMergeInit(
65 "stack-tagging-merge-init", cl::Hidden, cl::init(true), cl::ZeroOrMore,
66 cl::desc("merge stack variable initializers with tagging when possible"));
67
68static cl::opt<bool>
69 ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden,
70 cl::init(true), cl::ZeroOrMore,
71 cl::desc("Use Stack Safety analysis results"));
72
73static cl::opt<unsigned> ClScanLimit("stack-tagging-merge-init-scan-limit",
74 cl::init(40), cl::Hidden);
75
76static const Align kTagGranuleSize = Align(16);
77
78namespace {
79
80class InitializerBuilder {
81 uint64_t Size;
82 const DataLayout *DL;
83 Value *BasePtr;
84 Function *SetTagFn;
85 Function *SetTagZeroFn;
86 Function *StgpFn;
87
88 // List of initializers sorted by start offset.
89 struct Range {
90 uint64_t Start, End;
91 Instruction *Inst;
92 };
93 SmallVector<Range, 4> Ranges;
94 // 8-aligned offset => 8-byte initializer
95 // Missing keys are zero initialized.
96 std::map<uint64_t, Value *> Out;
97
98public:
99 InitializerBuilder(uint64_t Size, const DataLayout *DL, Value *BasePtr,
100 Function *SetTagFn, Function *SetTagZeroFn,
101 Function *StgpFn)
102 : Size(Size), DL(DL), BasePtr(BasePtr), SetTagFn(SetTagFn),
103 SetTagZeroFn(SetTagZeroFn), StgpFn(StgpFn) {}
104
105 bool addRange(uint64_t Start, uint64_t End, Instruction *Inst) {
106 auto I = std::lower_bound(
107 Ranges.begin(), Ranges.end(), Start,
108 [](const Range &LHS, uint64_t RHS) { return LHS.End <= RHS; });
109 if (I != Ranges.end() && End > I->Start) {
110 // Overlap - bail.
111 return false;
112 }
113 Ranges.insert(I, {Start, End, Inst});
114 return true;
115 }
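
addRange keeps Ranges sorted by end offset and uses std::lower_bound with an
"End <= RHS" comparator to find the first existing range that extends past
the new Start; the insertion overlaps iff that range begins before the new
End. A self-contained sketch of the same test (a plain std::vector stands in
for SmallVector; all names are local to the example):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

struct Range { uint64_t Start, End; };

static bool overlaps(const std::vector<Range> &Ranges, uint64_t Start,
                     uint64_t End) {
  // First existing range whose End extends past the new Start...
  auto I = std::lower_bound(
      Ranges.begin(), Ranges.end(), Start,
      [](const Range &LHS, uint64_t RHS) { return LHS.End <= RHS; });
  // ...collides iff it begins before the new End.
  return I != Ranges.end() && End > I->Start;
}

int main() {
  std::vector<Range> Ranges = {{0, 8}, {16, 24}};
  assert(!overlaps(Ranges, 8, 16)); // fits exactly into the gap
  assert(overlaps(Ranges, 4, 12));  // collides with [0, 8)
  return 0;
}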
116
117 bool addStore(uint64_t Offset, StoreInst *SI, const DataLayout *DL) {
118 int64_t StoreSize = DL->getTypeStoreSize(SI->getOperand(0)->getType());
119 if (!addRange(Offset, Offset + StoreSize, SI))
120 return false;
121 IRBuilder<> IRB(SI);
122 applyStore(IRB, Offset, Offset + StoreSize, SI->getOperand(0));
123 return true;
124 }
125
126 bool addMemSet(uint64_t Offset, MemSetInst *MSI) {
127 uint64_t StoreSize = cast<ConstantInt>(MSI->getLength())->getZExtValue();
128 if (!addRange(Offset, Offset + StoreSize, MSI))
129 return false;
130 IRBuilder<> IRB(MSI);
131 applyMemSet(IRB, Offset, Offset + StoreSize,
132 cast<ConstantInt>(MSI->getValue()));
133 return true;
134 }
135
136 void applyMemSet(IRBuilder<> &IRB, int64_t Start, int64_t End,
137 ConstantInt *V) {
138 // Out[] does not distinguish between zero and undef, and we already know
139 // that this memset does not overlap with any other initializer. Nothing to
140 // do for memset(0).
141 if (V->isZero())
142 return;
143 for (int64_t Offset = Start - Start % 8; Offset < End; Offset += 8) {
144 uint64_t Cst = 0x0101010101010101UL;
145 int LowBits = Offset < Start ? (Start - Offset) * 8 : 0;
146 if (LowBits)
147 Cst = (Cst >> LowBits) << LowBits;
148 int HighBits = End - Offset < 8 ? (8 - (End - Offset)) * 8 : 0;
149 if (HighBits)
150 Cst = (Cst << HighBits) >> HighBits;
151 ConstantInt *C =
152 ConstantInt::get(IRB.getInt64Ty(), Cst * V->getZExtValue());
153
154 Value *&CurrentV = Out[Offset];
155 if (!CurrentV) {
156 CurrentV = C;
157 } else {
158 CurrentV = IRB.CreateOr(CurrentV, C);
159 }
160 }
161 }
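
For each 8-byte word, the loop above builds a byte-lane mask from the
0x0101010101010101 splat, clears the lanes before Start and at/after End, and
multiplies by the memset byte to splat it into the surviving lanes. A
standalone restatement of that arithmetic with a worked check (little-endian
lanes; names are local to the example):

#include <cassert>
#include <cstdint>

// 64-bit word covering [Offset, Offset+8) for a memset(Value) over
// [Start, End), as computed by applyMemSet.
static uint64_t memsetWord(int64_t Offset, int64_t Start, int64_t End,
                           uint8_t Value) {
  uint64_t Cst = 0x0101010101010101ULL; // one bit set per byte lane
  int LowBits = Offset < Start ? (Start - Offset) * 8 : 0;
  if (LowBits) // drop lanes before Start
    Cst = (Cst >> LowBits) << LowBits;
  int HighBits = End - Offset < 8 ? (8 - (End - Offset)) * 8 : 0;
  if (HighBits) // drop lanes at/after End
    Cst = (Cst << HighBits) >> HighBits;
  return Cst * Value; // splat Value into the remaining lanes
}

int main() {
  // memset of 0xAB over bytes [3, 5): only lanes 3 and 4 carry the value.
  assert(memsetWord(0, 3, 5, 0xAB) == 0x000000ABAB000000ULL);
  return 0;
}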
162
163 // Take a 64-bit slice of the value starting at the given offset (in bytes).
164 // Offset can be negative. Pad with zeroes on both sides when necessary.
165 Value *sliceValue(IRBuilder<> &IRB, Value *V, int64_t Offset) {
166 if (Offset > 0) {
167 V = IRB.CreateLShr(V, Offset * 8);
168 V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
169 } else if (Offset < 0) {
170 V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
171 V = IRB.CreateShl(V, -Offset * 8);
172 } else {
173 V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
174 }
175 return V;
176 }
177
178 void applyStore(IRBuilder<> &IRB, int64_t Start, int64_t End,
179 Value *StoredValue) {
180 StoredValue = flatten(IRB, StoredValue);
181 for (int64_t Offset = Start - Start % 8; Offset < End; Offset += 8) {
182 Value *V = sliceValue(IRB, StoredValue, Offset - Start);
183 Value *&CurrentV = Out[Offset];
184 if (!CurrentV) {
185 CurrentV = V;
186 } else {
187 CurrentV = IRB.CreateOr(CurrentV, V);
188 }
189 }
190 }
191
192 void generate(IRBuilder<> &IRB) {
193 LLVM_DEBUG(dbgs() << "Combined initializer\n");
194 // No initializers => the entire allocation is undef.
195 if (Ranges.empty()) {
196 emitUndef(IRB, 0, Size);
197 return;
198 }
199
200 // Look through 8-byte initializer list 16 bytes at a time;
201 // If one of the two 8-byte halves is non-zero non-undef, emit STGP.
202 // Otherwise, emit zeroes up to next available item.
203 uint64_t LastOffset = 0;
204 for (uint64_t Offset = 0; Offset < Size; Offset += 16) {
205 auto I1 = Out.find(Offset);
206 auto I2 = Out.find(Offset + 8);
207 if (I1 == Out.end() && I2 == Out.end())
208 continue;
209
210 if (Offset > LastOffset)
211 emitZeroes(IRB, LastOffset, Offset - LastOffset);
212
213 Value *Store1 = I1 == Out.end() ? Constant::getNullValue(IRB.getInt64Ty())
214 : I1->second;
215 Value *Store2 = I2 == Out.end() ? Constant::getNullValue(IRB.getInt64Ty())
216 : I2->second;
217 emitPair(IRB, Offset, Store1, Store2);
218 LastOffset = Offset + 16;
219 }
220
221 // memset(0) does not update Out[], therefore the tail can be either undef
222 // or zero.
223 if (LastOffset < Size)
224 emitZeroes(IRB, LastOffset, Size - LastOffset);
225
226 for (const auto &R : Ranges) {
227 R.Inst->eraseFromParent();
228 }
229 }
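
generate() walks the 8-byte slot map in 16-byte steps: untouched gaps in
front of an initialized chunk become zeroing runs, each chunk with at least
one initialized half becomes one STGP, and the tail is zeroed. A sketch of
that planning walk with a worked example (std::map and strings stand in for
IR values; names are local to the example):

#include <cassert>
#include <cstdint>
#include <map>
#include <string>
#include <vector>

static std::vector<std::string> plan(uint64_t Size,
                                     const std::map<uint64_t, int> &Out) {
  std::vector<std::string> Actions;
  uint64_t LastOffset = 0;
  for (uint64_t Offset = 0; Offset < Size; Offset += 16) {
    bool HasLo = Out.count(Offset), HasHi = Out.count(Offset + 8);
    if (!HasLo && !HasHi) // fully zero/undef chunk: defer to a zero run
      continue;
    if (Offset > LastOffset) // zero the gap before this chunk
      Actions.push_back("zero [" + std::to_string(LastOffset) + "," +
                        std::to_string(Offset) + ")");
    Actions.push_back("stgp @" + std::to_string(Offset));
    LastOffset = Offset + 16;
  }
  if (LastOffset < Size) // zero the tail
    Actions.push_back("zero [" + std::to_string(LastOffset) + "," +
                      std::to_string(Size) + ")");
  return Actions;
}

int main() {
  // 48-byte alloca with initializers only at byte offsets 16 and 24.
  auto Actions = plan(48, {{16, 1}, {24, 1}});
  assert(Actions == (std::vector<std::string>{
             "zero [0,16)", "stgp @16", "zero [32,48)"}));
  return 0;
}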
230
231 void emitZeroes(IRBuilder<> &IRB, uint64_t Offset, uint64_t Size) {
232 LLVM_DEBUG(dbgs() << " [" << Offset << ", " << Offset + Size
233 << ") zero\n");
234 Value *Ptr = BasePtr;
235 if (Offset)
236 Ptr = IRB.CreateConstGEP1_32(Ptr, Offset);
237 IRB.CreateCall(SetTagZeroFn,
238 {Ptr, ConstantInt::get(IRB.getInt64Ty(), Size)});
239 }
240
241 void emitUndef(IRBuilder<> &IRB, uint64_t Offset, uint64_t Size) {
242 LLVM_DEBUG(dbgs() << " [" << Offset << ", " << Offset + Size
243 << ") undef\n");
244 Value *Ptr = BasePtr;
245 if (Offset)
246 Ptr = IRB.CreateConstGEP1_32(Ptr, Offset);
247 IRB.CreateCall(SetTagFn, {Ptr, ConstantInt::get(IRB.getInt64Ty(), Size)});
248 }
249
250 void emitPair(IRBuilder<> &IRB, uint64_t Offset, Value *A, Value *B) {
251 LLVM_DEBUG(dbgs() << " [" << Offset << ", " << Offset + 16 << "):\n");
252 LLVM_DEBUG(dbgs() << " " << *A << "\n " << *B << "\n");
253 Value *Ptr = BasePtr;
254 if (Offset)
255 Ptr = IRB.CreateConstGEP1_32(Ptr, Offset);
256 IRB.CreateCall(StgpFn, {Ptr, A, B});
257 }
258
259 Value *flatten(IRBuilder<> &IRB, Value *V) {
260 if (V->getType()->isIntegerTy())
261 return V;
262 // vector of pointers -> vector of ints
263 if (VectorType *VecTy = dyn_cast<VectorType>(V->getType())) {
264 LLVMContext &Ctx = IRB.getContext();
265 Type *EltTy = VecTy->getElementType();
266 if (EltTy->isPointerTy()) {
267 uint32_t EltSize = DL->getTypeSizeInBits(EltTy);
268 auto *NewTy = FixedVectorType::get(
269 IntegerType::get(Ctx, EltSize),
270 cast<FixedVectorType>(VecTy)->getNumElements());
271 V = IRB.CreatePointerCast(V, NewTy);
272 }
273 }
274 return IRB.CreateBitOrPointerCast(
275 V, IRB.getIntNTy(DL->getTypeStoreSize(V->getType()) * 8));
276 }
277};
278
279class AArch64StackTagging : public FunctionPass {
280 struct AllocaInfo {
281 AllocaInst *AI;
282 SmallVector<IntrinsicInst *, 2> LifetimeStart;
283 SmallVector<IntrinsicInst *, 2> LifetimeEnd;
284 SmallVector<DbgVariableIntrinsic *, 2> DbgVariableIntrinsics;
285 int Tag; // -1 for non-tagged allocations
286 };
287
288 const bool MergeInit;
289 const bool UseStackSafety;
290
291public:
292 static char ID; // Pass ID, replacement for typeid
293
294 AArch64StackTagging(bool IsOptNone = false)
295 : FunctionPass(ID),
296 MergeInit(ClMergeInit.getNumOccurrences() ? ClMergeInit : !IsOptNone),
297 UseStackSafety(ClUseStackSafety.getNumOccurrences() ? ClUseStackSafety
298 : !IsOptNone) {
299 initializeAArch64StackTaggingPass(*PassRegistry::getPassRegistry());
300 }
301
302 bool isInterestingAlloca(const AllocaInst &AI);
303 void alignAndPadAlloca(AllocaInfo &Info);
304
305 void tagAlloca(AllocaInst *AI, Instruction *InsertBefore, Value *Ptr,
306 uint64_t Size);
307 void untagAlloca(AllocaInst *AI, Instruction *InsertBefore, uint64_t Size);
308
309 Instruction *collectInitializers(Instruction *StartInst, Value *StartPtr,
310 uint64_t Size, InitializerBuilder &IB);
311
312 Instruction *
313 insertBaseTaggedPointer(const MapVector<AllocaInst *, AllocaInfo> &Allocas,
314 const DominatorTree *DT);
315 bool runOnFunction(Function &F) override;
316
317 StringRef getPassName() const override { return "AArch64 Stack Tagging"; }
318
319private:
320 Function *F = nullptr;
321 Function *SetTagFunc = nullptr;
322 const DataLayout *DL = nullptr;
323 AAResults *AA = nullptr;
324 const StackSafetyGlobalInfo *SSI = nullptr;
325
326 void getAnalysisUsage(AnalysisUsage &AU) const override {
327 AU.setPreservesCFG();
328 if (UseStackSafety)
329 AU.addRequired<StackSafetyGlobalInfoWrapperPass>();
330 if (MergeInit)
331 AU.addRequired<AAResultsWrapperPass>();
332 }
333};
334
335} // end anonymous namespace
336
337char AArch64StackTagging::ID = 0;
338
339INITIALIZE_PASS_BEGIN(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging",
340 false, false)
341INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
342INITIALIZE_PASS_DEPENDENCY(StackSafetyGlobalInfoWrapperPass)
343INITIALIZE_PASS_END(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging",
344 false, false)
345
346FunctionPass *llvm::createAArch64StackTaggingPass(bool IsOptNone) {
347 return new AArch64StackTagging(IsOptNone);
348}
349
350Instruction *AArch64StackTagging::collectInitializers(Instruction *StartInst,
351 Value *StartPtr,
352 uint64_t Size,
353 InitializerBuilder &IB) {
354 MemoryLocation AllocaLoc{StartPtr, Size};
355 Instruction *LastInst = StartInst;
356 BasicBlock::iterator BI(StartInst);
357
358 unsigned Count = 0;
359 for (; Count < ClScanLimit && !BI->isTerminator(); ++BI) {
360 if (!isa<DbgInfoIntrinsic>(*BI))
361 ++Count;
362
363 if (isNoModRef(AA->getModRefInfo(&*BI, AllocaLoc)))
364 continue;
365
366 if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
367 // If the instruction is readnone, ignore it, otherwise bail out. We
368 // don't even allow readonly here because we don't want something like:
369 // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
370 if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
371 break;
372 continue;
373 }
374
375 if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
376 if (!NextStore->isSimple())
377 break;
378
379 // Check to see if this store is to a constant offset from the start ptr.
380 Optional<int64_t> Offset =
381 isPointerOffset(StartPtr, NextStore->getPointerOperand(), *DL);
382 if (!Offset)
383 break;
384
385 if (!IB.addStore(*Offset, NextStore, DL))
386 break;
387 LastInst = NextStore;
388 } else {
389 MemSetInst *MSI = cast<MemSetInst>(BI);
390
391 if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
392 break;
393
394 if (!isa<ConstantInt>(MSI->getValue()))
395 break;
396
397 // Check to see if this store is to a constant offset from the start ptr.
398 Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), *DL);
399 if (!Offset)
400 break;
401
402 if (!IB.addMemSet(*Offset, MSI))
403 break;
404 LastInst = MSI;
405 }
406 }
407 return LastInst;
408}
409
410bool AArch64StackTagging::isInterestingAlloca(const AllocaInst &AI) {
411 // FIXME: support dynamic allocas
412 bool IsInteresting =
413 AI.getAllocatedType()->isSized() && AI.isStaticAlloca() &&
414 // alloca() may be called with 0 size, ignore it.
415 AI.getAllocationSizeInBits(*DL).getValue() > 0 &&
416 // inalloca allocas are not treated as static, and we don't want
417 // dynamic alloca instrumentation for them as well.
418 !AI.isUsedWithInAlloca() &&
419 // swifterror allocas are register promoted by ISel
420 !AI.isSwiftError() &&
421 // safe allocas are not interesting
422 !(SSI && SSI->isSafe(AI));
423 return IsInteresting;
424}
425
426void AArch64StackTagging::tagAlloca(AllocaInst *AI, Instruction *InsertBefore,
427 Value *Ptr, uint64_t Size) {
428 auto SetTagZeroFunc =
429 Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_settag_zero);
430 auto StgpFunc =
431 Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_stgp);
432
433 InitializerBuilder IB(Size, DL, Ptr, SetTagFunc, SetTagZeroFunc, StgpFunc);
434 bool LittleEndian =
435 Triple(AI->getModule()->getTargetTriple()).isLittleEndian();
436 // Current implementation of initializer merging assumes little endianness.
437 if (MergeInit && !F->hasOptNone() && LittleEndian) {
438 LLVM_DEBUG(dbgs() << "collecting initializers for " << *AI
439 << ", size = " << Size << "\n");
440 InsertBefore = collectInitializers(InsertBefore, Ptr, Size, IB);
441 }
442
443 IRBuilder<> IRB(InsertBefore);
444 IB.generate(IRB);
445}
446
447void AArch64StackTagging::untagAlloca(AllocaInst *AI, Instruction *InsertBefore,
448 uint64_t Size) {
449 IRBuilder<> IRB(InsertBefore);
450 IRB.CreateCall(SetTagFunc, {IRB.CreatePointerCast(AI, IRB.getInt8PtrTy()),
451 ConstantInt::get(IRB.getInt64Ty(), Size)});
452}
453
454Instruction *AArch64StackTagging::insertBaseTaggedPointer(
455 const MapVector<AllocaInst *, AllocaInfo> &Allocas,
456 const DominatorTree *DT) {
457 BasicBlock *PrologueBB = nullptr;
458 // Try sinking IRG as deep as possible to avoid hurting shrink wrap.
459 for (auto &I : Allocas) {
460 const AllocaInfo &Info = I.second;
461 AllocaInst *AI = Info.AI;
462 if (Info.Tag < 0)
463 continue;
464 if (!PrologueBB) {
465 PrologueBB = AI->getParent();
466 continue;
467 }
468 PrologueBB = DT->findNearestCommonDominator(PrologueBB, AI->getParent());
469 }
470 assert(PrologueBB);
471
472 IRBuilder<> IRB(&PrologueBB->front());
473 Function *IRG_SP =
474 Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_irg_sp);
475 Instruction *Base =
476 IRB.CreateCall(IRG_SP, {Constant::getNullValue(IRB.getInt64Ty())});
477 Base->setName("basetag");
478 return Base;
479}
480
481void AArch64StackTagging::alignAndPadAlloca(AllocaInfo &Info) {
482 const Align NewAlignment =
483 max(MaybeAlign(Info.AI->getAlignment()), kTagGranuleSize);
484 Info.AI->setAlignment(NewAlignment);
485
486 uint64_t Size = Info.AI->getAllocationSizeInBits(*DL).getValue() / 8;
487 uint64_t AlignedSize = alignTo(Size, kTagGranuleSize);
488 if (Size == AlignedSize)
489 return;
490
491 // Add padding to the alloca.
492 Type *AllocatedType =
493 Info.AI->isArrayAllocation()
494 ? ArrayType::get(
495 Info.AI->getAllocatedType(),
496 cast<ConstantInt>(Info.AI->getArraySize())->getZExtValue())
497 : Info.AI->getAllocatedType();
498 Type *PaddingType =
499 ArrayType::get(Type::getInt8Ty(F->getContext()), AlignedSize - Size);
500 Type *TypeWithPadding = StructType::get(AllocatedType, PaddingType);
501 auto *NewAI = new AllocaInst(
502 TypeWithPadding, Info.AI->getType()->getAddressSpace(), nullptr, "", Info.AI);
503 NewAI->takeName(Info.AI);
504 NewAI->setAlignment(Info.AI->getAlign());
505 NewAI->setUsedWithInAlloca(Info.AI->isUsedWithInAlloca());
506 NewAI->setSwiftError(Info.AI->isSwiftError());
507 NewAI->copyMetadata(*Info.AI);
508
509 auto *NewPtr = new BitCastInst(NewAI, Info.AI->getType(), "", Info.AI);
510 Info.AI->replaceAllUsesWith(NewPtr);
511 Info.AI->eraseFromParent();
512 Info.AI = NewAI;
513}
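
alignAndPadAlloca rounds the allocation up to the 16-byte MTE tag granule and
materializes the slack as an [N x i8] member of a wrapping struct. A sketch
of the size arithmetic (the helper is local to the example; kTagGranuleSize
is 16, as defined above):

#include <cassert>
#include <cstdint>

static uint64_t alignToGranule(uint64_t Size) {
  const uint64_t Granule = 16; // kTagGranuleSize
  return (Size + Granule - 1) / Granule * Granule;
}

int main() {
  // A 20-byte alloca becomes {original type, [12 x i8]}, i.e. 32 bytes.
  assert(alignToGranule(20) == 32);
  // Already-aligned sizes take the early return and are left untouched.
  assert(alignToGranule(32) == 32);
  return 0;
}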
514
515// Helper function to check for post-dominance.
516static bool postDominates(const PostDominatorTree *PDT, const IntrinsicInst *A,
517 const IntrinsicInst *B) {
518 const BasicBlock *ABB = A->getParent();
519 const BasicBlock *BBB = B->getParent();
520
521 if (ABB != BBB)
522 return PDT->dominates(ABB, BBB);
523
524 for (const Instruction &I : *ABB) {
525 if (&I == B)
526 return true;
527 if (&I == A)
528 return false;
529 }
530 llvm_unreachable("Corrupt instruction list");
531}
532
533// FIXME: check for MTE extension
534bool AArch64StackTagging::runOnFunction(Function &Fn) {
535 if (!Fn.hasFnAttribute(Attribute::SanitizeMemTag))
1. Assuming the condition is false
2. Taking false branch
536 return false;
537
538 if (UseStackSafety)
3. Assuming field 'UseStackSafety' is false
4. Taking false branch
539 SSI = &getAnalysis<StackSafetyGlobalInfoWrapperPass>().getResult();
540 F = &Fn;
541 DL = &Fn.getParent()->getDataLayout();
542 if (MergeInit)
5. Assuming field 'MergeInit' is false
6. Taking false branch
543 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
544
545 MapVector<AllocaInst *, AllocaInfo> Allocas; // need stable iteration order
546 SmallVector<Instruction *, 8> RetVec;
547 SmallVector<Instruction *, 4> UnrecognizedLifetimes;
548
549 for (auto &BB : *F) {
550 for (BasicBlock::iterator IT = BB.begin(); IT != BB.end(); ++IT) {
551 Instruction *I = &*IT;
552 if (auto *AI = dyn_cast<AllocaInst>(I)) {
553 Allocas[AI].AI = AI;
554 continue;
555 }
556
557 if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(I)) {
558 if (auto *AI =
559 dyn_cast_or_null<AllocaInst>(DVI->getVariableLocation())) {
560 Allocas[AI].DbgVariableIntrinsics.push_back(DVI);
561 }
562 continue;
563 }
564
565 auto *II = dyn_cast<IntrinsicInst>(I);
566 if (II && (II->getIntrinsicID() == Intrinsic::lifetime_start ||
567 II->getIntrinsicID() == Intrinsic::lifetime_end)) {
568 AllocaInst *AI = findAllocaForValue(II->getArgOperand(1));
569 if (!AI) {
570 UnrecognizedLifetimes.push_back(I);
571 continue;
572 }
573 if (II->getIntrinsicID() == Intrinsic::lifetime_start)
574 Allocas[AI].LifetimeStart.push_back(II);
575 else
576 Allocas[AI].LifetimeEnd.push_back(II);
577 }
578
579 if (isa<ReturnInst>(I) || isa<ResumeInst>(I) || isa<CleanupReturnInst>(I))
580 RetVec.push_back(I);
581 }
582 }
583
584 if (Allocas.empty())
7. Assuming the condition is false
8. Taking false branch
585 return false;
586
587 int NextTag = 0;
588 int NumInterestingAllocas = 0;
589 for (auto &I : Allocas) {
590 AllocaInfo &Info = I.second;
591 assert(Info.AI);
9. Assuming field 'AI' is non-null
10. '?' condition is true
13. Assuming field 'AI' is non-null
14. '?' condition is true
17. Assuming field 'AI' is non-null
18. '?' condition is true
592
593 if (!isInterestingAlloca(*Info.AI)) {
11. Taking true branch
15. Taking true branch
19. Taking false branch
594 Info.Tag = -1;
595 continue;
12. Execution continues on line 589
16. Execution continues on line 589
596 }
597
598 alignAndPadAlloca(Info);
599 NumInterestingAllocas++;
600 Info.Tag = NextTag;
601 NextTag = (NextTag + 1) % 16;
602 }
603
604 if (NumInterestingAllocas == 0)
19.1. 'NumInterestingAllocas' is not equal to 0
20. Taking false branch
605 return true;
606
607 std::unique_ptr<DominatorTree> DeleteDT;
608 DominatorTree *DT = nullptr;
609 if (auto *P = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
20.1. 'P' is null
21. Taking false branch
610 DT = &P->getDomTree();
611
612 if (DT == nullptr && (NumInterestingAllocas > 1 ||
21.1. 'NumInterestingAllocas' is <= 1
23. Taking false branch
613 !F->hasFnAttribute(Attribute::OptimizeNone))) {
22. Assuming the condition is false
614 DeleteDT = std::make_unique<DominatorTree>(*F);
615 DT = DeleteDT.get();
616 }
617
618 std::unique_ptr<PostDominatorTree> DeletePDT;
619 PostDominatorTree *PDT = nullptr;
620 if (auto *P = getAnalysisIfAvailable<PostDominatorTreeWrapperPass>())
23.1. 'P' is null
24. Taking false branch
621 PDT = &P->getPostDomTree();
622
623 if (PDT == nullptr && !F->hasFnAttribute(Attribute::OptimizeNone)) {
25. Assuming the condition is false
26. Taking false branch
624 DeletePDT = std::make_unique<PostDominatorTree>(*F);
625 PDT = DeletePDT.get();
626 }
627
628 SetTagFunc =
629 Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_settag);
630
631 Instruction *Base = insertBaseTaggedPointer(Allocas, DT);
632
633 for (auto &I : Allocas) {
634 const AllocaInfo &Info = I.second;
635 AllocaInst *AI = Info.AI;
636 if (Info.Tag < 0)
27. Assuming field 'Tag' is >= 0
28. Taking false branch
637 continue;
638
639 // Replace alloca with tagp(alloca).
640 IRBuilder<> IRB(Info.AI->getNextNode());
641 Function *TagP = Intrinsic::getDeclaration(
642 F->getParent(), Intrinsic::aarch64_tagp, {Info.AI->getType()});
643 Instruction *TagPCall =
644 IRB.CreateCall(TagP, {Constant::getNullValue(Info.AI->getType()), Base,
645 ConstantInt::get(IRB.getInt64Ty(), Info.Tag)});
646 if (Info.AI->hasName())
29. Assuming the condition is false
30. Taking false branch
647 TagPCall->setName(Info.AI->getName() + ".tag");
648 Info.AI->replaceAllUsesWith(TagPCall);
649 TagPCall->setOperand(0, Info.AI);
650
651 if (UnrecognizedLifetimes.empty() && Info.LifetimeStart.size() == 1 &&
31. Calling 'SmallVectorBase::empty'
34. Returning from 'SmallVectorBase::empty'
35. Assuming the condition is true
37. Taking true branch
652 Info.LifetimeEnd.size() == 1) {
36. Assuming the condition is true
653 IntrinsicInst *Start = Info.LifetimeStart[0];
654 IntrinsicInst *End = Info.LifetimeEnd[0];
655 uint64_t Size =
656 dyn_cast<ConstantInt>(Start->getArgOperand(0))->getZExtValue();
38. Assuming the object is not a 'ConstantInt'
39. Called C++ object pointer is null
657 Size = alignTo(Size, kTagGranuleSize);
658 tagAlloca(AI, Start->getNextNode(), Start->getArgOperand(1), Size);
659 // We need to ensure that if we tag some object, we certainly untag it
660 // before the function exits.
661 if (PDT != nullptr && postDominates(PDT, End, Start)) {
662 untagAlloca(AI, End, Size);
663 } else {
664 SmallVector<Instruction *, 8> ReachableRetVec;
665 unsigned NumCoveredExits = 0;
666 for (auto &RI : RetVec) {
667 if (!isPotentiallyReachable(Start, RI, nullptr, DT))
668 continue;
669 ReachableRetVec.push_back(RI);
670 if (DT != nullptr && DT->dominates(End, RI))
671 ++NumCoveredExits;
672 }
673 // If there's a mix of covered and non-covered exits, just put the untag
674 // on exits, so we avoid the redundancy of untagging twice.
675 if (NumCoveredExits == ReachableRetVec.size()) {
676 untagAlloca(AI, End, Size);
677 } else {
678 for (auto &RI : ReachableRetVec)
679 untagAlloca(AI, RI, Size);
680 // We may have inserted untag outside of the lifetime interval.
681 // Remove the lifetime end call for this alloca.
682 End->eraseFromParent();
683 }
684 }
685 } else {
686 uint64_t Size = Info.AI->getAllocationSizeInBits(*DL).getValue() / 8;
687 Value *Ptr = IRB.CreatePointerCast(TagPCall, IRB.getInt8PtrTy());
688 tagAlloca(AI, &*IRB.GetInsertPoint(), Ptr, Size);
689 for (auto &RI : RetVec) {
690 untagAlloca(AI, RI, Size);
691 }
692 // We may have inserted tag/untag outside of any lifetime interval.
693 // Remove all lifetime intrinsics for this alloca.
694 for (auto &II : Info.LifetimeStart)
695 II->eraseFromParent();
696 for (auto &II : Info.LifetimeEnd)
697 II->eraseFromParent();
698 }
699
700 // Fixup debug intrinsics to point to the new alloca.
701 for (auto DVI : Info.DbgVariableIntrinsics)
702 DVI->setArgOperand(
703 0,
704 MetadataAsValue::get(F->getContext(), LocalAsMetadata::get(Info.AI)));
705 }
706
707 // If we have instrumented at least one alloca, all unrecognized lifetime
708 // intrinsics have to go.
709 for (auto &I : UnrecognizedLifetimes)
710 I->eraseFromParent();
711
712 return true;
713}

/build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/llvm/include/llvm/ADT/SmallVector.h

1//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the SmallVector class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_ADT_SMALLVECTOR_H
14#define LLVM_ADT_SMALLVECTOR_H
15
16#include "llvm/ADT/iterator_range.h"
17#include "llvm/Support/AlignOf.h"
18#include "llvm/Support/Compiler.h"
19#include "llvm/Support/ErrorHandling.h"
20#include "llvm/Support/MathExtras.h"
21#include "llvm/Support/MemAlloc.h"
22#include "llvm/Support/type_traits.h"
23#include <algorithm>
24#include <cassert>
25#include <cstddef>
26#include <cstdlib>
27#include <cstring>
28#include <initializer_list>
29#include <iterator>
30#include <limits>
31#include <memory>
32#include <new>
33#include <type_traits>
34#include <utility>
35
36namespace llvm {
37
38/// This is all the stuff common to all SmallVectors.
39///
40/// The template parameter specifies the type which should be used to hold the
41/// Size and Capacity of the SmallVector, so it can be adjusted.
42/// Using 32 bit size is desirable to shrink the size of the SmallVector.
43/// Using 64 bit size is desirable for cases like SmallVector<char>, where a
44/// 32 bit size would limit the vector to ~4GB. SmallVectors are used for
45/// buffering bitcode output - which can exceed 4GB.
46template <class Size_T> class SmallVectorBase {
47protected:
48 void *BeginX;
49 Size_T Size = 0, Capacity;
50
51 /// The maximum value of the Size_T used.
52 static constexpr size_t SizeTypeMax() {
53 return std::numeric_limits<Size_T>::max();
54 }
55
56 SmallVectorBase() = delete;
57 SmallVectorBase(void *FirstEl, size_t TotalCapacity)
58 : BeginX(FirstEl), Capacity(TotalCapacity) {}
59
60 /// This is an implementation of the grow() method which only works
61 /// on POD-like data types and is out of line to reduce code duplication.
62 /// This function will report a fatal error if it cannot increase capacity.
63 void grow_pod(void *FirstEl, size_t MinCapacity, size_t TSize);
64
65public:
66 size_t size() const { return Size; }
67 size_t capacity() const { return Capacity; }
68
69 LLVM_NODISCARD bool empty() const { return !Size; }
32. Assuming field 'Size' is 0, which participates in a condition later
33. Returning the value 1, which participates in a condition later
70
71 /// Set the array size to \p N, which the current array must have enough
72 /// capacity for.
73 ///
74 /// This does not construct or destroy any elements in the vector.
75 ///
76 /// Clients can use this in conjunction with capacity() to write past the end
77 /// of the buffer when they know that more elements are available, and only
78 /// update the size later. This avoids the cost of value initializing elements
79 /// which will only be overwritten.
80 void set_size(size_t N) {
81 assert(N <= capacity());
82 Size = N;
83 }
84};
85
86template <class T>
87using SmallVectorSizeType =
88 typename std::conditional<sizeof(T) < 4 && sizeof(void *) >= 8, uint64_t,
89 uint32_t>::type;
90
91/// Figure out the offset of the first element.
92template <class T, typename = void> struct SmallVectorAlignmentAndSize {
93 AlignedCharArrayUnion<SmallVectorBase<SmallVectorSizeType<T>>> Base;
94 AlignedCharArrayUnion<T> FirstEl;
95};
96
97/// This is the part of SmallVectorTemplateBase which does not depend on whether
98/// the type T is a POD. The extra dummy template argument is used by ArrayRef
99/// to avoid unnecessarily requiring T to be complete.
100template <typename T, typename = void>
101class SmallVectorTemplateCommon
102 : public SmallVectorBase<SmallVectorSizeType<T>> {
103 using Base = SmallVectorBase<SmallVectorSizeType<T>>;
104
105 /// Find the address of the first element. For this pointer math to be valid
106 /// with small-size of 0 for T with lots of alignment, it's important that
107 /// SmallVectorStorage is properly-aligned even for small-size of 0.
108 void *getFirstEl() const {
109 return const_cast<void *>(reinterpret_cast<const void *>(
110 reinterpret_cast<const char *>(this) +
111 offsetof(SmallVectorAlignmentAndSize<T>, FirstEl)));
112 }
113 // Space after 'FirstEl' is clobbered, do not add any instance vars after it.
114
115protected:
116 SmallVectorTemplateCommon(size_t Size) : Base(getFirstEl(), Size) {}
117
118 void grow_pod(size_t MinCapacity, size_t TSize) {
119 Base::grow_pod(getFirstEl(), MinCapacity, TSize);
120 }
121
122 /// Return true if this is a smallvector which has not had dynamic
123 /// memory allocated for it.
124 bool isSmall() const { return this->BeginX == getFirstEl(); }
125
126 /// Put this vector in a state of being small.
127 void resetToSmall() {
128 this->BeginX = getFirstEl();
129 this->Size = this->Capacity = 0; // FIXME: Setting Capacity to 0 is suspect.
130 }
131
132public:
133 using size_type = size_t;
134 using difference_type = ptrdiff_t;
135 using value_type = T;
136 using iterator = T *;
137 using const_iterator = const T *;
138
139 using const_reverse_iterator = std::reverse_iterator<const_iterator>;
140 using reverse_iterator = std::reverse_iterator<iterator>;
141
142 using reference = T &;
143 using const_reference = const T &;
144 using pointer = T *;
145 using const_pointer = const T *;
146
147 using Base::capacity;
148 using Base::empty;
149 using Base::size;
150
151 // forward iterator creation methods.
152 iterator begin() { return (iterator)this->BeginX; }
153 const_iterator begin() const { return (const_iterator)this->BeginX; }
154 iterator end() { return begin() + size(); }
155 const_iterator end() const { return begin() + size(); }
156
157 // reverse iterator creation methods.
158 reverse_iterator rbegin() { return reverse_iterator(end()); }
159 const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
160 reverse_iterator rend() { return reverse_iterator(begin()); }
161 const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
162
163 size_type size_in_bytes() const { return size() * sizeof(T); }
164 size_type max_size() const {
165 return std::min(this->SizeTypeMax(), size_type(-1) / sizeof(T));
166 }
167
168 size_t capacity_in_bytes() const { return capacity() * sizeof(T); }
169
170 /// Return a pointer to the vector's buffer, even if empty().
171 pointer data() { return pointer(begin()); }
172 /// Return a pointer to the vector's buffer, even if empty().
173 const_pointer data() const { return const_pointer(begin()); }
174
175 reference operator[](size_type idx) {
176 assert(idx < size());
177 return begin()[idx];
178 }
179 const_reference operator[](size_type idx) const {
180 assert(idx < size());
181 return begin()[idx];
182 }
183
184 reference front() {
185 assert(!empty());
186 return begin()[0];
187 }
188 const_reference front() const {
189 assert(!empty());
190 return begin()[0];
191 }
192
193 reference back() {
194 assert(!empty());
195 return end()[-1];
196 }
197 const_reference back() const {
198 assert(!empty());
199 return end()[-1];
200 }
201};
202
203/// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put
204/// method implementations that are designed to work with non-trivial T's.
205///
206/// We approximate is_trivially_copyable with trivial move/copy construction and
207/// trivial destruction. While the standard doesn't specify that you're allowed
208/// copy these types with memcpy, there is no way for the type to observe this.
209/// This catches the important case of std::pair<POD, POD>, which is not
210/// trivially assignable.
211template <typename T, bool = (is_trivially_copy_constructible<T>::value) &&
212 (is_trivially_move_constructible<T>::value) &&
213 std::is_trivially_destructible<T>::value>
214class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
215protected:
216 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
217
218 static void destroy_range(T *S, T *E) {
219 while (S != E) {
220 --E;
221 E->~T();
222 }
223 }
224
225 /// Move the range [I, E) into the uninitialized memory starting with "Dest",
226 /// constructing elements as needed.
227 template<typename It1, typename It2>
228 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
229 std::uninitialized_copy(std::make_move_iterator(I),
230 std::make_move_iterator(E), Dest);
231 }
232
233 /// Copy the range [I, E) onto the uninitialized memory starting with "Dest",
234 /// constructing elements as needed.
235 template<typename It1, typename It2>
236 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
237 std::uninitialized_copy(I, E, Dest);
238 }
239
240 /// Grow the allocated memory (without initializing new elements), doubling
241 /// the size of the allocated memory. Guarantees space for at least one more
242 /// element, or MinSize more elements if specified.
243 void grow(size_t MinSize = 0);
244
245public:
246 void push_back(const T &Elt) {
247 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
248 this->grow();
249 ::new ((void*) this->end()) T(Elt);
250 this->set_size(this->size() + 1);
251 }
252
253 void push_back(T &&Elt) {
254 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
255 this->grow();
256 ::new ((void*) this->end()) T(::std::move(Elt));
257 this->set_size(this->size() + 1);
258 }
259
260 void pop_back() {
261 this->set_size(this->size() - 1);
262 this->end()->~T();
263 }
264};
265
266// Define this out-of-line to dissuade the C++ compiler from inlining it.
267template <typename T, bool TriviallyCopyable>
268void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) {
269 // Ensure we can fit the new capacity.
270 // This is only going to be applicable when the capacity is 32 bit.
271 if (MinSize > this->SizeTypeMax())
272 report_bad_alloc_error("SmallVector capacity overflow during allocation");
273
274 // Ensure we can meet the guarantee of space for at least one more element.
275 // The above check alone will not catch the case where grow is called with a
276 // default MinCapacity of 0, but the current capacity cannot be increased.
277 // This is only going to be applicable when the capacity is 32 bit.
278 if (this->capacity() == this->SizeTypeMax())
279 report_bad_alloc_error("SmallVector capacity unable to grow");
280
281 // Always grow, even from zero.
282 size_t NewCapacity = size_t(NextPowerOf2(this->capacity() + 2));
283 NewCapacity = std::min(std::max(NewCapacity, MinSize), this->SizeTypeMax());
284 T *NewElts = static_cast<T*>(llvm::safe_malloc(NewCapacity*sizeof(T)));
285
286 // Move the elements over.
287 this->uninitialized_move(this->begin(), this->end(), NewElts);
288
289 // Destroy the original elements.
290 destroy_range(this->begin(), this->end());
291
292 // If this wasn't grown from the inline copy, deallocate the old space.
293 if (!this->isSmall())
294 free(this->begin());
295
296 this->BeginX = NewElts;
297 this->Capacity = NewCapacity;
298}
299
300/// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put
301/// method implementations that are designed to work with trivially copyable
302/// T's. This allows using memcpy in place of copy/move construction and
303/// skipping destruction.
304template <typename T>
305class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
306protected:
307 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
308
309 // No need to do a destroy loop for POD's.
310 static void destroy_range(T *, T *) {}
311
312 /// Move the range [I, E) onto the uninitialized memory
313 /// starting with "Dest", constructing elements into it as needed.
314 template<typename It1, typename It2>
315 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
316 // Just do a copy.
317 uninitialized_copy(I, E, Dest);
318 }
319
320 /// Copy the range [I, E) onto the uninitialized memory
321 /// starting with "Dest", constructing elements into it as needed.
322 template<typename It1, typename It2>
323 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
324 // Arbitrary iterator types; just use the basic implementation.
325 std::uninitialized_copy(I, E, Dest);
326 }
327
328 /// Copy the range [I, E) onto the uninitialized memory
329 /// starting with "Dest", constructing elements into it as needed.
330 template <typename T1, typename T2>
331 static void uninitialized_copy(
332 T1 *I, T1 *E, T2 *Dest,
333 std::enable_if_t<std::is_same<typename std::remove_const<T1>::type,
334 T2>::value> * = nullptr) {
335 // Use memcpy for PODs iterated by pointers (which includes SmallVector
336 // iterators): std::uninitialized_copy optimizes to memmove, but we can
337 // use memcpy here. Note that I and E are iterators and thus might be
338 // invalid for memcpy if they are equal.
339 if (I != E)
340 memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T));
341 }
342
343 /// Double the size of the allocated memory, guaranteeing space for at
344 /// least one more element or MinSize if specified.
345 void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); }
346
347public:
348 void push_back(const T &Elt) {
349 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
350 this->grow();
351 memcpy(reinterpret_cast<void *>(this->end()), &Elt, sizeof(T));
352 this->set_size(this->size() + 1);
353 }
354
355 void pop_back() { this->set_size(this->size() - 1); }
356};
357
358/// This class consists of common code factored out of the SmallVector class to
359/// reduce code duplication based on the SmallVector 'N' template parameter.
360template <typename T>
361class SmallVectorImpl : public SmallVectorTemplateBase<T> {
362 using SuperClass = SmallVectorTemplateBase<T>;
363
364public:
365 using iterator = typename SuperClass::iterator;
366 using const_iterator = typename SuperClass::const_iterator;
367 using reference = typename SuperClass::reference;
368 using size_type = typename SuperClass::size_type;
369
370protected:
371 // Default ctor - Initialize to empty.
372 explicit SmallVectorImpl(unsigned N)
373 : SmallVectorTemplateBase<T>(N) {}
374
375public:
376 SmallVectorImpl(const SmallVectorImpl &) = delete;
377
378 ~SmallVectorImpl() {
379 // Subclass has already destructed this vector's elements.
380 // If this wasn't grown from the inline copy, deallocate the old space.
381 if (!this->isSmall())
382 free(this->begin());
383 }
384
385 void clear() {
386 this->destroy_range(this->begin(), this->end());
387 this->Size = 0;
388 }
389
390 void resize(size_type N) {
391 if (N < this->size()) {
392 this->destroy_range(this->begin()+N, this->end());
393 this->set_size(N);
394 } else if (N > this->size()) {
395 if (this->capacity() < N)
396 this->grow(N);
397 for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
398 new (&*I) T();
399 this->set_size(N);
400 }
401 }
402
403 void resize(size_type N, const T &NV) {
404 if (N < this->size()) {
405 this->destroy_range(this->begin()+N, this->end());
406 this->set_size(N);
407 } else if (N > this->size()) {
408 if (this->capacity() < N)
409 this->grow(N);
410 std::uninitialized_fill(this->end(), this->begin()+N, NV);
411 this->set_size(N);
412 }
413 }
414
415 void reserve(size_type N) {
416 if (this->capacity() < N)
417 this->grow(N);
418 }
419
420 LLVM_NODISCARD T pop_back_val() {
421 T Result = ::std::move(this->back());
422 this->pop_back();
423 return Result;
424 }
425
426 void swap(SmallVectorImpl &RHS);
427
428 /// Add the specified range to the end of the SmallVector.
429 template <typename in_iter,
430 typename = std::enable_if_t<std::is_convertible<
431 typename std::iterator_traits<in_iter>::iterator_category,
432 std::input_iterator_tag>::value>>
433 void append(in_iter in_start, in_iter in_end) {
434 size_type NumInputs = std::distance(in_start, in_end);
435 if (NumInputs > this->capacity() - this->size())
436 this->grow(this->size()+NumInputs);
437
438 this->uninitialized_copy(in_start, in_end, this->end());
439 this->set_size(this->size() + NumInputs);
440 }
441
442 /// Append \p NumInputs copies of \p Elt to the end.
443 void append(size_type NumInputs, const T &Elt) {
444 if (NumInputs > this->capacity() - this->size())
445 this->grow(this->size()+NumInputs);
446
447 std::uninitialized_fill_n(this->end(), NumInputs, Elt);
448 this->set_size(this->size() + NumInputs);
449 }
450
451 void append(std::initializer_list<T> IL) {
452 append(IL.begin(), IL.end());
453 }
454
455 // FIXME: Consider assigning over existing elements, rather than clearing &
456 // re-initializing them - for all assign(...) variants.
457
458 void assign(size_type NumElts, const T &Elt) {
459 clear();
460 if (this->capacity() < NumElts)
461 this->grow(NumElts);
462 this->set_size(NumElts);
463 std::uninitialized_fill(this->begin(), this->end(), Elt);
464 }
465
466 template <typename in_iter,
467 typename = std::enable_if_t<std::is_convertible<
468 typename std::iterator_traits<in_iter>::iterator_category,
469 std::input_iterator_tag>::value>>
470 void assign(in_iter in_start, in_iter in_end) {
471 clear();
472 append(in_start, in_end);
473 }
474
475 void assign(std::initializer_list<T> IL) {
476 clear();
477 append(IL);
478 }
479
480 iterator erase(const_iterator CI) {
481 // Just cast away constness because this is a non-const member function.
482 iterator I = const_cast<iterator>(CI);
483
484 assert(I >= this->begin() && "Iterator to erase is out of bounds.");
485 assert(I < this->end() && "Erasing at past-the-end iterator.");
486
487 iterator N = I;
488 // Shift all elts down one.
489 std::move(I+1, this->end(), I);
490 // Drop the last elt.
491 this->pop_back();
492 return(N);
493 }
494
495 iterator erase(const_iterator CS, const_iterator CE) {
496 // Just cast away constness because this is a non-const member function.
497 iterator S = const_cast<iterator>(CS);
498 iterator E = const_cast<iterator>(CE);
499
500 assert(S >= this->begin() && "Range to erase is out of bounds.");
501 assert(S <= E && "Trying to erase invalid range.");
502 assert(E <= this->end() && "Trying to erase past the end.");
503
504 iterator N = S;
505 // Shift all elts down.
506 iterator I = std::move(E, this->end(), S);
507 // Drop the last elts.
508 this->destroy_range(I, this->end());
509 this->set_size(I - this->begin());
510 return(N);
511 }
512
513 iterator insert(iterator I, T &&Elt) {
514 if (I == this->end()) { // Important special case for empty vector.
515 this->push_back(::std::move(Elt));
516 return this->end()-1;
517 }
518
519 assert(I >= this->begin() && "Insertion iterator is out of bounds.");
520 assert(I <= this->end() && "Inserting past the end of the vector.");
521
522 if (this->size() >= this->capacity()) {
523 size_t EltNo = I-this->begin();
524 this->grow();
525 I = this->begin()+EltNo;
526 }
527
528 ::new ((void*) this->end()) T(::std::move(this->back()));
529 // Push everything else over.
530 std::move_backward(I, this->end()-1, this->end());
531 this->set_size(this->size() + 1);
532
533 // If we just moved the element we're inserting, be sure to update
534 // the reference.
535 T *EltPtr = &Elt;
536 if (I <= EltPtr && EltPtr < this->end())
537 ++EltPtr;
538
539 *I = ::std::move(*EltPtr);
540 return I;
541 }
542
543 iterator insert(iterator I, const T &Elt) {
544 if (I == this->end()) { // Important special case for empty vector.
545 this->push_back(Elt);
546 return this->end()-1;
547 }
548
549 assert(I >= this->begin() && "Insertion iterator is out of bounds.");
550 assert(I <= this->end() && "Inserting past the end of the vector.");
551
552 if (this->size() >= this->capacity()) {
553 size_t EltNo = I-this->begin();
554 this->grow();
555 I = this->begin()+EltNo;
556 }
557 ::new ((void*) this->end()) T(std::move(this->back()));
558 // Push everything else over.
559 std::move_backward(I, this->end()-1, this->end());
560 this->set_size(this->size() + 1);
561
562 // If we just moved the element we're inserting, be sure to update
563 // the reference.
564 const T *EltPtr = &Elt;
565 if (I <= EltPtr && EltPtr < this->end())
566 ++EltPtr;
567
568 *I = *EltPtr;
569 return I;
570 }
571
572 iterator insert(iterator I, size_type NumToInsert, const T &Elt) {
573 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
574 size_t InsertElt = I - this->begin();
575
576 if (I == this->end()) { // Important special case for empty vector.
577 append(NumToInsert, Elt);
578 return this->begin()+InsertElt;
579 }
580
581 assert(I >= this->begin() && "Insertion iterator is out of bounds.");
582 assert(I <= this->end() && "Inserting past the end of the vector.");
583
584 // Ensure there is enough space.
585 reserve(this->size() + NumToInsert);
586
587 // Recompute the iterator; reserve() may have invalidated it.
588 I = this->begin()+InsertElt;
589
590 // If there are more elements between the insertion point and the end of the
591 // vector than there are being inserted, we can use a simple approach to
592 // insertion. Since we already reserved space, we know that this won't
593 // reallocate the vector.
594 if (size_t(this->end()-I) >= NumToInsert) {
595 T *OldEnd = this->end();
596 append(std::move_iterator<iterator>(this->end() - NumToInsert),
597 std::move_iterator<iterator>(this->end()));
598
599 // Move the existing elements that get replaced.
600 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
601
602 std::fill_n(I, NumToInsert, Elt);
603 return I;
604 }
605
606 // Otherwise, we're inserting more elements than exist already, and we're
607 // not inserting at the end.
608
609 // Move over the elements that we're about to overwrite.
610 T *OldEnd = this->end();
611 this->set_size(this->size() + NumToInsert);
612 size_t NumOverwritten = OldEnd-I;
613 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
614
615 // Replace the overwritten part.
616 std::fill_n(I, NumOverwritten, Elt);
617
618 // Insert the non-overwritten middle part.
619 std::uninitialized_fill_n(OldEnd, NumToInsert-NumOverwritten, Elt);
620 return I;
621 }
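This overload splits on whether the tail past the insertion point is at least NumToInsert elements long. Both branches can be exercised with a sketch like the following (illustrative, assuming the header is available):

#include "llvm/ADT/SmallVector.h"
#include <cassert>

void fillInsertDemo() {
  llvm::SmallVector<int, 16> V{1, 2, 3, 4, 5};
  // Tail of 4 elements >= 2 inserted: the simple move_backward branch.
  V.insert(V.begin() + 1, 2, 9);   // {1, 9, 9, 2, 3, 4, 5}
  // Tail of 1 element < 3 inserted: overwrite + uninitialized-fill branch.
  V.insert(V.end() - 1, 3, 7);     // {1, 9, 9, 2, 3, 4, 7, 7, 7, 5}
  assert(V.size() == 10 && V[1] == 9 && V[8] == 7 && V[9] == 5);
}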
622
623 template <typename ItTy,
624 typename = std::enable_if_t<std::is_convertible<
625 typename std::iterator_traits<ItTy>::iterator_category,
626 std::input_iterator_tag>::value>>
627 iterator insert(iterator I, ItTy From, ItTy To) {
628 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
629 size_t InsertElt = I - this->begin();
630
631 if (I == this->end()) { // Important special case for empty vector.
632 append(From, To);
633 return this->begin()+InsertElt;
634 }
635
636 assert(I >= this->begin() && "Insertion iterator is out of bounds.");
637 assert(I <= this->end() && "Inserting past the end of the vector.");
638
639 size_t NumToInsert = std::distance(From, To);
640
641 // Ensure there is enough space.
642 reserve(this->size() + NumToInsert);
643
644 // Recompute the iterator; reserve() may have invalidated it.
645 I = this->begin()+InsertElt;
646
647 // If there are more elements between the insertion point and the end of the
648 // vector than there are being inserted, we can use a simple approach to
649 // insertion. Since we already reserved space, we know that this won't
650 // reallocate the vector.
651 if (size_t(this->end()-I) >= NumToInsert) {
652 T *OldEnd = this->end();
653 append(std::move_iterator<iterator>(this->end() - NumToInsert),
654 std::move_iterator<iterator>(this->end()));
655
656 // Move the existing elements that get replaced.
657 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
658
659 std::copy(From, To, I);
660 return I;
661 }
662
663 // Otherwise, we're inserting more elements than exist already, and we're
664 // not inserting at the end.
665
666 // Move over the elements that we're about to overwrite.
667 T *OldEnd = this->end();
668 this->set_size(this->size() + NumToInsert);
669 size_t NumOverwritten = OldEnd-I;
670 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
671
672 // Replace the overwritten part.
673 for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
674 *J = *From;
675 ++J; ++From;
676 }
677
678 // Insert the non-overwritten middle part.
679 this->uninitialized_copy(From, To, OldEnd);
680 return I;
681 }
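Note that std::distance(From, To) is taken before the range is traversed a second time, so although the enable_if only demands input iterators, a genuinely single-pass range would be consumed twice; forward iterators or better are what this code effectively requires. A usage sketch (illustrative names):

#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <vector>

void rangeInsertDemo() {
  std::vector<int> Src{7, 8, 9};
  llvm::SmallVector<int, 8> V{1, 2};
  // reserve() may reallocate, which is why I is recomputed from
  // InsertElt above rather than reused across the reserve call.
  V.insert(V.begin() + 1, Src.begin(), Src.end());
  assert(V.size() == 5 && V[1] == 7 && V[4] == 2);
}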
682
683 void insert(iterator I, std::initializer_list<T> IL) {
684 insert(I, IL.begin(), IL.end());
685 }
686
687 template <typename... ArgTypes> reference emplace_back(ArgTypes &&... Args) {
688 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
689 this->grow();
690 ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
691 this->set_size(this->size() + 1);
692 return this->back();
693 }
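emplace_back placement-constructs directly in the uninitialized slot at end(), forwarding the constructor arguments, so no temporary T is materialized. A sketch:

#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <string>
#include <utility>

void emplaceBackDemo() {
  llvm::SmallVector<std::pair<int, std::string>, 4> V;
  // The pair is constructed in place from (42, "answer"); the returned
  // reference aliases the new last element.
  auto &P = V.emplace_back(42, "answer");
  assert(P.first == 42 && V.size() == 1);
}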
694
695 SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
696
697 SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
698
699 bool operator==(const SmallVectorImpl &RHS) const {
700 if (this->size() != RHS.size()) return false;
701 return std::equal(this->begin(), this->end(), RHS.begin());
702 }
703 bool operator!=(const SmallVectorImpl &RHS) const {
704 return !(*this == RHS);
705 }
706
707 bool operator<(const SmallVectorImpl &RHS) const {
708 return std::lexicographical_compare(this->begin(), this->end(),
709 RHS.begin(), RHS.end());
710 }
711};
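Because the comparison operators live on SmallVectorImpl rather than SmallVector, vectors that differ only in inline capacity N compare element-wise. A sketch:

#include "llvm/ADT/SmallVector.h"
#include <cassert>

void compareDemo() {
  llvm::SmallVector<int, 2> A{1, 2, 3};  // spilled to the heap
  llvm::SmallVector<int, 8> B{1, 2, 3};  // still in inline storage
  // Same elements, different capacities and storage locations: equal.
  assert(A == B && !(A != B));
}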
712
713template <typename T>
714void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
715 if (this == &RHS) return;
716
717 // We can only avoid copying elements if neither vector is small.
718 if (!this->isSmall() && !RHS.isSmall()) {
719 std::swap(this->BeginX, RHS.BeginX);
720 std::swap(this->Size, RHS.Size);
721 std::swap(this->Capacity, RHS.Capacity);
722 return;
723 }
724 if (RHS.size() > this->capacity())
725 this->grow(RHS.size());
726 if (this->size() > RHS.capacity())
727 RHS.grow(this->size());
728
729 // Swap the shared elements.
730 size_t NumShared = this->size();
731 if (NumShared > RHS.size()) NumShared = RHS.size();
732 for (size_type i = 0; i != NumShared; ++i)
733 std::swap((*this)[i], RHS[i]);
734
735 // Copy over the extra elts.
736 if (this->size() > RHS.size()) {
737 size_t EltDiff = this->size() - RHS.size();
738 this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
739 RHS.set_size(RHS.size() + EltDiff);
740 this->destroy_range(this->begin()+NumShared, this->end());
741 this->set_size(NumShared);
742 } else if (RHS.size() > this->size()) {
743 size_t EltDiff = RHS.size() - this->size();
744 this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
745 this->set_size(this->size() + EltDiff);
746 this->destroy_range(RHS.begin()+NumShared, RHS.end());
747 RHS.set_size(NumShared);
748 }
749}
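swap is three pointer/word swaps only when neither side is using its inline storage; otherwise it falls back to element-wise swaps plus copies of the excess. A sketch of the cheap case, with sizes chosen to force both vectors onto the heap:

#include "llvm/ADT/SmallVector.h"
#include <cassert>

void swapDemo() {
  llvm::SmallVector<int, 2> A{1, 2, 3, 4};  // size > N, so heap-allocated
  llvm::SmallVector<int, 2> B{5, 6, 7};     // likewise
  A.swap(B);  // neither is small: just BeginX/Size/Capacity swaps
  assert(A.size() == 3 && B.size() == 4 && A[0] == 5 && B[0] == 1);
}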
750
751template <typename T>
752SmallVectorImpl<T> &SmallVectorImpl<T>::
753 operator=(const SmallVectorImpl<T> &RHS) {
754 // Avoid self-assignment.
755 if (this == &RHS) return *this;
756
757 // If we already have sufficient space, assign the common elements, then
758 // destroy any excess.
759 size_t RHSSize = RHS.size();
760 size_t CurSize = this->size();
761 if (CurSize >= RHSSize) {
762 // Assign common elements.
763 iterator NewEnd;
764 if (RHSSize)
765 NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
766 else
767 NewEnd = this->begin();
768
769 // Destroy excess elements.
770 this->destroy_range(NewEnd, this->end());
771
772 // Trim.
773 this->set_size(RHSSize);
774 return *this;
775 }
776
777 // If we have to grow to have enough elements, destroy the current elements.
778 // This allows us to avoid copying them during the grow.
779 // FIXME: don't do this if they're efficiently moveable.
780 if (this->capacity() < RHSSize) {
781 // Destroy current elements.
782 this->destroy_range(this->begin(), this->end());
783 this->set_size(0);
784 CurSize = 0;
785 this->grow(RHSSize);
786 } else if (CurSize) {
787 // Otherwise, use assignment for the already-constructed elements.
788 std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
789 }
790
791 // Copy construct the new elements in place.
792 this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
793 this->begin()+CurSize);
794
795 // Set end.
796 this->set_size(RHSSize);
797 return *this;
798}
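One observable consequence of the shrinking path above: assigning a smaller vector destroys the excess elements but keeps the existing allocation. A sketch:

#include "llvm/ADT/SmallVector.h"
#include <cassert>

void copyAssignDemo() {
  llvm::SmallVector<int, 2> A{1, 2, 3, 4, 5};
  llvm::SmallVector<int, 2> B{9};
  A = B;  // CurSize >= RHSSize: copy, destroy excess, trim; no realloc
  assert(A.size() == 1 && A[0] == 9 && A.capacity() >= 5);
}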
799
800template <typename T>
801SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
802 // Avoid self-assignment.
803 if (this == &RHS) return *this;
804
805 // If the RHS isn't small, clear this vector and then steal its buffer.
806 if (!RHS.isSmall()) {
807 this->destroy_range(this->begin(), this->end());
808 if (!this->isSmall()) free(this->begin());
809 this->BeginX = RHS.BeginX;
810 this->Size = RHS.Size;
811 this->Capacity = RHS.Capacity;
812 RHS.resetToSmall();
813 return *this;
814 }
815
816 // If we already have sufficient space, assign the common elements, then
817 // destroy any excess.
818 size_t RHSSize = RHS.size();
819 size_t CurSize = this->size();
820 if (CurSize >= RHSSize) {
821 // Assign common elements.
822 iterator NewEnd = this->begin();
823 if (RHSSize)
824 NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd);
825
826 // Destroy excess elements and trim the bounds.
827 this->destroy_range(NewEnd, this->end());
828 this->set_size(RHSSize);
829
830 // Clear the RHS.
831 RHS.clear();
832
833 return *this;
834 }
835
836 // If we have to grow to have enough elements, destroy the current elements.
837 // This allows us to avoid copying them during the grow.
838 // FIXME: this may not actually make any sense if we can efficiently move
839 // elements.
840 if (this->capacity() < RHSSize) {
841 // Destroy current elements.
842 this->destroy_range(this->begin(), this->end());
843 this->set_size(0);
844 CurSize = 0;
845 this->grow(RHSSize);
846 } else if (CurSize) {
847 // Otherwise, use assignment for the already-constructed elements.
848 std::move(RHS.begin(), RHS.begin()+CurSize, this->begin());
849 }
850
851 // Move-construct the new elements in place.
852 this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
853 this->begin()+CurSize);
854
855 // Set end.
856 this->set_size(RHSSize);
857
858 RHS.clear();
859 return *this;
860}
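When the RHS has spilled to the heap, move assignment steals its buffer outright and resets the RHS to an empty inline state; only a small RHS forces element-by-element moves. A sketch of the buffer-steal path:

#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <utility>

void moveAssignDemo() {
  llvm::SmallVector<int, 2> A;
  llvm::SmallVector<int, 2> B{1, 2, 3, 4};  // heap-allocated: size > N
  const int *OldData = B.data();
  A = std::move(B);
  // RHS was not small, so A now owns B's old buffer and B is empty.
  assert(A.data() == OldData && A.size() == 4 && B.empty());
}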
861
862/// Storage for the SmallVector elements. This is specialized for the N=0 case
863/// to avoid allocating unnecessary storage.
864template <typename T, unsigned N>
865struct SmallVectorStorage {
866 AlignedCharArrayUnion<T> InlineElts[N];
867};
868
869/// We need the storage to be properly aligned even for small-size of 0 so that
870/// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is
871/// well-defined.
872template <typename T> struct alignas(alignof(T)) SmallVectorStorage<T, 0> {};
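The N == 0 specialization means SmallVector<T, 0> is a valid, purely heap-backed vector, and the alignas keeps getFirstEl()'s pointer arithmetic well-defined. A sketch:

#include "llvm/ADT/SmallVector.h"
#include <cassert>

void zeroInlineDemo() {
  llvm::SmallVector<int, 0> V;  // no inline elements at all
  V.push_back(1);               // first push_back must heap-allocate
  assert(V.size() == 1 && V.front() == 1);
}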
873
874/// This is a 'vector' (really, a variable-sized array), optimized
875/// for the case when the array is small. It contains some number of elements
876/// in-place, which allows it to avoid heap allocation when the actual number of
877/// elements is below that threshold. This allows normal "small" cases to be
878/// fast without losing generality for large inputs.
879///
880/// Note that this does not attempt to be exception safe.
881///
882template <typename T, unsigned N>
883class LLVM_GSL_OWNER SmallVector : public SmallVectorImpl<T>,
884 SmallVectorStorage<T, N> {
885public:
886 SmallVector() : SmallVectorImpl<T>(N) {}
887
888 ~SmallVector() {
889 // Destroy the constructed elements in the vector.
890 this->destroy_range(this->begin(), this->end());
891 }
892
893 explicit SmallVector(size_t Size, const T &Value = T())
894 : SmallVectorImpl<T>(N) {
895 this->assign(Size, Value);
896 }
897
898 template <typename ItTy,
899 typename = std::enable_if_t<std::is_convertible<
900 typename std::iterator_traits<ItTy>::iterator_category,
901 std::input_iterator_tag>::value>>
902 SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
903 this->append(S, E);
904 }
905
906 template <typename RangeTy>
907 explicit SmallVector(const iterator_range<RangeTy> &R)
908 : SmallVectorImpl<T>(N) {
909 this->append(R.begin(), R.end());
910 }
911
912 SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
913 this->assign(IL);
914 }
915
916 SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) {
917 if (!RHS.empty())
918 SmallVectorImpl<T>::operator=(RHS);
919 }
920
921 const SmallVector &operator=(const SmallVector &RHS) {
922 SmallVectorImpl<T>::operator=(RHS);
923 return *this;
924 }
925
926 SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) {
927 if (!RHS.empty())
928 SmallVectorImpl<T>::operator=(::std::move(RHS));
929 }
930
931 SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) {
932 if (!RHS.empty())
933 SmallVectorImpl<T>::operator=(::std::move(RHS));
934 }
935
936 const SmallVector &operator=(SmallVector &&RHS) {
937 SmallVectorImpl<T>::operator=(::std::move(RHS));
938 return *this;
939 }
940
941 const SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
942 SmallVectorImpl<T>::operator=(::std::move(RHS));
943 return *this;
944 }
945
946 const SmallVector &operator=(std::initializer_list<T> IL) {
947 this->assign(IL);
948 return *this;
949 }
950};
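Since SmallVector<T, N> only adds inline storage on top of SmallVectorImpl<T>, the conventional LLVM style is for APIs to accept SmallVectorImpl<T>& so callers may choose any N. A sketch (appendSquares is an illustrative name):

#include "llvm/ADT/SmallVector.h"

// Accepting SmallVectorImpl<int>& keeps the function agnostic to the
// caller's choice of inline capacity.
static void appendSquares(llvm::SmallVectorImpl<int> &Out, int Count) {
  for (int I = 0; I < Count; ++I)
    Out.push_back(I * I);
}

void capacityAgnosticDemo() {
  llvm::SmallVector<int, 4> Small;
  llvm::SmallVector<int, 64> Big;
  appendSquares(Small, 3);  // one function serves both
  appendSquares(Big, 50);
}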
951
952template <typename T, unsigned N>
953inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
954 return X.capacity_in_bytes();
955}
956
957/// Given a range of type R, iterate the entire range and return a
958/// SmallVector with the elements of the range. This is useful, for example,
959/// when you want to iterate a range and then sort the results.
960template <unsigned Size, typename R>
961SmallVector<typename std::remove_const<typename std::remove_reference<
962 decltype(*std::begin(std::declval<R &>()))>::type>::type,
963 Size>
964to_vector(R &&Range) {
965 return {std::begin(Range), std::end(Range)};
966}
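In this snapshot the inline size is an explicit template argument, so callers write to_vector<Size>(Range) and the element type is deduced from the range. A sketch of the iterate-then-sort use case the comment mentions:

#include "llvm/ADT/SmallVector.h"
#include <algorithm>
#include <cassert>

void toVectorDemo() {
  int Arr[] = {3, 1, 2};
  auto V = llvm::to_vector<4>(Arr);  // SmallVector<int, 4>{3, 1, 2}
  std::sort(V.begin(), V.end());     // now safe to reorder the copy
  assert(V.front() == 1 && V.back() == 3);
}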
967
968} // end namespace llvm
969
970namespace std {
971
972 /// Implement std::swap in terms of SmallVector swap.
973 template<typename T>
974 inline void
975 swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) {
976 LHS.swap(RHS);
977 }
978
979 /// Implement std::swap in terms of SmallVector swap.
980 template<typename T, unsigned N>
981 inline void
982 swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) {
983 LHS.swap(RHS);
984 }
985
986} // end namespace std
987
988#endif // LLVM_ADT_SMALLVECTOR_H