Bug Summary

File: llvm/lib/Target/AArch64/AArch64StackTagging.cpp
Warning: line 661, column 11
Called C++ object pointer is null

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AArch64StackTagging.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875/build-llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875/llvm/lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875/build-llvm/lib/Target/AArch64 -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-10-30-042338-28487-1 -x c++ /build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875/llvm/lib/Target/AArch64/AArch64StackTagging.cpp

/build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875/llvm/lib/Target/AArch64/AArch64StackTagging.cpp

1//===- AArch64StackTagging.cpp - Stack tagging in IR --===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//===----------------------------------------------------------------------===//
10
11#include "AArch64.h"
12#include "AArch64InstrInfo.h"
13#include "AArch64Subtarget.h"
14#include "AArch64TargetMachine.h"
15#include "llvm/ADT/DenseMap.h"
16#include "llvm/ADT/DepthFirstIterator.h"
17#include "llvm/ADT/MapVector.h"
18#include "llvm/ADT/None.h"
19#include "llvm/ADT/Optional.h"
20#include "llvm/ADT/SmallVector.h"
21#include "llvm/ADT/Statistic.h"
22#include "llvm/Analysis/AliasAnalysis.h"
23#include "llvm/Analysis/CFG.h"
24#include "llvm/Analysis/LoopInfo.h"
25#include "llvm/Analysis/PostDominators.h"
26#include "llvm/Analysis/ScalarEvolution.h"
27#include "llvm/Analysis/ScalarEvolutionExpressions.h"
28#include "llvm/Analysis/StackSafetyAnalysis.h"
29#include "llvm/Analysis/ValueTracking.h"
30#include "llvm/CodeGen/LiveRegUnits.h"
31#include "llvm/CodeGen/MachineBasicBlock.h"
32#include "llvm/CodeGen/MachineFunction.h"
33#include "llvm/CodeGen/MachineFunctionPass.h"
34#include "llvm/CodeGen/MachineInstr.h"
35#include "llvm/CodeGen/MachineInstrBuilder.h"
36#include "llvm/CodeGen/MachineLoopInfo.h"
37#include "llvm/CodeGen/MachineOperand.h"
38#include "llvm/CodeGen/MachineRegisterInfo.h"
39#include "llvm/CodeGen/TargetPassConfig.h"
40#include "llvm/CodeGen/TargetRegisterInfo.h"
41#include "llvm/IR/DebugLoc.h"
42#include "llvm/IR/Dominators.h"
43#include "llvm/IR/Function.h"
44#include "llvm/IR/GetElementPtrTypeIterator.h"
45#include "llvm/IR/Instruction.h"
46#include "llvm/IR/Instructions.h"
47#include "llvm/IR/IntrinsicInst.h"
48#include "llvm/IR/IntrinsicsAArch64.h"
49#include "llvm/IR/Metadata.h"
50#include "llvm/InitializePasses.h"
51#include "llvm/Pass.h"
52#include "llvm/Support/Casting.h"
53#include "llvm/Support/Debug.h"
54#include "llvm/Support/raw_ostream.h"
55#include "llvm/Transforms/Utils/Local.h"
56#include <cassert>
57#include <iterator>
58#include <utility>
59
60using namespace llvm;
61
62#define DEBUG_TYPE"aarch64-stack-tagging" "aarch64-stack-tagging"
63
64static cl::opt<bool> ClMergeInit(
65 "stack-tagging-merge-init", cl::Hidden, cl::init(true), cl::ZeroOrMore,
66 cl::desc("merge stack variable initializers with tagging when possible"));
67
68static cl::opt<bool>
69 ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden,
70 cl::init(true), cl::ZeroOrMore,
71 cl::desc("Use Stack Safety analysis results"));
72
73static cl::opt<unsigned> ClScanLimit("stack-tagging-merge-init-scan-limit",
74 cl::init(40), cl::Hidden);
75
76static cl::opt<unsigned>
77 ClMergeInitSizeLimit("stack-tagging-merge-init-size-limit", cl::init(272),
78 cl::Hidden);
79
80static const Align kTagGranuleSize = Align(16);
81
82namespace {
83
84class InitializerBuilder {
85 uint64_t Size;
86 const DataLayout *DL;
87 Value *BasePtr;
88 Function *SetTagFn;
89 Function *SetTagZeroFn;
90 Function *StgpFn;
91
92 // List of initializers sorted by start offset.
93 struct Range {
94 uint64_t Start, End;
95 Instruction *Inst;
96 };
97 SmallVector<Range, 4> Ranges;
98 // 8-aligned offset => 8-byte initializer
99 // Missing keys are zero initialized.
100 std::map<uint64_t, Value *> Out;
101
102public:
103 InitializerBuilder(uint64_t Size, const DataLayout *DL, Value *BasePtr,
104 Function *SetTagFn, Function *SetTagZeroFn,
105 Function *StgpFn)
106 : Size(Size), DL(DL), BasePtr(BasePtr), SetTagFn(SetTagFn),
107 SetTagZeroFn(SetTagZeroFn), StgpFn(StgpFn) {}
108
109 bool addRange(uint64_t Start, uint64_t End, Instruction *Inst) {
110 auto I = std::lower_bound(
111 Ranges.begin(), Ranges.end(), Start,
112 [](const Range &LHS, uint64_t RHS) { return LHS.End <= RHS; });
113 if (I != Ranges.end() && End > I->Start) {
114 // Overlap - bail.
115 return false;
116 }
117 Ranges.insert(I, {Start, End, Inst});
118 return true;
119 }
120
121 bool addStore(uint64_t Offset, StoreInst *SI, const DataLayout *DL) {
122 int64_t StoreSize = DL->getTypeStoreSize(SI->getOperand(0)->getType());
123 if (!addRange(Offset, Offset + StoreSize, SI))
124 return false;
125 IRBuilder<> IRB(SI);
126 applyStore(IRB, Offset, Offset + StoreSize, SI->getOperand(0));
127 return true;
128 }
129
130 bool addMemSet(uint64_t Offset, MemSetInst *MSI) {
131 uint64_t StoreSize = cast<ConstantInt>(MSI->getLength())->getZExtValue();
132 if (!addRange(Offset, Offset + StoreSize, MSI))
133 return false;
134 IRBuilder<> IRB(MSI);
135 applyMemSet(IRB, Offset, Offset + StoreSize,
136 cast<ConstantInt>(MSI->getValue()));
137 return true;
138 }
139
140 void applyMemSet(IRBuilder<> &IRB, int64_t Start, int64_t End,
141 ConstantInt *V) {
142 // Out[] does not distinguish between zero and undef, and we already know
143 // that this memset does not overlap with any other initializer. Nothing to
144 // do for memset(0).
145 if (V->isZero())
146 return;
147 for (int64_t Offset = Start - Start % 8; Offset < End; Offset += 8) {
148 uint64_t Cst = 0x0101010101010101UL;
149 int LowBits = Offset < Start ? (Start - Offset) * 8 : 0;
150 if (LowBits)
151 Cst = (Cst >> LowBits) << LowBits;
152 int HighBits = End - Offset < 8 ? (8 - (End - Offset)) * 8 : 0;
153 if (HighBits)
154 Cst = (Cst << HighBits) >> HighBits;
155 ConstantInt *C =
156 ConstantInt::get(IRB.getInt64Ty(), Cst * V->getZExtValue());
157
158 Value *&CurrentV = Out[Offset];
159 if (!CurrentV) {
160 CurrentV = C;
161 } else {
162 CurrentV = IRB.CreateOr(CurrentV, C);
163 }
164 }
165 }
166
167 // Take a 64-bit slice of the value starting at the given offset (in bytes).
168 // Offset can be negative. Pad with zeroes on both sides when necessary.
169 Value *sliceValue(IRBuilder<> &IRB, Value *V, int64_t Offset) {
170 if (Offset > 0) {
171 V = IRB.CreateLShr(V, Offset * 8);
172 V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
173 } else if (Offset < 0) {
174 V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
175 V = IRB.CreateShl(V, -Offset * 8);
176 } else {
177 V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
178 }
179 return V;
180 }
181
182 void applyStore(IRBuilder<> &IRB, int64_t Start, int64_t End,
183 Value *StoredValue) {
184 StoredValue = flatten(IRB, StoredValue);
185 for (int64_t Offset = Start - Start % 8; Offset < End; Offset += 8) {
186 Value *V = sliceValue(IRB, StoredValue, Offset - Start);
187 Value *&CurrentV = Out[Offset];
188 if (!CurrentV) {
189 CurrentV = V;
190 } else {
191 CurrentV = IRB.CreateOr(CurrentV, V);
192 }
193 }
194 }
195
196 void generate(IRBuilder<> &IRB) {
197 LLVM_DEBUG(dbgs() << "Combined initializer\n");
198 // No initializers => the entire allocation is undef.
199 if (Ranges.empty()) {
200 emitUndef(IRB, 0, Size);
201 return;
202 }
203
204 // Look through 8-byte initializer list 16 bytes at a time;
205 // If one of the two 8-byte halves is non-zero non-undef, emit STGP.
206 // Otherwise, emit zeroes up to next available item.
207 uint64_t LastOffset = 0;
208 for (uint64_t Offset = 0; Offset < Size; Offset += 16) {
209 auto I1 = Out.find(Offset);
210 auto I2 = Out.find(Offset + 8);
211 if (I1 == Out.end() && I2 == Out.end())
212 continue;
213
214 if (Offset > LastOffset)
215 emitZeroes(IRB, LastOffset, Offset - LastOffset);
216
217 Value *Store1 = I1 == Out.end() ? Constant::getNullValue(IRB.getInt64Ty())
218 : I1->second;
219 Value *Store2 = I2 == Out.end() ? Constant::getNullValue(IRB.getInt64Ty())
220 : I2->second;
221 emitPair(IRB, Offset, Store1, Store2);
222 LastOffset = Offset + 16;
223 }
224
225 // memset(0) does not update Out[], therefore the tail can be either undef
226 // or zero.
227 if (LastOffset < Size)
228 emitZeroes(IRB, LastOffset, Size - LastOffset);
229
230 for (const auto &R : Ranges) {
231 R.Inst->eraseFromParent();
232 }
233 }
234
235 void emitZeroes(IRBuilder<> &IRB, uint64_t Offset, uint64_t Size) {
236 LLVM_DEBUG(dbgs() << " [" << Offset << ", " << Offset + Size
237 << ") zero\n");
238 Value *Ptr = BasePtr;
239 if (Offset)
240 Ptr = IRB.CreateConstGEP1_32(Ptr, Offset);
241 IRB.CreateCall(SetTagZeroFn,
242 {Ptr, ConstantInt::get(IRB.getInt64Ty(), Size)});
243 }
244
245 void emitUndef(IRBuilder<> &IRB, uint64_t Offset, uint64_t Size) {
246 LLVM_DEBUG(dbgs() << " [" << Offset << ", " << Offset + Size
247 << ") undef\n");
248 Value *Ptr = BasePtr;
249 if (Offset)
250 Ptr = IRB.CreateConstGEP1_32(Ptr, Offset);
251 IRB.CreateCall(SetTagFn, {Ptr, ConstantInt::get(IRB.getInt64Ty(), Size)});
252 }
253
254 void emitPair(IRBuilder<> &IRB, uint64_t Offset, Value *A, Value *B) {
255 LLVM_DEBUG(dbgs() << " [" << Offset << ", " << Offset + 16 << "):\n");
256 LLVM_DEBUG(dbgs() << " " << *A << "\n " << *B << "\n");
257 Value *Ptr = BasePtr;
258 if (Offset)
259 Ptr = IRB.CreateConstGEP1_32(Ptr, Offset);
260 IRB.CreateCall(StgpFn, {Ptr, A, B});
261 }
262
263 Value *flatten(IRBuilder<> &IRB, Value *V) {
264 if (V->getType()->isIntegerTy())
265 return V;
266 // vector of pointers -> vector of ints
267 if (VectorType *VecTy = dyn_cast<VectorType>(V->getType())) {
268 LLVMContext &Ctx = IRB.getContext();
269 Type *EltTy = VecTy->getElementType();
270 if (EltTy->isPointerTy()) {
271 uint32_t EltSize = DL->getTypeSizeInBits(EltTy);
272 auto *NewTy = FixedVectorType::get(
273 IntegerType::get(Ctx, EltSize),
274 cast<FixedVectorType>(VecTy)->getNumElements());
275 V = IRB.CreatePointerCast(V, NewTy);
276 }
277 }
278 return IRB.CreateBitOrPointerCast(
279 V, IRB.getIntNTy(DL->getTypeStoreSize(V->getType()) * 8));
280 }
281};
282
283class AArch64StackTagging : public FunctionPass {
284 struct AllocaInfo {
285 AllocaInst *AI;
286 SmallVector<IntrinsicInst *, 2> LifetimeStart;
287 SmallVector<IntrinsicInst *, 2> LifetimeEnd;
288 SmallVector<DbgVariableIntrinsic *, 2> DbgVariableIntrinsics;
289 int Tag; // -1 for non-tagged allocations
290 };
291
292 const bool MergeInit;
293 const bool UseStackSafety;
294
295public:
296 static char ID; // Pass ID, replacement for typeid
297
298 AArch64StackTagging(bool IsOptNone = false)
299 : FunctionPass(ID),
300 MergeInit(ClMergeInit.getNumOccurrences() ? ClMergeInit : !IsOptNone),
301 UseStackSafety(ClUseStackSafety.getNumOccurrences() ? ClUseStackSafety
302 : !IsOptNone) {
303 initializeAArch64StackTaggingPass(*PassRegistry::getPassRegistry());
304 }
305
306 bool isInterestingAlloca(const AllocaInst &AI);
307 void alignAndPadAlloca(AllocaInfo &Info);
308
309 void tagAlloca(AllocaInst *AI, Instruction *InsertBefore, Value *Ptr,
310 uint64_t Size);
311 void untagAlloca(AllocaInst *AI, Instruction *InsertBefore, uint64_t Size);
312
313 Instruction *collectInitializers(Instruction *StartInst, Value *StartPtr,
314 uint64_t Size, InitializerBuilder &IB);
315
316 Instruction *
317 insertBaseTaggedPointer(const MapVector<AllocaInst *, AllocaInfo> &Allocas,
318 const DominatorTree *DT);
319 bool runOnFunction(Function &F) override;
320
321 StringRef getPassName() const override { return "AArch64 Stack Tagging"; }
322
323private:
324 Function *F = nullptr;
325 Function *SetTagFunc = nullptr;
326 const DataLayout *DL = nullptr;
327 AAResults *AA = nullptr;
328 const StackSafetyGlobalInfo *SSI = nullptr;
329
330 void getAnalysisUsage(AnalysisUsage &AU) const override {
331 AU.setPreservesCFG();
332 if (UseStackSafety)
333 AU.addRequired<StackSafetyGlobalInfoWrapperPass>();
334 if (MergeInit)
335 AU.addRequired<AAResultsWrapperPass>();
336 }
337};
338
339} // end anonymous namespace
340
341char AArch64StackTagging::ID = 0;
342
343INITIALIZE_PASS_BEGIN(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging",
344 false, false)
345INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
346INITIALIZE_PASS_DEPENDENCY(StackSafetyGlobalInfoWrapperPass)
347INITIALIZE_PASS_END(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging",
348 false, false)
349
350FunctionPass *llvm::createAArch64StackTaggingPass(bool IsOptNone) {
351 return new AArch64StackTagging(IsOptNone);
352}
353
354Instruction *AArch64StackTagging::collectInitializers(Instruction *StartInst,
355 Value *StartPtr,
356 uint64_t Size,
357 InitializerBuilder &IB) {
358 MemoryLocation AllocaLoc{StartPtr, Size};
359 Instruction *LastInst = StartInst;
360 BasicBlock::iterator BI(StartInst);
361
362 unsigned Count = 0;
363 for (; Count < ClScanLimit && !BI->isTerminator(); ++BI) {
364 if (!isa<DbgInfoIntrinsic>(*BI))
365 ++Count;
366
367 if (isNoModRef(AA->getModRefInfo(&*BI, AllocaLoc)))
368 continue;
369
370 if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
371 // If the instruction is readnone, ignore it, otherwise bail out. We
372 // don't even allow readonly here because we don't want something like:
373 // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
374 if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
375 break;
376 continue;
377 }
378
379 if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
380 if (!NextStore->isSimple())
381 break;
382
383 // Check to see if this store is to a constant offset from the start ptr.
384 Optional<int64_t> Offset =
385 isPointerOffset(StartPtr, NextStore->getPointerOperand(), *DL);
386 if (!Offset)
387 break;
388
389 if (!IB.addStore(*Offset, NextStore, DL))
390 break;
391 LastInst = NextStore;
392 } else {
393 MemSetInst *MSI = cast<MemSetInst>(BI);
394
395 if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
396 break;
397
398 if (!isa<ConstantInt>(MSI->getValue()))
399 break;
400
401 // Check to see if this store is to a constant offset from the start ptr.
402 Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), *DL);
403 if (!Offset)
404 break;
405
406 if (!IB.addMemSet(*Offset, MSI))
407 break;
408 LastInst = MSI;
409 }
410 }
411 return LastInst;
412}
413
414bool AArch64StackTagging::isInterestingAlloca(const AllocaInst &AI) {
415 // FIXME: support dynamic allocas
416 bool IsInteresting =
417 AI.getAllocatedType()->isSized() && AI.isStaticAlloca() &&
418 // alloca() may be called with 0 size, ignore it.
419 AI.getAllocationSizeInBits(*DL).getValue() > 0 &&
420 // inalloca allocas are not treated as static, and we don't want
421 // dynamic alloca instrumentation for them as well.
422 !AI.isUsedWithInAlloca() &&
423 // swifterror allocas are register promoted by ISel
424 !AI.isSwiftError() &&
425 // safe allocas are not interesting
426 !(SSI && SSI->isSafe(AI));
427 return IsInteresting;
428}
429
430void AArch64StackTagging::tagAlloca(AllocaInst *AI, Instruction *InsertBefore,
431 Value *Ptr, uint64_t Size) {
432 auto SetTagZeroFunc =
433 Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_settag_zero);
434 auto StgpFunc =
435 Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_stgp);
436
437 InitializerBuilder IB(Size, DL, Ptr, SetTagFunc, SetTagZeroFunc, StgpFunc);
438 bool LittleEndian =
439 Triple(AI->getModule()->getTargetTriple()).isLittleEndian();
440 // Current implementation of initializer merging assumes little endianness.
441 if (MergeInit && !F->hasOptNone() && LittleEndian &&
442 Size < ClMergeInitSizeLimit) {
443 LLVM_DEBUG(dbgs() << "collecting initializers for " << *AI
444 << ", size = " << Size << "\n");
445 InsertBefore = collectInitializers(InsertBefore, Ptr, Size, IB);
446 }
447
448 IRBuilder<> IRB(InsertBefore);
449 IB.generate(IRB);
450}
451
452void AArch64StackTagging::untagAlloca(AllocaInst *AI, Instruction *InsertBefore,
453 uint64_t Size) {
454 IRBuilder<> IRB(InsertBefore);
455 IRB.CreateCall(SetTagFunc, {IRB.CreatePointerCast(AI, IRB.getInt8PtrTy()),
456 ConstantInt::get(IRB.getInt64Ty(), Size)});
457}
458
459Instruction *AArch64StackTagging::insertBaseTaggedPointer(
460 const MapVector<AllocaInst *, AllocaInfo> &Allocas,
461 const DominatorTree *DT) {
462 BasicBlock *PrologueBB = nullptr;
463 // Try sinking IRG as deep as possible to avoid hurting shrink wrap.
464 for (auto &I : Allocas) {
465 const AllocaInfo &Info = I.second;
466 AllocaInst *AI = Info.AI;
467 if (Info.Tag < 0)
468 continue;
469 if (!PrologueBB) {
470 PrologueBB = AI->getParent();
471 continue;
472 }
473 PrologueBB = DT->findNearestCommonDominator(PrologueBB, AI->getParent());
474 }
475 assert(PrologueBB);
476
477 IRBuilder<> IRB(&PrologueBB->front());
478 Function *IRG_SP =
479 Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_irg_sp);
480 Instruction *Base =
481 IRB.CreateCall(IRG_SP, {Constant::getNullValue(IRB.getInt64Ty())});
482 Base->setName("basetag");
483 return Base;
484}
485
486void AArch64StackTagging::alignAndPadAlloca(AllocaInfo &Info) {
487 const Align NewAlignment =
488 max(MaybeAlign(Info.AI->getAlignment()), kTagGranuleSize);
489 Info.AI->setAlignment(NewAlignment);
490
491 uint64_t Size = Info.AI->getAllocationSizeInBits(*DL).getValue() / 8;
492 uint64_t AlignedSize = alignTo(Size, kTagGranuleSize);
493 if (Size == AlignedSize)
494 return;
495
496 // Add padding to the alloca.
497 Type *AllocatedType =
498 Info.AI->isArrayAllocation()
499 ? ArrayType::get(
500 Info.AI->getAllocatedType(),
501 cast<ConstantInt>(Info.AI->getArraySize())->getZExtValue())
502 : Info.AI->getAllocatedType();
503 Type *PaddingType =
504 ArrayType::get(Type::getInt8Ty(F->getContext()), AlignedSize - Size);
505 Type *TypeWithPadding = StructType::get(AllocatedType, PaddingType);
506 auto *NewAI = new AllocaInst(
507 TypeWithPadding, Info.AI->getType()->getAddressSpace(), nullptr, "", Info.AI);
508 NewAI->takeName(Info.AI);
509 NewAI->setAlignment(Info.AI->getAlign());
510 NewAI->setUsedWithInAlloca(Info.AI->isUsedWithInAlloca());
511 NewAI->setSwiftError(Info.AI->isSwiftError());
512 NewAI->copyMetadata(*Info.AI);
513
514 auto *NewPtr = new BitCastInst(NewAI, Info.AI->getType(), "", Info.AI);
515 Info.AI->replaceAllUsesWith(NewPtr);
516 Info.AI->eraseFromParent();
517 Info.AI = NewAI;
518}
519
520// Helper function to check for post-dominance.
521static bool postDominates(const PostDominatorTree *PDT, const IntrinsicInst *A,
522 const IntrinsicInst *B) {
523 const BasicBlock *ABB = A->getParent();
524 const BasicBlock *BBB = B->getParent();
525
526 if (ABB != BBB)
527 return PDT->dominates(ABB, BBB);
528
529 for (const Instruction &I : *ABB) {
530 if (&I == B)
531 return true;
532 if (&I == A)
533 return false;
534 }
535 llvm_unreachable("Corrupt instruction list")::llvm::llvm_unreachable_internal("Corrupt instruction list",
"/build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875/llvm/lib/Target/AArch64/AArch64StackTagging.cpp"
, 535)
;
536}
537
538// FIXME: check for MTE extension
539bool AArch64StackTagging::runOnFunction(Function &Fn) {
540 if (!Fn.hasFnAttribute(Attribute::SanitizeMemTag))
1. Assuming the condition is false
2. Taking false branch
541 return false;
542
543 if (UseStackSafety)
3. Assuming field 'UseStackSafety' is false
4. Taking false branch
544 SSI = &getAnalysis<StackSafetyGlobalInfoWrapperPass>().getResult();
545 F = &Fn;
546 DL = &Fn.getParent()->getDataLayout();
547 if (MergeInit)
5. Assuming field 'MergeInit' is false
6. Taking false branch
548 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
549
550 MapVector<AllocaInst *, AllocaInfo> Allocas; // need stable iteration order
551 SmallVector<Instruction *, 8> RetVec;
552 SmallVector<Instruction *, 4> UnrecognizedLifetimes;
553
554 for (auto &BB : *F) {
555 for (BasicBlock::iterator IT = BB.begin(); IT != BB.end(); ++IT) {
556 Instruction *I = &*IT;
557 if (auto *AI = dyn_cast<AllocaInst>(I)) {
558 Allocas[AI].AI = AI;
559 continue;
560 }
561
562 if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(I)) {
563 if (auto *AI =
564 dyn_cast_or_null<AllocaInst>(DVI->getVariableLocation())) {
565 Allocas[AI].DbgVariableIntrinsics.push_back(DVI);
566 }
567 continue;
568 }
569
570 auto *II = dyn_cast<IntrinsicInst>(I);
571 if (II && (II->getIntrinsicID() == Intrinsic::lifetime_start ||
572 II->getIntrinsicID() == Intrinsic::lifetime_end)) {
573 AllocaInst *AI = findAllocaForValue(II->getArgOperand(1));
574 if (!AI) {
575 UnrecognizedLifetimes.push_back(I);
576 continue;
577 }
578 if (II->getIntrinsicID() == Intrinsic::lifetime_start)
579 Allocas[AI].LifetimeStart.push_back(II);
580 else
581 Allocas[AI].LifetimeEnd.push_back(II);
582 }
583
584 if (isa<ReturnInst>(I) || isa<ResumeInst>(I) || isa<CleanupReturnInst>(I))
585 RetVec.push_back(I);
586 }
587 }
588
589 if (Allocas.empty())
7. Assuming the condition is false
8. Taking false branch
590 return false;
591
592 int NextTag = 0;
593 int NumInterestingAllocas = 0;
594 for (auto &I : Allocas) {
595 AllocaInfo &Info = I.second;
596 assert(Info.AI);
9. Assuming field 'AI' is non-null
10. '?' condition is true
13. Assuming field 'AI' is non-null
14. '?' condition is true
17. Assuming field 'AI' is non-null
18. '?' condition is true
597
598 if (!isInterestingAlloca(*Info.AI)) {
11. Taking true branch
15. Taking true branch
19. Taking false branch
599 Info.Tag = -1;
600 continue;
12. Execution continues on line 594
16. Execution continues on line 594
601 }
602
603 alignAndPadAlloca(Info);
604 NumInterestingAllocas++;
605 Info.Tag = NextTag;
606 NextTag = (NextTag + 1) % 16;
607 }
608
609 if (NumInterestingAllocas == 0)
19.1. 'NumInterestingAllocas' is not equal to 0
20. Taking false branch
610 return true;
611
612 std::unique_ptr<DominatorTree> DeleteDT;
613 DominatorTree *DT = nullptr;
614 if (auto *P = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
20.1. 'P' is null
21. Taking false branch
615 DT = &P->getDomTree();
616
617 if (DT == nullptr && (NumInterestingAllocas
21.1
'NumInterestingAllocas' is <= 1
21.1
'NumInterestingAllocas' is <= 1
> 1 ||
23
Taking false branch
618 !F->hasFnAttribute(Attribute::OptimizeNone))) {
22
Assuming the condition is false
619 DeleteDT = std::make_unique<DominatorTree>(*F);
620 DT = DeleteDT.get();
621 }
622
623 std::unique_ptr<PostDominatorTree> DeletePDT;
624 PostDominatorTree *PDT = nullptr;
625 if (auto *P = getAnalysisIfAvailable<PostDominatorTreeWrapperPass>())
23.1. 'P' is null
24. Taking false branch
626 PDT = &P->getPostDomTree();
627
628 if (PDT == nullptr && !F->hasFnAttribute(Attribute::OptimizeNone)) {
25. Assuming the condition is false
26. Taking false branch
629 DeletePDT = std::make_unique<PostDominatorTree>(*F);
630 PDT = DeletePDT.get();
631 }
632
633 SetTagFunc =
634 Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_settag);
635
636 Instruction *Base = insertBaseTaggedPointer(Allocas, DT);
637
638 for (auto &I : Allocas) {
639 const AllocaInfo &Info = I.second;
640 AllocaInst *AI = Info.AI;
641 if (Info.Tag < 0)
27. Assuming field 'Tag' is >= 0
28. Taking false branch
642 continue;
643
644 // Replace alloca with tagp(alloca).
645 IRBuilder<> IRB(Info.AI->getNextNode());
646 Function *TagP = Intrinsic::getDeclaration(
647 F->getParent(), Intrinsic::aarch64_tagp, {Info.AI->getType()});
648 Instruction *TagPCall =
649 IRB.CreateCall(TagP, {Constant::getNullValue(Info.AI->getType()), Base,
650 ConstantInt::get(IRB.getInt64Ty(), Info.Tag)});
651 if (Info.AI->hasName())
29. Assuming the condition is false
30. Taking false branch
652 TagPCall->setName(Info.AI->getName() + ".tag");
653 Info.AI->replaceAllUsesWith(TagPCall);
654 TagPCall->setOperand(0, Info.AI);
655
656 if (UnrecognizedLifetimes.empty() && Info.LifetimeStart.size() == 1 &&
657 Info.LifetimeEnd.size() == 1) {
31. Calling 'SmallVectorBase::empty'
34. Returning from 'SmallVectorBase::empty'
35. Assuming the condition is true
36. Assuming the condition is true
37. Taking true branch
658 IntrinsicInst *Start = Info.LifetimeStart[0];
659 IntrinsicInst *End = Info.LifetimeEnd[0];
660 uint64_t Size =
661 dyn_cast<ConstantInt>(Start->getArgOperand(0))->getZExtValue();
38. Assuming the object is not a 'ConstantInt'
39. Called C++ object pointer is null
662 Size = alignTo(Size, kTagGranuleSize);
663 tagAlloca(AI, Start->getNextNode(), Start->getArgOperand(1), Size);
664 // We need to ensure that if we tag some object, we certainly untag it
665 // before the function exits.
666 if (PDT != nullptr && postDominates(PDT, End, Start)) {
667 untagAlloca(AI, End, Size);
668 } else {
669 SmallVector<Instruction *, 8> ReachableRetVec;
670 unsigned NumCoveredExits = 0;
671 for (auto &RI : RetVec) {
672 if (!isPotentiallyReachable(Start, RI, nullptr, DT))
673 continue;
674 ReachableRetVec.push_back(RI);
675 if (DT != nullptr && DT->dominates(End, RI))
676 ++NumCoveredExits;
677 }
678 // If there's a mix of covered and non-covered exits, just put the untag
679 // on exits, so we avoid the redundancy of untagging twice.
680 if (NumCoveredExits == ReachableRetVec.size()) {
681 untagAlloca(AI, End, Size);
682 } else {
683 for (auto &RI : ReachableRetVec)
684 untagAlloca(AI, RI, Size);
685 // We may have inserted untag outside of the lifetime interval.
686 // Remove the lifetime end call for this alloca.
687 End->eraseFromParent();
688 }
689 }
690 } else {
691 uint64_t Size = Info.AI->getAllocationSizeInBits(*DL).getValue() / 8;
692 Value *Ptr = IRB.CreatePointerCast(TagPCall, IRB.getInt8PtrTy());
693 tagAlloca(AI, &*IRB.GetInsertPoint(), Ptr, Size);
694 for (auto &RI : RetVec) {
695 untagAlloca(AI, RI, Size);
696 }
697 // We may have inserted tag/untag outside of any lifetime interval.
698 // Remove all lifetime intrinsics for this alloca.
699 for (auto &II : Info.LifetimeStart)
700 II->eraseFromParent();
701 for (auto &II : Info.LifetimeEnd)
702 II->eraseFromParent();
703 }
704
705 // Fixup debug intrinsics to point to the new alloca.
706 for (auto DVI : Info.DbgVariableIntrinsics)
707 DVI->setArgOperand(
708 0,
709 MetadataAsValue::get(F->getContext(), LocalAsMetadata::get(Info.AI)));
710 }
711
712 // If we have instrumented at least one alloca, all unrecognized lifetime
713 // intrinsics have to go.
714 for (auto &I : UnrecognizedLifetimes)
715 I->eraseFromParent();
716
717 return true;
718}

/build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875/llvm/include/llvm/ADT/SmallVector.h

1//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the SmallVector class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_ADT_SMALLVECTOR_H
14#define LLVM_ADT_SMALLVECTOR_H
15
16#include "llvm/ADT/iterator_range.h"
17#include "llvm/Support/Compiler.h"
18#include "llvm/Support/ErrorHandling.h"
19#include "llvm/Support/MathExtras.h"
20#include "llvm/Support/MemAlloc.h"
21#include "llvm/Support/type_traits.h"
22#include <algorithm>
23#include <cassert>
24#include <cstddef>
25#include <cstdlib>
26#include <cstring>
27#include <initializer_list>
28#include <iterator>
29#include <limits>
30#include <memory>
31#include <new>
32#include <type_traits>
33#include <utility>
34
35namespace llvm {
36
37/// This is all the stuff common to all SmallVectors.
38///
39/// The template parameter specifies the type which should be used to hold the
40/// Size and Capacity of the SmallVector, so it can be adjusted.
41/// Using 32 bit size is desirable to shrink the size of the SmallVector.
42/// Using 64 bit size is desirable for cases like SmallVector<char>, where a
43/// 32 bit size would limit the vector to ~4GB. SmallVectors are used for
44/// buffering bitcode output - which can exceed 4GB.
45template <class Size_T> class SmallVectorBase {
46protected:
47 void *BeginX;
48 Size_T Size = 0, Capacity;
49
50 /// The maximum value of the Size_T used.
51 static constexpr size_t SizeTypeMax() {
52 return std::numeric_limits<Size_T>::max();
53 }
54
55 SmallVectorBase() = delete;
56 SmallVectorBase(void *FirstEl, size_t TotalCapacity)
57 : BeginX(FirstEl), Capacity(TotalCapacity) {}
58
59 /// This is an implementation of the grow() method which only works
60 /// on POD-like data types and is out of line to reduce code duplication.
61 /// This function will report a fatal error if it cannot increase capacity.
62 void grow_pod(void *FirstEl, size_t MinSize, size_t TSize);
63
64 /// Report that MinSize doesn't fit into this vector's size type. Throws
65 /// std::length_error or calls report_fatal_error.
66 LLVM_ATTRIBUTE_NORETURN static void report_size_overflow(size_t MinSize);
67 /// Report that this vector is already at maximum capacity. Throws
68 /// std::length_error or calls report_fatal_error.
69 LLVM_ATTRIBUTE_NORETURN static void report_at_maximum_capacity();
70
71public:
72 size_t size() const { return Size; }
73 size_t capacity() const { return Capacity; }
74
75 LLVM_NODISCARD bool empty() const { return !Size; }
32. Assuming field 'Size' is 0, which participates in a condition later
33. Returning the value 1, which participates in a condition later
76
77 /// Set the array size to \p N, which the current array must have enough
78 /// capacity for.
79 ///
80 /// This does not construct or destroy any elements in the vector.
81 ///
82 /// Clients can use this in conjunction with capacity() to write past the end
83 /// of the buffer when they know that more elements are available, and only
84 /// update the size later. This avoids the cost of value initializing elements
85 /// which will only be overwritten.
86 void set_size(size_t N) {
87 assert(N <= capacity())((N <= capacity()) ? static_cast<void> (0) : __assert_fail
("N <= capacity()", "/build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875/llvm/include/llvm/ADT/SmallVector.h"
, 87, __PRETTY_FUNCTION__))
;
88 Size = N;
89 }
90};
91
92template <class T>
93using SmallVectorSizeType =
94 typename std::conditional<sizeof(T) < 4 && sizeof(void *) >= 8, uint64_t,
95 uint32_t>::type;
96
97/// Figure out the offset of the first element.
98template <class T, typename = void> struct SmallVectorAlignmentAndSize {
99 alignas(SmallVectorBase<SmallVectorSizeType<T>>) char Base[sizeof(
100 SmallVectorBase<SmallVectorSizeType<T>>)];
101 alignas(T) char FirstEl[sizeof(T)];
102};
103
104/// This is the part of SmallVectorTemplateBase which does not depend on whether
105/// the type T is a POD. The extra dummy template argument is used by ArrayRef
106/// to avoid unnecessarily requiring T to be complete.
107template <typename T, typename = void>
108class SmallVectorTemplateCommon
109 : public SmallVectorBase<SmallVectorSizeType<T>> {
110 using Base = SmallVectorBase<SmallVectorSizeType<T>>;
111
112 /// Find the address of the first element. For this pointer math to be valid
113 /// with small-size of 0 for T with lots of alignment, it's important that
114 /// SmallVectorStorage is properly-aligned even for small-size of 0.
115 void *getFirstEl() const {
116 return const_cast<void *>(reinterpret_cast<const void *>(
117 reinterpret_cast<const char *>(this) +
118 offsetof(SmallVectorAlignmentAndSize<T>, FirstEl)));
119 }
120 // Space after 'FirstEl' is clobbered, do not add any instance vars after it.
121
122protected:
123 SmallVectorTemplateCommon(size_t Size) : Base(getFirstEl(), Size) {}
124
125 void grow_pod(size_t MinSize, size_t TSize) {
126 Base::grow_pod(getFirstEl(), MinSize, TSize);
127 }
128
129 /// Return true if this is a smallvector which has not had dynamic
130 /// memory allocated for it.
131 bool isSmall() const { return this->BeginX == getFirstEl(); }
132
133 /// Put this vector in a state of being small.
134 void resetToSmall() {
135 this->BeginX = getFirstEl();
136 this->Size = this->Capacity = 0; // FIXME: Setting Capacity to 0 is suspect.
137 }
138
139public:
140 using size_type = size_t;
141 using difference_type = ptrdiff_t;
142 using value_type = T;
143 using iterator = T *;
144 using const_iterator = const T *;
145
146 using const_reverse_iterator = std::reverse_iterator<const_iterator>;
147 using reverse_iterator = std::reverse_iterator<iterator>;
148
149 using reference = T &;
150 using const_reference = const T &;
151 using pointer = T *;
152 using const_pointer = const T *;
153
154 using Base::capacity;
155 using Base::empty;
156 using Base::size;
157
158 // forward iterator creation methods.
159 iterator begin() { return (iterator)this->BeginX; }
160 const_iterator begin() const { return (const_iterator)this->BeginX; }
161 iterator end() { return begin() + size(); }
162 const_iterator end() const { return begin() + size(); }
163
164 // reverse iterator creation methods.
165 reverse_iterator rbegin() { return reverse_iterator(end()); }
166 const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
167 reverse_iterator rend() { return reverse_iterator(begin()); }
168 const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
169
170 size_type size_in_bytes() const { return size() * sizeof(T); }
171 size_type max_size() const {
172 return std::min(this->SizeTypeMax(), size_type(-1) / sizeof(T));
173 }
174
175 size_t capacity_in_bytes() const { return capacity() * sizeof(T); }
176
177 /// Return a pointer to the vector's buffer, even if empty().
178 pointer data() { return pointer(begin()); }
179 /// Return a pointer to the vector's buffer, even if empty().
180 const_pointer data() const { return const_pointer(begin()); }
181
182 reference operator[](size_type idx) {
183 assert(idx < size())((idx < size()) ? static_cast<void> (0) : __assert_fail
("idx < size()", "/build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875/llvm/include/llvm/ADT/SmallVector.h"
, 183, __PRETTY_FUNCTION__))
;
184 return begin()[idx];
185 }
186 const_reference operator[](size_type idx) const {
187 assert(idx < size())((idx < size()) ? static_cast<void> (0) : __assert_fail
("idx < size()", "/build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875/llvm/include/llvm/ADT/SmallVector.h"
, 187, __PRETTY_FUNCTION__))
;
188 return begin()[idx];
189 }
190
191 reference front() {
192 assert(!empty());
193 return begin()[0];
194 }
195 const_reference front() const {
196 assert(!empty());
197 return begin()[0];
198 }
199
200 reference back() {
201 assert(!empty());
202 return end()[-1];
203 }
204 const_reference back() const {
205 assert(!empty());
206 return end()[-1];
207 }
208};
209
210/// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put
211/// method implementations that are designed to work with non-trivial T's.
212///
213/// We approximate is_trivially_copyable with trivial move/copy construction and
214 trivial destruction. While the standard doesn't specify that you're allowed to
215/// copy these types with memcpy, there is no way for the type to observe this.
216/// This catches the important case of std::pair<POD, POD>, which is not
217/// trivially assignable.
218template <typename T, bool = (is_trivially_copy_constructible<T>::value) &&
219 (is_trivially_move_constructible<T>::value) &&
220 std::is_trivially_destructible<T>::value>
221class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
222protected:
223 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
224
225 static void destroy_range(T *S, T *E) {
226 while (S != E) {
227 --E;
228 E->~T();
229 }
230 }
231
232 /// Move the range [I, E) into the uninitialized memory starting with "Dest",
233 /// constructing elements as needed.
234 template<typename It1, typename It2>
235 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
236 std::uninitialized_copy(std::make_move_iterator(I),
237 std::make_move_iterator(E), Dest);
238 }
239
240 /// Copy the range [I, E) onto the uninitialized memory starting with "Dest",
241 /// constructing elements as needed.
242 template<typename It1, typename It2>
243 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
244 std::uninitialized_copy(I, E, Dest);
245 }
246
247 /// Grow the allocated memory (without initializing new elements), doubling
248 /// the size of the allocated memory. Guarantees space for at least one more
249 /// element, or MinSize more elements if specified.
250 void grow(size_t MinSize = 0);
251
252public:
253 void push_back(const T &Elt) {
254 if (LLVM_UNLIKELY(this->size() >= this->capacity())__builtin_expect((bool)(this->size() >= this->capacity
()), false)
)
255 this->grow();
256 ::new ((void*) this->end()) T(Elt);
257 this->set_size(this->size() + 1);
258 }
259
260 void push_back(T &&Elt) {
261 if (LLVM_UNLIKELY(this->size() >= this->capacity())__builtin_expect((bool)(this->size() >= this->capacity
()), false)
)
262 this->grow();
263 ::new ((void*) this->end()) T(::std::move(Elt));
264 this->set_size(this->size() + 1);
265 }
266
267 void pop_back() {
268 this->set_size(this->size() - 1);
269 this->end()->~T();
270 }
271};
272
273// Define this out-of-line to dissuade the C++ compiler from inlining it.
274template <typename T, bool TriviallyCopyable>
275void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) {
276 // Ensure we can fit the new capacity.
277 // This is only going to be applicable when the capacity is 32 bit.
278 if (MinSize > this->SizeTypeMax())
279 this->report_size_overflow(MinSize);
280
281 // Ensure we can meet the guarantee of space for at least one more element.
282 // The above check alone will not catch the case where grow is called with a
283 // default MinSize of 0, but the current capacity cannot be increased.
284 // This is only going to be applicable when the capacity is 32 bit.
285 if (this->capacity() == this->SizeTypeMax())
286 this->report_at_maximum_capacity();
287
288 // Always grow, even from zero.
289 size_t NewCapacity = size_t(NextPowerOf2(this->capacity() + 2));
290 NewCapacity = std::min(std::max(NewCapacity, MinSize), this->SizeTypeMax());
291 T *NewElts = static_cast<T*>(llvm::safe_malloc(NewCapacity*sizeof(T)));
292
293 // Move the elements over.
294 this->uninitialized_move(this->begin(), this->end(), NewElts);
295
296 // Destroy the original elements.
297 destroy_range(this->begin(), this->end());
298
299 // If this wasn't grown from the inline copy, deallocate the old space.
300 if (!this->isSmall())
301 free(this->begin());
302
303 this->BeginX = NewElts;
304 this->Capacity = NewCapacity;
305}
306
307/// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put
308/// method implementations that are designed to work with trivially copyable
309/// T's. This allows using memcpy in place of copy/move construction and
310/// skipping destruction.
311template <typename T>
312class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
313protected:
314 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
315
316 // No need to do a destroy loop for POD's.
317 static void destroy_range(T *, T *) {}
318
319 /// Move the range [I, E) onto the uninitialized memory
320 /// starting with "Dest", constructing elements into it as needed.
321 template<typename It1, typename It2>
322 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
323 // Just do a copy.
324 uninitialized_copy(I, E, Dest);
325 }
326
327 /// Copy the range [I, E) onto the uninitialized memory
328 /// starting with "Dest", constructing elements into it as needed.
329 template<typename It1, typename It2>
330 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
331 // Arbitrary iterator types; just use the basic implementation.
332 std::uninitialized_copy(I, E, Dest);
333 }
334
335 /// Copy the range [I, E) onto the uninitialized memory
336 /// starting with "Dest", constructing elements into it as needed.
337 template <typename T1, typename T2>
338 static void uninitialized_copy(
339 T1 *I, T1 *E, T2 *Dest,
340 std::enable_if_t<std::is_same<typename std::remove_const<T1>::type,
341 T2>::value> * = nullptr) {
342 // Use memcpy for PODs iterated by pointers (which includes SmallVector
343 // iterators): std::uninitialized_copy optimizes to memmove, but we can
344 // use memcpy here. Note that I and E are iterators and thus might be
345 // invalid for memcpy if they are equal.
346 if (I != E)
347 memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T));
348 }
349
350 /// Double the size of the allocated memory, guaranteeing space for at
351 /// least one more element or MinSize if specified.
352 void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); }
353
354public:
355 void push_back(const T &Elt) {
356 if (LLVM_UNLIKELY(this->size() >= this->capacity())__builtin_expect((bool)(this->size() >= this->capacity
()), false)
)
357 this->grow();
358 memcpy(reinterpret_cast<void *>(this->end()), &Elt, sizeof(T));
359 this->set_size(this->size() + 1);
360 }
361
362 void pop_back() { this->set_size(this->size() - 1); }
363};
364
365/// This class consists of common code factored out of the SmallVector class to
366/// reduce code duplication based on the SmallVector 'N' template parameter.
367template <typename T>
368class SmallVectorImpl : public SmallVectorTemplateBase<T> {
369 using SuperClass = SmallVectorTemplateBase<T>;
370
371public:
372 using iterator = typename SuperClass::iterator;
373 using const_iterator = typename SuperClass::const_iterator;
374 using reference = typename SuperClass::reference;
375 using size_type = typename SuperClass::size_type;
376
377protected:
378 // Default ctor - Initialize to empty.
379 explicit SmallVectorImpl(unsigned N)
380 : SmallVectorTemplateBase<T>(N) {}
381
382public:
383 SmallVectorImpl(const SmallVectorImpl &) = delete;
384
385 ~SmallVectorImpl() {
386 // Subclass has already destructed this vector's elements.
387 // If this wasn't grown from the inline copy, deallocate the old space.
388 if (!this->isSmall())
389 free(this->begin());
390 }
391
392 void clear() {
393 this->destroy_range(this->begin(), this->end());
394 this->Size = 0;
395 }
396
397 void resize(size_type N) {
398 if (N < this->size()) {
399 this->destroy_range(this->begin()+N, this->end());
400 this->set_size(N);
401 } else if (N > this->size()) {
402 if (this->capacity() < N)
403 this->grow(N);
404 for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
405 new (&*I) T();
406 this->set_size(N);
407 }
408 }
409
410 void resize(size_type N, const T &NV) {
411 if (N < this->size()) {
412 this->destroy_range(this->begin()+N, this->end());
413 this->set_size(N);
414 } else if (N > this->size()) {
415 if (this->capacity() < N)
416 this->grow(N);
417 std::uninitialized_fill(this->end(), this->begin()+N, NV);
418 this->set_size(N);
419 }
420 }
421
422 void reserve(size_type N) {
423 if (this->capacity() < N)
424 this->grow(N);
425 }
426
427 LLVM_NODISCARD T pop_back_val() {
428 T Result = ::std::move(this->back());
429 this->pop_back();
430 return Result;
431 }
432
433 void swap(SmallVectorImpl &RHS);
434
435 /// Add the specified range to the end of the SmallVector.
436 template <typename in_iter,
437 typename = std::enable_if_t<std::is_convertible<
438 typename std::iterator_traits<in_iter>::iterator_category,
439 std::input_iterator_tag>::value>>
440 void append(in_iter in_start, in_iter in_end) {
441 size_type NumInputs = std::distance(in_start, in_end);
442 if (NumInputs > this->capacity() - this->size())
443 this->grow(this->size()+NumInputs);
444
445 this->uninitialized_copy(in_start, in_end, this->end());
446 this->set_size(this->size() + NumInputs);
447 }
448
449 /// Append \p NumInputs copies of \p Elt to the end.
450 void append(size_type NumInputs, const T &Elt) {
451 if (NumInputs > this->capacity() - this->size())
452 this->grow(this->size()+NumInputs);
453
454 std::uninitialized_fill_n(this->end(), NumInputs, Elt);
455 this->set_size(this->size() + NumInputs);
456 }
457
458 void append(std::initializer_list<T> IL) {
459 append(IL.begin(), IL.end());
460 }
461
462 // FIXME: Consider assigning over existing elements, rather than clearing &
463 // re-initializing them - for all assign(...) variants.
464
465 void assign(size_type NumElts, const T &Elt) {
466 clear();
467 if (this->capacity() < NumElts)
468 this->grow(NumElts);
469 this->set_size(NumElts);
470 std::uninitialized_fill(this->begin(), this->end(), Elt);
471 }
472
473 template <typename in_iter,
474 typename = std::enable_if_t<std::is_convertible<
475 typename std::iterator_traits<in_iter>::iterator_category,
476 std::input_iterator_tag>::value>>
477 void assign(in_iter in_start, in_iter in_end) {
478 clear();
479 append(in_start, in_end);
480 }
481
482 void assign(std::initializer_list<T> IL) {
483 clear();
484 append(IL);
485 }
486
487 iterator erase(const_iterator CI) {
488 // Just cast away constness because this is a non-const member function.
489 iterator I = const_cast<iterator>(CI);
490
491 assert(I >= this->begin() && "Iterator to erase is out of bounds.")((I >= this->begin() && "Iterator to erase is out of bounds."
) ? static_cast<void> (0) : __assert_fail ("I >= this->begin() && \"Iterator to erase is out of bounds.\""
, "/build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875/llvm/include/llvm/ADT/SmallVector.h"
, 491, __PRETTY_FUNCTION__))
;
492 assert(I < this->end() && "Erasing at past-the-end iterator.")((I < this->end() && "Erasing at past-the-end iterator."
) ? static_cast<void> (0) : __assert_fail ("I < this->end() && \"Erasing at past-the-end iterator.\""
, "/build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875/llvm/include/llvm/ADT/SmallVector.h"
, 492, __PRETTY_FUNCTION__))
;
493
494 iterator N = I;
495 // Shift all elts down one.
496 std::move(I+1, this->end(), I);
497 // Drop the last elt.
498 this->pop_back();
499 return(N);
500 }
501
502 iterator erase(const_iterator CS, const_iterator CE) {
503 // Just cast away constness because this is a non-const member function.
504 iterator S = const_cast<iterator>(CS);
505 iterator E = const_cast<iterator>(CE);
506
507 assert(S >= this->begin() && "Range to erase is out of bounds.")((S >= this->begin() && "Range to erase is out of bounds."
) ? static_cast<void> (0) : __assert_fail ("S >= this->begin() && \"Range to erase is out of bounds.\""
, "/build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875/llvm/include/llvm/ADT/SmallVector.h"
, 507, __PRETTY_FUNCTION__))
;
508 assert(S <= E && "Trying to erase invalid range.")((S <= E && "Trying to erase invalid range.") ? static_cast
<void> (0) : __assert_fail ("S <= E && \"Trying to erase invalid range.\""
, "/build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875/llvm/include/llvm/ADT/SmallVector.h"
, 508, __PRETTY_FUNCTION__))
;
509 assert(E <= this->end() && "Trying to erase past the end.")((E <= this->end() && "Trying to erase past the end."
) ? static_cast<void> (0) : __assert_fail ("E <= this->end() && \"Trying to erase past the end.\""
, "/build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875/llvm/include/llvm/ADT/SmallVector.h"
, 509, __PRETTY_FUNCTION__))
;
510
511 iterator N = S;
512 // Shift all elts down.
513 iterator I = std::move(E, this->end(), S);
514 // Drop the last elts.
515 this->destroy_range(I, this->end());
516 this->set_size(I - this->begin());
517 return(N);
518 }
519
520 iterator insert(iterator I, T &&Elt) {
521 if (I == this->end()) { // Important special case for empty vector.
522 this->push_back(::std::move(Elt));
523 return this->end()-1;
524 }
525
526 assert(I >= this->begin() && "Insertion iterator is out of bounds.")((I >= this->begin() && "Insertion iterator is out of bounds."
) ? static_cast<void> (0) : __assert_fail ("I >= this->begin() && \"Insertion iterator is out of bounds.\""
, "/build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875/llvm/include/llvm/ADT/SmallVector.h"
, 526, __PRETTY_FUNCTION__))
;
527 assert(I <= this->end() && "Inserting past the end of the vector.")((I <= this->end() && "Inserting past the end of the vector."
) ? static_cast<void> (0) : __assert_fail ("I <= this->end() && \"Inserting past the end of the vector.\""
, "/build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875/llvm/include/llvm/ADT/SmallVector.h"
, 527, __PRETTY_FUNCTION__))
;
528
529 if (this->size() >= this->capacity()) {
530 size_t EltNo = I-this->begin();
531 this->grow();
532 I = this->begin()+EltNo;
533 }
534
535 ::new ((void*) this->end()) T(::std::move(this->back()));
536 // Push everything else over.
537 std::move_backward(I, this->end()-1, this->end());
538 this->set_size(this->size() + 1);
539
540 // If we just moved the element we're inserting, be sure to update
541 // the reference.
542 T *EltPtr = &Elt;
543 if (I <= EltPtr && EltPtr < this->end())
544 ++EltPtr;
545
546 *I = ::std::move(*EltPtr);
547 return I;
548 }
549
550 iterator insert(iterator I, const T &Elt) {
551 if (I == this->end()) { // Important special case for empty vector.
552 this->push_back(Elt);
553 return this->end()-1;
554 }
555
556 assert(I >= this->begin() && "Insertion iterator is out of bounds.")((I >= this->begin() && "Insertion iterator is out of bounds."
) ? static_cast<void> (0) : __assert_fail ("I >= this->begin() && \"Insertion iterator is out of bounds.\""
, "/build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875/llvm/include/llvm/ADT/SmallVector.h"
, 556, __PRETTY_FUNCTION__))
;
557 assert(I <= this->end() && "Inserting past the end of the vector.")((I <= this->end() && "Inserting past the end of the vector."
) ? static_cast<void> (0) : __assert_fail ("I <= this->end() && \"Inserting past the end of the vector.\""
, "/build/llvm-toolchain-snapshot-12~++20201029100616+6c2ad4cf875/llvm/include/llvm/ADT/SmallVector.h"
, 557, __PRETTY_FUNCTION__))
;
558
559 if (this->size() >= this->capacity()) {
560 size_t EltNo = I-this->begin();
561 this->grow();
562 I = this->begin()+EltNo;
563 }
564 ::new ((void*) this->end()) T(std::move(this->back()));
565 // Push everything else over.
566 std::move_backward(I, this->end()-1, this->end());
567 this->set_size(this->size() + 1);
568
569 // If we just moved the element we're inserting, be sure to update
570 // the reference.
571 const T *EltPtr = &Elt;
572 if (I <= EltPtr && EltPtr < this->end())
573 ++EltPtr;
574
575 *I = *EltPtr;
576 return I;
577 }
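
The copy overload follows the same shift-then-assign pattern, and because I is re-derived after grow(), the returned iterator stays valid even when the insertion forces a reallocation. A short sketch (illustrative values sized so grow() fires):

    #include "llvm/ADT/SmallVector.h"
    #include <cassert>

    int main() {
      llvm::SmallVector<int, 2> V{1, 3}; // full: size() == capacity() == 2
      int X = 2;
      auto It = V.insert(V.begin() + 1, X); // grow(), shift, then assign
      assert(*It == 2 && V.size() == 3);
    }
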
578
579 iterator insert(iterator I, size_type NumToInsert, const T &Elt) {
580 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
581 size_t InsertElt = I - this->begin();
582
583 if (I == this->end()) { // Important special case for empty vector.
584 append(NumToInsert, Elt);
585 return this->begin()+InsertElt;
586 }
587
588    assert(I >= this->begin() && "Insertion iterator is out of bounds.");
589    assert(I <= this->end() && "Inserting past the end of the vector.");
590
591 // Ensure there is enough space.
592 reserve(this->size() + NumToInsert);
593
594    // Recompute the iterator: reserve() may have invalidated it.
595 I = this->begin()+InsertElt;
596
597 // If there are more elements between the insertion point and the end of the
598 // range than there are being inserted, we can use a simple approach to
599 // insertion. Since we already reserved space, we know that this won't
600 // reallocate the vector.
601 if (size_t(this->end()-I) >= NumToInsert) {
602 T *OldEnd = this->end();
603 append(std::move_iterator<iterator>(this->end() - NumToInsert),
604 std::move_iterator<iterator>(this->end()));
605
606      // Move the existing elements that get replaced toward the end.
607 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
608
609 std::fill_n(I, NumToInsert, Elt);
610 return I;
611 }
612
613 // Otherwise, we're inserting more elements than exist already, and we're
614 // not inserting at the end.
615
616 // Move over the elements that we're about to overwrite.
617 T *OldEnd = this->end();
618 this->set_size(this->size() + NumToInsert);
619 size_t NumOverwritten = OldEnd-I;
620 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
621
622 // Replace the overwritten part.
623 std::fill_n(I, NumOverwritten, Elt);
624
625 // Insert the non-overwritten middle part.
626 std::uninitialized_fill_n(OldEnd, NumToInsert-NumOverwritten, Elt);
627 return I;
628 }
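
This overload splits on whether the tail [I, end()) holds at least NumToInsert elements: if so, the last NumToInsert elements are move-appended and the gap is filled; otherwise the whole tail is move-constructed into fresh storage and the fill is split between initialized and uninitialized memory. Either branch yields the same observable result; an illustrative sketch that takes the second branch:

    #include "llvm/ADT/SmallVector.h"
    #include <cassert>

    int main() {
      llvm::SmallVector<int, 8> V{1, 5};
      // The tail after begin()+1 holds one element but three are inserted,
      // so the "inserting more elements than exist" branch runs.
      V.insert(V.begin() + 1, 3, 0);
      assert((V == llvm::SmallVector<int, 8>{1, 0, 0, 0, 5}));
    }
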
629
630 template <typename ItTy,
631 typename = std::enable_if_t<std::is_convertible<
632 typename std::iterator_traits<ItTy>::iterator_category,
633 std::input_iterator_tag>::value>>
634 iterator insert(iterator I, ItTy From, ItTy To) {
635 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
636 size_t InsertElt = I - this->begin();
637
638 if (I == this->end()) { // Important special case for empty vector.
639 append(From, To);
640 return this->begin()+InsertElt;
641 }
642
643    assert(I >= this->begin() && "Insertion iterator is out of bounds.");
644    assert(I <= this->end() && "Inserting past the end of the vector.");
645
646 size_t NumToInsert = std::distance(From, To);
647
648 // Ensure there is enough space.
649 reserve(this->size() + NumToInsert);
650
651    // Recompute the iterator: reserve() may have invalidated it.
652 I = this->begin()+InsertElt;
653
654 // If there are more elements between the insertion point and the end of the
655 // range than there are being inserted, we can use a simple approach to
656 // insertion. Since we already reserved space, we know that this won't
657 // reallocate the vector.
658 if (size_t(this->end()-I) >= NumToInsert) {
659 T *OldEnd = this->end();
660 append(std::move_iterator<iterator>(this->end() - NumToInsert),
661 std::move_iterator<iterator>(this->end()));
662
663      // Move the existing elements that get replaced toward the end.
664 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
665
666 std::copy(From, To, I);
667 return I;
668 }
669
670 // Otherwise, we're inserting more elements than exist already, and we're
671 // not inserting at the end.
672
673 // Move over the elements that we're about to overwrite.
674 T *OldEnd = this->end();
675 this->set_size(this->size() + NumToInsert);
676 size_t NumOverwritten = OldEnd-I;
677 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
678
679 // Replace the overwritten part.
680 for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
681 *J = *From;
682 ++J; ++From;
683 }
684
685 // Insert the non-overwritten middle part.
686 this->uninitialized_copy(From, To, OldEnd);
687 return I;
688 }
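
The iterator-range overload mirrors the count overload but sizes the insertion with std::distance(From, To) up front; the range is then traversed a second time to copy, so in practice it needs a multi-pass (forward) range. Sketch (illustrative values):

    #include "llvm/ADT/SmallVector.h"
    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> Src{2, 3, 4};
      llvm::SmallVector<int, 8> V{1, 5};
      V.insert(V.begin() + 1, Src.begin(), Src.end());
      assert((V == llvm::SmallVector<int, 8>{1, 2, 3, 4, 5}));
    }
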
689
690 void insert(iterator I, std::initializer_list<T> IL) {
691 insert(I, IL.begin(), IL.end());
692 }
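
The initializer_list overload is a thin forwarder to the range form above, e.g.:

    #include "llvm/ADT/SmallVector.h"

    int main() {
      llvm::SmallVector<int, 8> V{1, 5};
      V.insert(V.begin() + 1, {2, 3, 4}); // V is now {1, 2, 3, 4, 5}
    }
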
693
694 template <typename... ArgTypes> reference emplace_back(ArgTypes &&... Args) {
695    if (LLVM_UNLIKELY(this->size() >= this->capacity()))
696 this->grow();
697 ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
698 this->set_size(this->size() + 1);
699 return this->back();
700 }
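
emplace_back placement-constructs directly into the uninitialized slot at end(), so no temporary T is created. Sketch (illustrative types):

    #include "llvm/ADT/SmallVector.h"
    #include <string>
    #include <utility>

    int main() {
      llvm::SmallVector<std::pair<int, std::string>, 4> V;
      V.emplace_back(1, "one"); // pair constructed in place at end()
    }
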
701
702 SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
703
704 SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
705
706 bool operator==(const SmallVectorImpl &RHS) const {
707 if (this->size() != RHS.size()) return false;
708 return std::equal(this->begin(), this->end(), RHS.begin());
709 }
710 bool operator!=(const SmallVectorImpl &RHS) const {
711 return !(*this == RHS);
712 }
713
714 bool operator<(const SmallVectorImpl &RHS) const {
715 return std::lexicographical_compare(this->begin(), this->end(),
716 RHS.begin(), RHS.end());
717 }
718};
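
Because the comparison operators live on SmallVectorImpl and compare element-wise, vectors that differ only in inline capacity compare as equal. Sketch:

    #include "llvm/ADT/SmallVector.h"
    #include <cassert>

    int main() {
      llvm::SmallVector<int, 2> A{1, 2, 3};
      llvm::SmallVector<int, 8> B{1, 2, 3};
      assert(A == B); // same elements; inline size is irrelevant
      assert((A < llvm::SmallVector<int, 8>{1, 2, 4})); // lexicographic
    }
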
719
720template <typename T>
721void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
722 if (this == &RHS) return;
723
724 // We can only avoid copying elements if neither vector is small.
725 if (!this->isSmall() && !RHS.isSmall()) {
726 std::swap(this->BeginX, RHS.BeginX);
727 std::swap(this->Size, RHS.Size);
728 std::swap(this->Capacity, RHS.Capacity);
729 return;
730 }
731 if (RHS.size() > this->capacity())
732 this->grow(RHS.size());
733 if (this->size() > RHS.capacity())
734 RHS.grow(this->size());
735
736 // Swap the shared elements.
737 size_t NumShared = this->size();
738 if (NumShared > RHS.size()) NumShared = RHS.size();
739 for (size_type i = 0; i != NumShared; ++i)
740 std::swap((*this)[i], RHS[i]);
741
742 // Copy over the extra elts.
743 if (this->size() > RHS.size()) {
744 size_t EltDiff = this->size() - RHS.size();
745 this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
746 RHS.set_size(RHS.size() + EltDiff);
747 this->destroy_range(this->begin()+NumShared, this->end());
748 this->set_size(NumShared);
749 } else if (RHS.size() > this->size()) {
750 size_t EltDiff = RHS.size() - this->size();
751 this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
752 this->set_size(this->size() + EltDiff);
753 this->destroy_range(RHS.begin()+NumShared, RHS.end());
754 RHS.set_size(NumShared);
755 }
756}
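
swap is a constant-time pointer exchange only when both vectors have already spilled to the heap; if either side still uses its inline buffer, elements are exchanged one at a time and the surplus is copy-constructed across. Sketch (sizes chosen so both sides are heap-allocated):

    #include "llvm/ADT/SmallVector.h"
    #include <cassert>

    int main() {
      llvm::SmallVector<int, 2> A{1, 2, 3}; // 3 > 2: heap-allocated
      llvm::SmallVector<int, 2> B{4, 5, 6}; // likewise
      A.swap(B);                            // three pointer/size swaps
      assert(A.front() == 4 && B.front() == 1);
    }
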
757
758template <typename T>
759SmallVectorImpl<T> &SmallVectorImpl<T>::
760 operator=(const SmallVectorImpl<T> &RHS) {
761 // Avoid self-assignment.
762 if (this == &RHS) return *this;
763
764 // If we already have sufficient space, assign the common elements, then
765 // destroy any excess.
766 size_t RHSSize = RHS.size();
767 size_t CurSize = this->size();
768 if (CurSize >= RHSSize) {
769 // Assign common elements.
770 iterator NewEnd;
771 if (RHSSize)
772 NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
773 else
774 NewEnd = this->begin();
775
776 // Destroy excess elements.
777 this->destroy_range(NewEnd, this->end());
778
779 // Trim.
780 this->set_size(RHSSize);
781 return *this;
782 }
783
784 // If we have to grow to have enough elements, destroy the current elements.
785 // This allows us to avoid copying them during the grow.
786 // FIXME: don't do this if they're efficiently moveable.
787 if (this->capacity() < RHSSize) {
788 // Destroy current elements.
789 this->destroy_range(this->begin(), this->end());
790 this->set_size(0);
791 CurSize = 0;
792 this->grow(RHSSize);
793 } else if (CurSize) {
794 // Otherwise, use assignment for the already-constructed elements.
795 std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
796 }
797
798 // Copy construct the new elements in place.
799 this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
800 this->begin()+CurSize);
801
802 // Set end.
803 this->set_size(RHSSize);
804 return *this;
805}
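
Copy assignment reuses capacity where possible: shrinking assigns the common prefix and destroys the excess, while growing beyond capacity() destroys everything first so that grow() never has to copy old elements. Sketch (illustrative values; shrinking case):

    #include "llvm/ADT/SmallVector.h"
    #include <cassert>

    int main() {
      llvm::SmallVector<int, 8> A{1, 2, 3, 4, 5};
      llvm::SmallVector<int, 8> B{9, 9};
      A = B; // CurSize >= RHSSize: assign two, destroy three
      assert(A.size() == 2 && A.capacity() >= 8); // storage is kept
    }
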
806
807template <typename T>
808SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
809 // Avoid self-assignment.
810 if (this == &RHS) return *this;
811
812 // If the RHS isn't small, clear this vector and then steal its buffer.
813 if (!RHS.isSmall()) {
814 this->destroy_range(this->begin(), this->end());
815 if (!this->isSmall()) free(this->begin());
816 this->BeginX = RHS.BeginX;
817 this->Size = RHS.Size;
818 this->Capacity = RHS.Capacity;
819 RHS.resetToSmall();
820 return *this;
821 }
822
823 // If we already have sufficient space, assign the common elements, then
824 // destroy any excess.
825 size_t RHSSize = RHS.size();
826 size_t CurSize = this->size();
827 if (CurSize >= RHSSize) {
828 // Assign common elements.
829 iterator NewEnd = this->begin();
830 if (RHSSize)
831 NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd);
832
833 // Destroy excess elements and trim the bounds.
834 this->destroy_range(NewEnd, this->end());
835 this->set_size(RHSSize);
836
837 // Clear the RHS.
838 RHS.clear();
839
840 return *this;
841 }
842
843 // If we have to grow to have enough elements, destroy the current elements.
844 // This allows us to avoid copying them during the grow.
845 // FIXME: this may not actually make any sense if we can efficiently move
846 // elements.
847 if (this->capacity() < RHSSize) {
848 // Destroy current elements.
849 this->destroy_range(this->begin(), this->end());
850 this->set_size(0);
851 CurSize = 0;
852 this->grow(RHSSize);
853 } else if (CurSize) {
854 // Otherwise, use assignment for the already-constructed elements.
855 std::move(RHS.begin(), RHS.begin()+CurSize, this->begin());
856 }
857
858 // Move-construct the new elements in place.
859 this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
860 this->begin()+CurSize);
861
862 // Set end.
863 this->set_size(RHSSize);
864
865 RHS.clear();
866 return *this;
867}
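
Move assignment steals the heap buffer in O(1) whenever the source has spilled out of its inline storage; a still-small source is moved element-wise instead. Sketch:

    #include "llvm/ADT/SmallVector.h"
    #include <cassert>
    #include <utility>

    int main() {
      llvm::SmallVector<int, 2> A{1, 2, 3, 4}; // 4 > 2: heap-allocated
      llvm::SmallVector<int, 2> B;
      B = std::move(A); // B takes A's buffer; A resets to small and empty
      assert(B.size() == 4 && A.empty());
    }
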
868
869/// Storage for the SmallVector elements. This is specialized for the N=0 case
870/// to avoid allocating unnecessary storage.
871template <typename T, unsigned N>
872struct SmallVectorStorage {
873 alignas(T) char InlineElts[N * sizeof(T)];
874};
875
876/// We need the storage to be properly aligned even for small-size of 0 so that
877/// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is
878/// well-defined.
879template <typename T> struct alignas(T) SmallVectorStorage<T, 0> {};
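
This zero-size specialization makes SmallVector<T, 0> a valid instantiation with no inline slots at all: effectively a leaner std::vector that still interoperates with SmallVectorImpl<T>-based APIs. Sketch (Node is an illustrative type, not an LLVM one):

    #include "llvm/ADT/SmallVector.h"

    struct Node {
      // No inline elements: only the begin/size/capacity header is stored.
      llvm::SmallVector<Node *, 0> Children;
    };

    int main() {
      Node N;
      N.Children.push_back(nullptr);
    }
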
880
881/// This is a 'vector' (really, a variable-sized array), optimized
882/// for the case when the array is small. It contains some number of elements
883/// in-place, which allows it to avoid heap allocation when the actual number of
884/// elements is below that threshold. This allows normal "small" cases to be
885/// fast without losing generality for large inputs.
886///
887/// Note that this does not attempt to be exception safe.
888///
889template <typename T, unsigned N>
890class LLVM_GSL_OWNER SmallVector : public SmallVectorImpl<T>,
891 SmallVectorStorage<T, N> {
892public:
893 SmallVector() : SmallVectorImpl<T>(N) {}
894
895 ~SmallVector() {
896 // Destroy the constructed elements in the vector.
897 this->destroy_range(this->begin(), this->end());
898 }
899
900 explicit SmallVector(size_t Size, const T &Value = T())
901 : SmallVectorImpl<T>(N) {
902 this->assign(Size, Value);
903 }
904
905 template <typename ItTy,
906 typename = std::enable_if_t<std::is_convertible<
907 typename std::iterator_traits<ItTy>::iterator_category,
908 std::input_iterator_tag>::value>>
909 SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
910 this->append(S, E);
911 }
912
913 template <typename RangeTy>
914 explicit SmallVector(const iterator_range<RangeTy> &R)
915 : SmallVectorImpl<T>(N) {
916 this->append(R.begin(), R.end());
917 }
918
919 SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
920 this->assign(IL);
921 }
922
923 SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) {
924 if (!RHS.empty())
925 SmallVectorImpl<T>::operator=(RHS);
926 }
927
928 const SmallVector &operator=(const SmallVector &RHS) {
929 SmallVectorImpl<T>::operator=(RHS);
930 return *this;
931 }
932
933 SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) {
934 if (!RHS.empty())
935 SmallVectorImpl<T>::operator=(::std::move(RHS));
936 }
937
938 SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) {
939 if (!RHS.empty())
940 SmallVectorImpl<T>::operator=(::std::move(RHS));
941 }
942
943 const SmallVector &operator=(SmallVector &&RHS) {
944 SmallVectorImpl<T>::operator=(::std::move(RHS));
945 return *this;
946 }
947
948 const SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
949 SmallVectorImpl<T>::operator=(::std::move(RHS));
950 return *this;
951 }
952
953 const SmallVector &operator=(std::initializer_list<T> IL) {
954 this->assign(IL);
955 return *this;
956 }
957};
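
A common idiom that falls out of this class split: functions take SmallVectorImpl<T>& so each caller can pick its own inline size without affecting the callee. Sketch (appendSquares is an illustrative helper, not an LLVM API):

    #include "llvm/ADT/SmallVector.h"

    // The callee is agnostic to the caller's inline capacity.
    static void appendSquares(llvm::SmallVectorImpl<int> &Out, int N) {
      for (int I = 0; I < N; ++I)
        Out.push_back(I * I);
    }

    int main() {
      llvm::SmallVector<int, 16> V; // the caller chooses 16 inline slots
      appendSquares(V, 10);
    }
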
958
959template <typename T, unsigned N>
960inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
961 return X.capacity_in_bytes();
962}
963
964/// Given a range of type R, iterate the entire range and return a
965/// SmallVector with elements of the vector. This is useful, for example,
966/// when you want to iterate a range and then sort the results.
967template <unsigned Size, typename R>
968SmallVector<typename std::remove_const<typename std::remove_reference<
969 decltype(*std::begin(std::declval<R &>()))>::type>::type,
970 Size>
971to_vector(R &&Range) {
972 return {std::begin(Range), std::end(Range)};
973}
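
As the doc comment says, to_vector materializes a read-only range into something mutable; the inline size is supplied explicitly and the element type is deduced from the range. Sketch:

    #include "llvm/ADT/SmallVector.h"
    #include <set>

    int main() {
      std::set<int> S{3, 1, 2};
      auto V = llvm::to_vector<4>(S); // llvm::SmallVector<int, 4> {1, 2, 3}
      V.push_back(7);                 // unlike the set, freely mutable
    }
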
974
975} // end namespace llvm
976
977namespace std {
978
979 /// Implement std::swap in terms of SmallVector swap.
980 template<typename T>
981 inline void
982 swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) {
983 LHS.swap(RHS);
984 }
985
986 /// Implement std::swap in terms of SmallVector swap.
987 template<typename T, unsigned N>
988 inline void
989 swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) {
990 LHS.swap(RHS);
991 }
992
993} // end namespace std
994
995#endif // LLVM_ADT_SMALLVECTOR_H