File: llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
Warning: line 490, column 25: Called C++ object pointer is null
1 | //===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===// | ||||
2 | // | ||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||
6 | // | ||||
7 | //===----------------------------------------------------------------------===// | ||||
8 | // | ||||
9 | // This pass performs various transformations related to eliminating memcpy | ||||
10 | // calls, or transforming sets of stores into memset's. | ||||
11 | // | ||||
12 | //===----------------------------------------------------------------------===// | ||||
13 | |||||
14 | #include "llvm/Transforms/Scalar/MemCpyOptimizer.h" | ||||
15 | #include "llvm/ADT/DenseSet.h" | ||||
16 | #include "llvm/ADT/None.h" | ||||
17 | #include "llvm/ADT/STLExtras.h" | ||||
18 | #include "llvm/ADT/SmallVector.h" | ||||
19 | #include "llvm/ADT/Statistic.h" | ||||
20 | #include "llvm/ADT/iterator_range.h" | ||||
21 | #include "llvm/Analysis/AliasAnalysis.h" | ||||
22 | #include "llvm/Analysis/AssumptionCache.h" | ||||
23 | #include "llvm/Analysis/GlobalsModRef.h" | ||||
24 | #include "llvm/Analysis/Loads.h" | ||||
25 | #include "llvm/Analysis/MemoryLocation.h" | ||||
26 | #include "llvm/Analysis/MemorySSA.h" | ||||
27 | #include "llvm/Analysis/MemorySSAUpdater.h" | ||||
28 | #include "llvm/Analysis/TargetLibraryInfo.h" | ||||
29 | #include "llvm/Analysis/ValueTracking.h" | ||||
30 | #include "llvm/IR/Argument.h" | ||||
31 | #include "llvm/IR/BasicBlock.h" | ||||
32 | #include "llvm/IR/Constants.h" | ||||
33 | #include "llvm/IR/DataLayout.h" | ||||
34 | #include "llvm/IR/DerivedTypes.h" | ||||
35 | #include "llvm/IR/Dominators.h" | ||||
36 | #include "llvm/IR/Function.h" | ||||
37 | #include "llvm/IR/GetElementPtrTypeIterator.h" | ||||
38 | #include "llvm/IR/GlobalVariable.h" | ||||
39 | #include "llvm/IR/IRBuilder.h" | ||||
40 | #include "llvm/IR/InstrTypes.h" | ||||
41 | #include "llvm/IR/Instruction.h" | ||||
42 | #include "llvm/IR/Instructions.h" | ||||
43 | #include "llvm/IR/IntrinsicInst.h" | ||||
44 | #include "llvm/IR/Intrinsics.h" | ||||
45 | #include "llvm/IR/LLVMContext.h" | ||||
46 | #include "llvm/IR/Module.h" | ||||
47 | #include "llvm/IR/Operator.h" | ||||
48 | #include "llvm/IR/PassManager.h" | ||||
49 | #include "llvm/IR/Type.h" | ||||
50 | #include "llvm/IR/User.h" | ||||
51 | #include "llvm/IR/Value.h" | ||||
52 | #include "llvm/InitializePasses.h" | ||||
53 | #include "llvm/Pass.h" | ||||
54 | #include "llvm/Support/Casting.h" | ||||
55 | #include "llvm/Support/Debug.h" | ||||
56 | #include "llvm/Support/MathExtras.h" | ||||
57 | #include "llvm/Support/raw_ostream.h" | ||||
58 | #include "llvm/Transforms/Scalar.h" | ||||
59 | #include "llvm/Transforms/Utils/Local.h" | ||||
60 | #include <algorithm> | ||||
61 | #include <cassert> | ||||
62 | #include <cstdint> | ||||
63 | #include <utility> | ||||
64 | |||||
65 | using namespace llvm; | ||||
66 | |||||
67 | #define DEBUG_TYPE "memcpyopt" | ||||
68 | |||||
69 | static cl::opt<bool> EnableMemCpyOptWithoutLibcalls( | ||||
70 | "enable-memcpyopt-without-libcalls", cl::init(false), cl::Hidden, | ||||
71 | cl::ZeroOrMore, | ||||
72 | cl::desc("Enable memcpyopt even when libcalls are disabled")); | ||||
73 | |||||
74 | STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted"); | ||||
75 | STATISTIC(NumMemSetInfer, "Number of memsets inferred"); | ||||
76 | STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy"); | ||||
77 | STATISTIC(NumCpyToSet, "Number of memcpys converted to memset"); | ||||
78 | STATISTIC(NumCallSlot, "Number of call slot optimizations performed"); | ||||
79 | |||||
80 | namespace { | ||||
81 | |||||
82 | /// Represents a range of memset'd bytes with the ByteVal value. | ||||
83 | /// This allows us to analyze stores like: | ||||
84 | /// store 0 -> P+1 | ||||
85 | /// store 0 -> P+0 | ||||
86 | /// store 0 -> P+3 | ||||
87 | /// store 0 -> P+2 | ||||
88 | /// which sometimes happens with stores to arrays of structs etc. When we see | ||||
89 | /// the first store, we make a range [1, 2). The second store extends the range | ||||
90 | /// to [0, 2). The third makes a new range [3, 4). The fourth store joins the | ||||
91 | /// two ranges into [0, 4) which is memset'able. | ||||
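| /// For example, once joined, the four one-byte stores above can be emitted | ||||
| /// as a single memset(P, 0, 4). | ||||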
92 | struct MemsetRange { | ||||
93 | // Start/End - A semi range that describes the span that this range covers. | ||||
94 | // The range is closed at the start and open at the end: [Start, End). | ||||
95 | int64_t Start, End; | ||||
96 | |||||
97 | /// StartPtr - The getelementptr instruction that points to the start of the | ||||
98 | /// range. | ||||
99 | Value *StartPtr; | ||||
100 | |||||
101 | /// Alignment - The known alignment of the first store. | ||||
102 | unsigned Alignment; | ||||
103 | |||||
104 | /// TheStores - The actual stores that make up this range. | ||||
105 | SmallVector<Instruction*, 16> TheStores; | ||||
106 | |||||
107 | bool isProfitableToUseMemset(const DataLayout &DL) const; | ||||
108 | }; | ||||
109 | |||||
110 | } // end anonymous namespace | ||||
111 | |||||
112 | bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const { | ||||
113 | // If we found at least 4 stores to merge or at least 16 bytes, use memset. | ||||
114 | if (TheStores.size() >= 4 || End-Start >= 16) return true; | ||||
115 | |||||
116 | // If there is nothing to merge, don't do anything. | ||||
117 | if (TheStores.size() < 2) return false; | ||||
118 | |||||
119 | // If any of the stores are a memset, then it is always good to extend the | ||||
120 | // memset. | ||||
121 | for (Instruction *SI : TheStores) | ||||
122 | if (!isa<StoreInst>(SI)) | ||||
123 | return true; | ||||
124 | |||||
125 | // Assume that the code generator is capable of merging pairs of stores | ||||
126 | // together if it wants to. | ||||
127 | if (TheStores.size() == 2) return false; | ||||
128 | |||||
129 | // If we have fewer than 8 stores, it can still be worthwhile to do this. | ||||
130 | // For example, merging 4 i8 stores into an i32 store is useful almost always. | ||||
131 | // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the | ||||
132 | // memset will be split into 2 32-bit stores anyway) and doing so can | ||||
133 | // pessimize the llvm optimizer. | ||||
134 | // | ||||
135 | // Since we don't have perfect knowledge here, make some assumptions: assume | ||||
136 | // the maximum GPR width is the same size as the largest legal integer | ||||
137 | // size. If so, check to see whether we will end up actually reducing the | ||||
138 | // number of stores used. | ||||
139 | unsigned Bytes = unsigned(End-Start); | ||||
140 | unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8; | ||||
141 | if (MaxIntSize == 0) | ||||
142 | MaxIntSize = 1; | ||||
143 | unsigned NumPointerStores = Bytes / MaxIntSize; | ||||
144 | |||||
145 | // Assume the remaining bytes, if any, are done a byte at a time. | ||||
146 | unsigned NumByteStores = Bytes % MaxIntSize; | ||||
147 | |||||
148 | // If we will reduce the # stores (according to this heuristic), do the | ||||
149 | // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32 | ||||
150 | // etc. | ||||
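| // Worked example (assuming the largest legal integer type is i32): three | ||||
| // adjacent i8 stores give Bytes = 3, so NumPointerStores = 3 / 4 = 0 and | ||||
| // NumByteStores = 3 % 4 = 3; since 3 > 0 + 3 is false, the stores are kept. | ||||
| // (Four such stores would already have hit the size() >= 4 early return.) | ||||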
151 | return TheStores.size() > NumPointerStores+NumByteStores; | ||||
152 | } | ||||
153 | |||||
154 | namespace { | ||||
155 | |||||
156 | class MemsetRanges { | ||||
157 | using range_iterator = SmallVectorImpl<MemsetRange>::iterator; | ||||
158 | |||||
159 | /// A sorted list of the memset ranges. | ||||
160 | SmallVector<MemsetRange, 8> Ranges; | ||||
161 | |||||
162 | const DataLayout &DL; | ||||
163 | |||||
164 | public: | ||||
165 | MemsetRanges(const DataLayout &DL) : DL(DL) {} | ||||
166 | |||||
167 | using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator; | ||||
168 | |||||
169 | const_iterator begin() const { return Ranges.begin(); } | ||||
170 | const_iterator end() const { return Ranges.end(); } | ||||
171 | bool empty() const { return Ranges.empty(); } | ||||
172 | |||||
173 | void addInst(int64_t OffsetFromFirst, Instruction *Inst) { | ||||
174 | if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) | ||||
175 | addStore(OffsetFromFirst, SI); | ||||
176 | else | ||||
177 | addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst)); | ||||
178 | } | ||||
179 | |||||
180 | void addStore(int64_t OffsetFromFirst, StoreInst *SI) { | ||||
181 | int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType()); | ||||
182 | |||||
183 | addRange(OffsetFromFirst, StoreSize, SI->getPointerOperand(), | ||||
184 | SI->getAlign().value(), SI); | ||||
185 | } | ||||
186 | |||||
187 | void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) { | ||||
188 | int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue(); | ||||
189 | addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(), MSI); | ||||
190 | } | ||||
191 | |||||
192 | void addRange(int64_t Start, int64_t Size, Value *Ptr, | ||||
193 | unsigned Alignment, Instruction *Inst); | ||||
194 | }; | ||||
195 | |||||
196 | } // end anonymous namespace | ||||
197 | |||||
198 | /// Add a new store to the MemsetRanges data structure. This adds a | ||||
199 | /// new range for the specified store at the specified offset, merging into | ||||
200 | /// existing ranges as appropriate. | ||||
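| /// For example, adding the range [4, 8) to the existing ranges {[0, 4), | ||||
| /// [8, 12)} merges all three into the single range [0, 12). | ||||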
201 | void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr, | ||||
202 | unsigned Alignment, Instruction *Inst) { | ||||
203 | int64_t End = Start+Size; | ||||
204 | |||||
205 | range_iterator I = partition_point( | ||||
206 | Ranges, [=](const MemsetRange &O) { return O.End < Start; }); | ||||
207 | |||||
208 | // We now know that I == E, in which case we didn't find anything to merge | ||||
209 | // with, or that Start <= I->End. If End < I->Start or I == E, then we need | ||||
210 | // to insert a new range. Handle this now. | ||||
211 | if (I == Ranges.end() || End < I->Start) { | ||||
212 | MemsetRange &R = *Ranges.insert(I, MemsetRange()); | ||||
213 | R.Start = Start; | ||||
214 | R.End = End; | ||||
215 | R.StartPtr = Ptr; | ||||
216 | R.Alignment = Alignment; | ||||
217 | R.TheStores.push_back(Inst); | ||||
218 | return; | ||||
219 | } | ||||
220 | |||||
221 | // This store overlaps with I, add it. | ||||
222 | I->TheStores.push_back(Inst); | ||||
223 | |||||
224 | // At this point, we may have an interval that completely contains our store. | ||||
225 | // If so, just add it to the interval and return. | ||||
226 | if (I->Start <= Start && I->End >= End) | ||||
227 | return; | ||||
228 | |||||
229 | // Now we know that Start <= I->End and End >= I->Start so the range overlaps | ||||
230 | // but is not entirely contained within the range. | ||||
231 | |||||
232 | // See if this store extends the start of the range. In this case, it couldn't | ||||
233 | // possibly cause it to join the prior range, because otherwise we would have | ||||
234 | // stopped on *it*. | ||||
235 | if (Start < I->Start) { | ||||
236 | I->Start = Start; | ||||
237 | I->StartPtr = Ptr; | ||||
238 | I->Alignment = Alignment; | ||||
239 | } | ||||
240 | |||||
241 | // Now we know that Start <= I->End and Start >= I->Start (so the startpoint | ||||
242 | // is in or right at the end of I), and that End >= I->Start. Extend I out to | ||||
243 | // End. | ||||
244 | if (End > I->End) { | ||||
245 | I->End = End; | ||||
246 | range_iterator NextI = I; | ||||
247 | while (++NextI != Ranges.end() && End >= NextI->Start) { | ||||
248 | // Merge the range in. | ||||
249 | I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end()); | ||||
250 | if (NextI->End > I->End) | ||||
251 | I->End = NextI->End; | ||||
252 | Ranges.erase(NextI); | ||||
253 | NextI = I; | ||||
254 | } | ||||
255 | } | ||||
256 | } | ||||
257 | |||||
258 | //===----------------------------------------------------------------------===// | ||||
259 | // MemCpyOptLegacyPass Pass | ||||
260 | //===----------------------------------------------------------------------===// | ||||
261 | |||||
262 | namespace { | ||||
263 | |||||
264 | class MemCpyOptLegacyPass : public FunctionPass { | ||||
265 | MemCpyOptPass Impl; | ||||
266 | |||||
267 | public: | ||||
268 | static char ID; // Pass identification, replacement for typeid | ||||
269 | |||||
270 | MemCpyOptLegacyPass() : FunctionPass(ID) { | ||||
271 | initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry()); | ||||
272 | } | ||||
273 | |||||
274 | bool runOnFunction(Function &F) override; | ||||
275 | |||||
276 | private: | ||||
277 | // This transformation requires dominator info | ||||
278 | void getAnalysisUsage(AnalysisUsage &AU) const override { | ||||
279 | AU.setPreservesCFG(); | ||||
280 | AU.addRequired<AssumptionCacheTracker>(); | ||||
281 | AU.addRequired<DominatorTreeWrapperPass>(); | ||||
282 | AU.addPreserved<DominatorTreeWrapperPass>(); | ||||
283 | AU.addPreserved<GlobalsAAWrapperPass>(); | ||||
284 | AU.addRequired<TargetLibraryInfoWrapperPass>(); | ||||
285 | AU.addRequired<AAResultsWrapperPass>(); | ||||
286 | AU.addPreserved<AAResultsWrapperPass>(); | ||||
287 | AU.addRequired<MemorySSAWrapperPass>(); | ||||
288 | AU.addPreserved<MemorySSAWrapperPass>(); | ||||
289 | } | ||||
290 | }; | ||||
291 | |||||
292 | } // end anonymous namespace | ||||
293 | |||||
294 | char MemCpyOptLegacyPass::ID = 0; | ||||
295 | |||||
296 | /// The public interface to this file... | ||||
297 | FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); } | ||||
298 | |||||
299 | INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization", | ||||
300 | false, false) | ||||
301 | INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) | ||||
302 | INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) | ||||
303 | INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) | ||||
304 | INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) | ||||
305 | INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) | ||||
306 | INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass) | ||||
307 | INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization", | ||||
308 | false, false) | ||||
309 | |||||
310 | // Check that V is either not accessible by the caller, or unwinding cannot | ||||
311 | // occur between Start and End. | ||||
312 | static bool mayBeVisibleThroughUnwinding(Value *V, Instruction *Start, | ||||
313 | Instruction *End) { | ||||
314 | assert(Start->getParent() == End->getParent() && "Must be in same block"); | ||||
315 | if (!Start->getFunction()->doesNotThrow() && | ||||
316 | !isa<AllocaInst>(getUnderlyingObject(V))) { | ||||
317 | for (const Instruction &I : | ||||
318 | make_range(Start->getIterator(), End->getIterator())) { | ||||
319 | if (I.mayThrow()) | ||||
320 | return true; | ||||
321 | } | ||||
322 | } | ||||
323 | return false; | ||||
324 | } | ||||
325 | |||||
326 | void MemCpyOptPass::eraseInstruction(Instruction *I) { | ||||
327 | MSSAU->removeMemoryAccess(I); | ||||
328 | I->eraseFromParent(); | ||||
329 | } | ||||
330 | |||||
331 | // Check for mod or ref of Loc between Start and End, excluding both boundaries. | ||||
332 | // Start and End must be in the same block | ||||
333 | static bool accessedBetween(AliasAnalysis &AA, MemoryLocation Loc, | ||||
334 | const MemoryUseOrDef *Start, | ||||
335 | const MemoryUseOrDef *End) { | ||||
336 | assert(Start->getBlock() == End->getBlock() && "Only local supported"); | ||||
337 | for (const MemoryAccess &MA : | ||||
338 | make_range(++Start->getIterator(), End->getIterator())) { | ||||
339 | if (isModOrRefSet(AA.getModRefInfo(cast<MemoryUseOrDef>(MA).getMemoryInst(), | ||||
340 | Loc))) | ||||
341 | return true; | ||||
342 | } | ||||
343 | return false; | ||||
344 | } | ||||
345 | |||||
346 | // Check for mod of Loc between Start and End, excluding both boundaries. | ||||
347 | // Start and End can be in different blocks. | ||||
348 | static bool writtenBetween(MemorySSA *MSSA, MemoryLocation Loc, | ||||
349 | const MemoryUseOrDef *Start, | ||||
350 | const MemoryUseOrDef *End) { | ||||
351 | // TODO: Only walk until we hit Start. | ||||
352 | MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess( | ||||
353 | End->getDefiningAccess(), Loc); | ||||
354 | return !MSSA->dominates(Clobber, Start); | ||||
355 | } | ||||
356 | |||||
357 | /// When scanning forward over instructions, we look for some other patterns to | ||||
358 | /// fold away. In particular, this looks for stores to neighboring locations of | ||||
359 | /// memory. If it sees enough consecutive ones, it attempts to merge them | ||||
360 | /// together into a memcpy/memset. | ||||
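| /// For example, four adjacent one-byte stores of 0 can be merged into a | ||||
| /// single call to @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 4, i1 false). | ||||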
361 | Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst, | ||||
362 | Value *StartPtr, | ||||
363 | Value *ByteVal) { | ||||
364 | const DataLayout &DL = StartInst->getModule()->getDataLayout(); | ||||
365 | |||||
366 | // Okay, so we now have a single store whose value is splatable. Scan to find | ||||
367 | // all subsequent stores of the same value at offsets from the same pointer. | ||||
368 | // Join these together into ranges, so we can decide whether contiguous blocks | ||||
369 | // are stored. | ||||
370 | MemsetRanges Ranges(DL); | ||||
371 | |||||
372 | BasicBlock::iterator BI(StartInst); | ||||
373 | |||||
374 | // Keeps track of the last memory use or def before the insertion point for | ||||
375 | // the new memset. The new MemoryDef for the inserted memsets will be inserted | ||||
376 | // after MemInsertPoint. It points to either LastMemDef or to the last user | ||||
377 | // before the insertion point of the memset, if there are any such users. | ||||
378 | MemoryUseOrDef *MemInsertPoint = nullptr; | ||||
379 | // Keeps track of the last MemoryDef between StartInst and the insertion point | ||||
380 | // for the new memset. This will become the defining access of the inserted | ||||
381 | // memsets. | ||||
382 | MemoryDef *LastMemDef = nullptr; | ||||
383 | for (++BI; !BI->isTerminator(); ++BI) { | ||||
384 | auto *CurrentAcc = cast_or_null<MemoryUseOrDef>( | ||||
385 | MSSAU->getMemorySSA()->getMemoryAccess(&*BI)); | ||||
386 | if (CurrentAcc) { | ||||
387 | MemInsertPoint = CurrentAcc; | ||||
388 | if (auto *CurrentDef = dyn_cast<MemoryDef>(CurrentAcc)) | ||||
389 | LastMemDef = CurrentDef; | ||||
390 | } | ||||
391 | |||||
392 | // Calls that only access inaccessible memory do not block merging | ||||
393 | // accessible stores. | ||||
394 | if (auto *CB = dyn_cast<CallBase>(BI)) { | ||||
395 | if (CB->onlyAccessesInaccessibleMemory()) | ||||
396 | continue; | ||||
397 | } | ||||
398 | |||||
399 | if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) { | ||||
400 | // If the instruction is readnone, ignore it, otherwise bail out. We | ||||
401 | // don't even allow readonly here because we don't want something like: | ||||
402 | // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A). | ||||
403 | if (BI->mayWriteToMemory() || BI->mayReadFromMemory()) | ||||
404 | break; | ||||
405 | continue; | ||||
406 | } | ||||
407 | |||||
408 | if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) { | ||||
409 | // If this is a store, see if we can merge it in. | ||||
410 | if (!NextStore->isSimple()) break; | ||||
411 | |||||
412 | Value *StoredVal = NextStore->getValueOperand(); | ||||
413 | |||||
414 | // Don't convert stores of non-integral pointer types to memsets (which | ||||
415 | // stores integers). | ||||
416 | if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType())) | ||||
417 | break; | ||||
418 | |||||
419 | // Check to see if this stored value is the same byte-splattable value. | ||||
420 | Value *StoredByte = isBytewiseValue(StoredVal, DL); | ||||
421 | if (isa<UndefValue>(ByteVal) && StoredByte) | ||||
422 | ByteVal = StoredByte; | ||||
423 | if (ByteVal != StoredByte) | ||||
424 | break; | ||||
425 | |||||
426 | // Check to see if this store is to a constant offset from the start ptr. | ||||
427 | Optional<int64_t> Offset = | ||||
428 | isPointerOffset(StartPtr, NextStore->getPointerOperand(), DL); | ||||
429 | if (!Offset) | ||||
430 | break; | ||||
431 | |||||
432 | Ranges.addStore(*Offset, NextStore); | ||||
433 | } else { | ||||
434 | MemSetInst *MSI = cast<MemSetInst>(BI); | ||||
435 | |||||
436 | if (MSI->isVolatile() || ByteVal != MSI->getValue() || | ||||
437 | !isa<ConstantInt>(MSI->getLength())) | ||||
438 | break; | ||||
439 | |||||
440 | // Check to see if this store is to a constant offset from the start ptr. | ||||
441 | Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), DL); | ||||
442 | if (!Offset) | ||||
443 | break; | ||||
444 | |||||
445 | Ranges.addMemSet(*Offset, MSI); | ||||
446 | } | ||||
447 | } | ||||
448 | |||||
449 | // If we have no ranges, then we just had a single store with nothing that | ||||
450 | // could be merged in. This is a very common case of course. | ||||
451 | if (Ranges.empty()) | ||||
452 | return nullptr; | ||||
453 | |||||
454 | // If we had at least one store that could be merged in, add the starting | ||||
455 | // store as well. We try to avoid this unless there is at least something | ||||
456 | // interesting as a small compile-time optimization. | ||||
457 | Ranges.addInst(0, StartInst); | ||||
458 | |||||
459 | // If we create any memsets, we put them right before the first instruction | ||||
460 | // that isn't part of the memset block. This ensures that the memset is | ||||
461 | // dominated by any addressing instruction needed by the start of the block. | ||||
462 | IRBuilder<> Builder(&*BI); | ||||
463 | |||||
464 | // Now that we have full information about ranges, loop over the ranges and | ||||
465 | // emit memset's for anything big enough to be worthwhile. | ||||
466 | Instruction *AMemSet = nullptr; | ||||
467 | for (const MemsetRange &Range : Ranges) { | ||||
468 | if (Range.TheStores.size() == 1) continue; | ||||
469 | |||||
470 | // If it is profitable to lower this range to memset, do so now. | ||||
471 | if (!Range.isProfitableToUseMemset(DL)) | ||||
472 | continue; | ||||
473 | |||||
474 | // Otherwise, we do want to transform this! Create a new memset. | ||||
475 | // Get the starting pointer of the block. | ||||
476 | StartPtr = Range.StartPtr; | ||||
477 | |||||
478 | AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End - Range.Start, | ||||
479 | MaybeAlign(Range.Alignment)); | ||||
480 | LLVM_DEBUG(dbgs() << "Replace stores:\n"; for (Instruction *SIdo { } while (false) | ||||
481 | : Range.TheStores) dbgs()do { } while (false) | ||||
482 | << *SI << '\n';do { } while (false) | ||||
483 | dbgs() << "With: " << *AMemSet << '\n')do { } while (false); | ||||
484 | if (!Range.TheStores.empty()) | ||||
485 | AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc()); | ||||
486 | |||||
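| // If Ranges was non-empty, the scan above visited at least one store or | ||||
| // memset, each of which has a MemoryDef in MemorySSA, so both LastMemDef | ||||
| // and MemInsertPoint were set on that path and are non-null here. | ||||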
487 | assert(LastMemDef && MemInsertPoint && | ||||
488 | "Both LastMemDef and MemInsertPoint need to be set"); | ||||
489 | auto *NewDef = | ||||
490 | cast<MemoryDef>(MemInsertPoint->getMemoryInst() == &*BI |
| ^--- Called C++ object pointer is null ('MemInsertPoint') | ||||
491 | ? MSSAU->createMemoryAccessBefore( | ||||
492 | AMemSet, LastMemDef, MemInsertPoint) | ||||
493 | : MSSAU->createMemoryAccessAfter( | ||||
494 | AMemSet, LastMemDef, MemInsertPoint)); | ||||
495 | MSSAU->insertDef(NewDef, /*RenameUses=*/true); | ||||
496 | LastMemDef = NewDef; | ||||
497 | MemInsertPoint = NewDef; | ||||
498 | |||||
499 | // Zap all the stores. | ||||
500 | for (Instruction *SI : Range.TheStores) | ||||
501 | eraseInstruction(SI); | ||||
502 | |||||
503 | ++NumMemSetInfer; | ||||
504 | } | ||||
505 | |||||
506 | return AMemSet; | ||||
507 | } | ||||
508 | |||||
509 | // This method tries to lift a store instruction before position P. | ||||
510 | // It will lift the store, its operands, and anything else that | ||||
511 | // may alias with these. | ||||
512 | // The method returns true if it was successful. | ||||
513 | bool MemCpyOptPass::moveUp(StoreInst *SI, Instruction *P, const LoadInst *LI) { | ||||
514 | // If the store aliases this position, bail out early. | ||||
515 | MemoryLocation StoreLoc = MemoryLocation::get(SI); | ||||
516 | if (isModOrRefSet(AA->getModRefInfo(P, StoreLoc))) | ||||
517 | return false; | ||||
518 | |||||
519 | // Keep track of the arguments of all instructions we plan to lift | ||||
520 | // so we can make sure to lift them as well if appropriate. | ||||
521 | DenseSet<Instruction*> Args; | ||||
522 | if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand())) | ||||
523 | if (Ptr->getParent() == SI->getParent()) | ||||
524 | Args.insert(Ptr); | ||||
525 | |||||
526 | // Instructions to lift before P. | ||||
527 | SmallVector<Instruction *, 8> ToLift{SI}; | ||||
528 | |||||
529 | // Memory locations of lifted instructions. | ||||
530 | SmallVector<MemoryLocation, 8> MemLocs{StoreLoc}; | ||||
531 | |||||
532 | // Lifted calls. | ||||
533 | SmallVector<const CallBase *, 8> Calls; | ||||
534 | |||||
535 | const MemoryLocation LoadLoc = MemoryLocation::get(LI); | ||||
536 | |||||
537 | for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) { | ||||
538 | auto *C = &*I; | ||||
539 | |||||
540 | // Make sure hoisting does not perform a store that was not guaranteed to | ||||
541 | // happen. | ||||
542 | if (!isGuaranteedToTransferExecutionToSuccessor(C)) | ||||
543 | return false; | ||||
544 | |||||
545 | bool MayAlias = isModOrRefSet(AA->getModRefInfo(C, None)); | ||||
546 | |||||
547 | bool NeedLift = false; | ||||
548 | if (Args.erase(C)) | ||||
549 | NeedLift = true; | ||||
550 | else if (MayAlias) { | ||||
551 | NeedLift = llvm::any_of(MemLocs, [C, this](const MemoryLocation &ML) { | ||||
552 | return isModOrRefSet(AA->getModRefInfo(C, ML)); | ||||
553 | }); | ||||
554 | |||||
555 | if (!NeedLift) | ||||
556 | NeedLift = llvm::any_of(Calls, [C, this](const CallBase *Call) { | ||||
557 | return isModOrRefSet(AA->getModRefInfo(C, Call)); | ||||
558 | }); | ||||
559 | } | ||||
560 | |||||
561 | if (!NeedLift) | ||||
562 | continue; | ||||
563 | |||||
564 | if (MayAlias) { | ||||
565 | // Since LI is implicitly moved downwards past the lifted instructions, | ||||
566 | // none of them may modify its source. | ||||
567 | if (isModSet(AA->getModRefInfo(C, LoadLoc))) | ||||
568 | return false; | ||||
569 | else if (const auto *Call = dyn_cast<CallBase>(C)) { | ||||
570 | // If we can't lift this before P, it's game over. | ||||
571 | if (isModOrRefSet(AA->getModRefInfo(P, Call))) | ||||
572 | return false; | ||||
573 | |||||
574 | Calls.push_back(Call); | ||||
575 | } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) { | ||||
576 | // If we can't lift this before P, it's game over. | ||||
577 | auto ML = MemoryLocation::get(C); | ||||
578 | if (isModOrRefSet(AA->getModRefInfo(P, ML))) | ||||
579 | return false; | ||||
580 | |||||
581 | MemLocs.push_back(ML); | ||||
582 | } else | ||||
583 | // We don't know how to lift this instruction. | ||||
584 | return false; | ||||
585 | } | ||||
586 | |||||
587 | ToLift.push_back(C); | ||||
588 | for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k) | ||||
589 | if (auto *A = dyn_cast<Instruction>(C->getOperand(k))) { | ||||
590 | if (A->getParent() == SI->getParent()) { | ||||
591 | // Cannot hoist user of P above P | ||||
592 | if (A == P) return false; | ||||
593 | Args.insert(A); | ||||
594 | } | ||||
595 | } | ||||
596 | } | ||||
597 | |||||
598 | // Find MSSA insertion point. Normally P will always have a corresponding | ||||
599 | // memory access before which we can insert. However, with non-standard AA | ||||
600 | // pipelines, there may be a mismatch between AA and MSSA, in which case we | ||||
601 | // will scan for a memory access before P. In either case, we know for sure | ||||
602 | // that at least the load will have a memory access. | ||||
603 | // TODO: Simplify this once P is determined by MSSA, in which case the | ||||
604 | // discrepancy can no longer occur. | ||||
605 | MemoryUseOrDef *MemInsertPoint = nullptr; | ||||
606 | if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(P)) { | ||||
607 | MemInsertPoint = cast<MemoryUseOrDef>(--MA->getIterator()); | ||||
608 | } else { | ||||
609 | const Instruction *ConstP = P; | ||||
610 | for (const Instruction &I : make_range(++ConstP->getReverseIterator(), | ||||
611 | ++LI->getReverseIterator())) { | ||||
612 | if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(&I)) { | ||||
613 | MemInsertPoint = MA; | ||||
614 | break; | ||||
615 | } | ||||
616 | } | ||||
617 | } | ||||
618 | |||||
619 | // We made it, we need to lift. | ||||
620 | for (auto *I : llvm::reverse(ToLift)) { | ||||
621 | LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n")do { } while (false); | ||||
622 | I->moveBefore(P); | ||||
623 | assert(MemInsertPoint && "Must have found insert point"); | ||||
624 | if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(I)) { | ||||
625 | MSSAU->moveAfter(MA, MemInsertPoint); | ||||
626 | MemInsertPoint = MA; | ||||
627 | } | ||||
628 | } | ||||
629 | |||||
630 | return true; | ||||
631 | } | ||||
632 | |||||
633 | bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) { | ||||
634 | if (!SI->isSimple()) return false; | ||||
635 | |||||
636 | // Avoid merging nontemporal stores since the resulting | ||||
637 | // memcpy/memset would not be able to preserve the nontemporal hint. | ||||
638 | // In theory we could teach how to propagate the !nontemporal metadata to | ||||
639 | // memset calls. However, that change would force the backend to | ||||
640 | // conservatively expand !nontemporal memset calls back to sequences of | ||||
641 | // store instructions (effectively undoing the merging). | ||||
642 | if (SI->getMetadata(LLVMContext::MD_nontemporal)) | ||||
643 | return false; | ||||
644 | |||||
645 | const DataLayout &DL = SI->getModule()->getDataLayout(); | ||||
646 | |||||
647 | Value *StoredVal = SI->getValueOperand(); | ||||
648 | |||||
649 | // Not all the transforms below are correct for non-integral pointers, bail | ||||
650 | // until we've audited the individual pieces. | ||||
651 | if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType())) | ||||
652 | return false; | ||||
653 | |||||
654 | // Load to store forwarding can be interpreted as memcpy. | ||||
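| // For example (illustrative IR): | ||||
| //   %v = load %struct.S, %struct.S* %src | ||||
| //   store %struct.S %v, %struct.S* %dst | ||||
| // can become a memcpy from %src to %dst (or a memmove if they may alias). | ||||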
655 | if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) { | ||||
656 | if (LI->isSimple() && LI->hasOneUse() && | ||||
657 | LI->getParent() == SI->getParent()) { | ||||
658 | |||||
659 | auto *T = LI->getType(); | ||||
660 | // Don't introduce calls to memcpy/memmove intrinsics out of thin air if | ||||
661 | // the corresponding libcalls are not available. | ||||
662 | // TODO: We should really distinguish between libcall availability and | ||||
663 | // our ability to introduce intrinsics. | ||||
664 | if (T->isAggregateType() && | ||||
665 | (EnableMemCpyOptWithoutLibcalls || | ||||
666 | (TLI->has(LibFunc_memcpy) && TLI->has(LibFunc_memmove)))) { | ||||
667 | MemoryLocation LoadLoc = MemoryLocation::get(LI); | ||||
668 | |||||
669 | // We use alias analysis to check if an instruction may store to | ||||
670 | // the memory we load from in between the load and the store. If | ||||
671 | // such an instruction is found, we try to promote there instead | ||||
672 | // of at the store position. | ||||
673 | // TODO: Can use MSSA for this. | ||||
674 | Instruction *P = SI; | ||||
675 | for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) { | ||||
676 | if (isModSet(AA->getModRefInfo(&I, LoadLoc))) { | ||||
677 | P = &I; | ||||
678 | break; | ||||
679 | } | ||||
680 | } | ||||
681 | |||||
682 | // We found an instruction that may write to the loaded memory. | ||||
683 | // We can try to promote at this position instead of the store | ||||
684 | // position if nothing aliases the store memory after this and the store | ||||
685 | // destination is not in the range. | ||||
686 | if (P && P != SI) { | ||||
687 | if (!moveUp(SI, P, LI)) | ||||
688 | P = nullptr; | ||||
689 | } | ||||
690 | |||||
691 | // If a valid insertion position is found, then we can promote | ||||
692 | // the load/store pair to a memcpy. | ||||
693 | if (P) { | ||||
694 | // If we load from memory that may alias the memory we store to, | ||||
695 | // memmove must be used to preserve semantics. If not, memcpy can | ||||
696 | // be used. Also, if we load from constant memory, memcpy can be used | ||||
697 | // as the constant memory won't be modified. | ||||
698 | bool UseMemMove = false; | ||||
699 | if (isModSet(AA->getModRefInfo(SI, LoadLoc))) | ||||
700 | UseMemMove = true; | ||||
701 | |||||
702 | uint64_t Size = DL.getTypeStoreSize(T); | ||||
703 | |||||
704 | IRBuilder<> Builder(P); | ||||
705 | Instruction *M; | ||||
706 | if (UseMemMove) | ||||
707 | M = Builder.CreateMemMove( | ||||
708 | SI->getPointerOperand(), SI->getAlign(), | ||||
709 | LI->getPointerOperand(), LI->getAlign(), Size); | ||||
710 | else | ||||
711 | M = Builder.CreateMemCpy( | ||||
712 | SI->getPointerOperand(), SI->getAlign(), | ||||
713 | LI->getPointerOperand(), LI->getAlign(), Size); | ||||
714 | |||||
715 | LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "do { } while (false) | ||||
716 | << *M << "\n")do { } while (false); | ||||
717 | |||||
718 | auto *LastDef = | ||||
719 | cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI)); | ||||
720 | auto *NewAccess = MSSAU->createMemoryAccessAfter(M, LastDef, LastDef); | ||||
721 | MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true); | ||||
722 | |||||
723 | eraseInstruction(SI); | ||||
724 | eraseInstruction(LI); | ||||
725 | ++NumMemCpyInstr; | ||||
726 | |||||
727 | // Make sure we do not invalidate the iterator. | ||||
728 | BBI = M->getIterator(); | ||||
729 | return true; | ||||
730 | } | ||||
731 | } | ||||
732 | |||||
733 | // Detect cases where we're performing call slot forwarding, but | ||||
734 | // happen to be using a load-store pair to implement it, rather than | ||||
735 | // a memcpy. | ||||
736 | CallInst *C = nullptr; | ||||
737 | if (auto *LoadClobber = dyn_cast<MemoryUseOrDef>( | ||||
738 | MSSA->getWalker()->getClobberingMemoryAccess(LI))) { | ||||
739 | // The load must post-dominate the call. Limit to the same block for now. | ||||
740 | // TODO: Support non-local call-slot optimization? | ||||
741 | if (LoadClobber->getBlock() == SI->getParent()) | ||||
742 | C = dyn_cast_or_null<CallInst>(LoadClobber->getMemoryInst()); | ||||
743 | } | ||||
744 | |||||
745 | if (C) { | ||||
746 | // Check that nothing touches the dest of the "copy" between | ||||
747 | // the call and the store. | ||||
748 | MemoryLocation StoreLoc = MemoryLocation::get(SI); | ||||
749 | if (accessedBetween(*AA, StoreLoc, MSSA->getMemoryAccess(C), | ||||
750 | MSSA->getMemoryAccess(SI))) | ||||
751 | C = nullptr; | ||||
752 | } | ||||
753 | |||||
754 | if (C) { | ||||
755 | bool changed = performCallSlotOptzn( | ||||
756 | LI, SI, SI->getPointerOperand()->stripPointerCasts(), | ||||
757 | LI->getPointerOperand()->stripPointerCasts(), | ||||
758 | DL.getTypeStoreSize(SI->getOperand(0)->getType()), | ||||
759 | commonAlignment(SI->getAlign(), LI->getAlign()), C); | ||||
760 | if (changed) { | ||||
761 | eraseInstruction(SI); | ||||
762 | eraseInstruction(LI); | ||||
763 | ++NumMemCpyInstr; | ||||
764 | return true; | ||||
765 | } | ||||
766 | } | ||||
767 | } | ||||
768 | } | ||||
769 | |||||
770 | // The following code creates memset intrinsics out of thin air. Don't do | ||||
771 | // this if the corresponding libfunc is not available. | ||||
772 | // TODO: We should really distinguish between libcall availability and | ||||
773 | // our ability to introduce intrinsics. | ||||
774 | if (!(TLI->has(LibFunc_memset) || EnableMemCpyOptWithoutLibcalls)) | ||||
775 | return false; | ||||
776 | |||||
777 | // There are two cases that are interesting for this code to handle: memcpy | ||||
778 | // and memset. Right now we only handle memset. | ||||
779 | |||||
780 | // Ensure that the value being stored is something that can be memset a | ||||
781 | // byte at a time, like "0" or "-1" of any width, as well as things like | ||||
782 | // 0xA0A0A0A0 and 0.0. | ||||
783 | auto *V = SI->getOperand(0); | ||||
784 | if (Value *ByteVal = isBytewiseValue(V, DL)) { | ||||
785 | if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(), | ||||
786 | ByteVal)) { | ||||
787 | BBI = I->getIterator(); // Don't invalidate iterator. | ||||
788 | return true; | ||||
789 | } | ||||
790 | |||||
791 | // If we have an aggregate, we try to promote it to memset regardless | ||||
792 | // of opportunity for merging as it can expose optimization opportunities | ||||
793 | // in subsequent passes. | ||||
794 | auto *T = V->getType(); | ||||
795 | if (T->isAggregateType()) { | ||||
796 | uint64_t Size = DL.getTypeStoreSize(T); | ||||
797 | IRBuilder<> Builder(SI); | ||||
798 | auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size, | ||||
799 | SI->getAlign()); | ||||
800 | |||||
801 | LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n")do { } while (false); | ||||
802 | |||||
803 | // The newly inserted memset is immediately overwritten by the original | ||||
804 | // store, so we do not need to rename uses. | ||||
805 | auto *StoreDef = cast<MemoryDef>(MSSA->getMemoryAccess(SI)); | ||||
806 | auto *NewAccess = MSSAU->createMemoryAccessBefore( | ||||
807 | M, StoreDef->getDefiningAccess(), StoreDef); | ||||
808 | MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/false); | ||||
809 | |||||
810 | eraseInstruction(SI); | ||||
811 | NumMemSetInfer++; | ||||
812 | |||||
813 | // Make sure we do not invalidate the iterator. | ||||
814 | BBI = M->getIterator(); | ||||
815 | return true; | ||||
816 | } | ||||
817 | } | ||||
818 | |||||
819 | return false; | ||||
820 | } | ||||
821 | |||||
822 | bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) { | ||||
823 | // See if there is another memset or store neighboring this memset which | ||||
824 | // allows us to widen out the memset to do a single larger store. | ||||
825 | if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile()) | ||||
826 | if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(), | ||||
827 | MSI->getValue())) { | ||||
828 | BBI = I->getIterator(); // Don't invalidate iterator. | ||||
829 | return true; | ||||
830 | } | ||||
831 | return false; | ||||
832 | } | ||||
833 | |||||
834 | /// Takes a memcpy and a call that it depends on, | ||||
835 | /// and checks for the possibility of a call slot optimization by having | ||||
836 | /// the call write its result directly into the destination of the memcpy. | ||||
837 | bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad, | ||||
838 | Instruction *cpyStore, Value *cpyDest, | ||||
839 | Value *cpySrc, uint64_t cpyLen, | ||||
840 | Align cpyAlign, CallInst *C) { | ||||
841 | // The general transformation to keep in mind is | ||||
842 | // | ||||
843 | // call @func(..., src, ...) | ||||
844 | // memcpy(dest, src, ...) | ||||
845 | // | ||||
846 | // -> | ||||
847 | // | ||||
848 | // memcpy(dest, src, ...) | ||||
849 | // call @func(..., dest, ...) | ||||
850 | // | ||||
851 | // Since moving the memcpy is technically awkward, we additionally check that | ||||
852 | // src only holds uninitialized values at the moment of the call, meaning that | ||||
853 | // the memcpy can be discarded rather than moved. | ||||
854 | |||||
855 | // Lifetime marks shouldn't be operated on. | ||||
856 | if (Function *F = C->getCalledFunction()) | ||||
857 | if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start) | ||||
858 | return false; | ||||
859 | |||||
860 | // Require that src be an alloca. This simplifies the reasoning considerably. | ||||
861 | AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc); | ||||
862 | if (!srcAlloca) | ||||
863 | return false; | ||||
864 | |||||
865 | ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize()); | ||||
866 | if (!srcArraySize) | ||||
867 | return false; | ||||
868 | |||||
869 | const DataLayout &DL = cpyLoad->getModule()->getDataLayout(); | ||||
870 | uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) * | ||||
871 | srcArraySize->getZExtValue(); | ||||
872 | |||||
873 | if (cpyLen < srcSize) | ||||
874 | return false; | ||||
875 | |||||
876 | // Check that accessing the first srcSize bytes of dest will not cause a | ||||
877 | // trap. Otherwise the transform is invalid since it might cause a trap | ||||
878 | // to occur earlier than it otherwise would. | ||||
879 | if (!isDereferenceableAndAlignedPointer(cpyDest, Align(1), APInt(64, cpyLen), | ||||
880 | DL, C, DT)) | ||||
881 | return false; | ||||
882 | |||||
883 | // Make sure that nothing can observe cpyDest being written early. There are | ||||
884 | // a number of cases to consider: | ||||
885 | // 1. cpyDest cannot be accessed between C and cpyStore as a precondition of | ||||
886 | // the transform. | ||||
887 | // 2. C itself may not access cpyDest (prior to the transform). This is | ||||
888 | // checked further below. | ||||
889 | // 3. If cpyDest is accessible to the caller of this function (potentially | ||||
890 | // captured and not based on an alloca), we need to ensure that we cannot | ||||
891 | // unwind between C and cpyStore. This is checked here. | ||||
892 | // 4. If cpyDest is potentially captured, there may be accesses to it from | ||||
893 | // another thread. In this case, we need to check that cpyStore is | ||||
894 | // guaranteed to be executed if C is. As it is a non-atomic access, it | ||||
895 | // renders accesses from other threads undefined. | ||||
896 | // TODO: This is currently not checked. | ||||
897 | if (mayBeVisibleThroughUnwinding(cpyDest, C, cpyStore)) | ||||
898 | return false; | ||||
899 | |||||
900 | // Check that dest points to memory that is at least as aligned as src. | ||||
901 | Align srcAlign = srcAlloca->getAlign(); | ||||
902 | bool isDestSufficientlyAligned = srcAlign <= cpyAlign; | ||||
903 | // If dest is not aligned enough and we can't increase its alignment then | ||||
904 | // bail out. | ||||
905 | if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest)) | ||||
906 | return false; | ||||
907 | |||||
908 | // Check that src is not accessed except via the call and the memcpy. This | ||||
909 | // guarantees that it holds only undefined values when passed in (so the final | ||||
910 | // memcpy can be dropped), that it is not read or written between the call and | ||||
911 | // the memcpy, and that writing beyond the end of it is undefined. | ||||
912 | SmallVector<User *, 8> srcUseList(srcAlloca->users()); | ||||
913 | while (!srcUseList.empty()) { | ||||
914 | User *U = srcUseList.pop_back_val(); | ||||
915 | |||||
916 | if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) { | ||||
917 | append_range(srcUseList, U->users()); | ||||
918 | continue; | ||||
919 | } | ||||
920 | if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) { | ||||
921 | if (!G->hasAllZeroIndices()) | ||||
922 | return false; | ||||
923 | |||||
924 | append_range(srcUseList, U->users()); | ||||
925 | continue; | ||||
926 | } | ||||
927 | if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U)) | ||||
928 | if (IT->isLifetimeStartOrEnd()) | ||||
929 | continue; | ||||
930 | |||||
931 | if (U != C && U != cpyLoad) | ||||
932 | return false; | ||||
933 | } | ||||
934 | |||||
935 | // Check that src isn't captured by the called function since the | ||||
936 | // transformation can cause aliasing issues in that case. | ||||
937 | for (unsigned ArgI = 0, E = C->arg_size(); ArgI != E; ++ArgI) | ||||
938 | if (C->getArgOperand(ArgI) == cpySrc && !C->doesNotCapture(ArgI)) | ||||
939 | return false; | ||||
940 | |||||
941 | // Since we're changing the parameter to the callsite, we need to make sure | ||||
942 | // that what would be the new parameter dominates the callsite. | ||||
943 | if (!DT->dominates(cpyDest, C)) { | ||||
944 | // Support moving a constant index GEP before the call. | ||||
945 | auto *GEP = dyn_cast<GetElementPtrInst>(cpyDest); | ||||
946 | if (GEP && GEP->hasAllConstantIndices() && | ||||
947 | DT->dominates(GEP->getPointerOperand(), C)) | ||||
948 | GEP->moveBefore(C); | ||||
949 | else | ||||
950 | return false; | ||||
951 | } | ||||
952 | |||||
953 | // In addition to knowing that the call does not access src in some | ||||
954 | // unexpected manner, for example via a global, which we deduce from | ||||
955 | // the use analysis, we also need to know that it does not sneakily | ||||
956 | // access dest. We rely on AA to figure this out for us. | ||||
957 | ModRefInfo MR = AA->getModRefInfo(C, cpyDest, LocationSize::precise(srcSize)); | ||||
958 | // If necessary, perform additional analysis. | ||||
959 | if (isModOrRefSet(MR)) | ||||
960 | MR = AA->callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize), DT); | ||||
961 | if (isModOrRefSet(MR)) | ||||
962 | return false; | ||||
963 | |||||
964 | // We can't create address space casts here because we don't know if they're | ||||
965 | // safe for the target. | ||||
966 | if (cpySrc->getType()->getPointerAddressSpace() != | ||||
967 | cpyDest->getType()->getPointerAddressSpace()) | ||||
968 | return false; | ||||
969 | for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI) | ||||
970 | if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc && | ||||
971 | cpySrc->getType()->getPointerAddressSpace() != | ||||
972 | C->getArgOperand(ArgI)->getType()->getPointerAddressSpace()) | ||||
973 | return false; | ||||
974 | |||||
975 | // All the checks have passed, so do the transformation. | ||||
976 | bool changedArgument = false; | ||||
977 | for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI) | ||||
978 | if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc) { | ||||
979 | Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest | ||||
980 | : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(), | ||||
981 | cpyDest->getName(), C); | ||||
982 | changedArgument = true; | ||||
983 | if (C->getArgOperand(ArgI)->getType() == Dest->getType()) | ||||
984 | C->setArgOperand(ArgI, Dest); | ||||
985 | else | ||||
986 | C->setArgOperand(ArgI, CastInst::CreatePointerCast( | ||||
987 | Dest, C->getArgOperand(ArgI)->getType(), | ||||
988 | Dest->getName(), C)); | ||||
989 | } | ||||
990 | |||||
991 | if (!changedArgument) | ||||
992 | return false; | ||||
993 | |||||
994 | // If the destination wasn't sufficiently aligned then increase its alignment. | ||||
995 | if (!isDestSufficientlyAligned) { | ||||
996 | assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!"); | ||||
997 | cast<AllocaInst>(cpyDest)->setAlignment(srcAlign); | ||||
998 | } | ||||
999 | |||||
1000 | // Update AA metadata | ||||
1001 | // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be | ||||
1002 | // handled here, but combineMetadata doesn't support them yet | ||||
1003 | unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope, | ||||
1004 | LLVMContext::MD_noalias, | ||||
1005 | LLVMContext::MD_invariant_group, | ||||
1006 | LLVMContext::MD_access_group}; | ||||
1007 | combineMetadata(C, cpyLoad, KnownIDs, true); | ||||
1008 | |||||
1009 | ++NumCallSlot; | ||||
1010 | return true; | ||||
1011 | } | ||||
1012 | |||||
1013 | /// We've found that the (upward scanning) memory dependence of memcpy 'M' is | ||||
1014 | /// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can. | ||||
1015 | bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M, | ||||
1016 | MemCpyInst *MDep) { | ||||
1017 | // We can only transform memcpys where the dest of one is the source of the | ||||
1018 | // other. | ||||
1019 | if (M->getSource() != MDep->getDest() || MDep->isVolatile()) | ||||
1020 | return false; | ||||
1021 | |||||
1022 | // If the dep instruction is reading from our current input, then it is a noop | ||||
1023 | // transfer and substituting the input won't change this instruction. Just | ||||
1024 | // ignore the input and let someone else zap MDep. This handles cases like: | ||||
1025 | // memcpy(a <- a) | ||||
1026 | // memcpy(b <- a) | ||||
1027 | if (M->getSource() == MDep->getSource()) | ||||
1028 | return false; | ||||
1029 | |||||
1030 | // Second, the lengths of the memcpys must be the same, or the preceding one | ||||
1031 | // must be larger than the following one. | ||||
1032 | if (MDep->getLength() != M->getLength()) { | ||||
1033 | ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength()); | ||||
1034 | ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength()); | ||||
1035 | if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue()) | ||||
1036 | return false; | ||||
1037 | } | ||||
1038 | |||||
1039 | // Verify that the copied-from memory doesn't change in between the two | ||||
1040 | // transfers. For example, in: | ||||
1041 | // memcpy(a <- b) | ||||
1042 | // *b = 42; | ||||
1043 | // memcpy(c <- a) | ||||
1044 | // It would be invalid to transform the second memcpy into memcpy(c <- b). | ||||
1045 | // | ||||
1046 | // TODO: If the code between M and MDep is transparent to the destination "c", | ||||
1047 | // then we could still perform the xform by moving M up to the first memcpy. | ||||
1048 | // TODO: It would be sufficient to check the MDep source up to the memcpy | ||||
1049 | // size of M, rather than MDep. | ||||
1050 | if (writtenBetween(MSSA, MemoryLocation::getForSource(MDep), | ||||
1051 | MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(M))) | ||||
1052 | return false; | ||||
1053 | |||||
1054 | // If the dest of the second might alias the source of the first, then the | ||||
1055 | // source and dest might overlap. In addition, if the source of the first | ||||
1056 | // points to constant memory, they won't overlap by definition. Otherwise, we | ||||
1057 | // still want to eliminate the intermediate value, but we have to generate a | ||||
1058 | // memmove instead of memcpy. | ||||
1059 | bool UseMemMove = false; | ||||
1060 | if (isModSet(AA->getModRefInfo(M, MemoryLocation::getForSource(MDep)))) | ||||
1061 | UseMemMove = true; | ||||
1062 | |||||
1063 | // If all checks passed, then we can transform M. | ||||
1064 | LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"do { } while (false) | ||||
1065 | << *MDep << '\n' << *M << '\n')do { } while (false); | ||||
1066 | |||||
1067 | // TODO: Is this worth it if we're creating a less aligned memcpy? For | ||||
1068 | // example we could be moving from movaps -> movq on x86. | ||||
1069 | IRBuilder<> Builder(M); | ||||
1070 | Instruction *NewM; | ||||
1071 | if (UseMemMove) | ||||
1072 | NewM = Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(), | ||||
1073 | MDep->getRawSource(), MDep->getSourceAlign(), | ||||
1074 | M->getLength(), M->isVolatile()); | ||||
1075 | else if (isa<MemCpyInlineInst>(M)) { | ||||
1076 | // llvm.memcpy may be promoted to llvm.memcpy.inline, but the converse is | ||||
1077 | // never allowed since that would allow the latter to be lowered as a call | ||||
1078 | // to an external function. | ||||
1079 | NewM = Builder.CreateMemCpyInline( | ||||
1080 | M->getRawDest(), M->getDestAlign(), MDep->getRawSource(), | ||||
1081 | MDep->getSourceAlign(), M->getLength(), M->isVolatile()); | ||||
1082 | } else | ||||
1083 | NewM = Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(), | ||||
1084 | MDep->getRawSource(), MDep->getSourceAlign(), | ||||
1085 | M->getLength(), M->isVolatile()); | ||||
1086 | |||||
1087 | assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M))); | ||||
1088 | auto *LastDef = cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M)); | ||||
1089 | auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef); | ||||
1090 | MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true); | ||||
1091 | |||||
1092 | // Remove the instruction we're replacing. | ||||
1093 | eraseInstruction(M); | ||||
1094 | ++NumMemCpyInstr; | ||||
1095 | return true; | ||||
1096 | } | ||||
1097 | |||||
1098 | /// We've found that the (upward scanning) memory dependence of \p MemCpy is | ||||
1099 | /// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that | ||||
1100 | /// weren't copied over by \p MemCpy. | ||||
1101 | /// | ||||
1102 | /// In other words, transform: | ||||
1103 | /// \code | ||||
1104 | /// memset(dst, c, dst_size); | ||||
1105 | /// memcpy(dst, src, src_size); | ||||
1106 | /// \endcode | ||||
1107 | /// into: | ||||
1108 | /// \code | ||||
1109 | /// memcpy(dst, src, src_size); | ||||
1110 | /// memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size); | ||||
1111 | /// \endcode | ||||
1112 | bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy, | ||||
1113 | MemSetInst *MemSet) { | ||||
1114 | // We can only transform memset/memcpy with the same destination. | ||||
1115 | if (!AA->isMustAlias(MemSet->getDest(), MemCpy->getDest())) | ||||
1116 | return false; | ||||
1117 | |||||
1118 | // Check that src and dst of the memcpy aren't the same. While memcpy | ||||
1119 | // operands cannot partially overlap, exact equality is allowed. | ||||
1120 | if (isModSet(AA->getModRefInfo(MemCpy, MemoryLocation::getForSource(MemCpy)))) | ||||
1121 | return false; | ||||
1122 | |||||
1123 | // We know that dst up to src_size is not written. We now need to make sure | ||||
1124 | // that dst up to dst_size is not accessed. (If we did not move the memset, | ||||
1125 | // checking for reads would be sufficient.) | ||||
1126 | if (accessedBetween(*AA, MemoryLocation::getForDest(MemSet), | ||||
1127 | MSSA->getMemoryAccess(MemSet), | ||||
1128 | MSSA->getMemoryAccess(MemCpy))) | ||||
1129 | return false; | ||||
1130 | |||||
1131 | // Use the same i8* dest as the memcpy, killing the memset dest if different. | ||||
1132 | Value *Dest = MemCpy->getRawDest(); | ||||
1133 | Value *DestSize = MemSet->getLength(); | ||||
1134 | Value *SrcSize = MemCpy->getLength(); | ||||
1135 | |||||
1136 | if (mayBeVisibleThroughUnwinding(Dest, MemSet, MemCpy)) | ||||
1137 | return false; | ||||
1138 | |||||
1139 | // If the sizes are the same, simply drop the memset instead of generating | ||||
1140 | // a replacement with zero size. | ||||
1141 | if (DestSize == SrcSize) { | ||||
1142 | eraseInstruction(MemSet); | ||||
1143 | return true; | ||||
1144 | } | ||||
1145 | |||||
1146 | // By default, create an unaligned memset. | ||||
1147 | unsigned Align = 1; | ||||
1148 | // If Dest is aligned, and SrcSize is constant, use the minimum alignment | ||||
1149 | // of the sum. | ||||
1150 | const unsigned DestAlign = | ||||
1151 | std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment()); | ||||
1152 | if (DestAlign > 1) | ||||
1153 | if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize)) | ||||
1154 | Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign); | ||||
1155 | |||||
1156 | IRBuilder<> Builder(MemCpy); | ||||
1157 | |||||
1158 | // If the sizes have different types, zext the smaller one. | ||||
1159 | if (DestSize->getType() != SrcSize->getType()) { | ||||
1160 | if (DestSize->getType()->getIntegerBitWidth() > | ||||
1161 | SrcSize->getType()->getIntegerBitWidth()) | ||||
1162 | SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType()); | ||||
1163 | else | ||||
1164 | DestSize = Builder.CreateZExt(DestSize, SrcSize->getType()); | ||||
1165 | } | ||||
1166 | |||||
1167 | Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize); | ||||
1168 | Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize); | ||||
1169 | Value *MemsetLen = Builder.CreateSelect( | ||||
1170 | Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff); | ||||
1171 | unsigned DestAS = Dest->getType()->getPointerAddressSpace(); | ||||
1172 | Instruction *NewMemSet = Builder.CreateMemSet( | ||||
1173 | Builder.CreateGEP(Builder.getInt8Ty(), | ||||
1174 | Builder.CreatePointerCast(Dest, | ||||
1175 | Builder.getInt8PtrTy(DestAS)), | ||||
1176 | SrcSize), | ||||
1177 | MemSet->getOperand(1), MemsetLen, MaybeAlign(Align)); | ||||
1178 | |||||
1179 | assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)) && | ||||
1180 | "MemCpy must be a MemoryDef"); | ||||
1181 | // The new memset is inserted after the memcpy, but it is known that its | ||||
1182 | // defining access is the memset about to be removed which immediately | ||||
1183 | // precedes the memcpy. | ||||
1184 | auto *LastDef = | ||||
1185 | cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)); | ||||
1186 | auto *NewAccess = MSSAU->createMemoryAccessBefore( | ||||
1187 | NewMemSet, LastDef->getDefiningAccess(), LastDef); | ||||
1188 | MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true); | ||||
1189 | |||||
1190 | eraseInstruction(MemSet); | ||||
1191 | return true; | ||||
1192 | } | ||||
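
A source-level sketch of the memset-shrinking rewrite above (illustrative only: the pass rewrites the llvm.memset/llvm.memcpy intrinsics in IR, and the function names here are hypothetical):

  #include <cstring>

  // Before: the memset writes 64 bytes, the first 16 of which the memcpy
  // immediately overwrites.
  void before(char *dst, const char *src) {
    memset(dst, 0, 64);
    memcpy(dst, src, 16);
  }

  // After: only the trailing bytes are memset. With constant sizes the
  // select emitted by the pass folds to dst_size - src_size = 48.
  void after(char *dst, const char *src) {
    memcpy(dst, src, 16);
    memset(dst + 16, 0, 48);
  }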
1193 | |||||
1194 | /// Determine whether the memory pointed to by \p V has undefined contents for | ||||
1195 | /// the given Size, either because it was freshly alloca'd or its lifetime has just begun. | ||||
1196 | static bool hasUndefContents(MemorySSA *MSSA, AliasAnalysis *AA, Value *V, | ||||
1197 | MemoryDef *Def, Value *Size) { | ||||
1198 | if (MSSA->isLiveOnEntryDef(Def)) | ||||
1199 | return isa<AllocaInst>(getUnderlyingObject(V)); | ||||
1200 | |||||
1201 | if (IntrinsicInst *II = | ||||
1202 | dyn_cast_or_null<IntrinsicInst>(Def->getMemoryInst())) { | ||||
1203 | if (II->getIntrinsicID() == Intrinsic::lifetime_start) { | ||||
1204 | ConstantInt *LTSize = cast<ConstantInt>(II->getArgOperand(0)); | ||||
1205 | |||||
1206 | if (ConstantInt *CSize = dyn_cast<ConstantInt>(Size)) { | ||||
1207 | if (AA->isMustAlias(V, II->getArgOperand(1)) && | ||||
1208 | LTSize->getZExtValue() >= CSize->getZExtValue()) | ||||
1209 | return true; | ||||
1210 | } | ||||
1211 | |||||
1212 | // If the lifetime.start covers a whole alloca (as it almost always | ||||
1213 | // does) and we're querying a pointer based on that alloca, then we know | ||||
1214 | // the memory is definitely undef, regardless of how exactly we alias. | ||||
1215 | // The size also doesn't matter, as an out-of-bounds access would be UB. | ||||
1216 | AllocaInst *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(V)); | ||||
1217 | if (getUnderlyingObject(II->getArgOperand(1)) == Alloca) { | ||||
1218 | const DataLayout &DL = Alloca->getModule()->getDataLayout(); | ||||
1219 | if (Optional<TypeSize> AllocaSize = Alloca->getAllocationSizeInBits(DL)) | ||||
1220 | if (*AllocaSize == LTSize->getValue() * 8) | ||||
1221 | return true; | ||||
1222 | } | ||||
1223 | } | ||||
1224 | } | ||||
1225 | |||||
1226 | return false; | ||||
1227 | } | ||||
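
Roughly, the live-on-entry alloca case this helper recognizes, at the source level (a sketch; reading uninitialized storage is UB in C++, which is exactly the latitude the pass exploits in IR):

  #include <cstring>

  void sink(char *);

  void demo() {
    char buf[32];          // freshly alloca'd: contents are undef
    char out[32];
    memcpy(out, buf, 32);  // copies nothing but undef bytes; removable
    sink(out);
  }

The lifetime.start case arises from the optimizer's own lifetime intrinsics rather than anything expressible in source, but the reasoning is the same: before the marker, or for a marker covering the whole alloca, the bytes are undef.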
1228 | |||||
1229 | /// Transform memcpy to memset when its source was just memset. | ||||
1230 | /// In other words, turn: | ||||
1231 | /// \code | ||||
1232 | /// memset(dst1, c, dst1_size); | ||||
1233 | /// memcpy(dst2, dst1, dst2_size); | ||||
1234 | /// \endcode | ||||
1235 | /// into: | ||||
1236 | /// \code | ||||
1237 | /// memset(dst1, c, dst1_size); | ||||
1238 | /// memset(dst2, c, dst2_size); | ||||
1239 | /// \endcode | ||||
1240 | /// When dst2_size <= dst1_size. | ||||
1241 | bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy, | ||||
1242 | MemSetInst *MemSet) { | ||||
1243 | // Make sure that we have memcpy(..., memset(...), ...), i.e. that we are | ||||
1244 | // memcpying from the very address the memset wrote. Otherwise it is hard to reason about. | ||||
1245 | if (!AA->isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource())) | ||||
1246 | return false; | ||||
1247 | |||||
1248 | Value *MemSetSize = MemSet->getLength(); | ||||
1249 | Value *CopySize = MemCpy->getLength(); | ||||
1250 | |||||
1251 | if (MemSetSize != CopySize) { | ||||
1252 | // Make sure the memcpy doesn't read any more than what the memset wrote. | ||||
1253 | // Don't worry about sizes larger than i64. | ||||
1254 | |||||
1255 | // A known memset size is required. | ||||
1256 | ConstantInt *CMemSetSize = dyn_cast<ConstantInt>(MemSetSize); | ||||
1257 | if (!CMemSetSize) | ||||
1258 | return false; | ||||
1259 | |||||
1260 | // A known memcpy size is also required. | ||||
1261 | ConstantInt *CCopySize = dyn_cast<ConstantInt>(CopySize); | ||||
1262 | if (!CCopySize) | ||||
1263 | return false; | ||||
1264 | if (CCopySize->getZExtValue() > CMemSetSize->getZExtValue()) { | ||||
1265 | // If the memcpy is larger than the memset, but the memory was undef prior | ||||
1266 | // to the memset, we can just ignore the tail. Technically we're only | ||||
1267 | // interested in the bytes from MemSetSize..CopySize here, but as we can't | ||||
1268 | // easily represent this location, we use the full 0..CopySize range. | ||||
1269 | MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy); | ||||
1270 | bool CanReduceSize = false; | ||||
1271 | MemoryUseOrDef *MemSetAccess = MSSA->getMemoryAccess(MemSet); | ||||
1272 | MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess( | ||||
1273 | MemSetAccess->getDefiningAccess(), MemCpyLoc); | ||||
1274 | if (auto *MD = dyn_cast<MemoryDef>(Clobber)) | ||||
1275 | if (hasUndefContents(MSSA, AA, MemCpy->getSource(), MD, CopySize)) | ||||
1276 | CanReduceSize = true; | ||||
1277 | |||||
1278 | if (!CanReduceSize) | ||||
1279 | return false; | ||||
1280 | CopySize = MemSetSize; | ||||
1281 | } | ||||
1282 | } | ||||
1283 | |||||
1284 | IRBuilder<> Builder(MemCpy); | ||||
1285 | Instruction *NewM = | ||||
1286 | Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1), | ||||
1287 | CopySize, MaybeAlign(MemCpy->getDestAlignment())); | ||||
1288 | auto *LastDef = | ||||
1289 | cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)); | ||||
1290 | auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef); | ||||
1291 | MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true); | ||||
1292 | |||||
1293 | return true; | ||||
1294 | } | ||||
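
A sketch of this rewrite at the source level (illustrative only; function names are hypothetical):

  #include <cstring>

  void before(char *dst1, char *dst2) {
    memset(dst1, 0xAA, 64);
    memcpy(dst2, dst1, 32);  // reads only bytes the memset wrote
  }

  void after(char *dst1, char *dst2) {
    memset(dst1, 0xAA, 64);  // now removable by DSE if dst1 is otherwise dead
    memset(dst2, 0xAA, 32);  // no longer depends on dst1
  }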
1295 | |||||
1296 | /// Perform simplification of memcpy's. If we have memcpy A | ||||
1297 | /// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite | ||||
1298 | /// B to be a memcpy from X to Z (or potentially a memmove, depending on | ||||
1299 | /// circumstances). This allows later passes to remove the first memcpy | ||||
1300 | /// altogether. | ||||
1301 | bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) { | ||||
1302 | // We can only optimize non-volatile memcpy's. | ||||
1303 | if (M->isVolatile()) return false; | ||||
1304 | |||||
1305 | // If the source and destination of the memcpy are the same, then zap it. | ||||
1306 | if (M->getSource() == M->getDest()) { | ||||
1307 | ++BBI; | ||||
1308 | eraseInstruction(M); | ||||
1309 | return true; | ||||
1310 | } | ||||
1311 | |||||
1312 | // If copying from a constant, try to turn the memcpy into a memset. | ||||
1313 | if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource())) | ||||
1314 | if (GV->isConstant() && GV->hasDefinitiveInitializer()) | ||||
1315 | if (Value *ByteVal = isBytewiseValue(GV->getInitializer(), | ||||
1316 | M->getModule()->getDataLayout())) { | ||||
1317 | IRBuilder<> Builder(M); | ||||
1318 | Instruction *NewM = | ||||
1319 | Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(), | ||||
1320 | MaybeAlign(M->getDestAlignment()), false); | ||||
1321 | auto *LastDef = | ||||
1322 | cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M)); | ||||
1323 | auto *NewAccess = | ||||
1324 | MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef); | ||||
1325 | MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true); | ||||
1326 | |||||
1327 | eraseInstruction(M); | ||||
1328 | ++NumCpyToSet; | ||||
1329 | return true; | ||||
1330 | } | ||||
1331 | |||||
1332 | MemoryUseOrDef *MA = MSSA->getMemoryAccess(M); | ||||
1333 | MemoryAccess *AnyClobber = MSSA->getWalker()->getClobberingMemoryAccess(MA); | ||||
1334 | MemoryLocation DestLoc = MemoryLocation::getForDest(M); | ||||
1335 | const MemoryAccess *DestClobber = | ||||
1336 | MSSA->getWalker()->getClobberingMemoryAccess(AnyClobber, DestLoc); | ||||
1337 | |||||
1338 | // Try to turn a partially redundant memset + memcpy into | ||||
1339 | // memcpy + smaller memset. We don't need the memcpy size for this. | ||||
1340 | // The memcpy must post-dominate the memset, so limit this to the same basic | ||||
1341 | // block. A non-local generalization is likely not worthwhile. | ||||
1342 | if (auto *MD = dyn_cast<MemoryDef>(DestClobber)) | ||||
1343 | if (auto *MDep = dyn_cast_or_null<MemSetInst>(MD->getMemoryInst())) | ||||
1344 | if (DestClobber->getBlock() == M->getParent()) | ||||
1345 | if (processMemSetMemCpyDependence(M, MDep)) | ||||
1346 | return true; | ||||
1347 | |||||
1348 | MemoryAccess *SrcClobber = MSSA->getWalker()->getClobberingMemoryAccess( | ||||
1349 | AnyClobber, MemoryLocation::getForSource(M)); | ||||
1350 | |||||
1351 | // There are four possible optimizations we can do for memcpy: | ||||
1352 | // a) memcpy-memcpy xform which exposes redundancy for DSE. | ||||
1353 | // b) call-memcpy xform for return slot optimization. | ||||
1354 | // c) memcpy from freshly alloca'd space or space that has just started | ||||
1355 | // its lifetime copies undefined data, and we can therefore eliminate | ||||
1356 | // the memcpy in favor of the data that was already at the destination. | ||||
1357 | // d) memcpy from a just-memset'd source can be turned into memset. | ||||
1358 | if (auto *MD = dyn_cast<MemoryDef>(SrcClobber)) { | ||||
1359 | if (Instruction *MI = MD->getMemoryInst()) { | ||||
1360 | if (ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength())) { | ||||
1361 | if (auto *C = dyn_cast<CallInst>(MI)) { | ||||
1362 | // The memcpy must post-dom the call. Limit to the same block for | ||||
1363 | // now. Additionally, we need to ensure that there are no accesses | ||||
1364 | // to dest between the call and the memcpy. Accesses to src will be | ||||
1365 | // checked by performCallSlotOptzn(). | ||||
1366 | // TODO: Support non-local call-slot optimization? | ||||
1367 | if (C->getParent() == M->getParent() && | ||||
1368 | !accessedBetween(*AA, DestLoc, MD, MA)) { | ||||
1369 | // FIXME: Can we pass in either of dest/src alignment here instead | ||||
1370 | // of conservatively taking the minimum? | ||||
1371 | Align Alignment = std::min(M->getDestAlign().valueOrOne(), | ||||
1372 | M->getSourceAlign().valueOrOne()); | ||||
1373 | if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(), | ||||
1374 | CopySize->getZExtValue(), Alignment, C)) { | ||||
1375 | LLVM_DEBUG(dbgs() << "Performed call slot optimization:\n" | ||||
1376 | << " call: " << *C << "\n" | ||||
1377 | << " memcpy: " << *M << "\n"); | ||||
1378 | eraseInstruction(M); | ||||
1379 | ++NumMemCpyInstr; | ||||
1380 | return true; | ||||
1381 | } | ||||
1382 | } | ||||
1383 | } | ||||
1384 | } | ||||
1385 | if (auto *MDep = dyn_cast<MemCpyInst>(MI)) | ||||
1386 | return processMemCpyMemCpyDependence(M, MDep); | ||||
1387 | if (auto *MDep = dyn_cast<MemSetInst>(MI)) { | ||||
1388 | if (performMemCpyToMemSetOptzn(M, MDep)) { | ||||
1389 | LLVM_DEBUG(dbgs() << "Converted memcpy to memset\n"); | ||||
1390 | eraseInstruction(M); | ||||
1391 | ++NumCpyToSet; | ||||
1392 | return true; | ||||
1393 | } | ||||
1394 | } | ||||
1395 | } | ||||
1396 | |||||
1397 | if (hasUndefContents(MSSA, AA, M->getSource(), MD, M->getLength())) { | ||||
1398 | LLVM_DEBUG(dbgs() << "Removed memcpy from undef\n"); | ||||
1399 | eraseInstruction(M); | ||||
1400 | ++NumMemCpyInstr; | ||||
1401 | return true; | ||||
1402 | } | ||||
1403 | } | ||||
1404 | |||||
1405 | return false; | ||||
1406 | } | ||||
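
Case a), the memcpy-memcpy forwarding described in the comment above, sketched at the source level (illustrative; names are hypothetical):

  #include <cstring>

  void before(char *x, char *y, char *z) {
    memcpy(y, x, 32);  // A: copies X to Y
    memcpy(z, y, 32);  // B: copies Y to Z
  }

  void after(char *x, char *y, char *z) {
    memcpy(y, x, 32);  // becomes removable if Y has no other uses
    memcpy(z, x, 32);  // B rewritten to copy directly from X
  }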
1407 | |||||
1408 | /// Transforms memmove calls to memcpy calls when the src/dst are guaranteed | ||||
1409 | /// not to alias. | ||||
1410 | bool MemCpyOptPass::processMemMove(MemMoveInst *M) { | ||||
1411 | // See if the source could potentially be modified by this memmove. | ||||
1412 | if (isModSet(AA->getModRefInfo(M, MemoryLocation::getForSource(M)))) | ||||
1413 | return false; | ||||
1414 | |||||
1415 | LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M | ||||
1416 | << "\n"); | ||||
1417 | |||||
1418 | // If not, then we know we can transform this. | ||||
1419 | Type *ArgTys[3] = { M->getRawDest()->getType(), | ||||
1420 | M->getRawSource()->getType(), | ||||
1421 | M->getLength()->getType() }; | ||||
1422 | M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(), | ||||
1423 | Intrinsic::memcpy, ArgTys)); | ||||
1424 | |||||
1425 | // For MemorySSA nothing really changes (except that memcpy may imply stricter | ||||
1426 | // aliasing guarantees). | ||||
1427 | |||||
1428 | ++NumMoveToCpy; | ||||
1429 | return true; | ||||
1430 | } | ||||
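
For example (a sketch; the __restrict qualifiers stand in for the no-alias facts the pass obtains from alias analysis):

  #include <cstring>

  void f(char *__restrict dst, const char *__restrict src, unsigned n) {
    // src and dst provably never overlap, so the overlap handling that
    // memmove pays for is unnecessary and the call is rewritten to memcpy.
    memmove(dst, src, n);
  }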
1431 | |||||
1432 | /// This is called on every byval argument in call sites. | ||||
1433 | bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) { | ||||
1434 | const DataLayout &DL = CB.getCaller()->getParent()->getDataLayout(); | ||||
1435 | // Find out what feeds this byval argument. | ||||
1436 | Value *ByValArg = CB.getArgOperand(ArgNo); | ||||
1437 | Type *ByValTy = CB.getParamByValType(ArgNo); | ||||
1438 | uint64_t ByValSize = DL.getTypeAllocSize(ByValTy); | ||||
1439 | MemoryLocation Loc(ByValArg, LocationSize::precise(ByValSize)); | ||||
1440 | MemoryUseOrDef *CallAccess = MSSA->getMemoryAccess(&CB); | ||||
1441 | if (!CallAccess) | ||||
1442 | return false; | ||||
1443 | MemCpyInst *MDep = nullptr; | ||||
1444 | MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess( | ||||
1445 | CallAccess->getDefiningAccess(), Loc); | ||||
1446 | if (auto *MD = dyn_cast<MemoryDef>(Clobber)) | ||||
1447 | MDep = dyn_cast_or_null<MemCpyInst>(MD->getMemoryInst()); | ||||
1448 | |||||
1449 | // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by | ||||
1450 | // a memcpy, see if we can byval from the source of the memcpy instead of the | ||||
1451 | // result. | ||||
1452 | if (!MDep || MDep->isVolatile() || | ||||
1453 | ByValArg->stripPointerCasts() != MDep->getDest()) | ||||
1454 | return false; | ||||
1455 | |||||
1456 | // The length of the memcpy must be larger than or equal to the size of the byval. | ||||
1457 | ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength()); | ||||
1458 | if (!C1 || C1->getValue().getZExtValue() < ByValSize) | ||||
1459 | return false; | ||||
1460 | |||||
1461 | // Get the alignment of the byval. If the call doesn't specify the alignment, | ||||
1462 | // then it is some target specific value that we can't know. | ||||
1463 | MaybeAlign ByValAlign = CB.getParamAlign(ArgNo); | ||||
1464 | if (!ByValAlign) return false; | ||||
1465 | |||||
1466 | // If it is greater than the memcpy, then we check to see if we can force the | ||||
1467 | // source of the memcpy to the alignment we need. If we fail, we bail out. | ||||
1468 | MaybeAlign MemDepAlign = MDep->getSourceAlign(); | ||||
1469 | if ((!MemDepAlign || *MemDepAlign < *ByValAlign) && | ||||
1470 | getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL, &CB, AC, | ||||
1471 | DT) < *ByValAlign) | ||||
1472 | return false; | ||||
1473 | |||||
1474 | // The address space of the memcpy source must match the byval argument. | ||||
1475 | if (MDep->getSource()->getType()->getPointerAddressSpace() != | ||||
1476 | ByValArg->getType()->getPointerAddressSpace()) | ||||
1477 | return false; | ||||
1478 | |||||
1479 | // Verify that the copied-from memory doesn't change in between the memcpy and | ||||
1480 | // the byval call. | ||||
1481 | // memcpy(a <- b) | ||||
1482 | // *b = 42; | ||||
1483 | // foo(*a) | ||||
1484 | // It would be invalid to transform the second memcpy into foo(*b). | ||||
1485 | if (writtenBetween(MSSA, MemoryLocation::getForSource(MDep), | ||||
1486 | MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(&CB))) | ||||
1487 | return false; | ||||
1488 | |||||
1489 | Value *TmpCast = MDep->getSource(); | ||||
1490 | if (MDep->getSource()->getType() != ByValArg->getType()) { | ||||
1491 | BitCastInst *TmpBitCast = new BitCastInst(MDep->getSource(), ByValArg->getType(), | ||||
1492 | "tmpcast", &CB); | ||||
1493 | // Set the tmpcast's DebugLoc to MDep's | ||||
1494 | TmpBitCast->setDebugLoc(MDep->getDebugLoc()); | ||||
1495 | TmpCast = TmpBitCast; | ||||
1496 | } | ||||
1497 | |||||
1498 | LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n" | ||||
1499 | << " " << *MDep << "\n" | ||||
1500 | << " " << CB << "\n"); | ||||
1501 | |||||
1502 | // Otherwise we're good! Update the byval argument. | ||||
1503 | CB.setArgOperand(ArgNo, TmpCast); | ||||
1504 | ++NumMemCpyInstr; | ||||
1505 | return true; | ||||
1506 | } | ||||
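
Conceptually (a hypothetical sketch; byval is an IR-level attribute, so the C++ below only approximates the pattern):

  struct S { int v[16]; };
  void callee(S s);     // lowered with a byval copy on many ABIs

  void caller(S *src) {
    S tmp = *src;       // emitted as a memcpy into tmp
    callee(tmp);        // the byval convention copies tmp once more
  }

After the transform the byval argument is taken directly from *src, making tmp and its memcpy dead; this is valid only because nothing writes *src between the memcpy and the call, which is what the writtenBetween() check above establishes.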
1507 | |||||
1508 | /// Executes one iteration of MemCpyOptPass. | ||||
1509 | bool MemCpyOptPass::iterateOnFunction(Function &F) { | ||||
1510 | bool MadeChange = false; | ||||
1511 | |||||
1512 | // Walk all instructions in the function. | ||||
1513 | for (BasicBlock &BB : F) { | ||||
1514 | // Skip unreachable blocks. For example processStore assumes that an | ||||
1515 | // instruction in a BB can't be dominated by a later instruction in the | ||||
1516 | // same BB (which is a scenario that can happen for an unreachable BB that | ||||
1517 | // has itself as a predecessor). | ||||
1518 | if (!DT->isReachableFromEntry(&BB)) | ||||
1519 | continue; | ||||
1520 | |||||
1521 | for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) { | ||||
1522 | // Avoid invalidating the iterator. | ||||
1523 | Instruction *I = &*BI++; | ||||
1524 | |||||
1525 | bool RepeatInstruction = false; | ||||
1526 | |||||
1527 | if (StoreInst *SI = dyn_cast<StoreInst>(I)) | ||||
1528 | MadeChange |= processStore(SI, BI); | ||||
1529 | else if (MemSetInst *M = dyn_cast<MemSetInst>(I)) | ||||
1530 | RepeatInstruction = processMemSet(M, BI); | ||||
1531 | else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I)) | ||||
1532 | RepeatInstruction = processMemCpy(M, BI); | ||||
1533 | else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I)) | ||||
1534 | RepeatInstruction = processMemMove(M); | ||||
1535 | else if (auto *CB = dyn_cast<CallBase>(I)) { | ||||
1536 | for (unsigned i = 0, e = CB->arg_size(); i != e; ++i) | ||||
1537 | if (CB->isByValArgument(i)) | ||||
1538 | MadeChange |= processByValArgument(*CB, i); | ||||
1539 | } | ||||
1540 | |||||
1541 | // Reprocess the instruction if desired. | ||||
1542 | if (RepeatInstruction) { | ||||
1543 | if (BI != BB.begin()) | ||||
1544 | --BI; | ||||
1545 | MadeChange = true; | ||||
1546 | } | ||||
1547 | } | ||||
1548 | } | ||||
1549 | |||||
1550 | return MadeChange; | ||||
1551 | } | ||||
1552 | |||||
1553 | PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) { | ||||
1554 | auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); | ||||
1555 | auto *AA = &AM.getResult<AAManager>(F); | ||||
1556 | auto *AC = &AM.getResult<AssumptionAnalysis>(F); | ||||
1557 | auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); | ||||
1558 | auto *MSSA = &AM.getResult<MemorySSAAnalysis>(F); | ||||
1559 | |||||
1560 | bool MadeChange = runImpl(F, &TLI, AA, AC, DT, &MSSA->getMSSA()); | ||||
1561 | if (!MadeChange) | ||||
1562 | return PreservedAnalyses::all(); | ||||
1563 | |||||
1564 | PreservedAnalyses PA; | ||||
1565 | PA.preserveSet<CFGAnalyses>(); | ||||
1566 | PA.preserve<MemorySSAAnalysis>(); | ||||
1567 | return PA; | ||||
1568 | } | ||||
1569 | |||||
1570 | bool MemCpyOptPass::runImpl(Function &F, TargetLibraryInfo *TLI_, | ||||
1571 | AliasAnalysis *AA_, AssumptionCache *AC_, | ||||
1572 | DominatorTree *DT_, MemorySSA *MSSA_) { | ||||
1573 | bool MadeChange = false; | ||||
1574 | TLI = TLI_; | ||||
1575 | AA = AA_; | ||||
1576 | AC = AC_; | ||||
1577 | DT = DT_; | ||||
1578 | MSSA = MSSA_; | ||||
1579 | MemorySSAUpdater MSSAU_(MSSA_); | ||||
1580 | MSSAU = &MSSAU_; | ||||
1581 | |||||
1582 | while (true) { | ||||
1583 | if (!iterateOnFunction(F)) | ||||
1584 | break; | ||||
1585 | MadeChange = true; | ||||
1586 | } | ||||
1587 | |||||
1588 | if (VerifyMemorySSA) | ||||
1589 | MSSA_->verifyMemorySSA(); | ||||
1590 | |||||
1591 | return MadeChange; | ||||
1592 | } | ||||
1593 | |||||
1594 | /// This is the main transformation entry point for a function. | ||||
1595 | bool MemCpyOptLegacyPass::runOnFunction(Function &F) { | ||||
1596 | if (skipFunction(F)) | ||||
1597 | return false; | ||||
1598 | |||||
1599 | auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); | ||||
1600 | auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); | ||||
1601 | auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); | ||||
1602 | auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); | ||||
1603 | auto *MSSA = &getAnalysis<MemorySSAWrapperPass>().getMSSA(); | ||||
1604 | |||||
1605 | return Impl.runImpl(F, TLI, AA, AC, DT, MSSA); | ||||
1606 | } |
1 | //===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file defines the SmallVector class. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #ifndef LLVM_ADT_SMALLVECTOR_H |
14 | #define LLVM_ADT_SMALLVECTOR_H |
15 | |
16 | #include "llvm/ADT/iterator_range.h" |
17 | #include "llvm/Support/Compiler.h" |
18 | #include "llvm/Support/ErrorHandling.h" |
19 | #include "llvm/Support/MemAlloc.h" |
20 | #include "llvm/Support/type_traits.h" |
21 | #include <algorithm> |
22 | #include <cassert> |
23 | #include <cstddef> |
24 | #include <cstdlib> |
25 | #include <cstring> |
26 | #include <functional> |
27 | #include <initializer_list> |
28 | #include <iterator> |
29 | #include <limits> |
30 | #include <memory> |
31 | #include <new> |
32 | #include <type_traits> |
33 | #include <utility> |
34 | |
35 | namespace llvm { |
36 | |
37 | /// This is all the stuff common to all SmallVectors. |
38 | /// |
39 | /// The template parameter specifies the type which should be used to hold the |
40 | /// Size and Capacity of the SmallVector, so it can be adjusted. |
41 | /// Using 32 bit size is desirable to shrink the size of the SmallVector. |
42 | /// Using 64 bit size is desirable for cases like SmallVector<char>, where a |
43 | /// 32 bit size would limit the vector to ~4GB. SmallVectors are used for |
44 | /// buffering bitcode output - which can exceed 4GB. |
45 | template <class Size_T> class SmallVectorBase { |
46 | protected: |
47 | void *BeginX; |
48 | Size_T Size = 0, Capacity; |
49 | |
50 | /// The maximum value of the Size_T used. |
51 | static constexpr size_t SizeTypeMax() { |
52 | return std::numeric_limits<Size_T>::max(); |
53 | } |
54 | |
55 | SmallVectorBase() = delete; |
56 | SmallVectorBase(void *FirstEl, size_t TotalCapacity) |
57 | : BeginX(FirstEl), Capacity(TotalCapacity) {} |
58 | |
59 | /// This is a helper for \a grow() that's out of line to reduce code |
60 | /// duplication. This function will report a fatal error if it can't grow at |
61 | /// least to \p MinSize. |
62 | void *mallocForGrow(size_t MinSize, size_t TSize, size_t &NewCapacity); |
63 | |
64 | /// This is an implementation of the grow() method which only works |
65 | /// on POD-like data types and is out of line to reduce code duplication. |
66 | /// This function will report a fatal error if it cannot increase capacity. |
67 | void grow_pod(void *FirstEl, size_t MinSize, size_t TSize); |
68 | |
69 | public: |
70 | size_t size() const { return Size; } |
71 | size_t capacity() const { return Capacity; } |
72 | |
73 | LLVM_NODISCARD bool empty() const { return !Size; }
74 | |
75 | /// Set the array size to \p N, which the current array must have enough |
76 | /// capacity for. |
77 | /// |
78 | /// This does not construct or destroy any elements in the vector. |
79 | /// |
80 | /// Clients can use this in conjunction with capacity() to write past the end |
81 | /// of the buffer when they know that more elements are available, and only |
82 | /// update the size later. This avoids the cost of value initializing elements |
83 | /// which will only be overwritten. |
84 | void set_size(size_t N) { |
85 | assert(N <= capacity());
86 | Size = N; |
87 | } |
88 | }; |
89 | |
90 | template <class T> |
91 | using SmallVectorSizeType = |
92 | typename std::conditional<sizeof(T) < 4 && sizeof(void *) >= 8, uint64_t, |
93 | uint32_t>::type; |
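
For instance, on a typical LP64 host this picks a 64-bit size type only for elements smaller than 4 bytes (illustrative static_asserts, not part of the header):

  #include "llvm/ADT/SmallVector.h"

  static_assert(sizeof(llvm::SmallVectorSizeType<char>) == 8,
                "tiny elements get 64-bit Size/Capacity on 64-bit hosts");
  static_assert(sizeof(llvm::SmallVectorSizeType<void *>) == 4,
                "everything else keeps the compact 32-bit fields");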
94 | |
95 | /// Figure out the offset of the first element. |
96 | template <class T, typename = void> struct SmallVectorAlignmentAndSize { |
97 | alignas(SmallVectorBase<SmallVectorSizeType<T>>) char Base[sizeof( |
98 | SmallVectorBase<SmallVectorSizeType<T>>)]; |
99 | alignas(T) char FirstEl[sizeof(T)]; |
100 | }; |
101 | |
102 | /// This is the part of SmallVectorTemplateBase which does not depend on whether |
103 | /// the type T is a POD. The extra dummy template argument is used by ArrayRef |
104 | /// to avoid unnecessarily requiring T to be complete. |
105 | template <typename T, typename = void> |
106 | class SmallVectorTemplateCommon |
107 | : public SmallVectorBase<SmallVectorSizeType<T>> { |
108 | using Base = SmallVectorBase<SmallVectorSizeType<T>>; |
109 | |
110 | /// Find the address of the first element. For this pointer math to be valid |
111 | /// with small-size of 0 for T with lots of alignment, it's important that |
112 | /// SmallVectorStorage is properly-aligned even for small-size of 0. |
113 | void *getFirstEl() const { |
114 | return const_cast<void *>(reinterpret_cast<const void *>( |
115 | reinterpret_cast<const char *>(this) + |
116 | offsetof(SmallVectorAlignmentAndSize<T>, FirstEl)));
117 | } |
118 | // Space after 'FirstEl' is clobbered, do not add any instance vars after it. |
119 | |
120 | protected: |
121 | SmallVectorTemplateCommon(size_t Size) : Base(getFirstEl(), Size) {} |
122 | |
123 | void grow_pod(size_t MinSize, size_t TSize) { |
124 | Base::grow_pod(getFirstEl(), MinSize, TSize); |
125 | } |
126 | |
127 | /// Return true if this is a smallvector which has not had dynamic |
128 | /// memory allocated for it. |
129 | bool isSmall() const { return this->BeginX == getFirstEl(); } |
130 | |
131 | /// Put this vector in a state of being small. |
132 | void resetToSmall() { |
133 | this->BeginX = getFirstEl(); |
134 | this->Size = this->Capacity = 0; // FIXME: Setting Capacity to 0 is suspect. |
135 | } |
136 | |
137 | /// Return true if V is an internal reference to the given range. |
138 | bool isReferenceToRange(const void *V, const void *First, const void *Last) const { |
139 | // Use std::less to avoid UB. |
140 | std::less<> LessThan; |
141 | return !LessThan(V, First) && LessThan(V, Last); |
142 | } |
143 | |
144 | /// Return true if V is an internal reference to this vector. |
145 | bool isReferenceToStorage(const void *V) const { |
146 | return isReferenceToRange(V, this->begin(), this->end()); |
147 | } |
148 | |
149 | /// Return true if First and Last form a valid (possibly empty) range in this |
150 | /// vector's storage. |
151 | bool isRangeInStorage(const void *First, const void *Last) const { |
152 | // Use std::less to avoid UB. |
153 | std::less<> LessThan; |
154 | return !LessThan(First, this->begin()) && !LessThan(Last, First) && |
155 | !LessThan(this->end(), Last); |
156 | } |
157 | |
158 | /// Return true unless Elt will be invalidated by resizing the vector to |
159 | /// NewSize. |
160 | bool isSafeToReferenceAfterResize(const void *Elt, size_t NewSize) { |
161 | // Past the end. |
162 | if (LLVM_LIKELY(!isReferenceToStorage(Elt)))
163 | return true; |
164 | |
165 | // Return false if Elt will be destroyed by shrinking. |
166 | if (NewSize <= this->size()) |
167 | return Elt < this->begin() + NewSize; |
168 | |
169 | // Return false if we need to grow. |
170 | return NewSize <= this->capacity(); |
171 | } |
172 | |
173 | /// Check whether Elt will be invalidated by resizing the vector to NewSize. |
174 | void assertSafeToReferenceAfterResize(const void *Elt, size_t NewSize) { |
175 | assert(isSafeToReferenceAfterResize(Elt, NewSize) &&
176 | "Attempting to reference an element of the vector in an operation "
177 | "that invalidates it");
178 | } |
179 | |
180 | /// Check whether Elt will be invalidated by increasing the size of the |
181 | /// vector by N. |
182 | void assertSafeToAdd(const void *Elt, size_t N = 1) { |
183 | this->assertSafeToReferenceAfterResize(Elt, this->size() + N); |
184 | } |
185 | |
186 | /// Check whether any part of the range will be invalidated by clearing. |
187 | void assertSafeToReferenceAfterClear(const T *From, const T *To) { |
188 | if (From == To) |
189 | return; |
190 | this->assertSafeToReferenceAfterResize(From, 0); |
191 | this->assertSafeToReferenceAfterResize(To - 1, 0); |
192 | } |
193 | template < |
194 | class ItTy, |
195 | std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value, |
196 | bool> = false> |
197 | void assertSafeToReferenceAfterClear(ItTy, ItTy) {} |
198 | |
199 | /// Check whether any part of the range will be invalidated by growing. |
200 | void assertSafeToAddRange(const T *From, const T *To) { |
201 | if (From == To) |
202 | return; |
203 | this->assertSafeToAdd(From, To - From); |
204 | this->assertSafeToAdd(To - 1, To - From); |
205 | } |
206 | template < |
207 | class ItTy, |
208 | std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value, |
209 | bool> = false> |
210 | void assertSafeToAddRange(ItTy, ItTy) {} |
211 | |
212 | /// Reserve enough space to add one element, and return the updated element |
213 | /// pointer in case it was a reference to the storage. |
214 | template <class U> |
215 | static const T *reserveForParamAndGetAddressImpl(U *This, const T &Elt, |
216 | size_t N) { |
217 | size_t NewSize = This->size() + N; |
218 | if (LLVM_LIKELY(NewSize <= This->capacity()))
219 | return &Elt; |
220 | |
221 | bool ReferencesStorage = false; |
222 | int64_t Index = -1; |
223 | if (!U::TakesParamByValue) { |
224 | if (LLVM_UNLIKELY(This->isReferenceToStorage(&Elt))) {
225 | ReferencesStorage = true; |
226 | Index = &Elt - This->begin(); |
227 | } |
228 | } |
229 | This->grow(NewSize); |
230 | return ReferencesStorage ? This->begin() + Index : &Elt; |
231 | } |
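
The index fixup above is what makes self-referencing pushes of non-trivial types well-defined even when they force a reallocation (usage sketch):

  #include <string>
  #include "llvm/ADT/SmallVector.h"

  void demo() {
    llvm::SmallVector<std::string, 1> V;
    V.push_back("first");  // inline storage is now full
    V.push_back(V[0]);     // grow() invalidates &V[0]; the saved index
                           // re-derives it, so V == {"first", "first"}
  }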
232 | |
233 | public: |
234 | using size_type = size_t; |
235 | using difference_type = ptrdiff_t; |
236 | using value_type = T; |
237 | using iterator = T *; |
238 | using const_iterator = const T *; |
239 | |
240 | using const_reverse_iterator = std::reverse_iterator<const_iterator>; |
241 | using reverse_iterator = std::reverse_iterator<iterator>; |
242 | |
243 | using reference = T &; |
244 | using const_reference = const T &; |
245 | using pointer = T *; |
246 | using const_pointer = const T *; |
247 | |
248 | using Base::capacity; |
249 | using Base::empty; |
250 | using Base::size; |
251 | |
252 | // forward iterator creation methods. |
253 | iterator begin() { return (iterator)this->BeginX; } |
254 | const_iterator begin() const { return (const_iterator)this->BeginX; } |
255 | iterator end() { return begin() + size(); } |
256 | const_iterator end() const { return begin() + size(); } |
257 | |
258 | // reverse iterator creation methods. |
259 | reverse_iterator rbegin() { return reverse_iterator(end()); } |
260 | const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); }
261 | reverse_iterator rend() { return reverse_iterator(begin()); }
262 | const_reverse_iterator rend() const { return const_reverse_iterator(begin()); }
263 | |
264 | size_type size_in_bytes() const { return size() * sizeof(T); } |
265 | size_type max_size() const { |
266 | return std::min(this->SizeTypeMax(), size_type(-1) / sizeof(T)); |
267 | } |
268 | |
269 | size_t capacity_in_bytes() const { return capacity() * sizeof(T); } |
270 | |
271 | /// Return a pointer to the vector's buffer, even if empty(). |
272 | pointer data() { return pointer(begin()); } |
273 | /// Return a pointer to the vector's buffer, even if empty(). |
274 | const_pointer data() const { return const_pointer(begin()); } |
275 | |
276 | reference operator[](size_type idx) { |
277 | assert(idx < size());
278 | return begin()[idx]; |
279 | } |
280 | const_reference operator[](size_type idx) const { |
281 | assert(idx < size());
282 | return begin()[idx]; |
283 | } |
284 | |
285 | reference front() { |
286 | assert(!empty());
287 | return begin()[0]; |
288 | } |
289 | const_reference front() const { |
290 | assert(!empty());
291 | return begin()[0]; |
292 | } |
293 | |
294 | reference back() { |
295 | assert(!empty());
296 | return end()[-1]; |
297 | } |
298 | const_reference back() const { |
299 | assert(!empty());
300 | return end()[-1]; |
301 | } |
302 | }; |
303 | |
304 | /// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put |
305 | /// method implementations that are designed to work with non-trivial T's. |
306 | /// |
307 | /// We approximate is_trivially_copyable with trivial move/copy construction and |
308 | /// trivial destruction. While the standard doesn't specify that you're allowed
309 | /// to copy these types with memcpy, there is no way for the type to observe this.
310 | /// This catches the important case of std::pair<POD, POD>, which is not |
311 | /// trivially assignable. |
312 | template <typename T, bool = (is_trivially_copy_constructible<T>::value) && |
313 | (is_trivially_move_constructible<T>::value) && |
314 | std::is_trivially_destructible<T>::value> |
315 | class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> { |
316 | friend class SmallVectorTemplateCommon<T>; |
317 | |
318 | protected: |
319 | static constexpr bool TakesParamByValue = false; |
320 | using ValueParamT = const T &; |
321 | |
322 | SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {} |
323 | |
324 | static void destroy_range(T *S, T *E) { |
325 | while (S != E) { |
326 | --E; |
327 | E->~T(); |
328 | } |
329 | } |
330 | |
331 | /// Move the range [I, E) into the uninitialized memory starting with "Dest", |
332 | /// constructing elements as needed. |
333 | template<typename It1, typename It2> |
334 | static void uninitialized_move(It1 I, It1 E, It2 Dest) { |
335 | std::uninitialized_copy(std::make_move_iterator(I), |
336 | std::make_move_iterator(E), Dest); |
337 | } |
338 | |
339 | /// Copy the range [I, E) onto the uninitialized memory starting with "Dest", |
340 | /// constructing elements as needed. |
341 | template<typename It1, typename It2> |
342 | static void uninitialized_copy(It1 I, It1 E, It2 Dest) { |
343 | std::uninitialized_copy(I, E, Dest); |
344 | } |
345 | |
346 | /// Grow the allocated memory (without initializing new elements), doubling |
347 | /// the size of the allocated memory. Guarantees space for at least one more |
348 | /// element, or MinSize more elements if specified. |
349 | void grow(size_t MinSize = 0); |
350 | |
351 | /// Create a new allocation big enough for \p MinSize and pass back its size |
352 | /// in \p NewCapacity. This is the first section of \a grow(). |
353 | T *mallocForGrow(size_t MinSize, size_t &NewCapacity) { |
354 | return static_cast<T *>( |
355 | SmallVectorBase<SmallVectorSizeType<T>>::mallocForGrow( |
356 | MinSize, sizeof(T), NewCapacity)); |
357 | } |
358 | |
359 | /// Move existing elements over to the new allocation \p NewElts, the middle |
360 | /// section of \a grow(). |
361 | void moveElementsForGrow(T *NewElts); |
362 | |
363 | /// Transfer ownership of the allocation, finishing up \a grow(). |
364 | void takeAllocationForGrow(T *NewElts, size_t NewCapacity); |
365 | |
366 | /// Reserve enough space to add one element, and return the updated element |
367 | /// pointer in case it was a reference to the storage. |
368 | const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) { |
369 | return this->reserveForParamAndGetAddressImpl(this, Elt, N); |
370 | } |
371 | |
372 | /// Reserve enough space to add one element, and return the updated element |
373 | /// pointer in case it was a reference to the storage. |
374 | T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) { |
375 | return const_cast<T *>( |
376 | this->reserveForParamAndGetAddressImpl(this, Elt, N)); |
377 | } |
378 | |
379 | static T &&forward_value_param(T &&V) { return std::move(V); } |
380 | static const T &forward_value_param(const T &V) { return V; } |
381 | |
382 | void growAndAssign(size_t NumElts, const T &Elt) { |
383 | // Grow manually in case Elt is an internal reference. |
384 | size_t NewCapacity; |
385 | T *NewElts = mallocForGrow(NumElts, NewCapacity); |
386 | std::uninitialized_fill_n(NewElts, NumElts, Elt); |
387 | this->destroy_range(this->begin(), this->end()); |
388 | takeAllocationForGrow(NewElts, NewCapacity); |
389 | this->set_size(NumElts); |
390 | } |
391 | |
392 | template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) { |
393 | // Grow manually in case one of Args is an internal reference. |
394 | size_t NewCapacity; |
395 | T *NewElts = mallocForGrow(0, NewCapacity); |
396 | ::new ((void *)(NewElts + this->size())) T(std::forward<ArgTypes>(Args)...); |
397 | moveElementsForGrow(NewElts); |
398 | takeAllocationForGrow(NewElts, NewCapacity); |
399 | this->set_size(this->size() + 1); |
400 | return this->back(); |
401 | } |
402 | |
403 | public: |
404 | void push_back(const T &Elt) { |
405 | const T *EltPtr = reserveForParamAndGetAddress(Elt); |
406 | ::new ((void *)this->end()) T(*EltPtr); |
407 | this->set_size(this->size() + 1); |
408 | } |
409 | |
410 | void push_back(T &&Elt) { |
411 | T *EltPtr = reserveForParamAndGetAddress(Elt); |
412 | ::new ((void *)this->end()) T(::std::move(*EltPtr)); |
413 | this->set_size(this->size() + 1); |
414 | } |
415 | |
416 | void pop_back() { |
417 | this->set_size(this->size() - 1); |
418 | this->end()->~T(); |
419 | } |
420 | }; |
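
To illustrate the dispatch on the bool template parameter (illustrative static_asserts, assuming the usual standard-library definitions):

  #include <memory>
  #include <utility>
  #include "llvm/Support/type_traits.h"

  // std::pair<int, int> is trivially copy/move constructible and trivially
  // destructible, so SmallVector sends it to the memcpy-based specialization
  // below even though it is not trivially assignable.
  static_assert(llvm::is_trivially_copy_constructible<std::pair<int, int>>::value, "");

  // std::unique_ptr<int> has a non-trivial destructor, so it stays on the
  // non-trivial path above with real constructor and destructor calls.
  static_assert(!std::is_trivially_destructible<std::unique_ptr<int>>::value, "");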
421 | |
422 | // Define this out-of-line to dissuade the C++ compiler from inlining it. |
423 | template <typename T, bool TriviallyCopyable> |
424 | void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) { |
425 | size_t NewCapacity; |
426 | T *NewElts = mallocForGrow(MinSize, NewCapacity); |
427 | moveElementsForGrow(NewElts); |
428 | takeAllocationForGrow(NewElts, NewCapacity); |
429 | } |
430 | |
431 | // Define this out-of-line to dissuade the C++ compiler from inlining it. |
432 | template <typename T, bool TriviallyCopyable> |
433 | void SmallVectorTemplateBase<T, TriviallyCopyable>::moveElementsForGrow( |
434 | T *NewElts) { |
435 | // Move the elements over. |
436 | this->uninitialized_move(this->begin(), this->end(), NewElts); |
437 | |
438 | // Destroy the original elements. |
439 | destroy_range(this->begin(), this->end()); |
440 | } |
441 | |
442 | // Define this out-of-line to dissuade the C++ compiler from inlining it. |
443 | template <typename T, bool TriviallyCopyable> |
444 | void SmallVectorTemplateBase<T, TriviallyCopyable>::takeAllocationForGrow( |
445 | T *NewElts, size_t NewCapacity) { |
446 | // If this wasn't grown from the inline copy, deallocate the old space. |
447 | if (!this->isSmall()) |
448 | free(this->begin()); |
449 | |
450 | this->BeginX = NewElts; |
451 | this->Capacity = NewCapacity; |
452 | } |
453 | |
454 | /// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put |
455 | /// method implementations that are designed to work with trivially copyable |
456 | /// T's. This allows using memcpy in place of copy/move construction and |
457 | /// skipping destruction. |
458 | template <typename T> |
459 | class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> { |
460 | friend class SmallVectorTemplateCommon<T>; |
461 | |
462 | protected: |
463 | /// True if it's cheap enough to take parameters by value. Doing so avoids |
464 | /// overhead related to mitigations for reference invalidation. |
465 | static constexpr bool TakesParamByValue = sizeof(T) <= 2 * sizeof(void *); |
466 | |
467 | /// Either const T& or T, depending on whether it's cheap enough to take |
468 | /// parameters by value. |
469 | using ValueParamT = |
470 | typename std::conditional<TakesParamByValue, T, const T &>::type; |
471 | |
472 | SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {} |
473 | |
474 | // No need to do a destroy loop for POD's. |
475 | static void destroy_range(T *, T *) {} |
476 | |
477 | /// Move the range [I, E) onto the uninitialized memory |
478 | /// starting with "Dest", constructing elements into it as needed. |
479 | template<typename It1, typename It2> |
480 | static void uninitialized_move(It1 I, It1 E, It2 Dest) { |
481 | // Just do a copy. |
482 | uninitialized_copy(I, E, Dest); |
483 | } |
484 | |
485 | /// Copy the range [I, E) onto the uninitialized memory |
486 | /// starting with "Dest", constructing elements into it as needed. |
487 | template<typename It1, typename It2> |
488 | static void uninitialized_copy(It1 I, It1 E, It2 Dest) { |
489 | // Arbitrary iterator types; just use the basic implementation. |
490 | std::uninitialized_copy(I, E, Dest); |
491 | } |
492 | |
493 | /// Copy the range [I, E) onto the uninitialized memory |
494 | /// starting with "Dest", constructing elements into it as needed. |
495 | template <typename T1, typename T2> |
496 | static void uninitialized_copy( |
497 | T1 *I, T1 *E, T2 *Dest, |
498 | std::enable_if_t<std::is_same<typename std::remove_const<T1>::type, |
499 | T2>::value> * = nullptr) { |
500 | // Use memcpy for PODs iterated by pointers (which includes SmallVector |
501 | // iterators): std::uninitialized_copy optimizes to memmove, but we can |
502 | // use memcpy here. Note that I and E are iterators and thus might be |
503 | // invalid for memcpy if they are equal. |
504 | if (I != E) |
505 | memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T)); |
506 | } |
507 | |
508 | /// Double the size of the allocated memory, guaranteeing space for at |
509 | /// least one more element or MinSize if specified. |
510 | void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); } |
511 | |
512 | /// Reserve enough space to add one element, and return the updated element |
513 | /// pointer in case it was a reference to the storage. |
514 | const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) { |
515 | return this->reserveForParamAndGetAddressImpl(this, Elt, N); |
516 | } |
517 | |
518 | /// Reserve enough space to add one element, and return the updated element |
519 | /// pointer in case it was a reference to the storage. |
520 | T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) { |
521 | return const_cast<T *>( |
522 | this->reserveForParamAndGetAddressImpl(this, Elt, N)); |
523 | } |
524 | |
525 | /// Copy \p V or return a reference, depending on \a ValueParamT. |
526 | static ValueParamT forward_value_param(ValueParamT V) { return V; } |
527 | |
528 | void growAndAssign(size_t NumElts, T Elt) { |
529 | // Elt has been copied in case it's an internal reference, side-stepping |
530 | // reference invalidation problems without losing the realloc optimization. |
531 | this->set_size(0); |
532 | this->grow(NumElts); |
533 | std::uninitialized_fill_n(this->begin(), NumElts, Elt); |
534 | this->set_size(NumElts); |
535 | } |
536 | |
537 | template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) { |
538 | // Use push_back with a copy in case Args has an internal reference, |
539 | // side-stepping reference invalidation problems without losing the realloc |
540 | // optimization. |
541 | push_back(T(std::forward<ArgTypes>(Args)...)); |
542 | return this->back(); |
543 | } |
544 | |
545 | public: |
546 | void push_back(ValueParamT Elt) { |
547 | const T *EltPtr = reserveForParamAndGetAddress(Elt); |
548 | memcpy(reinterpret_cast<void *>(this->end()), EltPtr, sizeof(T)); |
549 | this->set_size(this->size() + 1); |
550 | } |
551 | |
552 | void pop_back() { this->set_size(this->size() - 1); } |
553 | }; |
554 | |
555 | /// This class consists of common code factored out of the SmallVector class to |
556 | /// reduce code duplication based on the SmallVector 'N' template parameter. |
557 | template <typename T> |
558 | class SmallVectorImpl : public SmallVectorTemplateBase<T> { |
559 | using SuperClass = SmallVectorTemplateBase<T>; |
560 | |
561 | public: |
562 | using iterator = typename SuperClass::iterator; |
563 | using const_iterator = typename SuperClass::const_iterator; |
564 | using reference = typename SuperClass::reference; |
565 | using size_type = typename SuperClass::size_type; |
566 | |
567 | protected: |
568 | using SmallVectorTemplateBase<T>::TakesParamByValue; |
569 | using ValueParamT = typename SuperClass::ValueParamT; |
570 | |
571 | // Default ctor - Initialize to empty. |
572 | explicit SmallVectorImpl(unsigned N) |
573 | : SmallVectorTemplateBase<T>(N) {} |
574 | |
575 | public: |
576 | SmallVectorImpl(const SmallVectorImpl &) = delete; |
577 | |
578 | ~SmallVectorImpl() { |
579 | // Subclass has already destructed this vector's elements. |
580 | // If this wasn't grown from the inline copy, deallocate the old space. |
581 | if (!this->isSmall()) |
582 | free(this->begin()); |
583 | } |
584 | |
585 | void clear() { |
586 | this->destroy_range(this->begin(), this->end()); |
587 | this->Size = 0; |
588 | } |
589 | |
590 | private: |
591 | template <bool ForOverwrite> void resizeImpl(size_type N) { |
592 | if (N < this->size()) { |
593 | this->pop_back_n(this->size() - N); |
594 | } else if (N > this->size()) { |
595 | this->reserve(N); |
596 | for (auto I = this->end(), E = this->begin() + N; I != E; ++I) |
597 | if (ForOverwrite) |
598 | new (&*I) T; |
599 | else |
600 | new (&*I) T(); |
601 | this->set_size(N); |
602 | } |
603 | } |
604 | |
605 | public: |
606 | void resize(size_type N) { resizeImpl<false>(N); } |
607 | |
608 | /// Like resize, but new values are default-initialized rather than value-initialized; for POD-like \p T they are left uninitialized.
609 | void resize_for_overwrite(size_type N) { resizeImpl<true>(N); } |
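
Usage sketch of the two flavors (ForOverwrite selects `new (p) T` default-initialization instead of `new (p) T()` value-initialization):

  #include "llvm/ADT/SmallVector.h"

  void demo() {
    llvm::SmallVector<int, 8> V;
    V.resize(4);               // value-initialized: four zeros
    V.resize_for_overwrite(8); // elements 4..7 are default-initialized,
                               // i.e. indeterminate for int; write them
                               // before reading
  }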
610 | |
611 | void resize(size_type N, ValueParamT NV) { |
612 | if (N == this->size()) |
613 | return; |
614 | |
615 | if (N < this->size()) { |
616 | this->pop_back_n(this->size() - N); |
617 | return; |
618 | } |
619 | |
620 | // N > this->size(). Defer to append. |
621 | this->append(N - this->size(), NV); |
622 | } |
623 | |
624 | void reserve(size_type N) { |
625 | if (this->capacity() < N) |
626 | this->grow(N); |
627 | } |
628 | |
629 | void pop_back_n(size_type NumItems) { |
630 | assert(this->size() >= NumItems);
631 | this->destroy_range(this->end() - NumItems, this->end()); |
632 | this->set_size(this->size() - NumItems); |
633 | } |
634 | |
635 | LLVM_NODISCARD T pop_back_val() {
636 | T Result = ::std::move(this->back()); |
637 | this->pop_back(); |
638 | return Result; |
639 | } |
640 | |
641 | void swap(SmallVectorImpl &RHS); |
642 | |
643 | /// Add the specified range to the end of the SmallVector. |
644 | template <typename in_iter, |
645 | typename = std::enable_if_t<std::is_convertible< |
646 | typename std::iterator_traits<in_iter>::iterator_category, |
647 | std::input_iterator_tag>::value>> |
648 | void append(in_iter in_start, in_iter in_end) { |
649 | this->assertSafeToAddRange(in_start, in_end); |
650 | size_type NumInputs = std::distance(in_start, in_end); |
651 | this->reserve(this->size() + NumInputs); |
652 | this->uninitialized_copy(in_start, in_end, this->end()); |
653 | this->set_size(this->size() + NumInputs); |
654 | } |
655 | |
656 | /// Append \p NumInputs copies of \p Elt to the end. |
657 | void append(size_type NumInputs, ValueParamT Elt) { |
658 | const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumInputs); |
659 | std::uninitialized_fill_n(this->end(), NumInputs, *EltPtr); |
660 | this->set_size(this->size() + NumInputs); |
661 | } |
662 | |
663 | void append(std::initializer_list<T> IL) { |
664 | append(IL.begin(), IL.end()); |
665 | } |
666 | |
667 | void append(const SmallVectorImpl &RHS) { append(RHS.begin(), RHS.end()); } |
668 | |
669 | void assign(size_type NumElts, ValueParamT Elt) { |
670 | // Note that Elt could be an internal reference. |
671 | if (NumElts > this->capacity()) { |
672 | this->growAndAssign(NumElts, Elt); |
673 | return; |
674 | } |
675 | |
676 | // Assign over existing elements. |
677 | std::fill_n(this->begin(), std::min(NumElts, this->size()), Elt); |
678 | if (NumElts > this->size()) |
679 | std::uninitialized_fill_n(this->end(), NumElts - this->size(), Elt); |
680 | else if (NumElts < this->size()) |
681 | this->destroy_range(this->begin() + NumElts, this->end()); |
682 | this->set_size(NumElts); |
683 | } |
684 | |
685 | // FIXME: Consider assigning over existing elements, rather than clearing & |
686 | // re-initializing them - for all assign(...) variants. |
687 | |
688 | template <typename in_iter, |
689 | typename = std::enable_if_t<std::is_convertible< |
690 | typename std::iterator_traits<in_iter>::iterator_category, |
691 | std::input_iterator_tag>::value>> |
692 | void assign(in_iter in_start, in_iter in_end) { |
693 | this->assertSafeToReferenceAfterClear(in_start, in_end); |
694 | clear(); |
695 | append(in_start, in_end); |
696 | } |
697 | |
698 | void assign(std::initializer_list<T> IL) { |
699 | clear(); |
700 | append(IL); |
701 | } |
702 | |
703 | void assign(const SmallVectorImpl &RHS) { assign(RHS.begin(), RHS.end()); } |
704 | |
705 | iterator erase(const_iterator CI) { |
706 | // Just cast away constness because this is a non-const member function. |
707 | iterator I = const_cast<iterator>(CI); |
708 | |
709 | assert(this->isReferenceToStorage(CI) && "Iterator to erase is out of bounds.");
710 | |
711 | iterator N = I; |
712 | // Shift all elts down one. |
713 | std::move(I+1, this->end(), I); |
714 | // Drop the last elt. |
715 | this->pop_back(); |
716 | return N;
717 | } |
718 | |
719 | iterator erase(const_iterator CS, const_iterator CE) { |
720 | // Just cast away constness because this is a non-const member function. |
721 | iterator S = const_cast<iterator>(CS); |
722 | iterator E = const_cast<iterator>(CE); |
723 | |
724 | assert(this->isRangeInStorage(S, E) && "Range to erase is out of bounds.");
725 | |
726 | iterator N = S; |
727 | // Shift all elts down. |
728 | iterator I = std::move(E, this->end(), S); |
729 | // Drop the last elts. |
730 | this->destroy_range(I, this->end()); |
731 | this->set_size(I - this->begin()); |
732 | return N;
733 | } |
734 | |
735 | private: |
736 | template <class ArgType> iterator insert_one_impl(iterator I, ArgType &&Elt) { |
737 | // Callers ensure that ArgType is derived from T. |
738 | static_assert( |
739 | std::is_same<std::remove_const_t<std::remove_reference_t<ArgType>>, |
740 | T>::value, |
741 | "ArgType must be derived from T!"); |
742 | |
743 | if (I == this->end()) { // Important special case for empty vector. |
744 | this->push_back(::std::forward<ArgType>(Elt)); |
745 | return this->end()-1; |
746 | } |
747 | |
748 | assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
749 | |
750 | // Grow if necessary. |
751 | size_t Index = I - this->begin(); |
752 | std::remove_reference_t<ArgType> *EltPtr = |
753 | this->reserveForParamAndGetAddress(Elt); |
754 | I = this->begin() + Index; |
755 | |
756 | ::new ((void*) this->end()) T(::std::move(this->back())); |
757 | // Push everything else over. |
758 | std::move_backward(I, this->end()-1, this->end()); |
759 | this->set_size(this->size() + 1); |
760 | |
761 | // If we just moved the element we're inserting, be sure to update |
762 | // the reference (never happens if TakesParamByValue). |
763 | static_assert(!TakesParamByValue || std::is_same<ArgType, T>::value, |
764 | "ArgType must be 'T' when taking by value!"); |
765 | if (!TakesParamByValue && this->isReferenceToRange(EltPtr, I, this->end())) |
766 | ++EltPtr; |
767 | |
768 | *I = ::std::forward<ArgType>(*EltPtr); |
769 | return I; |
770 | } |
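
As with push_back, the EltPtr adjustment makes inserting an element of the vector into itself well-defined (usage sketch):

  #include <string>
  #include "llvm/ADT/SmallVector.h"

  void demo() {
    llvm::SmallVector<std::string, 4> V = {"a", "b", "c"};
    V.insert(V.begin(), V[2]);  // the shift relocates V[2]; EltPtr is bumped
                                // past it, so the result is {"c","a","b","c"}
  }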
771 | |
772 | public: |
773 | iterator insert(iterator I, T &&Elt) { |
774 | return insert_one_impl(I, this->forward_value_param(std::move(Elt))); |
775 | } |
776 | |
777 | iterator insert(iterator I, const T &Elt) { |
778 | return insert_one_impl(I, this->forward_value_param(Elt)); |
779 | } |
780 | |
781 | iterator insert(iterator I, size_type NumToInsert, ValueParamT Elt) { |
782 | // Convert iterator to elt# to avoid invalidating iterator when we reserve() |
783 | size_t InsertElt = I - this->begin(); |
784 | |
785 | if (I == this->end()) { // Important special case for empty vector. |
786 | append(NumToInsert, Elt); |
787 | return this->begin()+InsertElt; |
788 | } |
789 | |
790 | assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
791 | |
792 | // Ensure there is enough space, and get the (maybe updated) address of |
793 | // Elt. |
794 | const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumToInsert); |
795 | |
796 | // Uninvalidate the iterator. |
797 | I = this->begin()+InsertElt; |
798 | |
799 | // If there are more elements between the insertion point and the end of the |
800 | // range than there are being inserted, we can use a simple approach to |
801 | // insertion. Since we already reserved space, we know that this won't |
802 | // reallocate the vector. |
803 | if (size_t(this->end()-I) >= NumToInsert) { |
804 | T *OldEnd = this->end(); |
805 | append(std::move_iterator<iterator>(this->end() - NumToInsert), |
806 | std::move_iterator<iterator>(this->end())); |
807 | |
808 | // Copy the existing elements that get replaced. |
809 | std::move_backward(I, OldEnd-NumToInsert, OldEnd); |
810 | |
811 | // If we just moved the element we're inserting, be sure to update |
812 | // the reference (never happens if TakesParamByValue). |
813 | if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end()) |
814 | EltPtr += NumToInsert; |
815 | |
816 | std::fill_n(I, NumToInsert, *EltPtr); |
817 | return I; |
818 | } |
819 | |
820 | // Otherwise, we're inserting more elements than exist already, and we're |
821 | // not inserting at the end. |
822 | |
823 | // Move over the elements that we're about to overwrite. |
824 | T *OldEnd = this->end(); |
825 | this->set_size(this->size() + NumToInsert); |
826 | size_t NumOverwritten = OldEnd-I; |
827 | this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten); |
828 | |
829 | // If we just moved the element we're inserting, be sure to update |
830 | // the reference (never happens if TakesParamByValue). |
831 | if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end()) |
832 | EltPtr += NumToInsert; |
833 | |
834 | // Replace the overwritten part. |
835 | std::fill_n(I, NumOverwritten, *EltPtr); |
836 | |
837 | // Insert the non-overwritten middle part. |
838 | std::uninitialized_fill_n(OldEnd, NumToInsert - NumOverwritten, *EltPtr); |
839 | return I; |
840 | } |
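
A sketch exercising both branches of the count-insert above (illustrative function name; assumes the LLVM headers):

#include "llvm/ADT/SmallVector.h"

void insertManyExample() {
  llvm::SmallVector<int> V = {1, 2, 3, 4, 5};
  // Tail (4 elements) >= NumToInsert (2): the "simple" branch runs.
  V.insert(V.begin() + 1, 2, 9); // {1, 9, 9, 2, 3, 4, 5}
  // Tail (1 element) < NumToInsert (3): the overwrite-then-
  // uninitialized-fill branch runs instead.
  V.insert(V.end() - 1, 3, 7);   // {1, 9, 9, 2, 3, 4, 7, 7, 7, 5}
}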
841 | |
842 | template <typename ItTy, |
843 | typename = std::enable_if_t<std::is_convertible< |
844 | typename std::iterator_traits<ItTy>::iterator_category, |
845 | std::input_iterator_tag>::value>> |
846 | iterator insert(iterator I, ItTy From, ItTy To) { |
847 | // Convert iterator to elt# to avoid invalidating iterator when we reserve() |
848 | size_t InsertElt = I - this->begin(); |
849 | |
850 | if (I == this->end()) { // Important special case for empty vector. |
851 | append(From, To); |
852 | return this->begin()+InsertElt; |
853 | } |
854 | |
855 | assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
856 | |
857 | // Check that the reserve that follows doesn't invalidate the iterators. |
858 | this->assertSafeToAddRange(From, To); |
859 | |
860 | size_t NumToInsert = std::distance(From, To); |
861 | |
862 | // Ensure there is enough space. |
863 | reserve(this->size() + NumToInsert); |
864 | |
865 | // Re-derive the iterator; the reserve above may have moved the buffer.
866 | I = this->begin()+InsertElt; |
867 | |
868 | // If there are more elements between the insertion point and the end of the |
869 | // range than there are being inserted, we can use a simple approach to |
870 | // insertion. Since we already reserved space, we know that this won't |
871 | // reallocate the vector. |
872 | if (size_t(this->end()-I) >= NumToInsert) { |
873 | T *OldEnd = this->end(); |
874 | append(std::move_iterator<iterator>(this->end() - NumToInsert), |
875 | std::move_iterator<iterator>(this->end())); |
876 | |
878 | // Move the existing elements that get replaced.
878 | std::move_backward(I, OldEnd-NumToInsert, OldEnd); |
879 | |
880 | std::copy(From, To, I); |
881 | return I; |
882 | } |
883 | |
884 | // Otherwise, we're inserting more elements than exist already, and we're |
885 | // not inserting at the end. |
886 | |
887 | // Move over the elements that we're about to overwrite. |
888 | T *OldEnd = this->end(); |
889 | this->set_size(this->size() + NumToInsert); |
890 | size_t NumOverwritten = OldEnd-I; |
891 | this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten); |
892 | |
893 | // Replace the overwritten part. |
894 | for (T *J = I; NumOverwritten > 0; --NumOverwritten) { |
895 | *J = *From; |
896 | ++J; ++From; |
897 | } |
898 | |
899 | // Insert the non-overwritten middle part. |
900 | this->uninitialized_copy(From, To, OldEnd); |
901 | return I; |
902 | } |
903 | |
904 | void insert(iterator I, std::initializer_list<T> IL) { |
905 | insert(I, IL.begin(), IL.end()); |
906 | } |
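
A sketch of the range and initializer_list forms above (illustrative function name; assumes the LLVM headers). The source range must not alias the vector itself, which assertSafeToAddRange checks in asserts builds:

#include "llvm/ADT/SmallVector.h"
#include <array>

void insertRangeExample() {
  llvm::SmallVector<int> V = {1, 5};
  std::array<int, 3> Mid = {2, 3, 4};
  // NumToInsert (3) exceeds the tail (1), so the overwrite-plus-
  // uninitialized_copy path above runs.
  V.insert(V.begin() + 1, Mid.begin(), Mid.end()); // {1, 2, 3, 4, 5}
  V.insert(V.end(), {6, 7}); // at end(): takes the append fast path
}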
907 | |
908 | template <typename... ArgTypes> reference emplace_back(ArgTypes &&... Args) { |
909 | if (LLVM_UNLIKELY(this->size() >= this->capacity()))
910 | return this->growAndEmplaceBack(std::forward<ArgTypes>(Args)...); |
911 | |
912 | ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...); |
913 | this->set_size(this->size() + 1); |
914 | return this->back(); |
915 | } |
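
emplace_back forwards its arguments straight to T's constructor, so no temporary T is materialized; only the at-capacity call takes the out-of-line grow path. A sketch (illustrative function name; assumes the LLVM headers):

#include "llvm/ADT/SmallVector.h"
#include <string>
#include <utility>

void emplaceExample() {
  llvm::SmallVector<std::pair<int, std::string>, 2> V;
  V.emplace_back(1, "one"); // constructed in place at end()
  V.emplace_back(2, "two");
  V.emplace_back(3, "three"); // size() == capacity(): growAndEmplaceBack
}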
916 | |
917 | SmallVectorImpl &operator=(const SmallVectorImpl &RHS); |
918 | |
919 | SmallVectorImpl &operator=(SmallVectorImpl &&RHS); |
920 | |
921 | bool operator==(const SmallVectorImpl &RHS) const { |
922 | if (this->size() != RHS.size()) return false; |
923 | return std::equal(this->begin(), this->end(), RHS.begin()); |
924 | } |
925 | bool operator!=(const SmallVectorImpl &RHS) const { |
926 | return !(*this == RHS); |
927 | } |
928 | |
929 | bool operator<(const SmallVectorImpl &RHS) const { |
930 | return std::lexicographical_compare(this->begin(), this->end(), |
931 | RHS.begin(), RHS.end()); |
932 | } |
933 | }; |
934 | |
935 | template <typename T> |
936 | void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) { |
937 | if (this == &RHS) return; |
938 | |
939 | // We can only avoid copying elements if neither vector is small. |
940 | if (!this->isSmall() && !RHS.isSmall()) { |
941 | std::swap(this->BeginX, RHS.BeginX); |
942 | std::swap(this->Size, RHS.Size); |
943 | std::swap(this->Capacity, RHS.Capacity); |
944 | return; |
945 | } |
946 | this->reserve(RHS.size()); |
947 | RHS.reserve(this->size()); |
948 | |
949 | // Swap the shared elements. |
950 | size_t NumShared = this->size(); |
951 | if (NumShared > RHS.size()) NumShared = RHS.size(); |
952 | for (size_type i = 0; i != NumShared; ++i) |
953 | std::swap((*this)[i], RHS[i]); |
954 | |
955 | // Copy over the extra elts. |
956 | if (this->size() > RHS.size()) { |
957 | size_t EltDiff = this->size() - RHS.size(); |
958 | this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end()); |
959 | RHS.set_size(RHS.size() + EltDiff); |
960 | this->destroy_range(this->begin()+NumShared, this->end()); |
961 | this->set_size(NumShared); |
962 | } else if (RHS.size() > this->size()) { |
963 | size_t EltDiff = RHS.size() - this->size(); |
964 | this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end()); |
965 | this->set_size(this->size() + EltDiff); |
966 | this->destroy_range(RHS.begin()+NumShared, RHS.end()); |
967 | RHS.set_size(NumShared); |
968 | } |
969 | } |
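
Note the asymmetry above: swap is O(1) only when both vectors have spilled to the heap; if either is still using inline storage it falls back to element-wise swaps plus copies of the excess. A sketch (illustrative function name; assumes the LLVM headers):

#include "llvm/ADT/SmallVector.h"

void swapExample() {
  llvm::SmallVector<int, 4> A = {1, 2};          // inline storage
  llvm::SmallVector<int, 4> B = {3, 4, 5, 6, 7}; // spilled to the heap
  A.swap(B); // A is still small, so the element-wise fallback runs; two
             // heap-backed vectors would instead swap BeginX/Size/Capacity.
}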
970 | |
971 | template <typename T> |
972 | SmallVectorImpl<T> &SmallVectorImpl<T>:: |
973 | operator=(const SmallVectorImpl<T> &RHS) { |
974 | // Avoid self-assignment. |
975 | if (this == &RHS) return *this; |
976 | |
977 | // If we already have sufficient space, assign the common elements, then |
978 | // destroy any excess. |
979 | size_t RHSSize = RHS.size(); |
980 | size_t CurSize = this->size(); |
981 | if (CurSize >= RHSSize) { |
982 | // Assign common elements. |
983 | iterator NewEnd; |
984 | if (RHSSize) |
985 | NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin()); |
986 | else |
987 | NewEnd = this->begin(); |
988 | |
989 | // Destroy excess elements. |
990 | this->destroy_range(NewEnd, this->end()); |
991 | |
992 | // Trim. |
993 | this->set_size(RHSSize); |
994 | return *this; |
995 | } |
996 | |
997 | // If we have to grow to have enough elements, destroy the current elements. |
998 | // This allows us to avoid copying them during the grow. |
999 | // FIXME: don't do this if they're efficiently moveable. |
1000 | if (this->capacity() < RHSSize) { |
1001 | // Destroy current elements. |
1002 | this->clear(); |
1003 | CurSize = 0; |
1004 | this->grow(RHSSize); |
1005 | } else if (CurSize) { |
1006 | // Otherwise, use assignment for the already-constructed elements. |
1007 | std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin()); |
1008 | } |
1009 | |
1010 | // Copy construct the new elements in place. |
1011 | this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(), |
1012 | this->begin()+CurSize); |
1013 | |
1014 | // Set end. |
1015 | this->set_size(RHSSize); |
1016 | return *this; |
1017 | } |
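
A sketch of the paths above (illustrative function name; assumes the LLVM headers):

#include "llvm/ADT/SmallVector.h"

void copyAssignExample() {
  llvm::SmallVector<int, 2> A = {1, 2};
  llvm::SmallVector<int, 2> B = {7, 8, 9}; // already spilled to the heap
  A = B; // capacity (2) < RHS size (3): clear(), grow(), copy-construct.
  B = A; // sizes now equal: element-wise std::copy, no allocation.
}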
1018 | |
1019 | template <typename T> |
1020 | SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) { |
1021 | // Avoid self-assignment. |
1022 | if (this == &RHS) return *this; |
1023 | |
1024 | // If the RHS isn't small, clear this vector and then steal its buffer. |
1025 | if (!RHS.isSmall()) { |
1026 | this->destroy_range(this->begin(), this->end()); |
1027 | if (!this->isSmall()) free(this->begin()); |
1028 | this->BeginX = RHS.BeginX; |
1029 | this->Size = RHS.Size; |
1030 | this->Capacity = RHS.Capacity; |
1031 | RHS.resetToSmall(); |
1032 | return *this; |
1033 | } |
1034 | |
1035 | // If we already have sufficient space, assign the common elements, then |
1036 | // destroy any excess. |
1037 | size_t RHSSize = RHS.size(); |
1038 | size_t CurSize = this->size(); |
1039 | if (CurSize >= RHSSize) { |
1040 | // Assign common elements. |
1041 | iterator NewEnd = this->begin(); |
1042 | if (RHSSize) |
1043 | NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd); |
1044 | |
1045 | // Destroy excess elements and trim the bounds. |
1046 | this->destroy_range(NewEnd, this->end()); |
1047 | this->set_size(RHSSize); |
1048 | |
1049 | // Clear the RHS. |
1050 | RHS.clear(); |
1051 | |
1052 | return *this; |
1053 | } |
1054 | |
1055 | // If we have to grow to have enough elements, destroy the current elements. |
1056 | // This allows us to avoid copying them during the grow. |
1057 | // FIXME: this may not actually make any sense if we can efficiently move |
1058 | // elements. |
1059 | if (this->capacity() < RHSSize) { |
1060 | // Destroy current elements. |
1061 | this->clear(); |
1062 | CurSize = 0; |
1063 | this->grow(RHSSize); |
1064 | } else if (CurSize) { |
1065 | // Otherwise, use assignment for the already-constructed elements. |
1066 | std::move(RHS.begin(), RHS.begin()+CurSize, this->begin()); |
1067 | } |
1068 | |
1069 | // Move-construct the new elements in place. |
1070 | this->uninitialized_move(RHS.begin()+CurSize, RHS.end(), |
1071 | this->begin()+CurSize); |
1072 | |
1073 | // Set end. |
1074 | this->set_size(RHSSize); |
1075 | |
1076 | RHS.clear(); |
1077 | return *this; |
1078 | } |
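
The not-small case above is the payoff of move assignment: the destination abandons its own storage and steals the heap buffer in O(1). A sketch (illustrative function name; assumes the LLVM headers):

#include "llvm/ADT/SmallVector.h"
#include <utility>

void moveAssignExample() {
  llvm::SmallVector<int, 2> A = {1};
  llvm::SmallVector<int, 2> B = {1, 2, 3, 4}; // spilled: heap buffer
  A = std::move(B); // pointer steal, no element moves; B is then reset
                    // to its empty inline state via resetToSmall().
}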
1079 | |
1080 | /// Storage for the SmallVector elements. This is specialized for the N=0 case |
1081 | /// to avoid allocating unnecessary storage. |
1082 | template <typename T, unsigned N> |
1083 | struct SmallVectorStorage { |
1084 | alignas(T) char InlineElts[N * sizeof(T)]; |
1085 | }; |
1086 | |
1087 | /// We need the storage to be properly aligned even for small-size of 0 so that |
1088 | /// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is |
1089 | /// well-defined. |
1090 | template <typename T> struct alignas(T) SmallVectorStorage<T, 0> {}; |
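
A small illustration of the property this relies on (a sketch; the point is only that the empty storage preserves T's alignment so the pointer math in getFirstEl() stays well-defined):

#include "llvm/ADT/SmallVector.h"

static_assert(alignof(llvm::SmallVectorStorage<double, 0>) >= alignof(double),
              "empty storage must keep T's alignment");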
1091 | |
1092 | /// Forward declaration of SmallVector so that |
1093 | /// calculateSmallVectorDefaultInlinedElements can reference |
1094 | /// `sizeof(SmallVector<T, 0>)`. |
1095 | template <typename T, unsigned N> class LLVM_GSL_OWNER SmallVector;
1096 | |
1097 | /// Helper class for calculating the default number of inline elements for |
1098 | /// `SmallVector<T>`. |
1099 | /// |
1100 | /// This should be migrated to a constexpr function when our minimum |
1101 | /// compiler support is enough for multi-statement constexpr functions. |
1102 | template <typename T> struct CalculateSmallVectorDefaultInlinedElements { |
1103 | // Parameter controlling the default number of inlined elements |
1104 | // for `SmallVector<T>`. |
1105 | // |
1106 | // The default number of inlined elements ensures that |
1107 | // 1. There is at least one inlined element. |
1108 | // 2. `sizeof(SmallVector<T>) <= kPreferredSmallVectorSizeof` unless |
1109 | // it contradicts 1. |
1110 | static constexpr size_t kPreferredSmallVectorSizeof = 64; |
1111 | |
1112 | // static_assert that sizeof(T) is not "too big". |
1113 | // |
1114 | // Because our policy guarantees at least one inlined element, it is possible |
1115 | // for an arbitrarily large inlined element to allocate an arbitrarily large |
1116 | // amount of inline storage. We generally consider it an antipattern for a |
1117 | // SmallVector to allocate an excessive amount of inline storage, so we want |
1118 | // to call attention to these cases and make sure that users are making an |
1119 | // intentional decision if they request a lot of inline storage. |
1120 | // |
1121 | // We want this assertion to trigger in pathological cases, but otherwise |
1122 | // not be too easy to hit. To accomplish that, the cutoff is actually somewhat |
1123 | // larger than kPreferredSmallVectorSizeof (otherwise, |
1124 | // `SmallVector<SmallVector<T>>` would be one easy way to trip it, and that |
1125 | // pattern seems useful in practice). |
1126 | // |
1127 | // One wrinkle is that this assertion is in theory non-portable, since |
1128 | // sizeof(T) is in general platform-dependent. However, we don't expect this |
1129 | // to be much of an issue, because most LLVM development happens on 64-bit |
1130 | // hosts, and therefore sizeof(T) is expected to *decrease* when compiled for |
1131 | // 32-bit hosts, dodging the issue. The reverse situation, where development |
1132 | // happens on a 32-bit host and then fails due to sizeof(T) *increasing* on a |
1133 | // 64-bit host, is expected to be very rare. |
1134 | static_assert( |
1135 | sizeof(T) <= 256, |
1136 | "You are trying to use a default number of inlined elements for " |
1137 | "`SmallVector<T>` but `sizeof(T)` is really big! Please use an " |
1138 | "explicit number of inlined elements with `SmallVector<T, N>` to make " |
1139 | "sure you really want that much inline storage."); |
1140 | |
1141 | // Discount the size of the header itself when calculating the maximum inline |
1142 | // bytes. |
1143 | static constexpr size_t PreferredInlineBytes = |
1144 | kPreferredSmallVectorSizeof - sizeof(SmallVector<T, 0>); |
1145 | static constexpr size_t NumElementsThatFit = PreferredInlineBytes / sizeof(T); |
1146 | static constexpr size_t value = |
1147 | NumElementsThatFit == 0 ? 1 : NumElementsThatFit; |
1148 | }; |
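
A worked instance of this arithmetic may help. Assuming a typical 64-bit host where the header (one pointer plus 32-bit Size and Capacity fields) is 16 bytes: PreferredInlineBytes = 64 - 16 = 48, so the default N for int is 48 / 4 = 12 and sizeof(SmallVector<int>) lands exactly on 64; for a hypothetical 56-byte element type, NumElementsThatFit is 0 and value is clamped to 1. The asserts below encode this and hold only under that assumed layout:

#include "llvm/ADT/SmallVector.h"

static_assert(sizeof(llvm::SmallVector<int, 0>) == 16,
              "assumed 64-bit header layout");
static_assert(
    llvm::CalculateSmallVectorDefaultInlinedElements<int>::value == 12,
    "48 inline bytes / 4 bytes per int");
static_assert(sizeof(llvm::SmallVector<int>) == 64, "16 + 12 * 4 == 64");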
1149 | |
1150 | /// This is a 'vector' (really, a variable-sized array), optimized |
1151 | /// for the case when the array is small. It contains some number of elements |
1152 | /// in-place, which allows it to avoid heap allocation when the actual number of |
1153 | /// elements is below that threshold. This allows normal "small" cases to be |
1154 | /// fast without losing generality for large inputs. |
1155 | /// |
1156 | /// \note |
1157 | /// In the absence of a well-motivated choice for the number of inlined |
1158 | /// elements \p N, it is recommended to use \c SmallVector<T> (that is, |
1159 | /// omitting the \p N). This will choose a default number of inlined elements |
1160 | /// reasonable for allocation on the stack (for example, trying to keep \c |
1161 | /// sizeof(SmallVector<T>) around 64 bytes). |
1162 | /// |
1163 | /// \warning This does not attempt to be exception safe. |
1164 | /// |
1165 | /// \see https://llvm.org/docs/ProgrammersManual.html#llvm-adt-smallvector-h |
1166 | template <typename T, |
1167 | unsigned N = CalculateSmallVectorDefaultInlinedElements<T>::value> |
1168 | class LLVM_GSL_OWNER SmallVector : public SmallVectorImpl<T>,
1169 | SmallVectorStorage<T, N> { |
1170 | public: |
1171 | SmallVector() : SmallVectorImpl<T>(N) {} |
1172 | |
1173 | ~SmallVector() { |
1174 | // Destroy the constructed elements in the vector. |
1175 | this->destroy_range(this->begin(), this->end()); |
1176 | } |
1177 | |
1178 | explicit SmallVector(size_t Size, const T &Value = T()) |
1179 | : SmallVectorImpl<T>(N) { |
1180 | this->assign(Size, Value); |
1181 | } |
1182 | |
1183 | template <typename ItTy, |
1184 | typename = std::enable_if_t<std::is_convertible< |
1185 | typename std::iterator_traits<ItTy>::iterator_category, |
1186 | std::input_iterator_tag>::value>> |
1187 | SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) { |
1188 | this->append(S, E); |
1189 | } |
1190 | |
1191 | template <typename RangeTy> |
1192 | explicit SmallVector(const iterator_range<RangeTy> &R) |
1193 | : SmallVectorImpl<T>(N) { |
1194 | this->append(R.begin(), R.end()); |
1195 | } |
1196 | |
1197 | SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) { |
1198 | this->assign(IL); |
1199 | } |
1200 | |
1201 | SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) { |
1202 | if (!RHS.empty()) |
1203 | SmallVectorImpl<T>::operator=(RHS); |
1204 | } |
1205 | |
1206 | SmallVector &operator=(const SmallVector &RHS) { |
1207 | SmallVectorImpl<T>::operator=(RHS); |
1208 | return *this; |
1209 | } |
1210 | |
1211 | SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) { |
1212 | if (!RHS.empty()) |
1213 | SmallVectorImpl<T>::operator=(::std::move(RHS)); |
1214 | } |
1215 | |
1216 | SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) { |
1217 | if (!RHS.empty()) |
1218 | SmallVectorImpl<T>::operator=(::std::move(RHS)); |
1219 | } |
1220 | |
1221 | SmallVector &operator=(SmallVector &&RHS) { |
1222 | SmallVectorImpl<T>::operator=(::std::move(RHS)); |
1223 | return *this; |
1224 | } |
1225 | |
1226 | SmallVector &operator=(SmallVectorImpl<T> &&RHS) { |
1227 | SmallVectorImpl<T>::operator=(::std::move(RHS)); |
1228 | return *this; |
1229 | } |
1230 | |
1231 | SmallVector &operator=(std::initializer_list<T> IL) { |
1232 | this->assign(IL); |
1233 | return *this; |
1234 | } |
1235 | }; |
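
A usage sketch following the note above: omit N at declaration sites so the default keeps the object stack-friendly, and accept SmallVectorImpl<T>& in APIs so callers can pick any inline size (collectEvens is an illustrative name, not an LLVM API):

#include "llvm/ADT/SmallVector.h"

llvm::SmallVector<unsigned>
collectEvens(const llvm::SmallVectorImpl<unsigned> &In) {
  llvm::SmallVector<unsigned> Out; // default inline capacity
  for (unsigned V : In)
    if (V % 2 == 0)
      Out.push_back(V);
  return Out;
}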
1236 | |
1237 | template <typename T, unsigned N> |
1238 | inline size_t capacity_in_bytes(const SmallVector<T, N> &X) { |
1239 | return X.capacity_in_bytes(); |
1240 | } |
1241 | |
1242 | /// Given a range of type R, iterate the entire range and return a
1243 | /// SmallVector containing a copy of each element. This is useful, for
1244 | /// example, when you want to iterate a range and then sort the results.
1245 | template <unsigned Size, typename R> |
1246 | SmallVector<typename std::remove_const<typename std::remove_reference< |
1247 | decltype(*std::begin(std::declval<R &>()))>::type>::type, |
1248 | Size> |
1249 | to_vector(R &&Range) { |
1250 | return {std::begin(Range), std::end(Range)}; |
1251 | } |
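
Note that this version requires the inline element count as an explicit template argument. A sketch (illustrative function name; assumes the LLVM headers):

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include <cstdlib>
#include <set>

llvm::SmallVector<int, 8> sortedByMagnitude(const std::set<int> &S) {
  // std::set cannot be reordered in place; materialize a copy first.
  auto Vec = llvm::to_vector<8>(S);
  llvm::sort(Vec, [](int A, int B) { return std::abs(A) < std::abs(B); });
  return Vec;
}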
1252 | |
1253 | } // end namespace llvm |
1254 | |
1255 | namespace std { |
1256 | |
1257 | /// Implement std::swap in terms of SmallVector swap. |
1258 | template<typename T> |
1259 | inline void |
1260 | swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) { |
1261 | LHS.swap(RHS); |
1262 | } |
1263 | |
1264 | /// Implement std::swap in terms of SmallVector swap. |
1265 | template<typename T, unsigned N> |
1266 | inline void |
1267 | swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) { |
1268 | LHS.swap(RHS); |
1269 | } |
1270 | |
1271 | } // end namespace std |
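
With these overloads, generic code that swaps through std::swap dispatches to SmallVector's own swap rather than the generic move-based one. A sketch (illustrative function name; assumes the LLVM headers):

#include "llvm/ADT/SmallVector.h"
#include <utility>

void swapViaStd(llvm::SmallVectorImpl<int> &A, llvm::SmallVectorImpl<int> &B) {
  std::swap(A, B); // equivalent to A.swap(B)
}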
1272 | |
1273 | #endif // LLVM_ADT_SMALLVECTOR_H |