File: | lib/Analysis/MemorySSA.cpp |
Warning: | line 879, column 37 Called C++ object pointer is null |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===// | ||||||
2 | // | ||||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||||
6 | // | ||||||
7 | //===----------------------------------------------------------------------===// | ||||||
8 | // | ||||||
9 | // This file implements the MemorySSA class. | ||||||
10 | // | ||||||
11 | //===----------------------------------------------------------------------===// | ||||||
12 | |||||||
13 | #include "llvm/Analysis/MemorySSA.h" | ||||||
14 | #include "llvm/ADT/DenseMap.h" | ||||||
15 | #include "llvm/ADT/DenseMapInfo.h" | ||||||
16 | #include "llvm/ADT/DenseSet.h" | ||||||
17 | #include "llvm/ADT/DepthFirstIterator.h" | ||||||
18 | #include "llvm/ADT/Hashing.h" | ||||||
19 | #include "llvm/ADT/None.h" | ||||||
20 | #include "llvm/ADT/Optional.h" | ||||||
21 | #include "llvm/ADT/STLExtras.h" | ||||||
22 | #include "llvm/ADT/SmallPtrSet.h" | ||||||
23 | #include "llvm/ADT/SmallVector.h" | ||||||
24 | #include "llvm/ADT/iterator.h" | ||||||
25 | #include "llvm/ADT/iterator_range.h" | ||||||
26 | #include "llvm/Analysis/AliasAnalysis.h" | ||||||
27 | #include "llvm/Analysis/IteratedDominanceFrontier.h" | ||||||
28 | #include "llvm/Analysis/MemoryLocation.h" | ||||||
29 | #include "llvm/Config/llvm-config.h" | ||||||
30 | #include "llvm/IR/AssemblyAnnotationWriter.h" | ||||||
31 | #include "llvm/IR/BasicBlock.h" | ||||||
32 | #include "llvm/IR/Dominators.h" | ||||||
33 | #include "llvm/IR/Function.h" | ||||||
34 | #include "llvm/IR/Instruction.h" | ||||||
35 | #include "llvm/IR/Instructions.h" | ||||||
36 | #include "llvm/IR/IntrinsicInst.h" | ||||||
37 | #include "llvm/IR/Intrinsics.h" | ||||||
38 | #include "llvm/IR/LLVMContext.h" | ||||||
39 | #include "llvm/IR/PassManager.h" | ||||||
40 | #include "llvm/IR/Use.h" | ||||||
41 | #include "llvm/Pass.h" | ||||||
42 | #include "llvm/Support/AtomicOrdering.h" | ||||||
43 | #include "llvm/Support/Casting.h" | ||||||
44 | #include "llvm/Support/CommandLine.h" | ||||||
45 | #include "llvm/Support/Compiler.h" | ||||||
46 | #include "llvm/Support/Debug.h" | ||||||
47 | #include "llvm/Support/ErrorHandling.h" | ||||||
48 | #include "llvm/Support/FormattedStream.h" | ||||||
49 | #include "llvm/Support/raw_ostream.h" | ||||||
50 | #include <algorithm> | ||||||
51 | #include <cassert> | ||||||
52 | #include <cstdlib> | ||||||
53 | #include <iterator> | ||||||
54 | #include <memory> | ||||||
55 | #include <utility> | ||||||
56 | |||||||
57 | using namespace llvm; | ||||||
58 | |||||||
// Tag used by LLVM_DEBUG/dbgs() statistics in this file.
#define DEBUG_TYPE "memoryssa"
60 | |||||||
61 | INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,static void *initializeMemorySSAWrapperPassPassOnce(PassRegistry &Registry) { | ||||||
62 | true)static void *initializeMemorySSAWrapperPassPassOnce(PassRegistry &Registry) { | ||||||
63 | INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)initializeDominatorTreeWrapperPassPass(Registry); | ||||||
64 | INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)initializeAAResultsWrapperPassPass(Registry); | ||||||
65 | INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,PassInfo *PI = new PassInfo( "Memory SSA", "memoryssa", & MemorySSAWrapperPass::ID, PassInfo::NormalCtor_t(callDefaultCtor <MemorySSAWrapperPass>), false, true); Registry.registerPass (*PI, true); return PI; } static llvm::once_flag InitializeMemorySSAWrapperPassPassFlag ; void llvm::initializeMemorySSAWrapperPassPass(PassRegistry & Registry) { llvm::call_once(InitializeMemorySSAWrapperPassPassFlag , initializeMemorySSAWrapperPassPassOnce, std::ref(Registry)) ; } | ||||||
66 | true)PassInfo *PI = new PassInfo( "Memory SSA", "memoryssa", & MemorySSAWrapperPass::ID, PassInfo::NormalCtor_t(callDefaultCtor <MemorySSAWrapperPass>), false, true); Registry.registerPass (*PI, true); return PI; } static llvm::once_flag InitializeMemorySSAWrapperPassPassFlag ; void llvm::initializeMemorySSAWrapperPassPass(PassRegistry & Registry) { llvm::call_once(InitializeMemorySSAWrapperPassPassFlag , initializeMemorySSAWrapperPassPassOnce, std::ref(Registry)) ; } | ||||||
67 | |||||||
68 | INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",static void *initializeMemorySSAPrinterLegacyPassPassOnce(PassRegistry &Registry) { | ||||||
69 | "Memory SSA Printer", false, false)static void *initializeMemorySSAPrinterLegacyPassPassOnce(PassRegistry &Registry) { | ||||||
70 | INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)initializeMemorySSAWrapperPassPass(Registry); | ||||||
71 | INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",PassInfo *PI = new PassInfo( "Memory SSA Printer", "print-memoryssa" , &MemorySSAPrinterLegacyPass::ID, PassInfo::NormalCtor_t (callDefaultCtor<MemorySSAPrinterLegacyPass>), false, false ); Registry.registerPass(*PI, true); return PI; } static llvm ::once_flag InitializeMemorySSAPrinterLegacyPassPassFlag; void llvm::initializeMemorySSAPrinterLegacyPassPass(PassRegistry & Registry) { llvm::call_once(InitializeMemorySSAPrinterLegacyPassPassFlag , initializeMemorySSAPrinterLegacyPassPassOnce, std::ref(Registry )); } | ||||||
72 | "Memory SSA Printer", false, false)PassInfo *PI = new PassInfo( "Memory SSA Printer", "print-memoryssa" , &MemorySSAPrinterLegacyPass::ID, PassInfo::NormalCtor_t (callDefaultCtor<MemorySSAPrinterLegacyPass>), false, false ); Registry.registerPass(*PI, true); return PI; } static llvm ::once_flag InitializeMemorySSAPrinterLegacyPassPassFlag; void llvm::initializeMemorySSAPrinterLegacyPassPass(PassRegistry & Registry) { llvm::call_once(InitializeMemorySSAPrinterLegacyPassPassFlag , initializeMemorySSAPrinterLegacyPassPassOnce, std::ref(Registry )); } | ||||||
73 | |||||||
74 | static cl::opt<unsigned> MaxCheckLimit( | ||||||
75 | "memssa-check-limit", cl::Hidden, cl::init(100), | ||||||
76 | cl::desc("The maximum number of stores/phis MemorySSA" | ||||||
77 | "will consider trying to walk past (default = 100)")); | ||||||
78 | |||||||
79 | // Always verify MemorySSA if expensive checking is enabled. | ||||||
80 | #ifdef EXPENSIVE_CHECKS | ||||||
81 | bool llvm::VerifyMemorySSA = true; | ||||||
82 | #else | ||||||
83 | bool llvm::VerifyMemorySSA = false; | ||||||
84 | #endif | ||||||
85 | /// Enables memory ssa as a dependency for loop passes in legacy pass manager. | ||||||
86 | cl::opt<bool> llvm::EnableMSSALoopDependency( | ||||||
87 | "enable-mssa-loop-dependency", cl::Hidden, cl::init(true), | ||||||
88 | cl::desc("Enable MemorySSA dependency for loop pass manager")); | ||||||
89 | |||||||
90 | static cl::opt<bool, true> | ||||||
91 | VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA), | ||||||
92 | cl::Hidden, cl::desc("Enable verification of MemorySSA.")); | ||||||
93 | |||||||
94 | namespace llvm { | ||||||
95 | |||||||
96 | /// An assembly annotator class to print Memory SSA information in | ||||||
97 | /// comments. | ||||||
98 | class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter { | ||||||
99 | friend class MemorySSA; | ||||||
100 | |||||||
101 | const MemorySSA *MSSA; | ||||||
102 | |||||||
103 | public: | ||||||
104 | MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {} | ||||||
105 | |||||||
106 | void emitBasicBlockStartAnnot(const BasicBlock *BB, | ||||||
107 | formatted_raw_ostream &OS) override { | ||||||
108 | if (MemoryAccess *MA = MSSA->getMemoryAccess(BB)) | ||||||
109 | OS << "; " << *MA << "\n"; | ||||||
110 | } | ||||||
111 | |||||||
112 | void emitInstructionAnnot(const Instruction *I, | ||||||
113 | formatted_raw_ostream &OS) override { | ||||||
114 | if (MemoryAccess *MA = MSSA->getMemoryAccess(I)) | ||||||
115 | OS << "; " << *MA << "\n"; | ||||||
116 | } | ||||||
117 | }; | ||||||
118 | |||||||
119 | } // end namespace llvm | ||||||
120 | |||||||
121 | namespace { | ||||||
122 | |||||||
123 | /// Our current alias analysis API differentiates heavily between calls and | ||||||
124 | /// non-calls, and functions called on one usually assert on the other. | ||||||
125 | /// This class encapsulates the distinction to simplify other code that wants | ||||||
126 | /// "Memory affecting instructions and related data" to use as a key. | ||||||
127 | /// For example, this class is used as a densemap key in the use optimizer. | ||||||
128 | class MemoryLocOrCall { | ||||||
129 | public: | ||||||
130 | bool IsCall = false; | ||||||
131 | |||||||
132 | MemoryLocOrCall(MemoryUseOrDef *MUD) | ||||||
133 | : MemoryLocOrCall(MUD->getMemoryInst()) {} | ||||||
134 | MemoryLocOrCall(const MemoryUseOrDef *MUD) | ||||||
135 | : MemoryLocOrCall(MUD->getMemoryInst()) {} | ||||||
136 | |||||||
137 | MemoryLocOrCall(Instruction *Inst) { | ||||||
138 | if (auto *C = dyn_cast<CallBase>(Inst)) { | ||||||
139 | IsCall = true; | ||||||
140 | Call = C; | ||||||
141 | } else { | ||||||
142 | IsCall = false; | ||||||
143 | // There is no such thing as a memorylocation for a fence inst, and it is | ||||||
144 | // unique in that regard. | ||||||
145 | if (!isa<FenceInst>(Inst)) | ||||||
146 | Loc = MemoryLocation::get(Inst); | ||||||
147 | } | ||||||
148 | } | ||||||
149 | |||||||
150 | explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {} | ||||||
151 | |||||||
152 | const CallBase *getCall() const { | ||||||
153 | assert(IsCall)((IsCall) ? static_cast<void> (0) : __assert_fail ("IsCall" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 153, __PRETTY_FUNCTION__)); | ||||||
154 | return Call; | ||||||
155 | } | ||||||
156 | |||||||
157 | MemoryLocation getLoc() const { | ||||||
158 | assert(!IsCall)((!IsCall) ? static_cast<void> (0) : __assert_fail ("!IsCall" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 158, __PRETTY_FUNCTION__)); | ||||||
159 | return Loc; | ||||||
160 | } | ||||||
161 | |||||||
162 | bool operator==(const MemoryLocOrCall &Other) const { | ||||||
163 | if (IsCall != Other.IsCall) | ||||||
164 | return false; | ||||||
165 | |||||||
166 | if (!IsCall) | ||||||
167 | return Loc == Other.Loc; | ||||||
168 | |||||||
169 | if (Call->getCalledValue() != Other.Call->getCalledValue()) | ||||||
170 | return false; | ||||||
171 | |||||||
172 | return Call->arg_size() == Other.Call->arg_size() && | ||||||
173 | std::equal(Call->arg_begin(), Call->arg_end(), | ||||||
174 | Other.Call->arg_begin()); | ||||||
175 | } | ||||||
176 | |||||||
177 | private: | ||||||
178 | union { | ||||||
179 | const CallBase *Call; | ||||||
180 | MemoryLocation Loc; | ||||||
181 | }; | ||||||
182 | }; | ||||||
183 | |||||||
184 | } // end anonymous namespace | ||||||
185 | |||||||
186 | namespace llvm { | ||||||
187 | |||||||
188 | template <> struct DenseMapInfo<MemoryLocOrCall> { | ||||||
189 | static inline MemoryLocOrCall getEmptyKey() { | ||||||
190 | return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey()); | ||||||
191 | } | ||||||
192 | |||||||
193 | static inline MemoryLocOrCall getTombstoneKey() { | ||||||
194 | return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey()); | ||||||
195 | } | ||||||
196 | |||||||
197 | static unsigned getHashValue(const MemoryLocOrCall &MLOC) { | ||||||
198 | if (!MLOC.IsCall) | ||||||
199 | return hash_combine( | ||||||
200 | MLOC.IsCall, | ||||||
201 | DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc())); | ||||||
202 | |||||||
203 | hash_code hash = | ||||||
204 | hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue( | ||||||
205 | MLOC.getCall()->getCalledValue())); | ||||||
206 | |||||||
207 | for (const Value *Arg : MLOC.getCall()->args()) | ||||||
208 | hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg)); | ||||||
209 | return hash; | ||||||
210 | } | ||||||
211 | |||||||
212 | static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) { | ||||||
213 | return LHS == RHS; | ||||||
214 | } | ||||||
215 | }; | ||||||
216 | |||||||
217 | } // end namespace llvm | ||||||
218 | |||||||
219 | /// This does one-way checks to see if Use could theoretically be hoisted above | ||||||
220 | /// MayClobber. This will not check the other way around. | ||||||
221 | /// | ||||||
222 | /// This assumes that, for the purposes of MemorySSA, Use comes directly after | ||||||
223 | /// MayClobber, with no potentially clobbering operations in between them. | ||||||
224 | /// (Where potentially clobbering ops are memory barriers, aliased stores, etc.) | ||||||
225 | static bool areLoadsReorderable(const LoadInst *Use, | ||||||
226 | const LoadInst *MayClobber) { | ||||||
227 | bool VolatileUse = Use->isVolatile(); | ||||||
228 | bool VolatileClobber = MayClobber->isVolatile(); | ||||||
229 | // Volatile operations may never be reordered with other volatile operations. | ||||||
230 | if (VolatileUse && VolatileClobber) | ||||||
231 | return false; | ||||||
232 | // Otherwise, volatile doesn't matter here. From the language reference: | ||||||
233 | // 'optimizers may change the order of volatile operations relative to | ||||||
234 | // non-volatile operations.'" | ||||||
235 | |||||||
236 | // If a load is seq_cst, it cannot be moved above other loads. If its ordering | ||||||
237 | // is weaker, it can be moved above other loads. We just need to be sure that | ||||||
238 | // MayClobber isn't an acquire load, because loads can't be moved above | ||||||
239 | // acquire loads. | ||||||
240 | // | ||||||
241 | // Note that this explicitly *does* allow the free reordering of monotonic (or | ||||||
242 | // weaker) loads of the same address. | ||||||
243 | bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent; | ||||||
244 | bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(), | ||||||
245 | AtomicOrdering::Acquire); | ||||||
246 | return !(SeqCstUse || MayClobberIsAcquire); | ||||||
247 | } | ||||||
248 | |||||||
249 | namespace { | ||||||
250 | |||||||
251 | struct ClobberAlias { | ||||||
252 | bool IsClobber; | ||||||
253 | Optional<AliasResult> AR; | ||||||
254 | }; | ||||||
255 | |||||||
256 | } // end anonymous namespace | ||||||
257 | |||||||
258 | // Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being | ||||||
259 | // ignored if IsClobber = false. | ||||||
260 | template <typename AliasAnalysisType> | ||||||
261 | static ClobberAlias | ||||||
262 | instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc, | ||||||
263 | const Instruction *UseInst, AliasAnalysisType &AA) { | ||||||
264 | Instruction *DefInst = MD->getMemoryInst(); | ||||||
265 | assert(DefInst && "Defining instruction not actually an instruction")((DefInst && "Defining instruction not actually an instruction" ) ? static_cast<void> (0) : __assert_fail ("DefInst && \"Defining instruction not actually an instruction\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 265, __PRETTY_FUNCTION__)); | ||||||
266 | const auto *UseCall = dyn_cast<CallBase>(UseInst); | ||||||
267 | Optional<AliasResult> AR; | ||||||
268 | |||||||
269 | if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) { | ||||||
270 | // These intrinsics will show up as affecting memory, but they are just | ||||||
271 | // markers, mostly. | ||||||
272 | // | ||||||
273 | // FIXME: We probably don't actually want MemorySSA to model these at all | ||||||
274 | // (including creating MemoryAccesses for them): we just end up inventing | ||||||
275 | // clobbers where they don't really exist at all. Please see D43269 for | ||||||
276 | // context. | ||||||
277 | switch (II->getIntrinsicID()) { | ||||||
278 | case Intrinsic::lifetime_start: | ||||||
279 | if (UseCall) | ||||||
280 | return {false, NoAlias}; | ||||||
281 | AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc); | ||||||
282 | return {AR != NoAlias, AR}; | ||||||
283 | case Intrinsic::lifetime_end: | ||||||
284 | case Intrinsic::invariant_start: | ||||||
285 | case Intrinsic::invariant_end: | ||||||
286 | case Intrinsic::assume: | ||||||
287 | return {false, NoAlias}; | ||||||
288 | case Intrinsic::dbg_addr: | ||||||
289 | case Intrinsic::dbg_declare: | ||||||
290 | case Intrinsic::dbg_label: | ||||||
291 | case Intrinsic::dbg_value: | ||||||
292 | llvm_unreachable("debuginfo shouldn't have associated defs!")::llvm::llvm_unreachable_internal("debuginfo shouldn't have associated defs!" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 292); | ||||||
293 | default: | ||||||
294 | break; | ||||||
295 | } | ||||||
296 | } | ||||||
297 | |||||||
298 | if (UseCall) { | ||||||
299 | ModRefInfo I = AA.getModRefInfo(DefInst, UseCall); | ||||||
300 | AR = isMustSet(I) ? MustAlias : MayAlias; | ||||||
301 | return {isModOrRefSet(I), AR}; | ||||||
302 | } | ||||||
303 | |||||||
304 | if (auto *DefLoad = dyn_cast<LoadInst>(DefInst)) | ||||||
305 | if (auto *UseLoad = dyn_cast<LoadInst>(UseInst)) | ||||||
306 | return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias}; | ||||||
307 | |||||||
308 | ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc); | ||||||
309 | AR = isMustSet(I) ? MustAlias : MayAlias; | ||||||
310 | return {isModSet(I), AR}; | ||||||
311 | } | ||||||
312 | |||||||
313 | template <typename AliasAnalysisType> | ||||||
314 | static ClobberAlias instructionClobbersQuery(MemoryDef *MD, | ||||||
315 | const MemoryUseOrDef *MU, | ||||||
316 | const MemoryLocOrCall &UseMLOC, | ||||||
317 | AliasAnalysisType &AA) { | ||||||
318 | // FIXME: This is a temporary hack to allow a single instructionClobbersQuery | ||||||
319 | // to exist while MemoryLocOrCall is pushed through places. | ||||||
320 | if (UseMLOC.IsCall) | ||||||
321 | return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(), | ||||||
322 | AA); | ||||||
323 | return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(), | ||||||
324 | AA); | ||||||
325 | } | ||||||
326 | |||||||
327 | // Return true when MD may alias MU, return false otherwise. | ||||||
328 | bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU, | ||||||
329 | AliasAnalysis &AA) { | ||||||
330 | return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber; | ||||||
331 | } | ||||||
332 | |||||||
333 | namespace { | ||||||
334 | |||||||
335 | struct UpwardsMemoryQuery { | ||||||
336 | // True if our original query started off as a call | ||||||
337 | bool IsCall = false; | ||||||
338 | // The pointer location we started the query with. This will be empty if | ||||||
339 | // IsCall is true. | ||||||
340 | MemoryLocation StartingLoc; | ||||||
341 | // This is the instruction we were querying about. | ||||||
342 | const Instruction *Inst = nullptr; | ||||||
343 | // The MemoryAccess we actually got called with, used to test local domination | ||||||
344 | const MemoryAccess *OriginalAccess = nullptr; | ||||||
345 | Optional<AliasResult> AR = MayAlias; | ||||||
346 | bool SkipSelfAccess = false; | ||||||
347 | |||||||
348 | UpwardsMemoryQuery() = default; | ||||||
349 | |||||||
350 | UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access) | ||||||
351 | : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) { | ||||||
352 | if (!IsCall) | ||||||
353 | StartingLoc = MemoryLocation::get(Inst); | ||||||
354 | } | ||||||
355 | }; | ||||||
356 | |||||||
357 | } // end anonymous namespace | ||||||
358 | |||||||
359 | static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc, | ||||||
360 | BatchAAResults &AA) { | ||||||
361 | Instruction *Inst = MD->getMemoryInst(); | ||||||
362 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { | ||||||
363 | switch (II->getIntrinsicID()) { | ||||||
364 | case Intrinsic::lifetime_end: | ||||||
365 | return AA.alias(MemoryLocation(II->getArgOperand(1)), Loc) == MustAlias; | ||||||
366 | default: | ||||||
367 | return false; | ||||||
368 | } | ||||||
369 | } | ||||||
370 | return false; | ||||||
371 | } | ||||||
372 | |||||||
373 | template <typename AliasAnalysisType> | ||||||
374 | static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA, | ||||||
375 | const Instruction *I) { | ||||||
376 | // If the memory can't be changed, then loads of the memory can't be | ||||||
377 | // clobbered. | ||||||
378 | return isa<LoadInst>(I) && (I->hasMetadata(LLVMContext::MD_invariant_load) || | ||||||
379 | AA.pointsToConstantMemory(MemoryLocation( | ||||||
380 | cast<LoadInst>(I)->getPointerOperand()))); | ||||||
381 | } | ||||||
382 | |||||||
383 | /// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing | ||||||
384 | /// inbetween `Start` and `ClobberAt` can clobbers `Start`. | ||||||
385 | /// | ||||||
386 | /// This is meant to be as simple and self-contained as possible. Because it | ||||||
387 | /// uses no cache, etc., it can be relatively expensive. | ||||||
388 | /// | ||||||
389 | /// \param Start The MemoryAccess that we want to walk from. | ||||||
390 | /// \param ClobberAt A clobber for Start. | ||||||
391 | /// \param StartLoc The MemoryLocation for Start. | ||||||
392 | /// \param MSSA The MemorySSA instance that Start and ClobberAt belong to. | ||||||
393 | /// \param Query The UpwardsMemoryQuery we used for our search. | ||||||
394 | /// \param AA The AliasAnalysis we used for our search. | ||||||
395 | /// \param AllowImpreciseClobber Always false, unless we do relaxed verify. | ||||||
396 | |||||||
397 | template <typename AliasAnalysisType> | ||||||
398 | LLVM_ATTRIBUTE_UNUSED__attribute__((__unused__)) static void | ||||||
399 | checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt, | ||||||
400 | const MemoryLocation &StartLoc, const MemorySSA &MSSA, | ||||||
401 | const UpwardsMemoryQuery &Query, AliasAnalysisType &AA, | ||||||
402 | bool AllowImpreciseClobber = false) { | ||||||
403 | assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?")((MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?" ) ? static_cast<void> (0) : __assert_fail ("MSSA.dominates(ClobberAt, Start) && \"Clobber doesn't dominate start?\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 403, __PRETTY_FUNCTION__)); | ||||||
404 | |||||||
405 | if (MSSA.isLiveOnEntryDef(Start)) { | ||||||
406 | assert(MSSA.isLiveOnEntryDef(ClobberAt) &&((MSSA.isLiveOnEntryDef(ClobberAt) && "liveOnEntry must clobber itself" ) ? static_cast<void> (0) : __assert_fail ("MSSA.isLiveOnEntryDef(ClobberAt) && \"liveOnEntry must clobber itself\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 407, __PRETTY_FUNCTION__)) | ||||||
407 | "liveOnEntry must clobber itself")((MSSA.isLiveOnEntryDef(ClobberAt) && "liveOnEntry must clobber itself" ) ? static_cast<void> (0) : __assert_fail ("MSSA.isLiveOnEntryDef(ClobberAt) && \"liveOnEntry must clobber itself\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 407, __PRETTY_FUNCTION__)); | ||||||
408 | return; | ||||||
409 | } | ||||||
410 | |||||||
411 | bool FoundClobber = false; | ||||||
412 | DenseSet<ConstMemoryAccessPair> VisitedPhis; | ||||||
413 | SmallVector<ConstMemoryAccessPair, 8> Worklist; | ||||||
414 | Worklist.emplace_back(Start, StartLoc); | ||||||
415 | // Walk all paths from Start to ClobberAt, while looking for clobbers. If one | ||||||
416 | // is found, complain. | ||||||
417 | while (!Worklist.empty()) { | ||||||
418 | auto MAP = Worklist.pop_back_val(); | ||||||
419 | // All we care about is that nothing from Start to ClobberAt clobbers Start. | ||||||
420 | // We learn nothing from revisiting nodes. | ||||||
421 | if (!VisitedPhis.insert(MAP).second) | ||||||
422 | continue; | ||||||
423 | |||||||
424 | for (const auto *MA : def_chain(MAP.first)) { | ||||||
425 | if (MA == ClobberAt) { | ||||||
426 | if (const auto *MD = dyn_cast<MemoryDef>(MA)) { | ||||||
427 | // instructionClobbersQuery isn't essentially free, so don't use `|=`, | ||||||
428 | // since it won't let us short-circuit. | ||||||
429 | // | ||||||
430 | // Also, note that this can't be hoisted out of the `Worklist` loop, | ||||||
431 | // since MD may only act as a clobber for 1 of N MemoryLocations. | ||||||
432 | FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD); | ||||||
433 | if (!FoundClobber) { | ||||||
434 | ClobberAlias CA = | ||||||
435 | instructionClobbersQuery(MD, MAP.second, Query.Inst, AA); | ||||||
436 | if (CA.IsClobber) { | ||||||
437 | FoundClobber = true; | ||||||
438 | // Not used: CA.AR; | ||||||
439 | } | ||||||
440 | } | ||||||
441 | } | ||||||
442 | break; | ||||||
443 | } | ||||||
444 | |||||||
445 | // We should never hit liveOnEntry, unless it's the clobber. | ||||||
446 | assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?")((!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?" ) ? static_cast<void> (0) : __assert_fail ("!MSSA.isLiveOnEntryDef(MA) && \"Hit liveOnEntry before clobber?\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 446, __PRETTY_FUNCTION__)); | ||||||
447 | |||||||
448 | if (const auto *MD = dyn_cast<MemoryDef>(MA)) { | ||||||
449 | // If Start is a Def, skip self. | ||||||
450 | if (MD == Start) | ||||||
451 | continue; | ||||||
452 | |||||||
453 | assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)((!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) . IsClobber && "Found clobber before reaching ClobberAt!" ) ? static_cast<void> (0) : __assert_fail ("!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) .IsClobber && \"Found clobber before reaching ClobberAt!\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 455, __PRETTY_FUNCTION__)) | ||||||
454 | .IsClobber &&((!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) . IsClobber && "Found clobber before reaching ClobberAt!" ) ? static_cast<void> (0) : __assert_fail ("!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) .IsClobber && \"Found clobber before reaching ClobberAt!\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 455, __PRETTY_FUNCTION__)) | ||||||
455 | "Found clobber before reaching ClobberAt!")((!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) . IsClobber && "Found clobber before reaching ClobberAt!" ) ? static_cast<void> (0) : __assert_fail ("!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) .IsClobber && \"Found clobber before reaching ClobberAt!\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 455, __PRETTY_FUNCTION__)); | ||||||
456 | continue; | ||||||
457 | } | ||||||
458 | |||||||
459 | if (const auto *MU = dyn_cast<MemoryUse>(MA)) { | ||||||
460 | (void)MU; | ||||||
461 | assert (MU == Start &&((MU == Start && "Can only find use in def chain if Start is a use" ) ? static_cast<void> (0) : __assert_fail ("MU == Start && \"Can only find use in def chain if Start is a use\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 462, __PRETTY_FUNCTION__)) | ||||||
462 | "Can only find use in def chain if Start is a use")((MU == Start && "Can only find use in def chain if Start is a use" ) ? static_cast<void> (0) : __assert_fail ("MU == Start && \"Can only find use in def chain if Start is a use\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 462, __PRETTY_FUNCTION__)); | ||||||
463 | continue; | ||||||
464 | } | ||||||
465 | |||||||
466 | assert(isa<MemoryPhi>(MA))((isa<MemoryPhi>(MA)) ? static_cast<void> (0) : __assert_fail ("isa<MemoryPhi>(MA)", "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 466, __PRETTY_FUNCTION__)); | ||||||
467 | Worklist.append( | ||||||
468 | upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second}), | ||||||
469 | upward_defs_end()); | ||||||
470 | } | ||||||
471 | } | ||||||
472 | |||||||
473 | // If the verify is done following an optimization, it's possible that | ||||||
474 | // ClobberAt was a conservative clobbering, that we can now infer is not a | ||||||
475 | // true clobbering access. Don't fail the verify if that's the case. | ||||||
476 | // We do have accesses that claim they're optimized, but could be optimized | ||||||
477 | // further. Updating all these can be expensive, so allow it for now (FIXME). | ||||||
478 | if (AllowImpreciseClobber) | ||||||
479 | return; | ||||||
480 | |||||||
481 | // If ClobberAt is a MemoryPhi, we can assume something above it acted as a | ||||||
482 | // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point. | ||||||
483 | assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&(((isa<MemoryPhi>(ClobberAt) || FoundClobber) && "ClobberAt never acted as a clobber") ? static_cast<void> (0) : __assert_fail ("(isa<MemoryPhi>(ClobberAt) || FoundClobber) && \"ClobberAt never acted as a clobber\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 484, __PRETTY_FUNCTION__)) | ||||||
484 | "ClobberAt never acted as a clobber")(((isa<MemoryPhi>(ClobberAt) || FoundClobber) && "ClobberAt never acted as a clobber") ? static_cast<void> (0) : __assert_fail ("(isa<MemoryPhi>(ClobberAt) || FoundClobber) && \"ClobberAt never acted as a clobber\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 484, __PRETTY_FUNCTION__)); | ||||||
485 | } | ||||||
486 | |||||||
487 | namespace { | ||||||
488 | |||||||
489 | /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up | ||||||
490 | /// in one class. | ||||||
491 | template <class AliasAnalysisType> class ClobberWalker { | ||||||
492 | /// Save a few bytes by using unsigned instead of size_t. | ||||||
493 | using ListIndex = unsigned; | ||||||
494 | |||||||
495 | /// Represents a span of contiguous MemoryDefs, potentially ending in a | ||||||
496 | /// MemoryPhi. | ||||||
497 | struct DefPath { | ||||||
498 | MemoryLocation Loc; | ||||||
499 | // Note that, because we always walk in reverse, Last will always dominate | ||||||
500 | // First. Also note that First and Last are inclusive. | ||||||
501 | MemoryAccess *First; | ||||||
502 | MemoryAccess *Last; | ||||||
503 | Optional<ListIndex> Previous; | ||||||
504 | |||||||
505 | DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last, | ||||||
506 | Optional<ListIndex> Previous) | ||||||
507 | : Loc(Loc), First(First), Last(Last), Previous(Previous) {} | ||||||
508 | |||||||
509 | DefPath(const MemoryLocation &Loc, MemoryAccess *Init, | ||||||
510 | Optional<ListIndex> Previous) | ||||||
511 | : DefPath(Loc, Init, Init, Previous) {} | ||||||
512 | }; | ||||||
513 | |||||||
514 | const MemorySSA &MSSA; | ||||||
515 | AliasAnalysisType &AA; | ||||||
516 | DominatorTree &DT; | ||||||
517 | UpwardsMemoryQuery *Query; | ||||||
518 | unsigned *UpwardWalkLimit; | ||||||
519 | |||||||
520 | // Phi optimization bookkeeping | ||||||
521 | SmallVector<DefPath, 32> Paths; | ||||||
522 | DenseSet<ConstMemoryAccessPair> VisitedPhis; | ||||||
523 | |||||||
524 | /// Find the nearest def or phi that `From` can legally be optimized to. | ||||||
525 | const MemoryAccess *getWalkTarget(const MemoryPhi *From) const { | ||||||
526 | assert(From->getNumOperands() && "Phi with no operands?")((From->getNumOperands() && "Phi with no operands?" ) ? static_cast<void> (0) : __assert_fail ("From->getNumOperands() && \"Phi with no operands?\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 526, __PRETTY_FUNCTION__)); | ||||||
527 | |||||||
528 | BasicBlock *BB = From->getBlock(); | ||||||
529 | MemoryAccess *Result = MSSA.getLiveOnEntryDef(); | ||||||
530 | DomTreeNode *Node = DT.getNode(BB); | ||||||
531 | while ((Node = Node->getIDom())) { | ||||||
532 | auto *Defs = MSSA.getBlockDefs(Node->getBlock()); | ||||||
533 | if (Defs) | ||||||
534 | return &*Defs->rbegin(); | ||||||
535 | } | ||||||
536 | return Result; | ||||||
537 | } | ||||||
538 | |||||||
  /// Result of calling walkToPhiOrClobber.
  struct UpwardsWalkResult {
    /// The "Result" of the walk. Either a clobber, the last thing we walked, or
    /// both. Include alias info when clobber found.
    MemoryAccess *Result;
    // True when Result is known to clobber the query (includes liveOnEntry
    // and the conservative answer given when the walk limit is exhausted).
    bool IsKnownClobber;
    // Alias relation for the clobber, when one was computed.
    Optional<AliasResult> AR;
  };
547 | |||||||
  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
  /// This will update Desc.Last as it walks. It will (optionally) also stop at
  /// StopAt.
  ///
  /// This does not test for whether StopAt is a clobber
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
                     const MemoryAccess *SkipStopAt = nullptr) const {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
    assert(UpwardWalkLimit && "Need a valid walk limit");
    bool LimitAlreadyReached = false;
    // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
    // it to 1. This will not do any alias() calls. It either returns in the
    // first iteration in the loop below, or is set back to 0 if all def chains
    // are free of MemoryDefs.
    if (!*UpwardWalkLimit) {
      *UpwardWalkLimit = 1;
      LimitAlreadyReached = true;
    }

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      // Stop targets are returned unconditionally, without testing whether
      // they clobber (see the function comment).
      if (Current == StopAt || Current == SkipStopAt)
        return {Current, false, MayAlias};

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {
        if (MSSA.isLiveOnEntryDef(MD))
          return {MD, true, MustAlias};

        // Budget exhausted: conservatively report the current def as a
        // clobber rather than spending more alias() calls.
        if (!--*UpwardWalkLimit)
          return {Current, true, MayAlias};

        ClobberAlias CA =
            instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
        if (CA.IsClobber)
          return {MD, true, CA.AR};
      }
    }

    // We bumped the limit from 0 to 1 above and never spent it; restore 0 so
    // callers still see an exhausted budget.
    if (LimitAlreadyReached)
      *UpwardWalkLimit = 0;

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false, MayAlias};
  }
594 | |||||||
595 | void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches, | ||||||
596 | ListIndex PriorNode) { | ||||||
597 | auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}), | ||||||
598 | upward_defs_end()); | ||||||
599 | for (const MemoryAccessPair &P : UpwardDefs) { | ||||||
600 | PausedSearches.push_back(Paths.size()); | ||||||
601 | Paths.emplace_back(P.second, P.first, PriorNode); | ||||||
602 | } | ||||||
603 | } | ||||||
604 | |||||||
  /// Represents a search that terminated after finding a clobber. This clobber
  /// may or may not be present in the path of defs from LastNode..SearchStart,
  /// since it may have been retrieved from cache.
  struct TerminatedPath {
    MemoryAccess *Clobber;
    // Index into Paths of the DefPath node the search ended on.
    ListIndex LastNode;
  };
612 | |||||||
  /// Get an access that keeps us from optimizing to the given phi.
  ///
  /// PausedSearches is an array of indices into the Paths array. Its incoming
  /// value is the indices of searches that stopped at the last phi optimization
  /// target. It's left in an unspecified state.
  ///
  /// If this returns None, NewPaused is a vector of searches that terminated
  /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
  Optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    // BFS vs DFS really doesn't make a difference here, so just do a DFS with
    // PausedSearches as our stack.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      //
      // NOTE: That we just drop these paths on the ground makes caching
      // behavior sporadic. e.g. given a diamond:
      //  A
      // B C
      //  D
      //
      // ...If we walk D, B, A, C, we'll only cache the result of phi
      // optimization for A, B, and D; C will be skipped because it dies here.
      // This arguably isn't the worst thing ever, since:
      //   - We generally query things in a top-down order, so if we got below D
      //     without needing cache entries for {C, MemLoc}, then chances are
      //     that those cache entries would end up ultimately unused.
      //   - We still cache things for A, so C only needs to walk up a bit.
      // If this behavior becomes problematic, we can fix without a ton of extra
      // work.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
        continue;

      // When skipping the original self access, arrange for the walk to pass
      // through it rather than treating it as a stopping point.
      const MemoryAccess *SkipStopWhere = nullptr;
      if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
        assert(isa<MemoryDef>(Query->OriginalAccess));
        SkipStopWhere = Query->OriginalAccess;
      }

      UpwardsWalkResult Res = walkToPhiOrClobber(Node,
                                                 /*StopAt=*/StopWhere,
                                                 /*SkipStopAt=*/SkipStopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);

        // If this wasn't a cache hit, we hit a clobber when walking. That's a
        // failure.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
        // We've hit our target. Save this path off for if we want to continue
        // walking. If we are in the mode of skipping the OriginalAccess, and
        // we've reached back to the OriginalAccess, do not save path, we've
        // just looped back to self.
        if (Res.Result != SkipStopWhere)
          NewPaused.push_back(PathIndex);
        continue;
      }

      // Not a clobber and not the target: must be a phi. Fan the search out
      // across its incoming values and keep going.
      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return None;
  }
694 | |||||||
  /// Forward iterator that walks a chain of DefPaths backwards through their
  /// Previous links, i.e. from a node toward the root of its search. A
  /// default-constructed iterator is the end sentinel (W == nullptr, N == None).
  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() {}
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      // Follow the Previous link; becomes the end sentinel at a root.
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      // Two end iterators compare equal; otherwise compare indices.
      if (N.hasValue() != O.N.hasValue())
        return false;
      return !N.hasValue() || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W = nullptr;
    Optional<ListIndex> N = None;
  };
721 | |||||||
  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  // Range over the DefPath chain from index `From` back to its search root.
  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  // Const variant of def_path().
  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }
734 | |||||||
  struct OptznResult {
    /// The path that contains our result.
    TerminatedPath PrimaryClobber;
    /// The paths that we can legally cache back from, but that aren't
    /// necessarily the result of the Phi optimization.
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };
742 | |||||||
743 | ListIndex defPathIndex(const DefPath &N) const { | ||||||
744 | // The assert looks nicer if we don't need to do &N | ||||||
745 | const DefPath *NP = &N; | ||||||
746 | assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&((!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() && "Out of bounds DefPath!" ) ? static_cast<void> (0) : __assert_fail ("!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() && \"Out of bounds DefPath!\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 747, __PRETTY_FUNCTION__)) | ||||||
747 | "Out of bounds DefPath!")((!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() && "Out of bounds DefPath!" ) ? static_cast<void> (0) : __assert_fail ("!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() && \"Out of bounds DefPath!\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 747, __PRETTY_FUNCTION__)); | ||||||
748 | return NP - &Paths.front(); | ||||||
749 | } | ||||||
750 | |||||||
751 | /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths | ||||||
752 | /// that act as legal clobbers. Note that this won't return *all* clobbers. | ||||||
753 | /// | ||||||
754 | /// Phi optimization algorithm tl;dr: | ||||||
755 | /// - Find the earliest def/phi, A, we can optimize to | ||||||
756 | /// - Find if all paths from the starting memory access ultimately reach A | ||||||
757 | /// - If not, optimization isn't possible. | ||||||
758 | /// - Otherwise, walk from A to another clobber or phi, A'. | ||||||
759 | /// - If A' is a def, we're done. | ||||||
760 | /// - If A' is a phi, try to optimize it. | ||||||
761 | /// | ||||||
762 | /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path | ||||||
763 | /// terminates when a MemoryAccess that clobbers said MemoryLocation is found. | ||||||
764 | OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start, | ||||||
765 | const MemoryLocation &Loc) { | ||||||
766 | assert(Paths.empty() && VisitedPhis.empty() &&((Paths.empty() && VisitedPhis.empty() && "Reset the optimization state." ) ? static_cast<void> (0) : __assert_fail ("Paths.empty() && VisitedPhis.empty() && \"Reset the optimization state.\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 767, __PRETTY_FUNCTION__)) | ||||||
767 | "Reset the optimization state.")((Paths.empty() && VisitedPhis.empty() && "Reset the optimization state." ) ? static_cast<void> (0) : __assert_fail ("Paths.empty() && VisitedPhis.empty() && \"Reset the optimization state.\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 767, __PRETTY_FUNCTION__)); | ||||||
768 | |||||||
769 | Paths.emplace_back(Loc, Start, Phi, None); | ||||||
770 | // Stores how many "valid" optimization nodes we had prior to calling | ||||||
771 | // addSearches/getBlockingAccess. Necessary for caching if we had a blocker. | ||||||
772 | auto PriorPathsSize = Paths.size(); | ||||||
773 | |||||||
774 | SmallVector<ListIndex, 16> PausedSearches; | ||||||
775 | SmallVector<ListIndex, 8> NewPaused; | ||||||
776 | SmallVector<TerminatedPath, 4> TerminatedPaths; | ||||||
777 | |||||||
778 | addSearches(Phi, PausedSearches, 0); | ||||||
779 | |||||||
780 | // Moves the TerminatedPath with the "most dominated" Clobber to the end of | ||||||
781 | // Paths. | ||||||
782 | auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) { | ||||||
783 | assert(!Paths.empty() && "Need a path to move")((!Paths.empty() && "Need a path to move") ? static_cast <void> (0) : __assert_fail ("!Paths.empty() && \"Need a path to move\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 783, __PRETTY_FUNCTION__)); | ||||||
784 | auto Dom = Paths.begin(); | ||||||
785 | for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I) | ||||||
786 | if (!MSSA.dominates(I->Clobber, Dom->Clobber)) | ||||||
787 | Dom = I; | ||||||
788 | auto Last = Paths.end() - 1; | ||||||
789 | if (Last != Dom) | ||||||
790 | std::iter_swap(Last, Dom); | ||||||
791 | }; | ||||||
792 | |||||||
793 | MemoryPhi *Current = Phi; | ||||||
794 | while (true) { | ||||||
795 | assert(!MSSA.isLiveOnEntryDef(Current) &&((!MSSA.isLiveOnEntryDef(Current) && "liveOnEntry wasn't treated as a clobber?" ) ? static_cast<void> (0) : __assert_fail ("!MSSA.isLiveOnEntryDef(Current) && \"liveOnEntry wasn't treated as a clobber?\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 796, __PRETTY_FUNCTION__)) | ||||||
796 | "liveOnEntry wasn't treated as a clobber?")((!MSSA.isLiveOnEntryDef(Current) && "liveOnEntry wasn't treated as a clobber?" ) ? static_cast<void> (0) : __assert_fail ("!MSSA.isLiveOnEntryDef(Current) && \"liveOnEntry wasn't treated as a clobber?\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 796, __PRETTY_FUNCTION__)); | ||||||
797 | |||||||
798 | const auto *Target = getWalkTarget(Current); | ||||||
799 | // If a TerminatedPath doesn't dominate Target, then it wasn't a legal | ||||||
800 | // optimization for the prior phi. | ||||||
801 | assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {((all_of(TerminatedPaths, [&](const TerminatedPath &P ) { return MSSA.dominates(P.Clobber, Target); })) ? static_cast <void> (0) : __assert_fail ("all_of(TerminatedPaths, [&](const TerminatedPath &P) { return MSSA.dominates(P.Clobber, Target); })" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 803, __PRETTY_FUNCTION__)) | ||||||
802 | return MSSA.dominates(P.Clobber, Target);((all_of(TerminatedPaths, [&](const TerminatedPath &P ) { return MSSA.dominates(P.Clobber, Target); })) ? static_cast <void> (0) : __assert_fail ("all_of(TerminatedPaths, [&](const TerminatedPath &P) { return MSSA.dominates(P.Clobber, Target); })" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 803, __PRETTY_FUNCTION__)) | ||||||
803 | }))((all_of(TerminatedPaths, [&](const TerminatedPath &P ) { return MSSA.dominates(P.Clobber, Target); })) ? static_cast <void> (0) : __assert_fail ("all_of(TerminatedPaths, [&](const TerminatedPath &P) { return MSSA.dominates(P.Clobber, Target); })" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 803, __PRETTY_FUNCTION__)); | ||||||
804 | |||||||
805 | // FIXME: This is broken, because the Blocker may be reported to be | ||||||
806 | // liveOnEntry, and we'll happily wait for that to disappear (read: never) | ||||||
807 | // For the moment, this is fine, since we do nothing with blocker info. | ||||||
808 | if (Optional<TerminatedPath> Blocker = getBlockingAccess( | ||||||
809 | Target, PausedSearches, NewPaused, TerminatedPaths)) { | ||||||
810 | |||||||
811 | // Find the node we started at. We can't search based on N->Last, since | ||||||
812 | // we may have gone around a loop with a different MemoryLocation. | ||||||
813 | auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) { | ||||||
814 | return defPathIndex(N) < PriorPathsSize; | ||||||
815 | }); | ||||||
816 | assert(Iter != def_path_iterator())((Iter != def_path_iterator()) ? static_cast<void> (0) : __assert_fail ("Iter != def_path_iterator()", "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 816, __PRETTY_FUNCTION__)); | ||||||
817 | |||||||
818 | DefPath &CurNode = *Iter; | ||||||
819 | assert(CurNode.Last == Current)((CurNode.Last == Current) ? static_cast<void> (0) : __assert_fail ("CurNode.Last == Current", "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 819, __PRETTY_FUNCTION__)); | ||||||
820 | |||||||
821 | // Two things: | ||||||
822 | // A. We can't reliably cache all of NewPaused back. Consider a case | ||||||
823 | // where we have two paths in NewPaused; one of which can't optimize | ||||||
824 | // above this phi, whereas the other can. If we cache the second path | ||||||
825 | // back, we'll end up with suboptimal cache entries. We can handle | ||||||
826 | // cases like this a bit better when we either try to find all | ||||||
827 | // clobbers that block phi optimization, or when our cache starts | ||||||
828 | // supporting unfinished searches. | ||||||
829 | // B. We can't reliably cache TerminatedPaths back here without doing | ||||||
830 | // extra checks; consider a case like: | ||||||
831 | // T | ||||||
832 | // / \ | ||||||
833 | // D C | ||||||
834 | // \ / | ||||||
835 | // S | ||||||
836 | // Where T is our target, C is a node with a clobber on it, D is a | ||||||
837 | // diamond (with a clobber *only* on the left or right node, N), and | ||||||
838 | // S is our start. Say we walk to D, through the node opposite N | ||||||
839 | // (read: ignoring the clobber), and see a cache entry in the top | ||||||
840 | // node of D. That cache entry gets put into TerminatedPaths. We then | ||||||
841 | // walk up to C (N is later in our worklist), find the clobber, and | ||||||
842 | // quit. If we append TerminatedPaths to OtherClobbers, we'll cache | ||||||
843 | // the bottom part of D to the cached clobber, ignoring the clobber | ||||||
844 | // in N. Again, this problem goes away if we start tracking all | ||||||
845 | // blockers for a given phi optimization. | ||||||
846 | TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)}; | ||||||
847 | return {Result, {}}; | ||||||
848 | } | ||||||
849 | |||||||
850 | // If there's nothing left to search, then all paths led to valid clobbers | ||||||
851 | // that we got from our cache; pick the nearest to the start, and allow | ||||||
852 | // the rest to be cached back. | ||||||
853 | if (NewPaused.empty()) { | ||||||
854 | MoveDominatedPathToEnd(TerminatedPaths); | ||||||
855 | TerminatedPath Result = TerminatedPaths.pop_back_val(); | ||||||
856 | return {Result, std::move(TerminatedPaths)}; | ||||||
857 | } | ||||||
858 | |||||||
859 | MemoryAccess *DefChainEnd = nullptr; | ||||||
860 | SmallVector<TerminatedPath, 4> Clobbers; | ||||||
861 | for (ListIndex Paused : NewPaused) { | ||||||
862 | UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]); | ||||||
863 | if (WR.IsKnownClobber) | ||||||
864 | Clobbers.push_back({WR.Result, Paused}); | ||||||
865 | else | ||||||
866 | // Micro-opt: If we hit the end of the chain, save it. | ||||||
867 | DefChainEnd = WR.Result; | ||||||
868 | } | ||||||
869 | |||||||
870 | if (!TerminatedPaths.empty()) { | ||||||
871 | // If we couldn't find the dominating phi/liveOnEntry in the above loop, | ||||||
872 | // do it now. | ||||||
873 | if (!DefChainEnd
| ||||||
874 | for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target))) | ||||||
875 | DefChainEnd = MA; | ||||||
876 | |||||||
877 | // If any of the terminated paths don't dominate the phi we'll try to | ||||||
878 | // optimize, we need to figure out what they are and quit. | ||||||
879 | const BasicBlock *ChainBB = DefChainEnd->getBlock(); | ||||||
| |||||||
880 | for (const TerminatedPath &TP : TerminatedPaths) { | ||||||
881 | // Because we know that DefChainEnd is as "high" as we can go, we | ||||||
882 | // don't need local dominance checks; BB dominance is sufficient. | ||||||
883 | if (DT.dominates(ChainBB, TP.Clobber->getBlock())) | ||||||
884 | Clobbers.push_back(TP); | ||||||
885 | } | ||||||
886 | } | ||||||
887 | |||||||
888 | // If we have clobbers in the def chain, find the one closest to Current | ||||||
889 | // and quit. | ||||||
890 | if (!Clobbers.empty()) { | ||||||
891 | MoveDominatedPathToEnd(Clobbers); | ||||||
892 | TerminatedPath Result = Clobbers.pop_back_val(); | ||||||
893 | return {Result, std::move(Clobbers)}; | ||||||
894 | } | ||||||
895 | |||||||
896 | assert(all_of(NewPaused,((all_of(NewPaused, [&](ListIndex I) { return Paths[I].Last == DefChainEnd; })) ? static_cast<void> (0) : __assert_fail ("all_of(NewPaused, [&](ListIndex I) { return Paths[I].Last == DefChainEnd; })" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 897, __PRETTY_FUNCTION__)) | ||||||
897 | [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }))((all_of(NewPaused, [&](ListIndex I) { return Paths[I].Last == DefChainEnd; })) ? static_cast<void> (0) : __assert_fail ("all_of(NewPaused, [&](ListIndex I) { return Paths[I].Last == DefChainEnd; })" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 897, __PRETTY_FUNCTION__)); | ||||||
898 | |||||||
899 | // Because liveOnEntry is a clobber, this must be a phi. | ||||||
900 | auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd); | ||||||
901 | |||||||
902 | PriorPathsSize = Paths.size(); | ||||||
903 | PausedSearches.clear(); | ||||||
904 | for (ListIndex I : NewPaused) | ||||||
905 | addSearches(DefChainPhi, PausedSearches, I); | ||||||
906 | NewPaused.clear(); | ||||||
907 | |||||||
908 | Current = DefChainPhi; | ||||||
909 | } | ||||||
910 | } | ||||||
911 | |||||||
  // Sanity check (asserts builds only): every secondary clobber must dominate
  // the primary one, otherwise it would not be legal to cache it back.
  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }
917 | |||||||
  // Clear per-query phi-optimization bookkeeping; must run between queries
  // (tryOptimizePhi asserts both containers are empty on entry).
  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
  }
922 | |||||||
public:
  ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT)
      : MSSA(MSSA), AA(AA), DT(DT) {}

  // Accessor for the alias analysis this walker queries.
  AliasAnalysisType *getAA() { return &AA; }
  /// Finds the nearest clobber for the given query, optimizing phis if
  /// possible.
  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q,
                            unsigned &UpWalkLimit) {
    // Stash pointers to the caller's query and walk-limit storage; all the
    // walking helpers read these members.
    Query = &Q;
    UpwardWalkLimit = &UpWalkLimit;
    // Starting limit must be > 0.
    if (!UpWalkLimit)
      UpWalkLimit++;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist. If we're handed one, silently grab
    // its def. (This has the nice side-effect of ensuring we never cache uses)
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
    // Fast path for the overly-common case (no crazy phi optimization
    // necessary)
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
      Q.AR = WalkResult.AR;
    } else {
      // Walk stopped at a phi: run the full phi-optimization machinery.
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
      checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
#endif
    return Result;
  }
966 | }; | ||||||
967 | |||||||
// Worklist entry for the iterative rename pass: a dominator-tree node, the
// next child of it still to visit, and the memory state flowing into it.
struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};
983 | |||||||
984 | } // end anonymous namespace | ||||||
985 | |||||||
986 | namespace llvm { | ||||||
987 | |||||||
// Shared implementation backing both CachingWalker and SkipSelfWalker; owns
// the ClobberWalker that performs the actual upward walks.
template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase {
  ClobberWalker<AliasAnalysisType> Walker;
  MemorySSA *MSSA;

public:
  ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D)
      : Walker(*M, *A, *D), MSSA(M) {}

  // Location-based query: find the clobber of the given location starting at
  // the given access.
  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
                                              const MemoryLocation &,
                                              unsigned &);
  // Third argument (bool), defines whether the clobber search should skip the
  // original queried access. If true, there will be a follow-up query searching
  // for a clobber access past "self". Note that the Optimized access is not
  // updated if a new clobber is found by this SkipSelf search. If this
  // additional query becomes heavily used we may decide to cache the result.
  // Walker instantiations will decide how to set the SkipSelf bool.
  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool);
};
1007 | |||||||
/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own, but the name has been retained for the
/// moment.
template <class AliasAnalysisType>
class MemorySSA::CachingWalker final : public MemorySSAWalker {
  // Shared walker implementation; owned by MemorySSA, not by this class.
  ClobberWalkerBase<AliasAnalysisType> *Walker;

public:
  CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~CachingWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  // Forward to the base walker with SkipSelf == false.
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, UWL, false);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
  }

  // Override entry points: use the default walk budget.
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};
1046 | |||||||
1047 | template <class AliasAnalysisType> | ||||||
1048 | class MemorySSA::SkipSelfWalker final : public MemorySSAWalker { | ||||||
1049 | ClobberWalkerBase<AliasAnalysisType> *Walker; | ||||||
1050 | |||||||
1051 | public: | ||||||
1052 | SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W) | ||||||
1053 | : MemorySSAWalker(M), Walker(W) {} | ||||||
1054 | ~SkipSelfWalker() override = default; | ||||||
1055 | |||||||
1056 | using MemorySSAWalker::getClobberingMemoryAccess; | ||||||
1057 | |||||||
1058 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) { | ||||||
1059 | return Walker->getClobberingMemoryAccessBase(MA, UWL, true); | ||||||
1060 | } | ||||||
1061 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, | ||||||
1062 | const MemoryLocation &Loc, | ||||||
1063 | unsigned &UWL) { | ||||||
1064 | return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL); | ||||||
1065 | } | ||||||
1066 | |||||||
1067 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override { | ||||||
1068 | unsigned UpwardWalkLimit = MaxCheckLimit; | ||||||
1069 | return getClobberingMemoryAccess(MA, UpwardWalkLimit); | ||||||
1070 | } | ||||||
1071 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, | ||||||
1072 | const MemoryLocation &Loc) override { | ||||||
1073 | unsigned UpwardWalkLimit = MaxCheckLimit; | ||||||
1074 | return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit); | ||||||
| |||||||
1075 | } | ||||||
1076 | |||||||
1077 | void invalidateInfo(MemoryAccess *MA) override { | ||||||
1078 | if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) | ||||||
1079 | MUD->resetOptimized(); | ||||||
1080 | } | ||||||
1081 | }; | ||||||
1082 | |||||||
1083 | } // end namespace llvm | ||||||
1084 | |||||||
1085 | void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal, | ||||||
1086 | bool RenameAllUses) { | ||||||
1087 | // Pass through values to our successors | ||||||
1088 | for (const BasicBlock *S : successors(BB)) { | ||||||
1089 | auto It = PerBlockAccesses.find(S); | ||||||
1090 | // Rename the phi nodes in our successor block | ||||||
1091 | if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front())) | ||||||
1092 | continue; | ||||||
1093 | AccessList *Accesses = It->second.get(); | ||||||
1094 | auto *Phi = cast<MemoryPhi>(&Accesses->front()); | ||||||
1095 | if (RenameAllUses) { | ||||||
1096 | bool ReplacementDone = false; | ||||||
1097 | for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) | ||||||
1098 | if (Phi->getIncomingBlock(I) == BB) { | ||||||
1099 | Phi->setIncomingValue(I, IncomingVal); | ||||||
1100 | ReplacementDone = true; | ||||||
1101 | } | ||||||
1102 | (void) ReplacementDone; | ||||||
1103 | assert(ReplacementDone && "Incomplete phi during partial rename")((ReplacementDone && "Incomplete phi during partial rename" ) ? static_cast<void> (0) : __assert_fail ("ReplacementDone && \"Incomplete phi during partial rename\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1103, __PRETTY_FUNCTION__)); | ||||||
1104 | } else | ||||||
1105 | Phi->addIncoming(IncomingVal, BB); | ||||||
1106 | } | ||||||
1107 | } | ||||||
1108 | |||||||
1109 | /// Rename a single basic block into MemorySSA form. | ||||||
1110 | /// Uses the standard SSA renaming algorithm. | ||||||
1111 | /// \returns The new incoming value. | ||||||
1112 | MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal, | ||||||
1113 | bool RenameAllUses) { | ||||||
1114 | auto It = PerBlockAccesses.find(BB); | ||||||
1115 | // Skip most processing if the list is empty. | ||||||
1116 | if (It != PerBlockAccesses.end()) { | ||||||
1117 | AccessList *Accesses = It->second.get(); | ||||||
1118 | for (MemoryAccess &L : *Accesses) { | ||||||
1119 | if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) { | ||||||
1120 | if (MUD->getDefiningAccess() == nullptr || RenameAllUses) | ||||||
1121 | MUD->setDefiningAccess(IncomingVal); | ||||||
1122 | if (isa<MemoryDef>(&L)) | ||||||
1123 | IncomingVal = &L; | ||||||
1124 | } else { | ||||||
1125 | IncomingVal = &L; | ||||||
1126 | } | ||||||
1127 | } | ||||||
1128 | } | ||||||
1129 | return IncomingVal; | ||||||
1130 | } | ||||||
1131 | |||||||
/// This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  assert(Root && "Trying to rename accesses in an unreachable block");

  SmallVector<RenamePassData, 32> WorkStack;
  // Skip everything if we already renamed this block and we are skipping.
  // Note: You can't sink this into the if, because we need it to occur
  // regardless of whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  // Rename the root first, then seed the explicit DFS stack with it.
  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});

  // Iterative preorder walk. Each stack entry remembers the next child to
  // visit and the incoming value as it was at that dominator-tree node.
  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      // All dominator-tree children of this node have been processed.
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      // Note: You can't sink this into the if, because we need it to occur
      // regardless of whether we skip blocks or not.
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // We already visited this during our renaming, which can happen when
        // being asked to rename multiple blocks. Figure out the incoming val,
        // which is the last def.
        // Incoming value can only change if there is a block def, and in that
        // case, it's the last block def in the list.
        if (auto *BlockDefs = getWritableBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}
1182 | |||||||
1183 | /// This handles unreachable block accesses by deleting phi nodes in | ||||||
1184 | /// unreachable blocks, and marking all other unreachable MemoryAccess's as | ||||||
1185 | /// being uses of the live on entry definition. | ||||||
1186 | void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) { | ||||||
1187 | assert(!DT->isReachableFromEntry(BB) &&((!DT->isReachableFromEntry(BB) && "Reachable block found while handling unreachable blocks" ) ? static_cast<void> (0) : __assert_fail ("!DT->isReachableFromEntry(BB) && \"Reachable block found while handling unreachable blocks\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1188, __PRETTY_FUNCTION__)) | ||||||
1188 | "Reachable block found while handling unreachable blocks")((!DT->isReachableFromEntry(BB) && "Reachable block found while handling unreachable blocks" ) ? static_cast<void> (0) : __assert_fail ("!DT->isReachableFromEntry(BB) && \"Reachable block found while handling unreachable blocks\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1188, __PRETTY_FUNCTION__)); | ||||||
1189 | |||||||
1190 | // Make sure phi nodes in our reachable successors end up with a | ||||||
1191 | // LiveOnEntryDef for our incoming edge, even though our block is forward | ||||||
1192 | // unreachable. We could just disconnect these blocks from the CFG fully, | ||||||
1193 | // but we do not right now. | ||||||
1194 | for (const BasicBlock *S : successors(BB)) { | ||||||
1195 | if (!DT->isReachableFromEntry(S)) | ||||||
1196 | continue; | ||||||
1197 | auto It = PerBlockAccesses.find(S); | ||||||
1198 | // Rename the phi nodes in our successor block | ||||||
1199 | if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front())) | ||||||
1200 | continue; | ||||||
1201 | AccessList *Accesses = It->second.get(); | ||||||
1202 | auto *Phi = cast<MemoryPhi>(&Accesses->front()); | ||||||
1203 | Phi->addIncoming(LiveOnEntryDef.get(), BB); | ||||||
1204 | } | ||||||
1205 | |||||||
1206 | auto It = PerBlockAccesses.find(BB); | ||||||
1207 | if (It == PerBlockAccesses.end()) | ||||||
1208 | return; | ||||||
1209 | |||||||
1210 | auto &Accesses = It->second; | ||||||
1211 | for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) { | ||||||
1212 | auto Next = std::next(AI); | ||||||
1213 | // If we have a phi, just remove it. We are going to replace all | ||||||
1214 | // users with live on entry. | ||||||
1215 | if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI)) | ||||||
1216 | UseOrDef->setDefiningAccess(LiveOnEntryDef.get()); | ||||||
1217 | else | ||||||
1218 | Accesses->erase(AI); | ||||||
1219 | AI = Next; | ||||||
1220 | } | ||||||
1221 | } | ||||||
1222 | |||||||
/// Construct and fully build MemorySSA for \p Func, then eagerly create the
/// default walker.
MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : AA(nullptr), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr), NextID(0) {
  // Build MemorySSA using a batch alias analysis. This reuses the internal
  // state that AA collects during an alias()/getModRefInfo() call. This is
  // safe because there are no CFG changes while building MemorySSA and can
  // significantly reduce the time spent by the compiler in AA, because we will
  // make queries about all the instructions in the Function.
  BatchAAResults BatchAA(*AA);
  buildMemorySSA(BatchAA);
  // Intentionally leave this->AA at nullptr while building so we don't
  // accidentally use the non-batch AliasAnalysis.
  this->AA = AA;
  // Also create the walker here.
  getWalker();
}
1239 | |||||||
1240 | MemorySSA::~MemorySSA() { | ||||||
1241 | // Drop all our references | ||||||
1242 | for (const auto &Pair : PerBlockAccesses) | ||||||
1243 | for (MemoryAccess &MA : *Pair.second) | ||||||
1244 | MA.dropAllReferences(); | ||||||
1245 | } | ||||||
1246 | |||||||
1247 | MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) { | ||||||
1248 | auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr)); | ||||||
1249 | |||||||
1250 | if (Res.second) | ||||||
1251 | Res.first->second = std::make_unique<AccessList>(); | ||||||
1252 | return Res.first->second.get(); | ||||||
1253 | } | ||||||
1254 | |||||||
1255 | MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) { | ||||||
1256 | auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr)); | ||||||
1257 | |||||||
1258 | if (Res.second) | ||||||
1259 | Res.first->second = std::make_unique<DefsList>(); | ||||||
1260 | return Res.first->second.get(); | ||||||
1261 | } | ||||||
1262 | |||||||
1263 | namespace llvm { | ||||||
1264 | |||||||
/// This class is a batch walker of all MemoryUse's in the program, and points
/// their defining access at the thing that actually clobbers them. Because it
/// is a batch walker that touches everything, it does not operate like the
/// other walkers. This walker is basically performing a top-down SSA renaming
/// pass, where the version stack is used as the cache. This enables it to be
/// significantly more time and memory efficient than using the regular walker,
/// which is walking bottom-up.
class MemorySSA::OptimizeUses {
public:
  OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker,
               BatchAAResults *BAA, DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}

  /// Entry point: optimize every MemoryUse in the function (see the .cpp
  /// definition below).
  void optimizeUses();

private:
  /// This represents where a given memorylocation is in the stack.
  struct MemlocStackInfo {
    // This essentially is keeping track of versions of the stack. Whenever
    // the stack changes due to pushes or pops, these versions increase.
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    // This is the lower bound of places on the stack to check. It is equal to
    // the place the last stack walk ended.
    // Note: Correctness depends on this being initialized to 0, which densemap
    // does
    unsigned long LowerBound;
    // Block in which the current LowerBound was established.
    const BasicBlock *LowerBoundBlock;
    // This is where the last walk for this memory location ended.
    unsigned long LastKill;
    // Whether LastKill above is trustworthy for the current walk.
    bool LastKillValid;
    // Alias result recorded for the last kill, if any.
    Optional<AliasResult> AR;
  };

  /// Per-block worker driven by optimizeUses(); parameters carry the shared
  /// renaming state (epochs, version stack, per-location info).
  void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
                           SmallVectorImpl<MemoryAccess *> &,
                           DenseMap<MemoryLocOrCall, MemlocStackInfo> &);

  MemorySSA *MSSA;                      // The SSA form being optimized.
  CachingWalker<BatchAAResults> *Walker; // Used to resolve walks through phis.
  BatchAAResults *AA;                    // Batch alias analysis for queries.
  DominatorTree *DT;                     // Drives the top-down walk.
};
1308 | |||||||
1309 | } // end namespace llvm | ||||||
1310 | |||||||
/// Optimize the uses in a given block This is basically the SSA renaming
/// algorithm, with one caveat: We are able to use a single stack for all
/// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
/// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
/// going to be some position in that stack of possible ones.
///
/// We track the stack positions that each MemoryLocation needs
/// to check, and last ended at. This is because we only want to check the
/// things that changed since last time. The same MemoryLocation should
/// get clobbered by the same store (getModRefInfo does not use invariantness or
/// things like this, and if they start, we can modify MemoryLocOrCall to
/// include relevant data)
void MemorySSA::OptimizeUses::optimizeUsesInBlock(
    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
    SmallVectorImpl<MemoryAccess *> &VersionStack,
    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {

  /// If no accesses, nothing to do.
  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
  if (Accesses == nullptr)
    return;

  // Pop everything that doesn't dominate the current block off the stack,
  // increment the PopEpoch to account for this.
  while (true) {
    assert(
        !VersionStack.empty() &&
        "Version stack should have liveOnEntry sentinel dominating everything");
    BasicBlock *BackBlock = VersionStack.back()->getBlock();
    if (DT->dominates(BackBlock, BB))
      break;
    // Pop the whole run of entries belonging to the non-dominating block.
    while (VersionStack.back()->getBlock() == BackBlock)
      VersionStack.pop_back();
    ++PopEpoch;
  }

  for (MemoryAccess &MA : *Accesses) {
    auto *MU = dyn_cast<MemoryUse>(&MA);
    if (!MU) {
      // Defs and phis are pushed as new stack versions; only uses get
      // optimized.
      VersionStack.push_back(&MA);
      ++StackEpoch;
      continue;
    }

    if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
      MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
      continue;
    }

    MemoryLocOrCall UseMLOC(MU);
    auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, it means we've removed stuff from top of
    // stack due to changing blocks. We may have to reset the lower bound or
    // last kill info.
    if (LocInfo.PopEpoch != PopEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
      // If the lower bound was in something that no longer dominates us, we
      // have to reset it.
      // We can't simply track stack size, because the stack may have had
      // pushes/pops in the meantime.
      // XXX: This is non-optimal, but only is slower cases with heavily
      // branching dominator trees. To get the optimal number of queries would
      // be to make lowerbound and lastkill a per-loc stack, and pop it until
      // the top of that stack dominates us. This does not seem worth it ATM.
      // A much cheaper optimization would be to always explore the deepest
      // branch of the dominator tree first. This will guarantee this resets on
      // the smallest set of blocks.
      if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
          !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
        // Reset the lower bound of things to check.
        // TODO: Some day we should be able to reset to last kill, rather than
        // 0.
        LocInfo.LowerBound = 0;
        LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
        LocInfo.LastKillValid = false;
      }
    } else if (LocInfo.StackEpoch != StackEpoch) {
      // If all that has changed is the StackEpoch, we only have to check the
      // new things on the stack, because we've checked everything before. In
      // this case, the lower bound of things to check remains the same.
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
    }
    if (!LocInfo.LastKillValid) {
      // No trusted kill yet: assume the top of the stack may be the kill.
      LocInfo.LastKill = VersionStack.size() - 1;
      LocInfo.LastKillValid = true;
      LocInfo.AR = MayAlias;
    }

    // At this point, we should have corrected last kill and LowerBound to be
    // in bounds.
    assert(LocInfo.LowerBound < VersionStack.size() &&
           "Lower bound out of range");
    assert(LocInfo.LastKill < VersionStack.size() &&
           "Last kill info out of range");
    // In any case, the new upper bound is the top of the stack.
    unsigned long UpperBound = VersionStack.size() - 1;

    if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
      LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
                        << *(MU->getMemoryInst()) << ")"
                        << " because there are "
                        << UpperBound - LocInfo.LowerBound
                        << " stores to disambiguate\n");
      // Because we did not walk, LastKill is no longer valid, as this may
      // have been a kill.
      LocInfo.LastKillValid = false;
      continue;
    }
    bool FoundClobberResult = false;
    unsigned UpwardWalkLimit = MaxCheckLimit;
    // Walk down the stack from the top until we hit a clobber or the lower
    // bound.
    while (UpperBound > LocInfo.LowerBound) {
      if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker, see where we ended up, go there
        MemoryAccess *Result =
            Walker->getClobberingMemoryAccess(MU, UpwardWalkLimit);
        // We are guaranteed to find it or something is wrong
        while (VersionStack[UpperBound] != Result) {
          assert(UpperBound != 0);
          --UpperBound;
        }
        FoundClobberResult = true;
        break;
      }

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
      // If the lifetime of the pointer ends at this instruction, it's live on
      // entry.
      if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
        // Reset UpperBound to liveOnEntryDef's place in the stack
        UpperBound = 0;
        FoundClobberResult = true;
        LocInfo.AR = MustAlias;
        break;
      }
      ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
      if (CA.IsClobber) {
        FoundClobberResult = true;
        LocInfo.AR = CA.AR;
        break;
      }
      --UpperBound;
    }

    // Note: Phis always have AliasResult AR set to MayAlias ATM.

    // At the end of this loop, UpperBound is either a clobber, or lower bound
    // PHI walking may cause it to be < LowerBound, and in fact, < LastKill.
    if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      // We were last killed now by where we got to
      if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
        LocInfo.AR = None;
      MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
      LocInfo.LastKill = UpperBound;
    } else {
      // Otherwise, we checked all the new ones, and now we know we can get to
      // LastKill.
      MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
    }
    // Next time, only the portion of the stack added after this point needs
    // to be examined for this location.
    LocInfo.LowerBound = VersionStack.size() - 1;
    LocInfo.LowerBoundBlock = BB;
  }
}
1475 | |||||||
1476 | /// Optimize uses to point to their actual clobbering definitions. | ||||||
1477 | void MemorySSA::OptimizeUses::optimizeUses() { | ||||||
1478 | SmallVector<MemoryAccess *, 16> VersionStack; | ||||||
1479 | DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo; | ||||||
1480 | VersionStack.push_back(MSSA->getLiveOnEntryDef()); | ||||||
1481 | |||||||
1482 | unsigned long StackEpoch = 1; | ||||||
1483 | unsigned long PopEpoch = 1; | ||||||
1484 | // We perform a non-recursive top-down dominator tree walk. | ||||||
1485 | for (const auto *DomNode : depth_first(DT->getRootNode())) | ||||||
1486 | optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack, | ||||||
1487 | LocStackInfo); | ||||||
1488 | } | ||||||
1489 | |||||||
1490 | void MemorySSA::placePHINodes( | ||||||
1491 | const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) { | ||||||
1492 | // Determine where our MemoryPhi's should go | ||||||
1493 | ForwardIDFCalculator IDFs(*DT); | ||||||
1494 | IDFs.setDefiningBlocks(DefiningBlocks); | ||||||
1495 | SmallVector<BasicBlock *, 32> IDFBlocks; | ||||||
1496 | IDFs.calculate(IDFBlocks); | ||||||
1497 | |||||||
1498 | // Now place MemoryPhi nodes. | ||||||
1499 | for (auto &BB : IDFBlocks) | ||||||
1500 | createMemoryPhi(BB); | ||||||
1501 | } | ||||||
1502 | |||||||
/// Build the complete MemorySSA form for F using the batch alias analysis
/// \p BAA: create accesses, place phis, rename, and optimize uses.
void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
  // We create an access to represent "live on entry", for things like
  // arguments or users of globals, where the memory they use is defined before
  // the beginning of the function. We do not actually insert it into the IR.
  // We do not define a live on exit for the immediate uses, and thus our
  // semantics do *not* imply that something with no immediate uses can simply
  // be removed.
  BasicBlock &StartingPoint = F.getEntryBlock();
  LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
                                     &StartingPoint, NextID++));

  // We maintain lists of memory accesses per-block, trading memory for time. We
  // could just look up the memory access for every possible instruction in the
  // stream.
  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
  // Go through each block, figure out where defs occur, and chain together all
  // the accesses.
  for (BasicBlock &B : F) {
    bool InsertIntoDef = false;
    AccessList *Accesses = nullptr;
    DefsList *Defs = nullptr;
    for (Instruction &I : B) {
      // createNewAccess returns null for instructions with no memory effects.
      MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
      if (!MUD)
        continue;

      // Lists are created lazily, so blocks without accesses get no entries.
      if (!Accesses)
        Accesses = getOrCreateAccessList(&B);
      Accesses->push_back(MUD);
      if (isa<MemoryDef>(MUD)) {
        InsertIntoDef = true;
        if (!Defs)
          Defs = getOrCreateDefsList(&B);
        Defs->push_back(*MUD);
      }
    }
    if (InsertIntoDef)
      DefiningBlocks.insert(&B);
  }
  placePHINodes(DefiningBlocks);

  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
  // filled in with all blocks.
  SmallPtrSet<BasicBlock *, 16> Visited;
  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);

  // Use local, stack-allocated walkers over the batch AA for the initial use
  // optimization; the member walkers are created later over the regular AA.
  ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BAA, DT);
  CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
  OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses();

  // Mark the uses in unreachable blocks as live on entry, so that they go
  // somewhere.
  for (auto &BB : F)
    if (!Visited.count(&BB))
      markUnreachableAsLiveOnEntry(&BB);
}
1559 | |||||||
1560 | MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); } | ||||||
1561 | |||||||
1562 | MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() { | ||||||
1563 | if (Walker) | ||||||
1564 | return Walker.get(); | ||||||
1565 | |||||||
1566 | if (!WalkerBase) | ||||||
1567 | WalkerBase = | ||||||
1568 | std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT); | ||||||
1569 | |||||||
1570 | Walker = | ||||||
1571 | std::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get()); | ||||||
1572 | return Walker.get(); | ||||||
1573 | } | ||||||
1574 | |||||||
1575 | MemorySSAWalker *MemorySSA::getSkipSelfWalker() { | ||||||
1576 | if (SkipWalker) | ||||||
1577 | return SkipWalker.get(); | ||||||
1578 | |||||||
1579 | if (!WalkerBase) | ||||||
1580 | WalkerBase = | ||||||
1581 | std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT); | ||||||
1582 | |||||||
1583 | SkipWalker = | ||||||
1584 | std::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get()); | ||||||
1585 | return SkipWalker.get(); | ||||||
1586 | } | ||||||
1587 | |||||||
1588 | |||||||
1589 | // This is a helper function used by the creation routines. It places NewAccess | ||||||
1590 | // into the access and defs lists for a given basic block, at the given | ||||||
1591 | // insertion point. | ||||||
1592 | void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess, | ||||||
1593 | const BasicBlock *BB, | ||||||
1594 | InsertionPlace Point) { | ||||||
1595 | auto *Accesses = getOrCreateAccessList(BB); | ||||||
1596 | if (Point == Beginning) { | ||||||
1597 | // If it's a phi node, it goes first, otherwise, it goes after any phi | ||||||
1598 | // nodes. | ||||||
1599 | if (isa<MemoryPhi>(NewAccess)) { | ||||||
1600 | Accesses->push_front(NewAccess); | ||||||
1601 | auto *Defs = getOrCreateDefsList(BB); | ||||||
1602 | Defs->push_front(*NewAccess); | ||||||
1603 | } else { | ||||||
1604 | auto AI = find_if_not( | ||||||
1605 | *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); }); | ||||||
1606 | Accesses->insert(AI, NewAccess); | ||||||
1607 | if (!isa<MemoryUse>(NewAccess)) { | ||||||
1608 | auto *Defs = getOrCreateDefsList(BB); | ||||||
1609 | auto DI = find_if_not( | ||||||
1610 | *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); }); | ||||||
1611 | Defs->insert(DI, *NewAccess); | ||||||
1612 | } | ||||||
1613 | } | ||||||
1614 | } else { | ||||||
1615 | Accesses->push_back(NewAccess); | ||||||
1616 | if (!isa<MemoryUse>(NewAccess)) { | ||||||
1617 | auto *Defs = getOrCreateDefsList(BB); | ||||||
1618 | Defs->push_back(*NewAccess); | ||||||
1619 | } | ||||||
1620 | } | ||||||
1621 | BlockNumberingValid.erase(BB); | ||||||
1622 | } | ||||||
1623 | |||||||
/// Link \p What into the access list of \p BB immediately before \p InsertPt,
/// and into the defs list at the corresponding position when it is a def or
/// phi. Invalidates the cached numbering for \p BB.
void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
                                      AccessList::iterator InsertPt) {
  auto *Accesses = getWritableBlockAccesses(BB);
  // Remember whether we were asked to append; we must check before inserting,
  // since the insert changes what end() means relative to InsertPt.
  bool WasEnd = InsertPt == Accesses->end();
  Accesses->insert(AccessList::iterator(InsertPt), What);
  if (!isa<MemoryUse>(What)) {
    auto *Defs = getOrCreateDefsList(BB);
    // If we got asked to insert at the end, we have an easy job, just shove it
    // at the end. If we got asked to insert before an existing def, we also get
    // an iterator. If we got asked to insert before a use, we have to hunt for
    // the next def.
    if (WasEnd) {
      Defs->push_back(*What);
    } else if (isa<MemoryDef>(InsertPt)) {
      Defs->insert(InsertPt->getDefsIterator(), *What);
    } else {
      // Walk forward past uses until we hit the next def (or the end).
      while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
        ++InsertPt;
      // Either we found a def, or we are inserting at the end
      if (InsertPt == Accesses->end())
        Defs->push_back(*What);
      else
        Defs->insert(InsertPt->getDefsIterator(), *What);
    }
  }
  BlockNumberingValid.erase(BB);
}
1651 | |||||||
1652 | void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) { | ||||||
1653 | // Keep it in the lookup tables, remove from the lists | ||||||
1654 | removeFromLists(What, false); | ||||||
1655 | |||||||
1656 | // Note that moving should implicitly invalidate the optimized state of a | ||||||
1657 | // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a | ||||||
1658 | // MemoryDef. | ||||||
1659 | if (auto *MD = dyn_cast<MemoryDef>(What)) | ||||||
1660 | MD->resetOptimized(); | ||||||
1661 | What->setBlock(BB); | ||||||
1662 | } | ||||||
1663 | |||||||
1664 | // Move What before Where in the IR. The end result is that What will belong to | ||||||
1665 | // the right lists and have the right Block set, but will not otherwise be | ||||||
1666 | // correct. It will not have the right defining access, and if it is a def, | ||||||
1667 | // things below it will not properly be updated. | ||||||
1668 | void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB, | ||||||
1669 | AccessList::iterator Where) { | ||||||
1670 | prepareForMoveTo(What, BB); | ||||||
1671 | insertIntoListsBefore(What, BB, Where); | ||||||
1672 | } | ||||||
1673 | |||||||
1674 | void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB, | ||||||
1675 | InsertionPlace Point) { | ||||||
1676 | if (isa<MemoryPhi>(What)) { | ||||||
1677 | assert(Point == Beginning &&((Point == Beginning && "Can only move a Phi at the beginning of the block" ) ? static_cast<void> (0) : __assert_fail ("Point == Beginning && \"Can only move a Phi at the beginning of the block\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1678, __PRETTY_FUNCTION__)) | ||||||
1678 | "Can only move a Phi at the beginning of the block")((Point == Beginning && "Can only move a Phi at the beginning of the block" ) ? static_cast<void> (0) : __assert_fail ("Point == Beginning && \"Can only move a Phi at the beginning of the block\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1678, __PRETTY_FUNCTION__)); | ||||||
1679 | // Update lookup table entry | ||||||
1680 | ValueToMemoryAccess.erase(What->getBlock()); | ||||||
1681 | bool Inserted = ValueToMemoryAccess.insert({BB, What}).second; | ||||||
1682 | (void)Inserted; | ||||||
1683 | assert(Inserted && "Cannot move a Phi to a block that already has one")((Inserted && "Cannot move a Phi to a block that already has one" ) ? static_cast<void> (0) : __assert_fail ("Inserted && \"Cannot move a Phi to a block that already has one\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1683, __PRETTY_FUNCTION__)); | ||||||
1684 | } | ||||||
1685 | |||||||
1686 | prepareForMoveTo(What, BB); | ||||||
1687 | insertIntoListsForBlock(What, BB, Point); | ||||||
1688 | } | ||||||
1689 | |||||||
1690 | MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) { | ||||||
1691 | assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB")((!getMemoryAccess(BB) && "MemoryPhi already exists for this BB" ) ? static_cast<void> (0) : __assert_fail ("!getMemoryAccess(BB) && \"MemoryPhi already exists for this BB\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1691, __PRETTY_FUNCTION__)); | ||||||
1692 | MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++); | ||||||
1693 | // Phi's always are placed at the front of the block. | ||||||
1694 | insertIntoListsForBlock(Phi, BB, Beginning); | ||||||
1695 | ValueToMemoryAccess[BB] = Phi; | ||||||
1696 | return Phi; | ||||||
1697 | } | ||||||
1698 | |||||||
1699 | MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I, | ||||||
1700 | MemoryAccess *Definition, | ||||||
1701 | const MemoryUseOrDef *Template, | ||||||
1702 | bool CreationMustSucceed) { | ||||||
1703 | assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI")((!isa<PHINode>(I) && "Cannot create a defined access for a PHI" ) ? static_cast<void> (0) : __assert_fail ("!isa<PHINode>(I) && \"Cannot create a defined access for a PHI\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1703, __PRETTY_FUNCTION__)); | ||||||
1704 | MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template); | ||||||
1705 | if (CreationMustSucceed) | ||||||
1706 | assert(NewAccess != nullptr && "Tried to create a memory access for a "((NewAccess != nullptr && "Tried to create a memory access for a " "non-memory touching instruction") ? static_cast<void> (0) : __assert_fail ("NewAccess != nullptr && \"Tried to create a memory access for a \" \"non-memory touching instruction\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1707, __PRETTY_FUNCTION__)) | ||||||
1707 | "non-memory touching instruction")((NewAccess != nullptr && "Tried to create a memory access for a " "non-memory touching instruction") ? static_cast<void> (0) : __assert_fail ("NewAccess != nullptr && \"Tried to create a memory access for a \" \"non-memory touching instruction\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1707, __PRETTY_FUNCTION__)); | ||||||
1708 | if (NewAccess) | ||||||
1709 | NewAccess->setDefiningAccess(Definition); | ||||||
1710 | return NewAccess; | ||||||
1711 | } | ||||||
1712 | |||||||
1713 | // Return true if the instruction has ordering constraints. | ||||||
1714 | // Note specifically that this only considers stores and loads | ||||||
1715 | // because others are still considered ModRef by getModRefInfo. | ||||||
1716 | static inline bool isOrdered(const Instruction *I) { | ||||||
1717 | if (auto *SI = dyn_cast<StoreInst>(I)) { | ||||||
1718 | if (!SI->isUnordered()) | ||||||
1719 | return true; | ||||||
1720 | } else if (auto *LI = dyn_cast<LoadInst>(I)) { | ||||||
1721 | if (!LI->isUnordered()) | ||||||
1722 | return true; | ||||||
1723 | } | ||||||
1724 | return false; | ||||||
1725 | } | ||||||
1726 | |||||||
1727 | /// Helper function to create new memory accesses | ||||||
1728 | template <typename AliasAnalysisType> | ||||||
1729 | MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I, | ||||||
1730 | AliasAnalysisType *AAP, | ||||||
1731 | const MemoryUseOrDef *Template) { | ||||||
1732 | // The assume intrinsic has a control dependency which we model by claiming | ||||||
1733 | // that it writes arbitrarily. Debuginfo intrinsics may be considered | ||||||
1734 | // clobbers when we have a nonstandard AA pipeline. Ignore these fake memory | ||||||
1735 | // dependencies here. | ||||||
1736 | // FIXME: Replace this special casing with a more accurate modelling of | ||||||
1737 | // assume's control dependency. | ||||||
1738 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) | ||||||
1739 | if (II->getIntrinsicID() == Intrinsic::assume) | ||||||
1740 | return nullptr; | ||||||
1741 | |||||||
1742 | // Using a nonstandard AA pipelines might leave us with unexpected modref | ||||||
1743 | // results for I, so add a check to not model instructions that may not read | ||||||
1744 | // from or write to memory. This is necessary for correctness. | ||||||
1745 | if (!I->mayReadFromMemory() && !I->mayWriteToMemory()) | ||||||
1746 | return nullptr; | ||||||
1747 | |||||||
1748 | bool Def, Use; | ||||||
1749 | if (Template) { | ||||||
1750 | Def = dyn_cast_or_null<MemoryDef>(Template) != nullptr; | ||||||
1751 | Use = dyn_cast_or_null<MemoryUse>(Template) != nullptr; | ||||||
1752 | #if !defined(NDEBUG) | ||||||
1753 | ModRefInfo ModRef = AAP->getModRefInfo(I, None); | ||||||
1754 | bool DefCheck, UseCheck; | ||||||
1755 | DefCheck = isModSet(ModRef) || isOrdered(I); | ||||||
1756 | UseCheck = isRefSet(ModRef); | ||||||
1757 | assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template")((Def == DefCheck && (Def || Use == UseCheck) && "Invalid template") ? static_cast<void> (0) : __assert_fail ("Def == DefCheck && (Def || Use == UseCheck) && \"Invalid template\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1757, __PRETTY_FUNCTION__)); | ||||||
1758 | #endif | ||||||
1759 | } else { | ||||||
1760 | // Find out what affect this instruction has on memory. | ||||||
1761 | ModRefInfo ModRef = AAP->getModRefInfo(I, None); | ||||||
1762 | // The isOrdered check is used to ensure that volatiles end up as defs | ||||||
1763 | // (atomics end up as ModRef right now anyway). Until we separate the | ||||||
1764 | // ordering chain from the memory chain, this enables people to see at least | ||||||
1765 | // some relative ordering to volatiles. Note that getClobberingMemoryAccess | ||||||
1766 | // will still give an answer that bypasses other volatile loads. TODO: | ||||||
1767 | // Separate memory aliasing and ordering into two different chains so that | ||||||
1768 | // we can precisely represent both "what memory will this read/write/is | ||||||
1769 | // clobbered by" and "what instructions can I move this past". | ||||||
1770 | Def = isModSet(ModRef) || isOrdered(I); | ||||||
1771 | Use = isRefSet(ModRef); | ||||||
1772 | } | ||||||
1773 | |||||||
1774 | // It's possible for an instruction to not modify memory at all. During | ||||||
1775 | // construction, we ignore them. | ||||||
1776 | if (!Def && !Use) | ||||||
1777 | return nullptr; | ||||||
1778 | |||||||
1779 | MemoryUseOrDef *MUD; | ||||||
1780 | if (Def) | ||||||
1781 | MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++); | ||||||
1782 | else | ||||||
1783 | MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent()); | ||||||
1784 | ValueToMemoryAccess[I] = MUD; | ||||||
1785 | return MUD; | ||||||
1786 | } | ||||||
1787 | |||||||
1788 | /// Returns true if \p Replacer dominates \p Replacee . | ||||||
1789 | bool MemorySSA::dominatesUse(const MemoryAccess *Replacer, | ||||||
1790 | const MemoryAccess *Replacee) const { | ||||||
1791 | if (isa<MemoryUseOrDef>(Replacee)) | ||||||
1792 | return DT->dominates(Replacer->getBlock(), Replacee->getBlock()); | ||||||
1793 | const auto *MP = cast<MemoryPhi>(Replacee); | ||||||
1794 | // For a phi node, the use occurs in the predecessor block of the phi node. | ||||||
1795 | // Since we may occur multiple times in the phi node, we have to check each | ||||||
1796 | // operand to ensure Replacer dominates each operand where Replacee occurs. | ||||||
1797 | for (const Use &Arg : MP->operands()) { | ||||||
1798 | if (Arg.get() != Replacee && | ||||||
1799 | !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg))) | ||||||
1800 | return false; | ||||||
1801 | } | ||||||
1802 | return true; | ||||||
1803 | } | ||||||
1804 | |||||||
1805 | /// Properly remove \p MA from all of MemorySSA's lookup tables. | ||||||
1806 | void MemorySSA::removeFromLookups(MemoryAccess *MA) { | ||||||
1807 | assert(MA->use_empty() &&((MA->use_empty() && "Trying to remove memory access that still has uses" ) ? static_cast<void> (0) : __assert_fail ("MA->use_empty() && \"Trying to remove memory access that still has uses\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1808, __PRETTY_FUNCTION__)) | ||||||
1808 | "Trying to remove memory access that still has uses")((MA->use_empty() && "Trying to remove memory access that still has uses" ) ? static_cast<void> (0) : __assert_fail ("MA->use_empty() && \"Trying to remove memory access that still has uses\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1808, __PRETTY_FUNCTION__)); | ||||||
1809 | BlockNumbering.erase(MA); | ||||||
1810 | if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) | ||||||
1811 | MUD->setDefiningAccess(nullptr); | ||||||
1812 | // Invalidate our walker's cache if necessary | ||||||
1813 | if (!isa<MemoryUse>(MA)) | ||||||
1814 | getWalker()->invalidateInfo(MA); | ||||||
1815 | |||||||
1816 | Value *MemoryInst; | ||||||
1817 | if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) | ||||||
1818 | MemoryInst = MUD->getMemoryInst(); | ||||||
1819 | else | ||||||
1820 | MemoryInst = MA->getBlock(); | ||||||
1821 | |||||||
1822 | auto VMA = ValueToMemoryAccess.find(MemoryInst); | ||||||
1823 | if (VMA->second == MA) | ||||||
1824 | ValueToMemoryAccess.erase(VMA); | ||||||
1825 | } | ||||||
1826 | |||||||
1827 | /// Properly remove \p MA from all of MemorySSA's lists. | ||||||
1828 | /// | ||||||
1829 | /// Because of the way the intrusive list and use lists work, it is important to | ||||||
1830 | /// do removal in the right order. | ||||||
1831 | /// ShouldDelete defaults to true, and will cause the memory access to also be | ||||||
1832 | /// deleted, not just removed. | ||||||
void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
  BasicBlock *BB = MA->getBlock();
  // The access list owns the reference, so we erase it from the non-owning list
  // first.
  if (!isa<MemoryUse>(MA)) {
    // NOTE(review): assumes BB has a defs-list entry containing MA; find() is
    // not checked against end() — presumably all callers guarantee this.
    auto DefsIt = PerBlockDefs.find(BB);
    std::unique_ptr<DefsList> &Defs = DefsIt->second;
    Defs->remove(*MA);
    // Drop the per-block defs entry once it becomes empty.
    if (Defs->empty())
      PerBlockDefs.erase(DefsIt);
  }

  // The erase call here will delete it. If we don't want it deleted, we call
  // remove instead.
  auto AccessIt = PerBlockAccesses.find(BB);
  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
  if (ShouldDelete)
    Accesses->erase(MA);
  else
    Accesses->remove(MA);

  // With no accesses left in BB, the list entry and any cached intra-block
  // numbering are both stale; drop them.
  if (Accesses->empty()) {
    PerBlockAccesses.erase(AccessIt);
    BlockNumberingValid.erase(BB);
  }
}
1859 | |||||||
1860 | void MemorySSA::print(raw_ostream &OS) const { | ||||||
1861 | MemorySSAAnnotatedWriter Writer(this); | ||||||
1862 | F.print(OS, &Writer); | ||||||
1863 | } | ||||||
1864 | |||||||
1865 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | ||||||
1866 | LLVM_DUMP_METHOD__attribute__((noinline)) __attribute__((__used__)) void MemorySSA::dump() const { print(dbgs()); } | ||||||
1867 | #endif | ||||||
1868 | |||||||
/// Run the structural self-checks over the whole function (each helper is a
/// no-op in release builds; see their individual #ifdef guards).
void MemorySSA::verifyMemorySSA() const {
  verifyDefUses(F);
  verifyDomination(F);
  verifyOrdering(F);
  verifyDominationNumbers(F);
  verifyPrevDefInPhis(F);
  // Previously, the verification used to also verify that the clobberingAccess
  // cached by MemorySSA is the same as the clobberingAccess found at a later
  // query to AA. This does not hold true in general due to the current fragility
  // of BasicAA which has arbitrary caps on the things it analyzes before giving
  // up. As a result, transformations that are correct, will lead to BasicAA
  // returning different Alias answers before and after that transformation.
  // Invalidating MemorySSA is not an option, as the results in BasicAA can be so
  // random, in the worst case we'd need to rebuild MemorySSA from scratch after
  // every transformation, which defeats the purpose of using it. For such an
  // example, see test4 added in D51960.
}
1886 | |||||||
1887 | void MemorySSA::verifyPrevDefInPhis(Function &F) const { | ||||||
1888 | #if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS) | ||||||
1889 | for (const BasicBlock &BB : F) { | ||||||
1890 | if (MemoryPhi *Phi = getMemoryAccess(&BB)) { | ||||||
1891 | for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) { | ||||||
1892 | auto *Pred = Phi->getIncomingBlock(I); | ||||||
1893 | auto *IncAcc = Phi->getIncomingValue(I); | ||||||
1894 | // If Pred has no unreachable predecessors, get last def looking at | ||||||
1895 | // IDoms. If, while walkings IDoms, any of these has an unreachable | ||||||
1896 | // predecessor, then the incoming def can be any access. | ||||||
1897 | if (auto *DTNode = DT->getNode(Pred)) { | ||||||
1898 | while (DTNode) { | ||||||
1899 | if (auto *DefList = getBlockDefs(DTNode->getBlock())) { | ||||||
1900 | auto *LastAcc = &*(--DefList->end()); | ||||||
1901 | assert(LastAcc == IncAcc &&((LastAcc == IncAcc && "Incorrect incoming access into phi." ) ? static_cast<void> (0) : __assert_fail ("LastAcc == IncAcc && \"Incorrect incoming access into phi.\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1902, __PRETTY_FUNCTION__)) | ||||||
1902 | "Incorrect incoming access into phi.")((LastAcc == IncAcc && "Incorrect incoming access into phi." ) ? static_cast<void> (0) : __assert_fail ("LastAcc == IncAcc && \"Incorrect incoming access into phi.\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1902, __PRETTY_FUNCTION__)); | ||||||
1903 | break; | ||||||
1904 | } | ||||||
1905 | DTNode = DTNode->getIDom(); | ||||||
1906 | } | ||||||
1907 | } else { | ||||||
1908 | // If Pred has unreachable predecessors, but has at least a Def, the | ||||||
1909 | // incoming access can be the last Def in Pred, or it could have been | ||||||
1910 | // optimized to LoE. After an update, though, the LoE may have been | ||||||
1911 | // replaced by another access, so IncAcc may be any access. | ||||||
1912 | // If Pred has unreachable predecessors and no Defs, incoming access | ||||||
1913 | // should be LoE; However, after an update, it may be any access. | ||||||
1914 | } | ||||||
1915 | } | ||||||
1916 | } | ||||||
1917 | } | ||||||
1918 | #endif | ||||||
1919 | } | ||||||
1920 | |||||||
1921 | /// Verify that all of the blocks we believe to have valid domination numbers | ||||||
1922 | /// actually have valid domination numbers. | ||||||
1923 | void MemorySSA::verifyDominationNumbers(const Function &F) const { | ||||||
1924 | #ifndef NDEBUG | ||||||
1925 | if (BlockNumberingValid.empty()) | ||||||
1926 | return; | ||||||
1927 | |||||||
1928 | SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid; | ||||||
1929 | for (const BasicBlock &BB : F) { | ||||||
1930 | if (!ValidBlocks.count(&BB)) | ||||||
1931 | continue; | ||||||
1932 | |||||||
1933 | ValidBlocks.erase(&BB); | ||||||
1934 | |||||||
1935 | const AccessList *Accesses = getBlockAccesses(&BB); | ||||||
1936 | // It's correct to say an empty block has valid numbering. | ||||||
1937 | if (!Accesses) | ||||||
1938 | continue; | ||||||
1939 | |||||||
1940 | // Block numbering starts at 1. | ||||||
1941 | unsigned long LastNumber = 0; | ||||||
1942 | for (const MemoryAccess &MA : *Accesses) { | ||||||
1943 | auto ThisNumberIter = BlockNumbering.find(&MA); | ||||||
1944 | assert(ThisNumberIter != BlockNumbering.end() &&((ThisNumberIter != BlockNumbering.end() && "MemoryAccess has no domination number in a valid block!" ) ? static_cast<void> (0) : __assert_fail ("ThisNumberIter != BlockNumbering.end() && \"MemoryAccess has no domination number in a valid block!\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1945, __PRETTY_FUNCTION__)) | ||||||
1945 | "MemoryAccess has no domination number in a valid block!")((ThisNumberIter != BlockNumbering.end() && "MemoryAccess has no domination number in a valid block!" ) ? static_cast<void> (0) : __assert_fail ("ThisNumberIter != BlockNumbering.end() && \"MemoryAccess has no domination number in a valid block!\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1945, __PRETTY_FUNCTION__)); | ||||||
1946 | |||||||
1947 | unsigned long ThisNumber = ThisNumberIter->second; | ||||||
1948 | assert(ThisNumber > LastNumber &&((ThisNumber > LastNumber && "Domination numbers should be strictly increasing!" ) ? static_cast<void> (0) : __assert_fail ("ThisNumber > LastNumber && \"Domination numbers should be strictly increasing!\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1949, __PRETTY_FUNCTION__)) | ||||||
1949 | "Domination numbers should be strictly increasing!")((ThisNumber > LastNumber && "Domination numbers should be strictly increasing!" ) ? static_cast<void> (0) : __assert_fail ("ThisNumber > LastNumber && \"Domination numbers should be strictly increasing!\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1949, __PRETTY_FUNCTION__)); | ||||||
1950 | LastNumber = ThisNumber; | ||||||
1951 | } | ||||||
1952 | } | ||||||
1953 | |||||||
1954 | assert(ValidBlocks.empty() &&((ValidBlocks.empty() && "All valid BasicBlocks should exist in F -- dangling pointers?" ) ? static_cast<void> (0) : __assert_fail ("ValidBlocks.empty() && \"All valid BasicBlocks should exist in F -- dangling pointers?\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1955, __PRETTY_FUNCTION__)) | ||||||
1955 | "All valid BasicBlocks should exist in F -- dangling pointers?")((ValidBlocks.empty() && "All valid BasicBlocks should exist in F -- dangling pointers?" ) ? static_cast<void> (0) : __assert_fail ("ValidBlocks.empty() && \"All valid BasicBlocks should exist in F -- dangling pointers?\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1955, __PRETTY_FUNCTION__)); | ||||||
1956 | #endif | ||||||
1957 | } | ||||||
1958 | |||||||
1959 | /// Verify that the order and existence of MemoryAccesses matches the | ||||||
1960 | /// order and existence of memory affecting instructions. | ||||||
1961 | void MemorySSA::verifyOrdering(Function &F) const { | ||||||
1962 | #ifndef NDEBUG | ||||||
1963 | // Walk all the blocks, comparing what the lookups think and what the access | ||||||
1964 | // lists think, as well as the order in the blocks vs the order in the access | ||||||
1965 | // lists. | ||||||
1966 | SmallVector<MemoryAccess *, 32> ActualAccesses; | ||||||
1967 | SmallVector<MemoryAccess *, 32> ActualDefs; | ||||||
1968 | for (BasicBlock &B : F) { | ||||||
1969 | const AccessList *AL = getBlockAccesses(&B); | ||||||
1970 | const auto *DL = getBlockDefs(&B); | ||||||
1971 | MemoryAccess *Phi = getMemoryAccess(&B); | ||||||
1972 | if (Phi) { | ||||||
1973 | ActualAccesses.push_back(Phi); | ||||||
1974 | ActualDefs.push_back(Phi); | ||||||
1975 | } | ||||||
1976 | |||||||
1977 | for (Instruction &I : B) { | ||||||
1978 | MemoryAccess *MA = getMemoryAccess(&I); | ||||||
1979 | assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&(((!MA || (AL && (isa<MemoryUse>(MA) || DL))) && "We have memory affecting instructions " "in this block but they are not in the " "access list or defs list") ? static_cast<void> (0) : __assert_fail ("(!MA || (AL && (isa<MemoryUse>(MA) || DL))) && \"We have memory affecting instructions \" \"in this block but they are not in the \" \"access list or defs list\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1982, __PRETTY_FUNCTION__)) | ||||||
1980 | "We have memory affecting instructions "(((!MA || (AL && (isa<MemoryUse>(MA) || DL))) && "We have memory affecting instructions " "in this block but they are not in the " "access list or defs list") ? static_cast<void> (0) : __assert_fail ("(!MA || (AL && (isa<MemoryUse>(MA) || DL))) && \"We have memory affecting instructions \" \"in this block but they are not in the \" \"access list or defs list\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1982, __PRETTY_FUNCTION__)) | ||||||
1981 | "in this block but they are not in the "(((!MA || (AL && (isa<MemoryUse>(MA) || DL))) && "We have memory affecting instructions " "in this block but they are not in the " "access list or defs list") ? static_cast<void> (0) : __assert_fail ("(!MA || (AL && (isa<MemoryUse>(MA) || DL))) && \"We have memory affecting instructions \" \"in this block but they are not in the \" \"access list or defs list\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1982, __PRETTY_FUNCTION__)) | ||||||
1982 | "access list or defs list")(((!MA || (AL && (isa<MemoryUse>(MA) || DL))) && "We have memory affecting instructions " "in this block but they are not in the " "access list or defs list") ? static_cast<void> (0) : __assert_fail ("(!MA || (AL && (isa<MemoryUse>(MA) || DL))) && \"We have memory affecting instructions \" \"in this block but they are not in the \" \"access list or defs list\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1982, __PRETTY_FUNCTION__)); | ||||||
1983 | if (MA) { | ||||||
1984 | ActualAccesses.push_back(MA); | ||||||
1985 | if (isa<MemoryDef>(MA)) | ||||||
1986 | ActualDefs.push_back(MA); | ||||||
1987 | } | ||||||
1988 | } | ||||||
1989 | // Either we hit the assert, really have no accesses, or we have both | ||||||
1990 | // accesses and an access list. | ||||||
1991 | // Same with defs. | ||||||
1992 | if (!AL && !DL) | ||||||
1993 | continue; | ||||||
1994 | assert(AL->size() == ActualAccesses.size() &&((AL->size() == ActualAccesses.size() && "We don't have the same number of accesses in the block as on the " "access list") ? static_cast<void> (0) : __assert_fail ("AL->size() == ActualAccesses.size() && \"We don't have the same number of accesses in the block as on the \" \"access list\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1996, __PRETTY_FUNCTION__)) | ||||||
1995 | "We don't have the same number of accesses in the block as on the "((AL->size() == ActualAccesses.size() && "We don't have the same number of accesses in the block as on the " "access list") ? static_cast<void> (0) : __assert_fail ("AL->size() == ActualAccesses.size() && \"We don't have the same number of accesses in the block as on the \" \"access list\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1996, __PRETTY_FUNCTION__)) | ||||||
1996 | "access list")((AL->size() == ActualAccesses.size() && "We don't have the same number of accesses in the block as on the " "access list") ? static_cast<void> (0) : __assert_fail ("AL->size() == ActualAccesses.size() && \"We don't have the same number of accesses in the block as on the \" \"access list\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1996, __PRETTY_FUNCTION__)); | ||||||
1997 | assert((DL || ActualDefs.size() == 0) &&(((DL || ActualDefs.size() == 0) && "Either we should have a defs list, or we should have no defs" ) ? static_cast<void> (0) : __assert_fail ("(DL || ActualDefs.size() == 0) && \"Either we should have a defs list, or we should have no defs\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1998, __PRETTY_FUNCTION__)) | ||||||
1998 | "Either we should have a defs list, or we should have no defs")(((DL || ActualDefs.size() == 0) && "Either we should have a defs list, or we should have no defs" ) ? static_cast<void> (0) : __assert_fail ("(DL || ActualDefs.size() == 0) && \"Either we should have a defs list, or we should have no defs\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 1998, __PRETTY_FUNCTION__)); | ||||||
1999 | assert((!DL || DL->size() == ActualDefs.size()) &&(((!DL || DL->size() == ActualDefs.size()) && "We don't have the same number of defs in the block as on the " "def list") ? static_cast<void> (0) : __assert_fail ("(!DL || DL->size() == ActualDefs.size()) && \"We don't have the same number of defs in the block as on the \" \"def list\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2001, __PRETTY_FUNCTION__)) | ||||||
2000 | "We don't have the same number of defs in the block as on the "(((!DL || DL->size() == ActualDefs.size()) && "We don't have the same number of defs in the block as on the " "def list") ? static_cast<void> (0) : __assert_fail ("(!DL || DL->size() == ActualDefs.size()) && \"We don't have the same number of defs in the block as on the \" \"def list\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2001, __PRETTY_FUNCTION__)) | ||||||
2001 | "def list")(((!DL || DL->size() == ActualDefs.size()) && "We don't have the same number of defs in the block as on the " "def list") ? static_cast<void> (0) : __assert_fail ("(!DL || DL->size() == ActualDefs.size()) && \"We don't have the same number of defs in the block as on the \" \"def list\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2001, __PRETTY_FUNCTION__)); | ||||||
2002 | auto ALI = AL->begin(); | ||||||
2003 | auto AAI = ActualAccesses.begin(); | ||||||
2004 | while (ALI != AL->end() && AAI != ActualAccesses.end()) { | ||||||
2005 | assert(&*ALI == *AAI && "Not the same accesses in the same order")((&*ALI == *AAI && "Not the same accesses in the same order" ) ? static_cast<void> (0) : __assert_fail ("&*ALI == *AAI && \"Not the same accesses in the same order\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2005, __PRETTY_FUNCTION__)); | ||||||
2006 | ++ALI; | ||||||
2007 | ++AAI; | ||||||
2008 | } | ||||||
2009 | ActualAccesses.clear(); | ||||||
2010 | if (DL) { | ||||||
2011 | auto DLI = DL->begin(); | ||||||
2012 | auto ADI = ActualDefs.begin(); | ||||||
2013 | while (DLI != DL->end() && ADI != ActualDefs.end()) { | ||||||
2014 | assert(&*DLI == *ADI && "Not the same defs in the same order")((&*DLI == *ADI && "Not the same defs in the same order" ) ? static_cast<void> (0) : __assert_fail ("&*DLI == *ADI && \"Not the same defs in the same order\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2014, __PRETTY_FUNCTION__)); | ||||||
2015 | ++DLI; | ||||||
2016 | ++ADI; | ||||||
2017 | } | ||||||
2018 | } | ||||||
2019 | ActualDefs.clear(); | ||||||
2020 | } | ||||||
2021 | #endif | ||||||
2022 | } | ||||||
2023 | |||||||
/// Verify the domination properties of MemorySSA by checking that each
/// definition dominates all of its uses.
///
/// Entirely compiled out in NDEBUG builds.
void MemorySSA::verifyDomination(Function &F) const {
#ifndef NDEBUG
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks
    if (MemoryPhi *MP = getMemoryAccess(&B))
      for (const Use &U : MP->uses())
        assert(dominates(MP, U) && "Memory PHI does not dominate it's uses")((dominates(MP, U) && "Memory PHI does not dominate it's uses" ) ? static_cast<void> (0) : __assert_fail ("dominates(MP, U) && \"Memory PHI does not dominate it's uses\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2032, __PRETTY_FUNCTION__));

    for (Instruction &I : B) {
      // Only MemoryDefs are checked here: a MemoryUse has no users of its
      // own, so there is nothing to dominate.
      MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
      if (!MD)
        continue;

      for (const Use &U : MD->uses())
        assert(dominates(MD, U) && "Memory Def does not dominate it's uses")((dominates(MD, U) && "Memory Def does not dominate it's uses" ) ? static_cast<void> (0) : __assert_fail ("dominates(MD, U) && \"Memory Def does not dominate it's uses\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2040, __PRETTY_FUNCTION__));
    }
  }
#endif
}
2045 | |||||||
/// Verify the def-use lists in MemorySSA, by verifying that \p Use
/// appears in the use list of \p Def.
///
/// \param Def the defining access (may be null for the live-on-entry case).
/// \param Use the access expected to appear in \p Def's use list.
void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
#ifndef NDEBUG
  // The live on entry use may cause us to get a NULL def here
  if (!Def)
    assert(isLiveOnEntryDef(Use) &&((isLiveOnEntryDef(Use) && "Null def but use not point to live on entry def" ) ? static_cast<void> (0) : __assert_fail ("isLiveOnEntryDef(Use) && \"Null def but use not point to live on entry def\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2053, __PRETTY_FUNCTION__))
           "Null def but use not point to live on entry def")((isLiveOnEntryDef(Use) && "Null def but use not point to live on entry def" ) ? static_cast<void> (0) : __assert_fail ("isLiveOnEntryDef(Use) && \"Null def but use not point to live on entry def\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2053, __PRETTY_FUNCTION__));
  else
    assert(is_contained(Def->users(), Use) &&((is_contained(Def->users(), Use) && "Did not find use in def's use list" ) ? static_cast<void> (0) : __assert_fail ("is_contained(Def->users(), Use) && \"Did not find use in def's use list\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2056, __PRETTY_FUNCTION__))
           "Did not find use in def's use list")((is_contained(Def->users(), Use) && "Did not find use in def's use list" ) ? static_cast<void> (0) : __assert_fail ("is_contained(Def->users(), Use) && \"Did not find use in def's use list\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2056, __PRETTY_FUNCTION__));
#endif
}
2059 | |||||||
/// Verify the immediate use information, by walking all the memory
/// accesses and verifying that, for each use, it appears in the
/// appropriate def's use list
///
/// Only active in expensive-checks debug builds (both NDEBUG off and
/// EXPENSIVE_CHECKS defined).
void MemorySSA::verifyDefUses(Function &F) const {
#if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks
    if (MemoryPhi *Phi = getMemoryAccess(&B)) {
      // A MemoryPhi must have exactly one incoming value per CFG predecessor.
      assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(((Phi->getNumOperands() == static_cast<unsigned>(std ::distance( pred_begin(&B), pred_end(&B))) && "Incomplete MemoryPhi Node") ? static_cast<void> (0) : __assert_fail ("Phi->getNumOperands() == static_cast<unsigned>(std::distance( pred_begin(&B), pred_end(&B))) && \"Incomplete MemoryPhi Node\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2070, __PRETTY_FUNCTION__))
                 pred_begin(&B), pred_end(&B))) &&((Phi->getNumOperands() == static_cast<unsigned>(std ::distance( pred_begin(&B), pred_end(&B))) && "Incomplete MemoryPhi Node") ? static_cast<void> (0) : __assert_fail ("Phi->getNumOperands() == static_cast<unsigned>(std::distance( pred_begin(&B), pred_end(&B))) && \"Incomplete MemoryPhi Node\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2070, __PRETTY_FUNCTION__))
             "Incomplete MemoryPhi Node")((Phi->getNumOperands() == static_cast<unsigned>(std ::distance( pred_begin(&B), pred_end(&B))) && "Incomplete MemoryPhi Node") ? static_cast<void> (0) : __assert_fail ("Phi->getNumOperands() == static_cast<unsigned>(std::distance( pred_begin(&B), pred_end(&B))) && \"Incomplete MemoryPhi Node\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2070, __PRETTY_FUNCTION__));
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
        // Each incoming value must record this phi as a user, and each
        // incoming block must actually be a CFG predecessor of B.
        verifyUseInDefs(Phi->getIncomingValue(I), Phi);
        assert(find(predecessors(&B), Phi->getIncomingBlock(I)) !=((find(predecessors(&B), Phi->getIncomingBlock(I)) != pred_end (&B) && "Incoming phi block not a block predecessor" ) ? static_cast<void> (0) : __assert_fail ("find(predecessors(&B), Phi->getIncomingBlock(I)) != pred_end(&B) && \"Incoming phi block not a block predecessor\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2075, __PRETTY_FUNCTION__))
                   pred_end(&B) &&((find(predecessors(&B), Phi->getIncomingBlock(I)) != pred_end (&B) && "Incoming phi block not a block predecessor" ) ? static_cast<void> (0) : __assert_fail ("find(predecessors(&B), Phi->getIncomingBlock(I)) != pred_end(&B) && \"Incoming phi block not a block predecessor\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2075, __PRETTY_FUNCTION__))
               "Incoming phi block not a block predecessor")((find(predecessors(&B), Phi->getIncomingBlock(I)) != pred_end (&B) && "Incoming phi block not a block predecessor" ) ? static_cast<void> (0) : __assert_fail ("find(predecessors(&B), Phi->getIncomingBlock(I)) != pred_end(&B) && \"Incoming phi block not a block predecessor\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2075, __PRETTY_FUNCTION__));
      }
    }

    for (Instruction &I : B) {
      // For uses/defs, check the single defining-access edge.
      if (MemoryUseOrDef *MA = getMemoryAccess(&I)) {
        verifyUseInDefs(MA->getDefiningAccess(), MA);
      }
    }
  }
#endif
}
2087 | |||||||
/// Perform a local numbering on blocks so that instruction ordering can be
/// determined in constant time.
/// TODO: We currently just number in order. If we numbered by N, we could
/// allow at least N-1 sequences of insertBefore or insertAfter (and at least
/// log2(N) sequences of mixed before and after) without needing to invalidate
/// the numbering.
void MemorySSA::renumberBlock(const BasicBlock *B) const {
  // The pre-increment ensures the numbers really start at 1.
  unsigned long CurrentNumber = 0;
  const AccessList *AL = getBlockAccesses(B);
  assert(AL != nullptr && "Asking to renumber an empty block")((AL != nullptr && "Asking to renumber an empty block" ) ? static_cast<void> (0) : __assert_fail ("AL != nullptr && \"Asking to renumber an empty block\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2098, __PRETTY_FUNCTION__));
  // Assign increasing numbers in list order, then record the block's
  // numbering as valid so locallyDominates can trust the cached values.
  for (const auto &I : *AL)
    BlockNumbering[&I] = ++CurrentNumber;
  BlockNumberingValid.insert(B);
}
2103 | |||||||
/// Determine, for two memory accesses in the same block,
/// whether \p Dominator dominates \p Dominatee.
/// \returns True if \p Dominator dominates \p Dominatee.
///
/// Lazily (re)numbers the block on first query so the comparison is a
/// constant-time lookup afterwards.
bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
                                 const MemoryAccess *Dominatee) const {
  const BasicBlock *DominatorBlock = Dominator->getBlock();

  assert((DominatorBlock == Dominatee->getBlock()) &&(((DominatorBlock == Dominatee->getBlock()) && "Asking for local domination when accesses are in different blocks!" ) ? static_cast<void> (0) : __assert_fail ("(DominatorBlock == Dominatee->getBlock()) && \"Asking for local domination when accesses are in different blocks!\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2112, __PRETTY_FUNCTION__))
         "Asking for local domination when accesses are in different blocks!")(((DominatorBlock == Dominatee->getBlock()) && "Asking for local domination when accesses are in different blocks!" ) ? static_cast<void> (0) : __assert_fail ("(DominatorBlock == Dominatee->getBlock()) && \"Asking for local domination when accesses are in different blocks!\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2112, __PRETTY_FUNCTION__));
  // A node dominates itself.
  if (Dominatee == Dominator)
    return true;

  // When Dominatee is defined on function entry, it is not dominated by another
  // memory access.
  if (isLiveOnEntryDef(Dominatee))
    return false;

  // When Dominator is defined on function entry, it dominates the other memory
  // access.
  if (isLiveOnEntryDef(Dominator))
    return true;

  // Number the block on demand; renumberBlock marks it valid.
  if (!BlockNumberingValid.count(DominatorBlock))
    renumberBlock(DominatorBlock);

  unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
  // All numbers start with 1
  assert(DominatorNum != 0 && "Block was not numbered properly")((DominatorNum != 0 && "Block was not numbered properly" ) ? static_cast<void> (0) : __assert_fail ("DominatorNum != 0 && \"Block was not numbered properly\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2132, __PRETTY_FUNCTION__));
  unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
  assert(DominateeNum != 0 && "Block was not numbered properly")((DominateeNum != 0 && "Block was not numbered properly" ) ? static_cast<void> (0) : __assert_fail ("DominateeNum != 0 && \"Block was not numbered properly\"" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2134, __PRETTY_FUNCTION__));
  // Lower number == earlier in the access list == dominates.
  return DominatorNum < DominateeNum;
}
2137 | |||||||
2138 | bool MemorySSA::dominates(const MemoryAccess *Dominator, | ||||||
2139 | const MemoryAccess *Dominatee) const { | ||||||
2140 | if (Dominator == Dominatee) | ||||||
2141 | return true; | ||||||
2142 | |||||||
2143 | if (isLiveOnEntryDef(Dominatee)) | ||||||
2144 | return false; | ||||||
2145 | |||||||
2146 | if (Dominator->getBlock() != Dominatee->getBlock()) | ||||||
2147 | return DT->dominates(Dominator->getBlock(), Dominatee->getBlock()); | ||||||
2148 | return locallyDominates(Dominator, Dominatee); | ||||||
2149 | } | ||||||
2150 | |||||||
2151 | bool MemorySSA::dominates(const MemoryAccess *Dominator, | ||||||
2152 | const Use &Dominatee) const { | ||||||
2153 | if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) { | ||||||
2154 | BasicBlock *UseBB = MP->getIncomingBlock(Dominatee); | ||||||
2155 | // The def must dominate the incoming block of the phi. | ||||||
2156 | if (UseBB != Dominator->getBlock()) | ||||||
2157 | return DT->dominates(Dominator->getBlock(), UseBB); | ||||||
2158 | // If the UseBB and the DefBB are the same, compare locally. | ||||||
2159 | return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee)); | ||||||
2160 | } | ||||||
2161 | // If it's not a PHI node use, the normal dominates can already handle it. | ||||||
2162 | return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser())); | ||||||
2163 | } | ||||||
2164 | |||||||
// Printed by the print() methods below in place of an access ID whenever the
// (defining) access is the live-on-entry def.
const static char LiveOnEntryStr[] = "liveOnEntry";
2166 | |||||||
/// Dispatch printing to the concrete subclass based on the value ID.
void MemoryAccess::print(raw_ostream &OS) const {
  switch (getValueID()) {
  case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
  case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
  case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
  }
  llvm_unreachable("invalid value id")::llvm::llvm_unreachable_internal("invalid value id", "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2173);
}
2175 | |||||||
2176 | void MemoryDef::print(raw_ostream &OS) const { | ||||||
2177 | MemoryAccess *UO = getDefiningAccess(); | ||||||
2178 | |||||||
2179 | auto printID = [&OS](MemoryAccess *A) { | ||||||
2180 | if (A && A->getID()) | ||||||
2181 | OS << A->getID(); | ||||||
2182 | else | ||||||
2183 | OS << LiveOnEntryStr; | ||||||
2184 | }; | ||||||
2185 | |||||||
2186 | OS << getID() << " = MemoryDef("; | ||||||
2187 | printID(UO); | ||||||
2188 | OS << ")"; | ||||||
2189 | |||||||
2190 | if (isOptimized()) { | ||||||
2191 | OS << "->"; | ||||||
2192 | printID(getOptimized()); | ||||||
2193 | |||||||
2194 | if (Optional<AliasResult> AR = getOptimizedAccessType()) | ||||||
2195 | OS << " " << *AR; | ||||||
2196 | } | ||||||
2197 | } | ||||||
2198 | |||||||
2199 | void MemoryPhi::print(raw_ostream &OS) const { | ||||||
2200 | bool First = true; | ||||||
2201 | OS << getID() << " = MemoryPhi("; | ||||||
2202 | for (const auto &Op : operands()) { | ||||||
2203 | BasicBlock *BB = getIncomingBlock(Op); | ||||||
2204 | MemoryAccess *MA = cast<MemoryAccess>(Op); | ||||||
2205 | if (!First) | ||||||
2206 | OS << ','; | ||||||
2207 | else | ||||||
2208 | First = false; | ||||||
2209 | |||||||
2210 | OS << '{'; | ||||||
2211 | if (BB->hasName()) | ||||||
2212 | OS << BB->getName(); | ||||||
2213 | else | ||||||
2214 | BB->printAsOperand(OS, false); | ||||||
2215 | OS << ','; | ||||||
2216 | if (unsigned ID = MA->getID()) | ||||||
2217 | OS << ID; | ||||||
2218 | else | ||||||
2219 | OS << LiveOnEntryStr; | ||||||
2220 | OS << '}'; | ||||||
2221 | } | ||||||
2222 | OS << ')'; | ||||||
2223 | } | ||||||
2224 | |||||||
2225 | void MemoryUse::print(raw_ostream &OS) const { | ||||||
2226 | MemoryAccess *UO = getDefiningAccess(); | ||||||
2227 | OS << "MemoryUse("; | ||||||
2228 | if (UO && UO->getID()) | ||||||
2229 | OS << UO->getID(); | ||||||
2230 | else | ||||||
2231 | OS << LiveOnEntryStr; | ||||||
2232 | OS << ')'; | ||||||
2233 | |||||||
2234 | if (Optional<AliasResult> AR = getOptimizedAccessType()) | ||||||
2235 | OS << " " << *AR; | ||||||
2236 | } | ||||||
2237 | |||||||
/// Print this access followed by a newline to the debug stream. The body is
/// compiled out in release builds unless LLVM_ENABLE_DUMP is set.
void MemoryAccess::dump() const {
  // Cannot completely remove virtual function even in release mode.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  print(dbgs());
  dbgs() << "\n";
#endif
}
2245 | |||||||
// Pass identification; the address of ID serves as the pass's unique key.
char MemorySSAPrinterLegacyPass::ID = 0;
2247 | |||||||
// Register this pass with the global pass registry on construction.
MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
  initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
}
2251 | |||||||
2252 | void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const { | ||||||
2253 | AU.setPreservesAll(); | ||||||
2254 | AU.addRequired<MemorySSAWrapperPass>(); | ||||||
2255 | } | ||||||
2256 | |||||||
2257 | bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) { | ||||||
2258 | auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA(); | ||||||
2259 | MSSA.print(dbgs()); | ||||||
2260 | if (VerifyMemorySSA) | ||||||
2261 | MSSA.verifyMemorySSA(); | ||||||
2262 | return false; | ||||||
2263 | } | ||||||
2264 | |||||||
// Unique key identifying MemorySSAAnalysis to the new pass manager.
AnalysisKey MemorySSAAnalysis::Key;
2266 | |||||||
2267 | MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F, | ||||||
2268 | FunctionAnalysisManager &AM) { | ||||||
2269 | auto &DT = AM.getResult<DominatorTreeAnalysis>(F); | ||||||
2270 | auto &AA = AM.getResult<AAManager>(F); | ||||||
2271 | return MemorySSAAnalysis::Result(std::make_unique<MemorySSA>(F, &AA, &DT)); | ||||||
2272 | } | ||||||
2273 | |||||||
2274 | bool MemorySSAAnalysis::Result::invalidate( | ||||||
2275 | Function &F, const PreservedAnalyses &PA, | ||||||
2276 | FunctionAnalysisManager::Invalidator &Inv) { | ||||||
2277 | auto PAC = PA.getChecker<MemorySSAAnalysis>(); | ||||||
2278 | return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || | ||||||
2279 | Inv.invalidate<AAManager>(F, PA) || | ||||||
2280 | Inv.invalidate<DominatorTreeAnalysis>(F, PA); | ||||||
2281 | } | ||||||
2282 | |||||||
2283 | PreservedAnalyses MemorySSAPrinterPass::run(Function &F, | ||||||
2284 | FunctionAnalysisManager &AM) { | ||||||
2285 | OS << "MemorySSA for function: " << F.getName() << "\n"; | ||||||
2286 | AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS); | ||||||
2287 | |||||||
2288 | return PreservedAnalyses::all(); | ||||||
2289 | } | ||||||
2290 | |||||||
2291 | PreservedAnalyses MemorySSAVerifierPass::run(Function &F, | ||||||
2292 | FunctionAnalysisManager &AM) { | ||||||
2293 | AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA(); | ||||||
2294 | |||||||
2295 | return PreservedAnalyses::all(); | ||||||
2296 | } | ||||||
2297 | |||||||
// Pass identification; the address of ID serves as the pass's unique key.
char MemorySSAWrapperPass::ID = 0;
2299 | |||||||
// Register this pass with the global pass registry on construction.
MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
  initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
}
2303 | |||||||
// Drop the owned MemorySSA when the pass manager releases this pass's memory.
void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
2305 | |||||||
2306 | void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { | ||||||
2307 | AU.setPreservesAll(); | ||||||
2308 | AU.addRequiredTransitive<DominatorTreeWrapperPass>(); | ||||||
2309 | AU.addRequiredTransitive<AAResultsWrapperPass>(); | ||||||
2310 | } | ||||||
2311 | |||||||
2312 | bool MemorySSAWrapperPass::runOnFunction(Function &F) { | ||||||
2313 | auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); | ||||||
2314 | auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults(); | ||||||
2315 | MSSA.reset(new MemorySSA(F, &AA, &DT)); | ||||||
2316 | return false; | ||||||
2317 | } | ||||||
2318 | |||||||
// Forward verification requests from the pass manager to MemorySSA itself.
void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }
2320 | |||||||
// Print the owned MemorySSA; the Module argument required by the Pass
// interface is unused here.
void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
  MSSA->print(OS);
}
2324 | |||||||
// A walker is bound to the MemorySSA instance it will traverse.
MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
2326 | |||||||
/// Walk the use-def chains starting at \p StartingAccess and find
/// the MemoryAccess that actually clobbers Loc.
///
/// \returns our clobbering memory access
///
/// Unlike the overload without a location, this variant never caches its
/// result on the access and never sets SkipSelf in the query.
template <typename AliasAnalysisType>
MemoryAccess *
MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
    MemoryAccess *StartingAccess, const MemoryLocation &Loc,
    unsigned &UpwardWalkLimit) {
  // Phis are returned unchanged; this walk only handles uses/defs.
  if (isa<MemoryPhi>(StartingAccess))
    return StartingAccess;

  auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
  // Nothing clobbers the live-on-entry def.
  if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
    return StartingUseOrDef;

  Instruction *I = StartingUseOrDef->getMemoryInst();

  // Conservatively, fences are always clobbers, so don't perform the walk if we
  // hit a fence.
  if (!isa<CallBase>(I) && I->isFenceLike())
    return StartingUseOrDef;

  UpwardsMemoryQuery Q;
  Q.OriginalAccess = StartingUseOrDef;
  Q.StartingLoc = Loc;
  Q.Inst = I;
  Q.IsCall = false;

  // Unlike the other function, do not walk to the def of a def, because we are
  // handed something we already believe is the clobbering access.
  // We never set SkipSelf to true in Q in this method.
  MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
                                     ? StartingUseOrDef->getDefiningAccess()
                                     : StartingUseOrDef;

  MemoryAccess *Clobber =
      Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << "Starting Memory SSA clobber for " << *I << " is "; } } while (false);
  LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << *StartingUseOrDef << "\n" ; } } while (false);
  LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << "Final Memory SSA clobber for " << *I << " is "; } } while (false);
  LLVM_DEBUG(dbgs() << *Clobber << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << *Clobber << "\n"; } } while (false);
  return Clobber;
}
2371 | |||||||
/// Find (and cache) the clobbering access for \p MA, optionally skipping
/// \p MA itself when it is a MemoryDef (\p SkipSelf). The result and its
/// alias kind are cached on the access via setOptimized /
/// setOptimizedAccessType.
template <typename AliasAnalysisType>
MemoryAccess *
MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
    MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf) {
  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
  // If this is a MemoryPhi, we can't do anything.
  if (!StartingAccess)
    return MA;

  bool IsOptimized = false;

  // If this is an already optimized use or def, return the optimized result.
  // Note: Currently, we store the optimized def result in a separate field,
  // since we can't use the defining access.
  if (StartingAccess->isOptimized()) {
    if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
      return StartingAccess->getOptimized();
    IsOptimized = true;
  }

  const Instruction *I = StartingAccess->getMemoryInst();
  // We can't sanely do anything with a fence, since they conservatively clobber
  // all memory, and have no locations to get pointers from to try to
  // disambiguate.
  if (!isa<CallBase>(I) && I->isFenceLike())
    return StartingAccess;

  UpwardsMemoryQuery Q(I, StartingAccess);

  // Trivial case: the use can be optimized straight to the live-on-entry
  // def; cache that and return.
  if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) {
    MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
    StartingAccess->setOptimized(LiveOnEntry);
    StartingAccess->setOptimizedAccessType(None);
    return LiveOnEntry;
  }

  MemoryAccess *OptimizedAccess;
  if (!IsOptimized) {
    // Start with the thing we already think clobbers this location
    MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();

    // At this point, DefiningAccess may be the live on entry def.
    // If it is, we will not get a better result.
    if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
      StartingAccess->setOptimized(DefiningAccess);
      StartingAccess->setOptimizedAccessType(None);
      return DefiningAccess;
    }

    OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
    // Cache the result; record MustAlias only when the walk proved it.
    StartingAccess->setOptimized(OptimizedAccess);
    if (MSSA->isLiveOnEntryDef(OptimizedAccess))
      StartingAccess->setOptimizedAccessType(None);
    else if (Q.AR == MustAlias)
      StartingAccess->setOptimizedAccessType(MustAlias);
  } else
    OptimizedAccess = StartingAccess->getOptimized();

  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << "Starting Memory SSA clobber for " << *I << " is "; } } while (false);
  LLVM_DEBUG(dbgs() << *StartingAccess << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << *StartingAccess << "\n" ; } } while (false);
  LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << "Optimized Memory SSA clobber for " << *I << " is "; } } while (false);
  LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << *OptimizedAccess << "\n" ; } } while (false);

  MemoryAccess *Result;
  // When asked to skip the def itself and the cached clobber is a phi,
  // continue the walk one step further past that phi (budget permitting).
  if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
      isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
    assert(isa<MemoryDef>(Q.OriginalAccess))((isa<MemoryDef>(Q.OriginalAccess)) ? static_cast<void > (0) : __assert_fail ("isa<MemoryDef>(Q.OriginalAccess)" , "/build/llvm-toolchain-snapshot-10~svn373386/lib/Analysis/MemorySSA.cpp" , 2438, __PRETTY_FUNCTION__));
    Q.SkipSelfAccess = true;
    Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit);
  } else
    Result = OptimizedAccess;

  LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf; } } while (false);
  LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << "] for " << *I << " is " << *Result << "\n"; } } while (false);

  return Result;
}
2449 | |||||||
2450 | MemoryAccess * | ||||||
2451 | DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) { | ||||||
2452 | if (auto *Use = dyn_cast<MemoryUseOrDef>(MA)) | ||||||
2453 | return Use->getDefiningAccess(); | ||||||
2454 | return MA; | ||||||
2455 | } | ||||||
2456 | |||||||
2457 | MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess( | ||||||
2458 | MemoryAccess *StartingAccess, const MemoryLocation &) { | ||||||
2459 | if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess)) | ||||||
2460 | return Use->getDefiningAccess(); | ||||||
2461 | return StartingAccess; | ||||||
2462 | } | ||||||
2463 | |||||||
// DerivedUser deletion hook: cast back to the concrete type so the right
// destructor/operator delete runs.
void MemoryPhi::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryPhi *>(Self);
}
2467 | |||||||
// DerivedUser deletion hook: cast back to the concrete type so the right
// destructor/operator delete runs.
void MemoryDef::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryDef *>(Self);
}
2471 | |||||||
// DerivedUser deletion hook: cast back to the concrete type so the right
// destructor/operator delete runs.
void MemoryUse::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryUse *>(Self);
}
1 | //===- Optional.h - Simple variant for passing optional values --*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file provides Optional, a template class modeled in the spirit of |
10 | // OCaml's 'opt' variant. The idea is to strongly type whether or not |
11 | // a value can be optional. |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #ifndef LLVM_ADT_OPTIONAL_H |
16 | #define LLVM_ADT_OPTIONAL_H |
17 | |
18 | #include "llvm/ADT/None.h" |
19 | #include "llvm/Support/Compiler.h" |
20 | #include "llvm/Support/type_traits.h" |
21 | #include <cassert> |
22 | #include <memory> |
23 | #include <new> |
24 | #include <utility> |
25 | |
26 | namespace llvm { |
27 | |
28 | class raw_ostream; |
29 | |
30 | namespace optional_detail { |
31 | |
/// Tag type used to select OptionalStorage's in-place (emplacing) constructor.
struct in_place_t {};
33 | |
34 | /// Storage for any type. |
35 | template <typename T, bool = is_trivially_copyable<T>::value> |
36 | class OptionalStorage { |
37 | union { |
38 | char empty; |
39 | T value; |
40 | }; |
41 | bool hasVal; |
42 | |
43 | public: |
44 | ~OptionalStorage() { reset(); } |
45 | |
46 | OptionalStorage() noexcept : empty(), hasVal(false) {} |
47 | |
48 | OptionalStorage(OptionalStorage const &other) : OptionalStorage() { |
49 | if (other.hasValue()) { |
50 | emplace(other.value); |
51 | } |
52 | } |
53 | OptionalStorage(OptionalStorage &&other) : OptionalStorage() { |
54 | if (other.hasValue()) { |
55 | emplace(std::move(other.value)); |
56 | } |
57 | } |
58 | |
59 | template <class... Args> |
60 | explicit OptionalStorage(in_place_t, Args &&... args) |
61 | : value(std::forward<Args>(args)...), hasVal(true) {} |
62 | |
63 | void reset() noexcept { |
64 | if (hasVal) { |
65 | value.~T(); |
66 | hasVal = false; |
67 | } |
68 | } |
69 | |
70 | bool hasValue() const noexcept { return hasVal; } |
71 | |
72 | T &getValue() LLVM_LVALUE_FUNCTION& noexcept { |
73 | assert(hasVal)((hasVal) ? static_cast<void> (0) : __assert_fail ("hasVal" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/Optional.h" , 73, __PRETTY_FUNCTION__)); |
74 | return value; |
75 | } |
76 | T const &getValue() const LLVM_LVALUE_FUNCTION& noexcept { |
77 | assert(hasVal)((hasVal) ? static_cast<void> (0) : __assert_fail ("hasVal" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/Optional.h" , 77, __PRETTY_FUNCTION__)); |
78 | return value; |
79 | } |
80 | #if LLVM_HAS_RVALUE_REFERENCE_THIS1 |
81 | T &&getValue() && noexcept { |
82 | assert(hasVal)((hasVal) ? static_cast<void> (0) : __assert_fail ("hasVal" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/Optional.h" , 82, __PRETTY_FUNCTION__)); |
83 | return std::move(value); |
84 | } |
85 | #endif |
86 | |
87 | template <class... Args> void emplace(Args &&... args) { |
88 | reset(); |
89 | ::new ((void *)std::addressof(value)) T(std::forward<Args>(args)...); |
90 | hasVal = true; |
91 | } |
92 | |
93 | OptionalStorage &operator=(T const &y) { |
94 | if (hasValue()) { |
95 | value = y; |
96 | } else { |
97 | ::new ((void *)std::addressof(value)) T(y); |
98 | hasVal = true; |
99 | } |
100 | return *this; |
101 | } |
102 | OptionalStorage &operator=(T &&y) { |
103 | if (hasValue()) { |
104 | value = std::move(y); |
105 | } else { |
106 | ::new ((void *)std::addressof(value)) T(std::move(y)); |
107 | hasVal = true; |
108 | } |
109 | return *this; |
110 | } |
111 | |
112 | OptionalStorage &operator=(OptionalStorage const &other) { |
113 | if (other.hasValue()) { |
114 | if (hasValue()) { |
115 | value = other.value; |
116 | } else { |
117 | ::new ((void *)std::addressof(value)) T(other.value); |
118 | hasVal = true; |
119 | } |
120 | } else { |
121 | reset(); |
122 | } |
123 | return *this; |
124 | } |
125 | |
126 | OptionalStorage &operator=(OptionalStorage &&other) { |
127 | if (other.hasValue()) { |
128 | if (hasValue()) { |
129 | value = std::move(other.value); |
130 | } else { |
131 | ::new ((void *)std::addressof(value)) T(std::move(other.value)); |
132 | hasVal = true; |
133 | } |
134 | } else { |
135 | reset(); |
136 | } |
137 | return *this; |
138 | } |
139 | }; |
140 | |
141 | template <typename T> class OptionalStorage<T, true> { |
142 | union { |
143 | char empty; |
144 | T value; |
145 | }; |
146 | bool hasVal = false; |
147 | |
148 | public: |
149 | ~OptionalStorage() = default; |
150 | |
151 | OptionalStorage() noexcept : empty{} {} |
152 | |
153 | OptionalStorage(OptionalStorage const &other) = default; |
154 | OptionalStorage(OptionalStorage &&other) = default; |
155 | |
156 | OptionalStorage &operator=(OptionalStorage const &other) = default; |
157 | OptionalStorage &operator=(OptionalStorage &&other) = default; |
158 | |
159 | template <class... Args> |
160 | explicit OptionalStorage(in_place_t, Args &&... args) |
161 | : value(std::forward<Args>(args)...), hasVal(true) {} |
162 | |
163 | void reset() noexcept { |
164 | if (hasVal) { |
165 | value.~T(); |
166 | hasVal = false; |
167 | } |
168 | } |
169 | |
170 | bool hasValue() const noexcept { return hasVal; } |
171 | |
172 | T &getValue() LLVM_LVALUE_FUNCTION& noexcept { |
173 | assert(hasVal)((hasVal) ? static_cast<void> (0) : __assert_fail ("hasVal" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/Optional.h" , 173, __PRETTY_FUNCTION__)); |
174 | return value; |
175 | } |
176 | T const &getValue() const LLVM_LVALUE_FUNCTION& noexcept { |
177 | assert(hasVal)((hasVal) ? static_cast<void> (0) : __assert_fail ("hasVal" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/Optional.h" , 177, __PRETTY_FUNCTION__)); |
178 | return value; |
179 | } |
180 | #if LLVM_HAS_RVALUE_REFERENCE_THIS1 |
181 | T &&getValue() && noexcept { |
182 | assert(hasVal)((hasVal) ? static_cast<void> (0) : __assert_fail ("hasVal" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/Optional.h" , 182, __PRETTY_FUNCTION__)); |
183 | return std::move(value); |
184 | } |
185 | #endif |
186 | |
187 | template <class... Args> void emplace(Args &&... args) { |
188 | reset(); |
189 | ::new ((void *)std::addressof(value)) T(std::forward<Args>(args)...); |
190 | hasVal = true; |
191 | } |
192 | |
193 | OptionalStorage &operator=(T const &y) { |
194 | if (hasValue()) { |
195 | value = y; |
196 | } else { |
197 | ::new ((void *)std::addressof(value)) T(y); |
198 | hasVal = true; |
199 | } |
200 | return *this; |
201 | } |
202 | OptionalStorage &operator=(T &&y) { |
203 | if (hasValue()) { |
204 | value = std::move(y); |
205 | } else { |
206 | ::new ((void *)std::addressof(value)) T(std::move(y)); |
207 | hasVal = true; |
208 | } |
209 | return *this; |
210 | } |
211 | }; |
212 | |
213 | } // namespace optional_detail |
214 | |
215 | template <typename T> class Optional { |
216 | optional_detail::OptionalStorage<T> Storage; |
217 | |
218 | public: |
219 | using value_type = T; |
220 | |
221 | constexpr Optional() {} |
222 | constexpr Optional(NoneType) {} |
223 | |
224 | Optional(const T &y) : Storage(optional_detail::in_place_t{}, y) {} |
225 | Optional(const Optional &O) = default; |
226 | |
227 | Optional(T &&y) : Storage(optional_detail::in_place_t{}, std::move(y)) {} |
228 | Optional(Optional &&O) = default; |
229 | |
230 | Optional &operator=(T &&y) { |
231 | Storage = std::move(y); |
232 | return *this; |
233 | } |
234 | Optional &operator=(Optional &&O) = default; |
235 | |
236 | /// Create a new object by constructing it in place with the given arguments. |
237 | template <typename... ArgTypes> void emplace(ArgTypes &&... Args) { |
238 | Storage.emplace(std::forward<ArgTypes>(Args)...); |
239 | } |
240 | |
241 | static inline Optional create(const T *y) { |
242 | return y ? Optional(*y) : Optional(); |
243 | } |
244 | |
245 | Optional &operator=(const T &y) { |
246 | Storage = y; |
247 | return *this; |
248 | } |
249 | Optional &operator=(const Optional &O) = default; |
250 | |
251 | void reset() { Storage.reset(); } |
252 | |
253 | const T *getPointer() const { return &Storage.getValue(); } |
254 | T *getPointer() { return &Storage.getValue(); } |
255 | const T &getValue() const LLVM_LVALUE_FUNCTION& { return Storage.getValue(); } |
256 | T &getValue() LLVM_LVALUE_FUNCTION& { return Storage.getValue(); } |
257 | |
258 | explicit operator bool() const { return hasValue(); } |
259 | bool hasValue() const { return Storage.hasValue(); } |
260 | const T *operator->() const { return getPointer(); } |
261 | T *operator->() { return getPointer(); } |
262 | const T &operator*() const LLVM_LVALUE_FUNCTION& { return getValue(); } |
263 | T &operator*() LLVM_LVALUE_FUNCTION& { return getValue(); } |
264 | |
265 | template <typename U> |
266 | constexpr T getValueOr(U &&value) const LLVM_LVALUE_FUNCTION& { |
267 | return hasValue() ? getValue() : std::forward<U>(value); |
268 | } |
269 | |
270 | #if LLVM_HAS_RVALUE_REFERENCE_THIS1 |
271 | T &&getValue() && { return std::move(Storage.getValue()); } |
272 | T &&operator*() && { return std::move(Storage.getValue()); } |
273 | |
274 | template <typename U> |
275 | T getValueOr(U &&value) && { |
276 | return hasValue() ? std::move(getValue()) : std::forward<U>(value); |
277 | } |
278 | #endif |
279 | }; |
280 | |
281 | template <typename T, typename U> |
282 | bool operator==(const Optional<T> &X, const Optional<U> &Y) { |
283 | if (X && Y) |
284 | return *X == *Y; |
285 | return X.hasValue() == Y.hasValue(); |
286 | } |
287 | |
288 | template <typename T, typename U> |
289 | bool operator!=(const Optional<T> &X, const Optional<U> &Y) { |
290 | return !(X == Y); |
291 | } |
292 | |
293 | template <typename T, typename U> |
294 | bool operator<(const Optional<T> &X, const Optional<U> &Y) { |
295 | if (X && Y) |
296 | return *X < *Y; |
297 | return X.hasValue() < Y.hasValue(); |
298 | } |
299 | |
300 | template <typename T, typename U> |
301 | bool operator<=(const Optional<T> &X, const Optional<U> &Y) { |
302 | return !(Y < X); |
303 | } |
304 | |
305 | template <typename T, typename U> |
306 | bool operator>(const Optional<T> &X, const Optional<U> &Y) { |
307 | return Y < X; |
308 | } |
309 | |
310 | template <typename T, typename U> |
311 | bool operator>=(const Optional<T> &X, const Optional<U> &Y) { |
312 | return !(X < Y); |
313 | } |
314 | |
315 | template<typename T> |
316 | bool operator==(const Optional<T> &X, NoneType) { |
317 | return !X; |
318 | } |
319 | |
320 | template<typename T> |
321 | bool operator==(NoneType, const Optional<T> &X) { |
322 | return X == None; |
323 | } |
324 | |
325 | template<typename T> |
326 | bool operator!=(const Optional<T> &X, NoneType) { |
327 | return !(X == None); |
328 | } |
329 | |
330 | template<typename T> |
331 | bool operator!=(NoneType, const Optional<T> &X) { |
332 | return X != None; |
333 | } |
334 | |
335 | template <typename T> bool operator<(const Optional<T> &X, NoneType) { |
336 | return false; |
337 | } |
338 | |
339 | template <typename T> bool operator<(NoneType, const Optional<T> &X) { |
340 | return X.hasValue(); |
341 | } |
342 | |
343 | template <typename T> bool operator<=(const Optional<T> &X, NoneType) { |
344 | return !(None < X); |
345 | } |
346 | |
347 | template <typename T> bool operator<=(NoneType, const Optional<T> &X) { |
348 | return !(X < None); |
349 | } |
350 | |
351 | template <typename T> bool operator>(const Optional<T> &X, NoneType) { |
352 | return None < X; |
353 | } |
354 | |
355 | template <typename T> bool operator>(NoneType, const Optional<T> &X) { |
356 | return X < None; |
357 | } |
358 | |
359 | template <typename T> bool operator>=(const Optional<T> &X, NoneType) { |
360 | return None <= X; |
361 | } |
362 | |
363 | template <typename T> bool operator>=(NoneType, const Optional<T> &X) { |
364 | return X <= None; |
365 | } |
366 | |
367 | template <typename T> bool operator==(const Optional<T> &X, const T &Y) { |
368 | return X && *X == Y; |
369 | } |
370 | |
371 | template <typename T> bool operator==(const T &X, const Optional<T> &Y) { |
372 | return Y && X == *Y; |
373 | } |
374 | |
375 | template <typename T> bool operator!=(const Optional<T> &X, const T &Y) { |
376 | return !(X == Y); |
377 | } |
378 | |
379 | template <typename T> bool operator!=(const T &X, const Optional<T> &Y) { |
380 | return !(X == Y); |
381 | } |
382 | |
383 | template <typename T> bool operator<(const Optional<T> &X, const T &Y) { |
384 | return !X || *X < Y; |
385 | } |
386 | |
387 | template <typename T> bool operator<(const T &X, const Optional<T> &Y) { |
388 | return Y && X < *Y; |
389 | } |
390 | |
391 | template <typename T> bool operator<=(const Optional<T> &X, const T &Y) { |
392 | return !(Y < X); |
393 | } |
394 | |
395 | template <typename T> bool operator<=(const T &X, const Optional<T> &Y) { |
396 | return !(Y < X); |
397 | } |
398 | |
399 | template <typename T> bool operator>(const Optional<T> &X, const T &Y) { |
400 | return Y < X; |
401 | } |
402 | |
403 | template <typename T> bool operator>(const T &X, const Optional<T> &Y) { |
404 | return Y < X; |
405 | } |
406 | |
407 | template <typename T> bool operator>=(const Optional<T> &X, const T &Y) { |
408 | return !(X < Y); |
409 | } |
410 | |
411 | template <typename T> bool operator>=(const T &X, const Optional<T> &Y) { |
412 | return !(X < Y); |
413 | } |
414 | |
415 | raw_ostream &operator<<(raw_ostream &OS, NoneType); |
416 | |
417 | template <typename T, typename = decltype(std::declval<raw_ostream &>() |
418 | << std::declval<const T &>())> |
419 | raw_ostream &operator<<(raw_ostream &OS, const Optional<T> &O) { |
420 | if (O) |
421 | OS << *O; |
422 | else |
423 | OS << None; |
424 | return OS; |
425 | } |
426 | |
427 | } // end namespace llvm |
428 | |
429 | #endif // LLVM_ADT_OPTIONAL_H |
1 | //===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file defines the SmallVector class. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #ifndef LLVM_ADT_SMALLVECTOR_H |
14 | #define LLVM_ADT_SMALLVECTOR_H |
15 | |
16 | #include "llvm/ADT/iterator_range.h" |
17 | #include "llvm/Support/AlignOf.h" |
18 | #include "llvm/Support/Compiler.h" |
19 | #include "llvm/Support/MathExtras.h" |
20 | #include "llvm/Support/MemAlloc.h" |
21 | #include "llvm/Support/type_traits.h" |
22 | #include "llvm/Support/ErrorHandling.h" |
23 | #include <algorithm> |
24 | #include <cassert> |
25 | #include <cstddef> |
26 | #include <cstdlib> |
27 | #include <cstring> |
28 | #include <initializer_list> |
29 | #include <iterator> |
30 | #include <memory> |
31 | #include <new> |
32 | #include <type_traits> |
33 | #include <utility> |
34 | |
35 | namespace llvm { |
36 | |
/// This is all the non-templated stuff common to all SmallVectors.
/// (The original listing carried fused macro residue —
/// `LLVM_NODISCARD[[clang::warn_unused_result]]` and an expanded assert —
/// restored here to plain C++.)
class SmallVectorBase {
protected:
  void *BeginX;                    // Start of the element buffer.
  unsigned Size = 0, Capacity;     // Element count and buffer capacity.

  SmallVectorBase() = delete;
  SmallVectorBase(void *FirstEl, size_t TotalCapacity)
      : BeginX(FirstEl), Capacity(TotalCapacity) {}

  /// This is an implementation of the grow() method which only works
  /// on POD-like data types and is out of line to reduce code duplication.
  void grow_pod(void *FirstEl, size_t MinCapacity, size_t TSize);

public:
  size_t size() const { return Size; }
  size_t capacity() const { return Capacity; }

  [[nodiscard]] bool empty() const { return !Size; }

  /// Set the array size to \p N, which the current array must have enough
  /// capacity for.
  ///
  /// This does not construct or destroy any elements in the vector.
  ///
  /// Clients can use this in conjunction with capacity() to write past the end
  /// of the buffer when they know that more elements are available, and only
  /// update the size later. This avoids the cost of value initializing elements
  /// which will only be overwritten.
  void set_size(size_t N) {
    assert(N <= capacity());
    Size = N;
  }
};
71 | |
/// Figure out the offset of the first element.
// Layout mirror of SmallVector (base, then aligned inline storage) so that
// offsetof() can locate where the first inline element lives; never
// instantiated at runtime.
template <class T, typename = void> struct SmallVectorAlignmentAndSize {
  AlignedCharArrayUnion<SmallVectorBase> Base;
  AlignedCharArrayUnion<T> FirstEl;
};
77 | |
78 | /// This is the part of SmallVectorTemplateBase which does not depend on whether |
79 | /// the type T is a POD. The extra dummy template argument is used by ArrayRef |
80 | /// to avoid unnecessarily requiring T to be complete. |
81 | template <typename T, typename = void> |
82 | class SmallVectorTemplateCommon : public SmallVectorBase { |
83 | /// Find the address of the first element. For this pointer math to be valid |
84 | /// with small-size of 0 for T with lots of alignment, it's important that |
85 | /// SmallVectorStorage is properly-aligned even for small-size of 0. |
86 | void *getFirstEl() const { |
87 | return const_cast<void *>(reinterpret_cast<const void *>( |
88 | reinterpret_cast<const char *>(this) + |
89 | offsetof(SmallVectorAlignmentAndSize<T>, FirstEl)__builtin_offsetof(SmallVectorAlignmentAndSize<T>, FirstEl ))); |
90 | } |
91 | // Space after 'FirstEl' is clobbered, do not add any instance vars after it. |
92 | |
93 | protected: |
94 | SmallVectorTemplateCommon(size_t Size) |
95 | : SmallVectorBase(getFirstEl(), Size) {} |
96 | |
97 | void grow_pod(size_t MinCapacity, size_t TSize) { |
98 | SmallVectorBase::grow_pod(getFirstEl(), MinCapacity, TSize); |
99 | } |
100 | |
101 | /// Return true if this is a smallvector which has not had dynamic |
102 | /// memory allocated for it. |
103 | bool isSmall() const { return BeginX == getFirstEl(); } |
104 | |
105 | /// Put this vector in a state of being small. |
106 | void resetToSmall() { |
107 | BeginX = getFirstEl(); |
108 | Size = Capacity = 0; // FIXME: Setting Capacity to 0 is suspect. |
109 | } |
110 | |
111 | public: |
112 | using size_type = size_t; |
113 | using difference_type = ptrdiff_t; |
114 | using value_type = T; |
115 | using iterator = T *; |
116 | using const_iterator = const T *; |
117 | |
118 | using const_reverse_iterator = std::reverse_iterator<const_iterator>; |
119 | using reverse_iterator = std::reverse_iterator<iterator>; |
120 | |
121 | using reference = T &; |
122 | using const_reference = const T &; |
123 | using pointer = T *; |
124 | using const_pointer = const T *; |
125 | |
126 | // forward iterator creation methods. |
127 | iterator begin() { return (iterator)this->BeginX; } |
128 | const_iterator begin() const { return (const_iterator)this->BeginX; } |
129 | iterator end() { return begin() + size(); } |
130 | const_iterator end() const { return begin() + size(); } |
131 | |
132 | // reverse iterator creation methods. |
133 | reverse_iterator rbegin() { return reverse_iterator(end()); } |
134 | const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); } |
135 | reverse_iterator rend() { return reverse_iterator(begin()); } |
136 | const_reverse_iterator rend() const { return const_reverse_iterator(begin());} |
137 | |
138 | size_type size_in_bytes() const { return size() * sizeof(T); } |
139 | size_type max_size() const { return size_type(-1) / sizeof(T); } |
140 | |
141 | size_t capacity_in_bytes() const { return capacity() * sizeof(T); } |
142 | |
143 | /// Return a pointer to the vector's buffer, even if empty(). |
144 | pointer data() { return pointer(begin()); } |
145 | /// Return a pointer to the vector's buffer, even if empty(). |
146 | const_pointer data() const { return const_pointer(begin()); } |
147 | |
148 | reference operator[](size_type idx) { |
149 | assert(idx < size())((idx < size()) ? static_cast<void> (0) : __assert_fail ("idx < size()", "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/SmallVector.h" , 149, __PRETTY_FUNCTION__)); |
150 | return begin()[idx]; |
151 | } |
152 | const_reference operator[](size_type idx) const { |
153 | assert(idx < size())((idx < size()) ? static_cast<void> (0) : __assert_fail ("idx < size()", "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/SmallVector.h" , 153, __PRETTY_FUNCTION__)); |
154 | return begin()[idx]; |
155 | } |
156 | |
157 | reference front() { |
158 | assert(!empty())((!empty()) ? static_cast<void> (0) : __assert_fail ("!empty()" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/SmallVector.h" , 158, __PRETTY_FUNCTION__)); |
159 | return begin()[0]; |
160 | } |
161 | const_reference front() const { |
162 | assert(!empty())((!empty()) ? static_cast<void> (0) : __assert_fail ("!empty()" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/SmallVector.h" , 162, __PRETTY_FUNCTION__)); |
163 | return begin()[0]; |
164 | } |
165 | |
166 | reference back() { |
167 | assert(!empty())((!empty()) ? static_cast<void> (0) : __assert_fail ("!empty()" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/SmallVector.h" , 167, __PRETTY_FUNCTION__)); |
168 | return end()[-1]; |
169 | } |
170 | const_reference back() const { |
171 | assert(!empty())((!empty()) ? static_cast<void> (0) : __assert_fail ("!empty()" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/SmallVector.h" , 171, __PRETTY_FUNCTION__)); |
172 | return end()[-1]; |
173 | } |
174 | }; |
175 | |
176 | /// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put method |
177 | /// implementations that are designed to work with non-POD-like T's. |
178 | template <typename T, bool = is_trivially_copyable<T>::value> |
179 | class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> { |
180 | protected: |
181 | SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {} |
182 | |
183 | static void destroy_range(T *S, T *E) { |
184 | while (S != E) { |
185 | --E; |
186 | E->~T(); |
187 | } |
188 | } |
189 | |
190 | /// Move the range [I, E) into the uninitialized memory starting with "Dest", |
191 | /// constructing elements as needed. |
192 | template<typename It1, typename It2> |
193 | static void uninitialized_move(It1 I, It1 E, It2 Dest) { |
194 | std::uninitialized_copy(std::make_move_iterator(I), |
195 | std::make_move_iterator(E), Dest); |
196 | } |
197 | |
198 | /// Copy the range [I, E) onto the uninitialized memory starting with "Dest", |
199 | /// constructing elements as needed. |
200 | template<typename It1, typename It2> |
201 | static void uninitialized_copy(It1 I, It1 E, It2 Dest) { |
202 | std::uninitialized_copy(I, E, Dest); |
203 | } |
204 | |
205 | /// Grow the allocated memory (without initializing new elements), doubling |
206 | /// the size of the allocated memory. Guarantees space for at least one more |
207 | /// element, or MinSize more elements if specified. |
208 | void grow(size_t MinSize = 0); |
209 | |
210 | public: |
211 | void push_back(const T &Elt) { |
212 | if (LLVM_UNLIKELY(this->size() >= this->capacity())__builtin_expect((bool)(this->size() >= this->capacity ()), false)) |
213 | this->grow(); |
214 | ::new ((void*) this->end()) T(Elt); |
215 | this->set_size(this->size() + 1); |
216 | } |
217 | |
218 | void push_back(T &&Elt) { |
219 | if (LLVM_UNLIKELY(this->size() >= this->capacity())__builtin_expect((bool)(this->size() >= this->capacity ()), false)) |
220 | this->grow(); |
221 | ::new ((void*) this->end()) T(::std::move(Elt)); |
222 | this->set_size(this->size() + 1); |
223 | } |
224 | |
225 | void pop_back() { |
226 | this->set_size(this->size() - 1); |
227 | this->end()->~T(); |
228 | } |
229 | }; |
230 | |
231 | // Define this out-of-line to dissuade the C++ compiler from inlining it. |
232 | template <typename T, bool TriviallyCopyable> |
233 | void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) { |
234 | if (MinSize > UINT32_MAX(4294967295U)) |
235 | report_bad_alloc_error("SmallVector capacity overflow during allocation"); |
236 | |
237 | // Always grow, even from zero. |
238 | size_t NewCapacity = size_t(NextPowerOf2(this->capacity() + 2)); |
239 | NewCapacity = std::min(std::max(NewCapacity, MinSize), size_t(UINT32_MAX(4294967295U))); |
240 | T *NewElts = static_cast<T*>(llvm::safe_malloc(NewCapacity*sizeof(T))); |
241 | |
242 | // Move the elements over. |
243 | this->uninitialized_move(this->begin(), this->end(), NewElts); |
244 | |
245 | // Destroy the original elements. |
246 | destroy_range(this->begin(), this->end()); |
247 | |
248 | // If this wasn't grown from the inline copy, deallocate the old space. |
249 | if (!this->isSmall()) |
250 | free(this->begin()); |
251 | |
252 | this->BeginX = NewElts; |
253 | this->Capacity = NewCapacity; |
254 | } |
255 | |
256 | /// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put |
257 | /// method implementations that are designed to work with POD-like T's. |
258 | template <typename T> |
259 | class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> { |
260 | protected: |
261 | SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {} |
262 | |
263 | // No need to do a destroy loop for POD's. |
264 | static void destroy_range(T *, T *) {} |
265 | |
266 | /// Move the range [I, E) onto the uninitialized memory |
267 | /// starting with "Dest", constructing elements into it as needed. |
268 | template<typename It1, typename It2> |
269 | static void uninitialized_move(It1 I, It1 E, It2 Dest) { |
270 | // Just do a copy. |
271 | uninitialized_copy(I, E, Dest); |
272 | } |
273 | |
274 | /// Copy the range [I, E) onto the uninitialized memory |
275 | /// starting with "Dest", constructing elements into it as needed. |
276 | template<typename It1, typename It2> |
277 | static void uninitialized_copy(It1 I, It1 E, It2 Dest) { |
278 | // Arbitrary iterator types; just use the basic implementation. |
279 | std::uninitialized_copy(I, E, Dest); |
280 | } |
281 | |
282 | /// Copy the range [I, E) onto the uninitialized memory |
283 | /// starting with "Dest", constructing elements into it as needed. |
284 | template <typename T1, typename T2> |
285 | static void uninitialized_copy( |
286 | T1 *I, T1 *E, T2 *Dest, |
287 | typename std::enable_if<std::is_same<typename std::remove_const<T1>::type, |
288 | T2>::value>::type * = nullptr) { |
289 | // Use memcpy for PODs iterated by pointers (which includes SmallVector |
290 | // iterators): std::uninitialized_copy optimizes to memmove, but we can |
291 | // use memcpy here. Note that I and E are iterators and thus might be |
292 | // invalid for memcpy if they are equal. |
293 | if (I != E) |
294 | memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T)); |
295 | } |
296 | |
297 | /// Double the size of the allocated memory, guaranteeing space for at |
298 | /// least one more element or MinSize if specified. |
299 | void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); } |
300 | |
301 | public: |
302 | void push_back(const T &Elt) { |
303 | if (LLVM_UNLIKELY(this->size() >= this->capacity())__builtin_expect((bool)(this->size() >= this->capacity ()), false)) |
304 | this->grow(); |
305 | memcpy(reinterpret_cast<void *>(this->end()), &Elt, sizeof(T)); |
306 | this->set_size(this->size() + 1); |
307 | } |
308 | |
309 | void pop_back() { this->set_size(this->size() - 1); } |
310 | }; |
311 | |
312 | /// This class consists of common code factored out of the SmallVector class to |
313 | /// reduce code duplication based on the SmallVector 'N' template parameter. |
314 | template <typename T> |
315 | class SmallVectorImpl : public SmallVectorTemplateBase<T> { |
316 | using SuperClass = SmallVectorTemplateBase<T>; |
317 | |
318 | public: |
319 | using iterator = typename SuperClass::iterator; |
320 | using const_iterator = typename SuperClass::const_iterator; |
321 | using reference = typename SuperClass::reference; |
322 | using size_type = typename SuperClass::size_type; |
323 | |
324 | protected: |
  // Default ctor - Initialize to empty.
  // N is the inline (stack) capacity supplied by the derived SmallVector<T, N>.
  explicit SmallVectorImpl(unsigned N)
      : SmallVectorTemplateBase<T>(N) {}
328 | |
329 | public: |
330 | SmallVectorImpl(const SmallVectorImpl &) = delete; |
331 | |
  /// Release heap storage only; element destruction has already been done.
  ~SmallVectorImpl() {
    // Subclass has already destructed this vector's elements.
    // If this wasn't grown from the inline copy, deallocate the old space.
    if (!this->isSmall())
      free(this->begin());
  }
338 | |
  /// Destroy all elements and set the size to zero; capacity is retained.
  void clear() {
    this->destroy_range(this->begin(), this->end());
    this->Size = 0;
  }

  /// Resize to \p N elements; new elements (if any) are value-initialized.
  void resize(size_type N) {
    if (N < this->size()) {
      // Shrinking: destroy the tail [N, size).
      this->destroy_range(this->begin()+N, this->end());
      this->set_size(N);
    } else if (N > this->size()) {
      // Growing: ensure capacity, then value-construct the new tail in place.
      if (this->capacity() < N)
        this->grow(N);
      for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
        new (&*I) T();
      this->set_size(N);
    }
  }

  /// Resize to \p N elements; new elements are copy-constructed from \p NV.
  void resize(size_type N, const T &NV) {
    if (N < this->size()) {
      this->destroy_range(this->begin()+N, this->end());
      this->set_size(N);
    } else if (N > this->size()) {
      if (this->capacity() < N)
        this->grow(N);
      // Fill the uninitialized tail [end, begin+N) with copies of NV.
      std::uninitialized_fill(this->end(), this->begin()+N, NV);
      this->set_size(N);
    }
  }

  /// Ensure capacity for at least \p N elements; never shrinks and does not
  /// change size().
  void reserve(size_type N) {
    if (this->capacity() < N)
      this->grow(N);
  }
373 | |
374 | LLVM_NODISCARD[[clang::warn_unused_result]] T pop_back_val() { |
375 | T Result = ::std::move(this->back()); |
376 | this->pop_back(); |
377 | return Result; |
378 | } |
379 | |
380 | void swap(SmallVectorImpl &RHS); |
381 | |
  /// Add the specified range to the end of the SmallVector.
  template <typename in_iter,
            typename = typename std::enable_if<std::is_convertible<
                typename std::iterator_traits<in_iter>::iterator_category,
                std::input_iterator_tag>::value>::type>
  void append(in_iter in_start, in_iter in_end) {
    size_type NumInputs = std::distance(in_start, in_end);
    // Grow once up front so the copy below never reallocates mid-copy.
    if (NumInputs > this->capacity() - this->size())
      this->grow(this->size()+NumInputs);

    this->uninitialized_copy(in_start, in_end, this->end());
    this->set_size(this->size() + NumInputs);
  }

  /// Append \p NumInputs copies of \p Elt to the end.
  void append(size_type NumInputs, const T &Elt) {
    if (NumInputs > this->capacity() - this->size())
      this->grow(this->size()+NumInputs);

    std::uninitialized_fill_n(this->end(), NumInputs, Elt);
    this->set_size(this->size() + NumInputs);
  }

  /// Append the contents of an initializer list to the end.
  void append(std::initializer_list<T> IL) {
    append(IL.begin(), IL.end());
  }
408 | |
  // FIXME: Consider assigning over existing elements, rather than clearing &
  // re-initializing them - for all assign(...) variants.

  /// Replace the contents with \p NumElts copies of \p Elt.
  void assign(size_type NumElts, const T &Elt) {
    clear();
    if (this->capacity() < NumElts)
      this->grow(NumElts);
    // Size is set first; the fill then constructs into [begin, end).
    this->set_size(NumElts);
    std::uninitialized_fill(this->begin(), this->end(), Elt);
  }

  /// Replace the contents with a copy of the range [in_start, in_end).
  template <typename in_iter,
            typename = typename std::enable_if<std::is_convertible<
                typename std::iterator_traits<in_iter>::iterator_category,
                std::input_iterator_tag>::value>::type>
  void assign(in_iter in_start, in_iter in_end) {
    clear();
    append(in_start, in_end);
  }

  /// Replace the contents with those of an initializer list.
  void assign(std::initializer_list<T> IL) {
    clear();
    append(IL);
  }
433 | |
434 | iterator erase(const_iterator CI) { |
435 | // Just cast away constness because this is a non-const member function. |
436 | iterator I = const_cast<iterator>(CI); |
437 | |
438 | assert(I >= this->begin() && "Iterator to erase is out of bounds.")((I >= this->begin() && "Iterator to erase is out of bounds." ) ? static_cast<void> (0) : __assert_fail ("I >= this->begin() && \"Iterator to erase is out of bounds.\"" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/SmallVector.h" , 438, __PRETTY_FUNCTION__)); |
439 | assert(I < this->end() && "Erasing at past-the-end iterator.")((I < this->end() && "Erasing at past-the-end iterator." ) ? static_cast<void> (0) : __assert_fail ("I < this->end() && \"Erasing at past-the-end iterator.\"" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/SmallVector.h" , 439, __PRETTY_FUNCTION__)); |
440 | |
441 | iterator N = I; |
442 | // Shift all elts down one. |
443 | std::move(I+1, this->end(), I); |
444 | // Drop the last elt. |
445 | this->pop_back(); |
446 | return(N); |
447 | } |
448 | |
449 | iterator erase(const_iterator CS, const_iterator CE) { |
450 | // Just cast away constness because this is a non-const member function. |
451 | iterator S = const_cast<iterator>(CS); |
452 | iterator E = const_cast<iterator>(CE); |
453 | |
454 | assert(S >= this->begin() && "Range to erase is out of bounds.")((S >= this->begin() && "Range to erase is out of bounds." ) ? static_cast<void> (0) : __assert_fail ("S >= this->begin() && \"Range to erase is out of bounds.\"" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/SmallVector.h" , 454, __PRETTY_FUNCTION__)); |
455 | assert(S <= E && "Trying to erase invalid range.")((S <= E && "Trying to erase invalid range.") ? static_cast <void> (0) : __assert_fail ("S <= E && \"Trying to erase invalid range.\"" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/SmallVector.h" , 455, __PRETTY_FUNCTION__)); |
456 | assert(E <= this->end() && "Trying to erase past the end.")((E <= this->end() && "Trying to erase past the end." ) ? static_cast<void> (0) : __assert_fail ("E <= this->end() && \"Trying to erase past the end.\"" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/SmallVector.h" , 456, __PRETTY_FUNCTION__)); |
457 | |
458 | iterator N = S; |
459 | // Shift all elts down. |
460 | iterator I = std::move(E, this->end(), S); |
461 | // Drop the last elts. |
462 | this->destroy_range(I, this->end()); |
463 | this->set_size(I - this->begin()); |
464 | return(N); |
465 | } |
466 | |
467 | iterator insert(iterator I, T &&Elt) { |
468 | if (I == this->end()) { // Important special case for empty vector. |
469 | this->push_back(::std::move(Elt)); |
470 | return this->end()-1; |
471 | } |
472 | |
473 | assert(I >= this->begin() && "Insertion iterator is out of bounds.")((I >= this->begin() && "Insertion iterator is out of bounds." ) ? static_cast<void> (0) : __assert_fail ("I >= this->begin() && \"Insertion iterator is out of bounds.\"" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/SmallVector.h" , 473, __PRETTY_FUNCTION__)); |
474 | assert(I <= this->end() && "Inserting past the end of the vector.")((I <= this->end() && "Inserting past the end of the vector." ) ? static_cast<void> (0) : __assert_fail ("I <= this->end() && \"Inserting past the end of the vector.\"" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/SmallVector.h" , 474, __PRETTY_FUNCTION__)); |
475 | |
476 | if (this->size() >= this->capacity()) { |
477 | size_t EltNo = I-this->begin(); |
478 | this->grow(); |
479 | I = this->begin()+EltNo; |
480 | } |
481 | |
482 | ::new ((void*) this->end()) T(::std::move(this->back())); |
483 | // Push everything else over. |
484 | std::move_backward(I, this->end()-1, this->end()); |
485 | this->set_size(this->size() + 1); |
486 | |
487 | // If we just moved the element we're inserting, be sure to update |
488 | // the reference. |
489 | T *EltPtr = &Elt; |
490 | if (I <= EltPtr && EltPtr < this->end()) |
491 | ++EltPtr; |
492 | |
493 | *I = ::std::move(*EltPtr); |
494 | return I; |
495 | } |
496 | |
497 | iterator insert(iterator I, const T &Elt) { |
498 | if (I == this->end()) { // Important special case for empty vector. |
499 | this->push_back(Elt); |
500 | return this->end()-1; |
501 | } |
502 | |
503 | assert(I >= this->begin() && "Insertion iterator is out of bounds.")((I >= this->begin() && "Insertion iterator is out of bounds." ) ? static_cast<void> (0) : __assert_fail ("I >= this->begin() && \"Insertion iterator is out of bounds.\"" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/SmallVector.h" , 503, __PRETTY_FUNCTION__)); |
504 | assert(I <= this->end() && "Inserting past the end of the vector.")((I <= this->end() && "Inserting past the end of the vector." ) ? static_cast<void> (0) : __assert_fail ("I <= this->end() && \"Inserting past the end of the vector.\"" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/SmallVector.h" , 504, __PRETTY_FUNCTION__)); |
505 | |
506 | if (this->size() >= this->capacity()) { |
507 | size_t EltNo = I-this->begin(); |
508 | this->grow(); |
509 | I = this->begin()+EltNo; |
510 | } |
511 | ::new ((void*) this->end()) T(std::move(this->back())); |
512 | // Push everything else over. |
513 | std::move_backward(I, this->end()-1, this->end()); |
514 | this->set_size(this->size() + 1); |
515 | |
516 | // If we just moved the element we're inserting, be sure to update |
517 | // the reference. |
518 | const T *EltPtr = &Elt; |
519 | if (I <= EltPtr && EltPtr < this->end()) |
520 | ++EltPtr; |
521 | |
522 | *I = *EltPtr; |
523 | return I; |
524 | } |
525 | |
526 | iterator insert(iterator I, size_type NumToInsert, const T &Elt) { |
527 | // Convert iterator to elt# to avoid invalidating iterator when we reserve() |
528 | size_t InsertElt = I - this->begin(); |
529 | |
530 | if (I == this->end()) { // Important special case for empty vector. |
531 | append(NumToInsert, Elt); |
532 | return this->begin()+InsertElt; |
533 | } |
534 | |
535 | assert(I >= this->begin() && "Insertion iterator is out of bounds.")((I >= this->begin() && "Insertion iterator is out of bounds." ) ? static_cast<void> (0) : __assert_fail ("I >= this->begin() && \"Insertion iterator is out of bounds.\"" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/SmallVector.h" , 535, __PRETTY_FUNCTION__)); |
536 | assert(I <= this->end() && "Inserting past the end of the vector.")((I <= this->end() && "Inserting past the end of the vector." ) ? static_cast<void> (0) : __assert_fail ("I <= this->end() && \"Inserting past the end of the vector.\"" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/SmallVector.h" , 536, __PRETTY_FUNCTION__)); |
537 | |
538 | // Ensure there is enough space. |
539 | reserve(this->size() + NumToInsert); |
540 | |
541 | // Uninvalidate the iterator. |
542 | I = this->begin()+InsertElt; |
543 | |
544 | // If there are more elements between the insertion point and the end of the |
545 | // range than there are being inserted, we can use a simple approach to |
546 | // insertion. Since we already reserved space, we know that this won't |
547 | // reallocate the vector. |
548 | if (size_t(this->end()-I) >= NumToInsert) { |
549 | T *OldEnd = this->end(); |
550 | append(std::move_iterator<iterator>(this->end() - NumToInsert), |
551 | std::move_iterator<iterator>(this->end())); |
552 | |
553 | // Copy the existing elements that get replaced. |
554 | std::move_backward(I, OldEnd-NumToInsert, OldEnd); |
555 | |
556 | std::fill_n(I, NumToInsert, Elt); |
557 | return I; |
558 | } |
559 | |
560 | // Otherwise, we're inserting more elements than exist already, and we're |
561 | // not inserting at the end. |
562 | |
563 | // Move over the elements that we're about to overwrite. |
564 | T *OldEnd = this->end(); |
565 | this->set_size(this->size() + NumToInsert); |
566 | size_t NumOverwritten = OldEnd-I; |
567 | this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten); |
568 | |
569 | // Replace the overwritten part. |
570 | std::fill_n(I, NumOverwritten, Elt); |
571 | |
572 | // Insert the non-overwritten middle part. |
573 | std::uninitialized_fill_n(OldEnd, NumToInsert-NumOverwritten, Elt); |
574 | return I; |
575 | } |
576 | |
577 | template <typename ItTy, |
578 | typename = typename std::enable_if<std::is_convertible< |
579 | typename std::iterator_traits<ItTy>::iterator_category, |
580 | std::input_iterator_tag>::value>::type> |
581 | iterator insert(iterator I, ItTy From, ItTy To) { |
582 | // Convert iterator to elt# to avoid invalidating iterator when we reserve() |
583 | size_t InsertElt = I - this->begin(); |
584 | |
585 | if (I == this->end()) { // Important special case for empty vector. |
586 | append(From, To); |
587 | return this->begin()+InsertElt; |
588 | } |
589 | |
590 | assert(I >= this->begin() && "Insertion iterator is out of bounds.")((I >= this->begin() && "Insertion iterator is out of bounds." ) ? static_cast<void> (0) : __assert_fail ("I >= this->begin() && \"Insertion iterator is out of bounds.\"" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/SmallVector.h" , 590, __PRETTY_FUNCTION__)); |
591 | assert(I <= this->end() && "Inserting past the end of the vector.")((I <= this->end() && "Inserting past the end of the vector." ) ? static_cast<void> (0) : __assert_fail ("I <= this->end() && \"Inserting past the end of the vector.\"" , "/build/llvm-toolchain-snapshot-10~svn373386/include/llvm/ADT/SmallVector.h" , 591, __PRETTY_FUNCTION__)); |
592 | |
593 | size_t NumToInsert = std::distance(From, To); |
594 | |
595 | // Ensure there is enough space. |
596 | reserve(this->size() + NumToInsert); |
597 | |
598 | // Uninvalidate the iterator. |
599 | I = this->begin()+InsertElt; |
600 | |
601 | // If there are more elements between the insertion point and the end of the |
602 | // range than there are being inserted, we can use a simple approach to |
603 | // insertion. Since we already reserved space, we know that this won't |
604 | // reallocate the vector. |
605 | if (size_t(this->end()-I) >= NumToInsert) { |
606 | T *OldEnd = this->end(); |
607 | append(std::move_iterator<iterator>(this->end() - NumToInsert), |
608 | std::move_iterator<iterator>(this->end())); |
609 | |
610 | // Copy the existing elements that get replaced. |
611 | std::move_backward(I, OldEnd-NumToInsert, OldEnd); |
612 | |
613 | std::copy(From, To, I); |
614 | return I; |
615 | } |
616 | |
617 | // Otherwise, we're inserting more elements than exist already, and we're |
618 | // not inserting at the end. |
619 | |
620 | // Move over the elements that we're about to overwrite. |
621 | T *OldEnd = this->end(); |
622 | this->set_size(this->size() + NumToInsert); |
623 | size_t NumOverwritten = OldEnd-I; |
624 | this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten); |
625 | |
626 | // Replace the overwritten part. |
627 | for (T *J = I; NumOverwritten > 0; --NumOverwritten) { |
628 | *J = *From; |
629 | ++J; ++From; |
630 | } |
631 | |
632 | // Insert the non-overwritten middle part. |
633 | this->uninitialized_copy(From, To, OldEnd); |
634 | return I; |
635 | } |
636 | |
637 | void insert(iterator I, std::initializer_list<T> IL) { |
638 | insert(I, IL.begin(), IL.end()); |
639 | } |
640 | |
641 | template <typename... ArgTypes> reference emplace_back(ArgTypes &&... Args) { |
642 | if (LLVM_UNLIKELY(this->size() >= this->capacity())__builtin_expect((bool)(this->size() >= this->capacity ()), false)) |
643 | this->grow(); |
644 | ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...); |
645 | this->set_size(this->size() + 1); |
646 | return this->back(); |
647 | } |
648 | |
649 | SmallVectorImpl &operator=(const SmallVectorImpl &RHS); |
650 | |
651 | SmallVectorImpl &operator=(SmallVectorImpl &&RHS); |
652 | |
653 | bool operator==(const SmallVectorImpl &RHS) const { |
654 | if (this->size() != RHS.size()) return false; |
655 | return std::equal(this->begin(), this->end(), RHS.begin()); |
656 | } |
657 | bool operator!=(const SmallVectorImpl &RHS) const { |
658 | return !(*this == RHS); |
659 | } |
660 | |
661 | bool operator<(const SmallVectorImpl &RHS) const { |
662 | return std::lexicographical_compare(this->begin(), this->end(), |
663 | RHS.begin(), RHS.end()); |
664 | } |
665 | }; |
666 | |
667 | template <typename T> |
668 | void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) { |
669 | if (this == &RHS) return; |
670 | |
671 | // We can only avoid copying elements if neither vector is small. |
672 | if (!this->isSmall() && !RHS.isSmall()) { |
673 | std::swap(this->BeginX, RHS.BeginX); |
674 | std::swap(this->Size, RHS.Size); |
675 | std::swap(this->Capacity, RHS.Capacity); |
676 | return; |
677 | } |
678 | if (RHS.size() > this->capacity()) |
679 | this->grow(RHS.size()); |
680 | if (this->size() > RHS.capacity()) |
681 | RHS.grow(this->size()); |
682 | |
683 | // Swap the shared elements. |
684 | size_t NumShared = this->size(); |
685 | if (NumShared > RHS.size()) NumShared = RHS.size(); |
686 | for (size_type i = 0; i != NumShared; ++i) |
687 | std::swap((*this)[i], RHS[i]); |
688 | |
689 | // Copy over the extra elts. |
690 | if (this->size() > RHS.size()) { |
691 | size_t EltDiff = this->size() - RHS.size(); |
692 | this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end()); |
693 | RHS.set_size(RHS.size() + EltDiff); |
694 | this->destroy_range(this->begin()+NumShared, this->end()); |
695 | this->set_size(NumShared); |
696 | } else if (RHS.size() > this->size()) { |
697 | size_t EltDiff = RHS.size() - this->size(); |
698 | this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end()); |
699 | this->set_size(this->size() + EltDiff); |
700 | this->destroy_range(RHS.begin()+NumShared, RHS.end()); |
701 | RHS.set_size(NumShared); |
702 | } |
703 | } |
704 | |
705 | template <typename T> |
706 | SmallVectorImpl<T> &SmallVectorImpl<T>:: |
707 | operator=(const SmallVectorImpl<T> &RHS) { |
708 | // Avoid self-assignment. |
709 | if (this == &RHS) return *this; |
710 | |
711 | // If we already have sufficient space, assign the common elements, then |
712 | // destroy any excess. |
713 | size_t RHSSize = RHS.size(); |
714 | size_t CurSize = this->size(); |
715 | if (CurSize >= RHSSize) { |
716 | // Assign common elements. |
717 | iterator NewEnd; |
718 | if (RHSSize) |
719 | NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin()); |
720 | else |
721 | NewEnd = this->begin(); |
722 | |
723 | // Destroy excess elements. |
724 | this->destroy_range(NewEnd, this->end()); |
725 | |
726 | // Trim. |
727 | this->set_size(RHSSize); |
728 | return *this; |
729 | } |
730 | |
731 | // If we have to grow to have enough elements, destroy the current elements. |
732 | // This allows us to avoid copying them during the grow. |
733 | // FIXME: don't do this if they're efficiently moveable. |
734 | if (this->capacity() < RHSSize) { |
735 | // Destroy current elements. |
736 | this->destroy_range(this->begin(), this->end()); |
737 | this->set_size(0); |
738 | CurSize = 0; |
739 | this->grow(RHSSize); |
740 | } else if (CurSize) { |
741 | // Otherwise, use assignment for the already-constructed elements. |
742 | std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin()); |
743 | } |
744 | |
745 | // Copy construct the new elements in place. |
746 | this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(), |
747 | this->begin()+CurSize); |
748 | |
749 | // Set end. |
750 | this->set_size(RHSSize); |
751 | return *this; |
752 | } |
753 | |
754 | template <typename T> |
755 | SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) { |
756 | // Avoid self-assignment. |
757 | if (this == &RHS) return *this; |
758 | |
759 | // If the RHS isn't small, clear this vector and then steal its buffer. |
760 | if (!RHS.isSmall()) { |
761 | this->destroy_range(this->begin(), this->end()); |
762 | if (!this->isSmall()) free(this->begin()); |
763 | this->BeginX = RHS.BeginX; |
764 | this->Size = RHS.Size; |
765 | this->Capacity = RHS.Capacity; |
766 | RHS.resetToSmall(); |
767 | return *this; |
768 | } |
769 | |
770 | // If we already have sufficient space, assign the common elements, then |
771 | // destroy any excess. |
772 | size_t RHSSize = RHS.size(); |
773 | size_t CurSize = this->size(); |
774 | if (CurSize >= RHSSize) { |
775 | // Assign common elements. |
776 | iterator NewEnd = this->begin(); |
777 | if (RHSSize) |
778 | NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd); |
779 | |
780 | // Destroy excess elements and trim the bounds. |
781 | this->destroy_range(NewEnd, this->end()); |
782 | this->set_size(RHSSize); |
783 | |
784 | // Clear the RHS. |
785 | RHS.clear(); |
786 | |
787 | return *this; |
788 | } |
789 | |
790 | // If we have to grow to have enough elements, destroy the current elements. |
791 | // This allows us to avoid copying them during the grow. |
792 | // FIXME: this may not actually make any sense if we can efficiently move |
793 | // elements. |
794 | if (this->capacity() < RHSSize) { |
795 | // Destroy current elements. |
796 | this->destroy_range(this->begin(), this->end()); |
797 | this->set_size(0); |
798 | CurSize = 0; |
799 | this->grow(RHSSize); |
800 | } else if (CurSize) { |
801 | // Otherwise, use assignment for the already-constructed elements. |
802 | std::move(RHS.begin(), RHS.begin()+CurSize, this->begin()); |
803 | } |
804 | |
805 | // Move-construct the new elements in place. |
806 | this->uninitialized_move(RHS.begin()+CurSize, RHS.end(), |
807 | this->begin()+CurSize); |
808 | |
809 | // Set end. |
810 | this->set_size(RHSSize); |
811 | |
812 | RHS.clear(); |
813 | return *this; |
814 | } |
815 | |
816 | /// Storage for the SmallVector elements. This is specialized for the N=0 case |
817 | /// to avoid allocating unnecessary storage. |
818 | template <typename T, unsigned N> |
819 | struct SmallVectorStorage { |
820 | AlignedCharArrayUnion<T> InlineElts[N]; |
821 | }; |
822 | |
823 | /// We need the storage to be properly aligned even for small-size of 0 so that |
824 | /// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is |
825 | /// well-defined. |
826 | template <typename T> struct alignas(alignof(T)) SmallVectorStorage<T, 0> {}; |
827 | |
828 | /// This is a 'vector' (really, a variable-sized array), optimized |
829 | /// for the case when the array is small. It contains some number of elements |
830 | /// in-place, which allows it to avoid heap allocation when the actual number of |
831 | /// elements is below that threshold. This allows normal "small" cases to be |
832 | /// fast without losing generality for large inputs. |
833 | /// |
834 | /// Note that this does not attempt to be exception safe. |
835 | /// |
836 | template <typename T, unsigned N> |
837 | class SmallVector : public SmallVectorImpl<T>, SmallVectorStorage<T, N> { |
838 | public: |
839 | SmallVector() : SmallVectorImpl<T>(N) {} |
840 | |
841 | ~SmallVector() { |
842 | // Destroy the constructed elements in the vector. |
843 | this->destroy_range(this->begin(), this->end()); |
844 | } |
845 | |
846 | explicit SmallVector(size_t Size, const T &Value = T()) |
847 | : SmallVectorImpl<T>(N) { |
848 | this->assign(Size, Value); |
849 | } |
850 | |
851 | template <typename ItTy, |
852 | typename = typename std::enable_if<std::is_convertible< |
853 | typename std::iterator_traits<ItTy>::iterator_category, |
854 | std::input_iterator_tag>::value>::type> |
855 | SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) { |
856 | this->append(S, E); |
857 | } |
858 | |
859 | template <typename RangeTy> |
860 | explicit SmallVector(const iterator_range<RangeTy> &R) |
861 | : SmallVectorImpl<T>(N) { |
862 | this->append(R.begin(), R.end()); |
863 | } |
864 | |
865 | SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) { |
866 | this->assign(IL); |
867 | } |
868 | |
869 | SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) { |
870 | if (!RHS.empty()) |
871 | SmallVectorImpl<T>::operator=(RHS); |
872 | } |
873 | |
874 | const SmallVector &operator=(const SmallVector &RHS) { |
875 | SmallVectorImpl<T>::operator=(RHS); |
876 | return *this; |
877 | } |
878 | |
879 | SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) { |
880 | if (!RHS.empty()) |
881 | SmallVectorImpl<T>::operator=(::std::move(RHS)); |
882 | } |
883 | |
884 | SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) { |
885 | if (!RHS.empty()) |
886 | SmallVectorImpl<T>::operator=(::std::move(RHS)); |
887 | } |
888 | |
889 | const SmallVector &operator=(SmallVector &&RHS) { |
890 | SmallVectorImpl<T>::operator=(::std::move(RHS)); |
891 | return *this; |
892 | } |
893 | |
894 | const SmallVector &operator=(SmallVectorImpl<T> &&RHS) { |
895 | SmallVectorImpl<T>::operator=(::std::move(RHS)); |
896 | return *this; |
897 | } |
898 | |
899 | const SmallVector &operator=(std::initializer_list<T> IL) { |
900 | this->assign(IL); |
901 | return *this; |
902 | } |
903 | }; |
904 | |
905 | template <typename T, unsigned N> |
906 | inline size_t capacity_in_bytes(const SmallVector<T, N> &X) { |
907 | return X.capacity_in_bytes(); |
908 | } |
909 | |
910 | } // end namespace llvm |
911 | |
912 | namespace std { |
913 | |
914 | /// Implement std::swap in terms of SmallVector swap. |
915 | template<typename T> |
916 | inline void |
917 | swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) { |
918 | LHS.swap(RHS); |
919 | } |
920 | |
921 | /// Implement std::swap in terms of SmallVector swap. |
922 | template<typename T, unsigned N> |
923 | inline void |
924 | swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) { |
925 | LHS.swap(RHS); |
926 | } |
927 | |
928 | } // end namespace std |
929 | |
930 | #endif // LLVM_ADT_SMALLVECTOR_H |