File: llvm/lib/Analysis/MemorySSA.cpp
Warning: line 1996, column 5: Called C++ object pointer is null
//===- MemorySSA.cpp - Memory SSA Builder --------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Use.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <memory>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memoryssa"

INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)

INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
                      "Memory SSA Printer", false, false)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
                    "Memory SSA Printer", false, false)

static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));

// Always verify MemorySSA if expensive checking is enabled.
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyMemorySSA = true;
#else
bool llvm::VerifyMemorySSA = false;
#endif
/// Enables MemorySSA as a dependency for loop passes in the legacy pass
/// manager.
cl::opt<bool> llvm::EnableMSSALoopDependency(
    "enable-mssa-loop-dependency", cl::Hidden, cl::init(true),
    cl::desc("Enable MemorySSA dependency for loop pass manager"));

static cl::opt<bool, true>
    VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
                     cl::Hidden, cl::desc("Enable verification of MemorySSA."));

namespace llvm {

/// An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  friend class MemorySSA;

  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};

} // end namespace llvm
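
// A minimal usage sketch (illustrative, not upstream code): this writer is
// handed to the IR printer, which then calls the emit*Annot hooks above for
// every block and instruction. Assuming a Function F analyzed by MSSA:
//
//   MemorySSAAnnotatedWriter Writer(&MSSA);
//   F.print(OS, &Writer);   // OS: a raw_ostream; each access prints as "; ..."
//
// MemorySSA's own print() entry point follows this same pattern.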

namespace {

/// Our current alias analysis API differentiates heavily between calls and
/// non-calls, and functions called on one usually assert on the other.
/// This class encapsulates the distinction to simplify other code that wants
/// "Memory affecting instructions and related data" to use as a key.
/// For example, this class is used as a DenseMap key in the use optimizer.
class MemoryLocOrCall {
public:
  bool IsCall = false;

  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      Call = C;
    } else {
      IsCall = false;
      // There is no such thing as a MemoryLocation for a fence inst, and it
      // is unique in that regard.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}

  const CallBase *getCall() const {
    assert(IsCall);
    return Call;
  }

  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (!IsCall)
      return Loc == Other.Loc;

    if (Call->getCalledValue() != Other.Call->getCalledValue())
      return false;

    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }

private:
  union {
    const CallBase *Call;
    MemoryLocation Loc;
  };
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<MemoryLocOrCall> {
  static inline MemoryLocOrCall getEmptyKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
  }

  static inline MemoryLocOrCall getTombstoneKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
  }

  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (!MLOC.IsCall)
      return hash_combine(
          MLOC.IsCall,
          DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCall()->getCalledValue()));

    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm
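
// With the DenseMapInfo specialization above, MemoryLocOrCall works directly
// as a DenseMap key. An illustrative sketch of the pattern (the map and
// helper names below are assumptions, not upstream identifiers):
//
//   DenseMap<MemoryLocOrCall, MemoryAccess *> CachedClobber;
//   MemoryLocOrCall Key(MUD);            // call or MemoryLocation, unified
//   auto It = CachedClobber.find(Key);
//   if (It != CachedClobber.end())
//     reuseClobber(MUD, It->second);     // hypothetical helper
//
// getHashValue() folds IsCall into the hash, so a call and a plain location
// never compare equal even if their union bits happen to match.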

/// This does one-way checks to see if Use could theoretically be hoisted above
/// MayClobber. This will not check the other way around.
///
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
/// MayClobber, with no potentially clobbering operations in between them.
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;
  // Otherwise, volatile doesn't matter here. From the language reference:
  // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'

  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
  // is weaker, it can be moved above other loads. We just need to be sure that
  // MayClobber isn't an acquire load, because loads can't be moved above
  // acquire loads.
  //
  // Note that this explicitly *does* allow the free reordering of monotonic (or
  // weaker) loads of the same address.
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}
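
// A worked example of the rules above (illustrative IR, not from this file):
//
//   %a = load atomic i32, i32* %p acquire     ; MayClobber
//   %b = load i32, i32* %p                    ; Use
//
// areLoadsReorderable(/*Use=*/%b, /*MayClobber=*/%a) is false: %a is at least
// acquire, so %b may not be hoisted above it. Conversely, with
//
//   %a = load i32, i32* %p                    ; MayClobber
//   %b = load atomic i32, i32* %p monotonic   ; Use
//
// it returns true: the use is not seq_cst and the clobber is weaker than
// acquire, so MemorySSA may treat the two loads as freely reorderable.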

namespace {

struct ClobberAlias {
  bool IsClobber;
  Optional<AliasResult> AR;
};

} // end anonymous namespace

// Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
// ignored if IsClobber = false.
template <typename AliasAnalysisType>
static ClobberAlias
instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
                         const Instruction *UseInst, AliasAnalysisType &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");
  const auto *UseCall = dyn_cast<CallBase>(UseInst);
  Optional<AliasResult> AR;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers, mostly.
    //
    // FIXME: We probably don't actually want MemorySSA to model these at all
    // (including creating MemoryAccesses for them): we just end up inventing
    // clobbers where they don't really exist at all. Please see D43269 for
    // context.
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
      if (UseCall)
        return {false, NoAlias};
      AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc);
      return {AR != NoAlias, AR};
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
      return {false, NoAlias};
    case Intrinsic::dbg_addr:
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_label:
    case Intrinsic::dbg_value:
      llvm_unreachable("debuginfo shouldn't have associated defs!");
    default:
      break;
    }
  }

  if (UseCall) {
    ModRefInfo I = AA.getModRefInfo(DefInst, UseCall);
    AR = isMustSet(I) ? MustAlias : MayAlias;
    return {isModOrRefSet(I), AR};
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
      return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias};

  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  AR = isMustSet(I) ? MustAlias : MayAlias;
  return {isModSet(I), AR};
}

template <typename AliasAnalysisType>
static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
                                             const MemoryUseOrDef *MU,
                                             const MemoryLocOrCall &UseMLOC,
                                             AliasAnalysisType &AA) {
  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
  // to exist while MemoryLocOrCall is pushed through places.
  if (UseMLOC.IsCall)
    return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
                                    AA);
  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
                                  AA);
}

// Return true when MD may alias MU, return false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                        AliasAnalysis &AA) {
  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
}
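
// An illustrative call site (hedged sketch; the surrounding names are
// assumptions): clients such as the MemorySSA updater use this to decide
// whether an access may be re-pointed above a given def:
//
//   if (!MemorySSAUtil::defClobbersUseOrDef(Def, UseOrDef, AA)) {
//     // Def provably doesn't write the memory UseOrDef touches, so
//     // UseOrDef's defining access can hop over Def.
//   }
//
// This is just the MemoryLocOrCall overload above with the AliasResult half
// of ClobberAlias discarded.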

namespace {

struct UpwardsMemoryQuery {
  // True if our original query started off as a call.
  bool IsCall = false;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local
  // domination.
  const MemoryAccess *OriginalAccess = nullptr;
  Optional<AliasResult> AR = MayAlias;
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};

} // end anonymous namespace

static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
                           BatchAAResults &AA) {
  Instruction *Inst = MD->getMemoryInst();
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_end:
      return AA.alias(MemoryLocation(II->getArgOperand(1)), Loc) == MustAlias;
    default:
      return false;
    }
  }
  return false;
}
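
// Illustrative IR for the predicate above (example, not from this file):
//
//   call void @llvm.lifetime.end.p0i8(i64 4, i8* %slot)
//
// lifetimeEndsAt returns true only when AA proves the intrinsic's pointer
// argument (operand 1) MustAlias the queried location; a mere MayAlias is not
// enough to treat the location as dead at this def.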

template <typename AliasAnalysisType>
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
                                                   const Instruction *I) {
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  return isa<LoadInst>(I) && (I->hasMetadata(LLVMContext::MD_invariant_load) ||
                              AA.pointsToConstantMemory(MemoryLocation(
                                  cast<LoadInst>(I)->getPointerOperand())));
}
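
// An illustrative load this predicate accepts (example IR, not from this
// file):
//
//   %v = load i32, i32* %p, !invariant.load !0
//
// Such a load can never observe a different value, so MemorySSA can point it
// straight at liveOnEntry without walking for a clobber. The same holds when
// AA proves %p points to constant memory, e.g. a constant global.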

/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` can clobber `Start`.
///
/// This is meant to be as simple and self-contained as possible. Because it
/// uses no cache, etc., it can be relatively expensive.
///
/// \param Start The MemoryAccess that we want to walk from.
/// \param ClobberAt A clobber for Start.
/// \param StartLoc The MemoryLocation for Start.
/// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
/// \param Query The UpwardsMemoryQuery we used for our search.
/// \param AA The AliasAnalysis we used for our search.
/// \param AllowImpreciseClobber Always false, unless we do relaxed verify.

template <typename AliasAnalysisType>
LLVM_ATTRIBUTE_UNUSED static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, AliasAnalysisType &AA,
                   bool AllowImpreciseClobber = false) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  SmallVector<ConstMemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
  // is found, complain.
  while (!Worklist.empty()) {
    auto MAP = Worklist.pop_back_val();
    // All we care about is that nothing from Start to ClobberAt clobbers
    // Start. We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (const auto *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't essentially free, so don't use `|=`,
          // since it won't let us short-circuit.
          //
          // Also, note that this can't be hoisted out of the `Worklist` loop,
          // since MD may only act as a clobber for 1 of N MemoryLocations.
          FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
          if (!FoundClobber) {
            ClobberAlias CA =
                instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
            if (CA.IsClobber) {
              FoundClobber = true;
              // Not used: CA.AR;
            }
          }
        }
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
        // If Start is a Def, skip self.
        if (MD == Start)
          continue;

        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
                    .IsClobber &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
        (void)MU;
        assert(MU == Start &&
               "Can only find use in def chain if Start is a use");
        continue;
      }

      assert(isa<MemoryPhi>(MA));
      Worklist.append(
          upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second}),
          upward_defs_end());
    }
  }

  // If the verify is done following an optimization, it's possible that
  // ClobberAt was a conservative clobbering, that we can now infer is not a
  // true clobbering access. Don't fail the verify if that's the case.
  // We do have accesses that claim they're optimized, but could be optimized
  // further. Updating all these can be expensive, so allow it for now (FIXME).
  if (AllowImpreciseClobber)
    return;

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}

namespace {

/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
/// in one class.
template <class AliasAnalysisType> class ClobberWalker {
  /// Save a few bytes by using unsigned instead of size_t.
  using ListIndex = unsigned;

  /// Represents a span of contiguous MemoryDefs, potentially ending in a
  /// MemoryPhi.
  struct DefPath {
    MemoryLocation Loc;
    // Note that, because we always walk in reverse, Last will always dominate
    // First. Also note that First and Last are inclusive.
    MemoryAccess *First;
    MemoryAccess *Last;
    Optional<ListIndex> Previous;

    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            Optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}

    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            Optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
  };

  const MemorySSA &MSSA;
  AliasAnalysisType &AA;
  DominatorTree &DT;
  UpwardsMemoryQuery *Query;
  unsigned *UpwardWalkLimit;

  // Phi optimization bookkeeping
  SmallVector<DefPath, 32> Paths;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;

  /// Find the nearest def or phi that `From` can legally be optimized to.
  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
    assert(From->getNumOperands() && "Phi with no operands?");

    BasicBlock *BB = From->getBlock();
    MemoryAccess *Result = MSSA.getLiveOnEntryDef();
    DomTreeNode *Node = DT.getNode(BB);
    while ((Node = Node->getIDom())) {
      auto *Defs = MSSA.getBlockDefs(Node->getBlock());
      if (Defs)
        return &*Defs->rbegin();
    }
    return Result;
  }

  /// Result of calling walkToPhiOrClobber.
  struct UpwardsWalkResult {
    /// The "Result" of the walk. Either a clobber, the last thing we walked, or
    /// both. Include alias info when clobber found.
    MemoryAccess *Result;
    bool IsKnownClobber;
    Optional<AliasResult> AR;
  };

  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
  /// This will update Desc.Last as it walks. It will (optionally) also stop at
  /// StopAt.
  ///
  /// This does not test for whether StopAt is a clobber.
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
                     const MemoryAccess *SkipStopAt = nullptr) const {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
    assert(UpwardWalkLimit && "Need a valid walk limit");
    bool LimitAlreadyReached = false;
    // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
    // it to 1. This will not do any alias() calls. It either returns in the
    // first iteration in the loop below, or is set back to 0 if all def chains
    // are free of MemoryDefs.
    if (!*UpwardWalkLimit) {
      *UpwardWalkLimit = 1;
      LimitAlreadyReached = true;
    }

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      if (Current == StopAt || Current == SkipStopAt)
        return {Current, false, MayAlias};

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {
        if (MSSA.isLiveOnEntryDef(MD))
          return {MD, true, MustAlias};

        if (!--*UpwardWalkLimit)
          return {Current, true, MayAlias};

        ClobberAlias CA =
            instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
        if (CA.IsClobber)
          return {MD, true, CA.AR};
      }
    }

    if (LimitAlreadyReached)
      *UpwardWalkLimit = 0;

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false, MayAlias};
  }

  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
    auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
                                 upward_defs_end());
    for (const MemoryAccessPair &P : UpwardDefs) {
      PausedSearches.push_back(Paths.size());
      Paths.emplace_back(P.second, P.first, PriorNode);
    }
  }

  /// Represents a search that terminated after finding a clobber. This clobber
  /// may or may not be present in the path of defs from LastNode..SearchStart,
  /// since it may have been retrieved from cache.
  struct TerminatedPath {
    MemoryAccess *Clobber;
    ListIndex LastNode;
  };

  /// Get an access that keeps us from optimizing to the given phi.
  ///
  /// PausedSearches is an array of indices into the Paths array. Its incoming
  /// value is the indices of searches that stopped at the last phi optimization
  /// target. It's left in an unspecified state.
  ///
  /// If this returns None, NewPaused is a vector of searches that terminated
  /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
  Optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    // BFS vs DFS really doesn't make a difference here, so just do a DFS with
    // PausedSearches as our stack.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      //
      // NOTE: That we just drop these paths on the ground makes caching
      // behavior sporadic. e.g. given a diamond:
      //  A
      // B C
      //  D
      //
      // ...If we walk D, B, A, C, we'll only cache the result of phi
      // optimization for A, B, and D; C will be skipped because it dies here.
      // This arguably isn't the worst thing ever, since:
      //   - We generally query things in a top-down order, so if we got below D
      //     without needing cache entries for {C, MemLoc}, then chances are
      //     that those cache entries would end up ultimately unused.
      //   - We still cache things for A, so C only needs to walk up a bit.
      // If this behavior becomes problematic, we can fix without a ton of extra
      // work.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
        continue;

      const MemoryAccess *SkipStopWhere = nullptr;
      if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
        assert(isa<MemoryDef>(Query->OriginalAccess));
        SkipStopWhere = Query->OriginalAccess;
      }

      UpwardsWalkResult Res = walkToPhiOrClobber(Node,
                                                 /*StopAt=*/StopWhere,
                                                 /*SkipStopAt=*/SkipStopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);

        // If this wasn't a cache hit, we hit a clobber when walking. That's a
        // failure.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
        // We've hit our target. Save this path off for if we want to continue
        // walking. If we are in the mode of skipping the OriginalAccess, and
        // we've reached back to the OriginalAccess, do not save path, we've
        // just looped back to self.
        if (Res.Result != SkipStopWhere)
          NewPaused.push_back(PathIndex);
        continue;
      }

      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return None;
  }

  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() {}
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.hasValue() != O.N.hasValue())
        return false;
      return !N.hasValue() || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W = nullptr;
    Optional<ListIndex> N = None;
  };

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }

  struct OptznResult {
    /// The path that contains our result.
    TerminatedPath PrimaryClobber;
    /// The paths that we can legally cache back from, but that aren't
    /// necessarily the result of the Phi optimization.
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };

  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N.
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }

  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
  /// that act as legal clobbers. Note that this won't return *all* clobbers.
  ///
  /// Phi optimization algorithm tl;dr:
  ///   - Find the earliest def/phi, A, we can optimize to
  ///   - Find if all paths from the starting memory access ultimately reach A
  ///     - If not, optimization isn't possible.
  ///     - Otherwise, walk from A to another clobber or phi, A'.
  ///       - If A' is a def, we're done.
  ///       - If A' is a phi, try to optimize it.
  ///
  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
                             const MemoryLocation &Loc) {
    assert(Paths.empty() && VisitedPhis.empty() &&
           "Reset the optimization state.");

    Paths.emplace_back(Loc, Start, Phi, None);
    // Stores how many "valid" optimization nodes we had prior to calling
    // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
    auto PriorPathsSize = Paths.size();

    SmallVector<ListIndex, 16> PausedSearches;
    SmallVector<ListIndex, 8> NewPaused;
    SmallVector<TerminatedPath, 4> TerminatedPaths;

    addSearches(Phi, PausedSearches, 0);

    // Moves the TerminatedPath with the "most dominated" Clobber to the end of
    // Paths.
    auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
      assert(!Paths.empty() && "Need a path to move");
      auto Dom = Paths.begin();
      for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
        if (!MSSA.dominates(I->Clobber, Dom->Clobber))
          Dom = I;
      auto Last = Paths.end() - 1;
      if (Last != Dom)
        std::iter_swap(Last, Dom);
    };

    MemoryPhi *Current = Phi;
    while (true) {
      assert(!MSSA.isLiveOnEntryDef(Current) &&
             "liveOnEntry wasn't treated as a clobber?");

      const auto *Target = getWalkTarget(Current);
      // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
      // optimization for the prior phi.
      assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
        return MSSA.dominates(P.Clobber, Target);
      }));

      // FIXME: This is broken, because the Blocker may be reported to be
      // liveOnEntry, and we'll happily wait for that to disappear (read: never)
      // For the moment, this is fine, since we do nothing with blocker info.
      if (Optional<TerminatedPath> Blocker = getBlockingAccess(
              Target, PausedSearches, NewPaused, TerminatedPaths)) {

        // Find the node we started at. We can't search based on N->Last, since
        // we may have gone around a loop with a different MemoryLocation.
        auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
          return defPathIndex(N) < PriorPathsSize;
        });
        assert(Iter != def_path_iterator());

        DefPath &CurNode = *Iter;
        assert(CurNode.Last == Current);

        // Two things:
        // A. We can't reliably cache all of NewPaused back. Consider a case
        //    where we have two paths in NewPaused; one of which can't optimize
        //    above this phi, whereas the other can. If we cache the second path
        //    back, we'll end up with suboptimal cache entries. We can handle
        //    cases like this a bit better when we either try to find all
        //    clobbers that block phi optimization, or when our cache starts
        //    supporting unfinished searches.
        // B. We can't reliably cache TerminatedPaths back here without doing
        //    extra checks; consider a case like:
        //       T
        //      / \
        //     D   C
        //      \ /
        //       S
        //    Where T is our target, C is a node with a clobber on it, D is a
        //    diamond (with a clobber *only* on the left or right node, N), and
        //    S is our start. Say we walk to D, through the node opposite N
        //    (read: ignoring the clobber), and see a cache entry in the top
        //    node of D. That cache entry gets put into TerminatedPaths. We then
        //    walk up to C (N is later in our worklist), find the clobber, and
        //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
        //    the bottom part of D to the cached clobber, ignoring the clobber
        //    in N. Again, this problem goes away if we start tracking all
        //    blockers for a given phi optimization.
        TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
        return {Result, {}};
      }

      // If there's nothing left to search, then all paths led to valid clobbers
      // that we got from our cache; pick the nearest to the start, and allow
      // the rest to be cached back.
      if (NewPaused.empty()) {
        MoveDominatedPathToEnd(TerminatedPaths);
        TerminatedPath Result = TerminatedPaths.pop_back_val();
        return {Result, std::move(TerminatedPaths)};
      }

      MemoryAccess *DefChainEnd = nullptr;
      SmallVector<TerminatedPath, 4> Clobbers;
      for (ListIndex Paused : NewPaused) {
        UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
        if (WR.IsKnownClobber)
          Clobbers.push_back({WR.Result, Paused});
        else
          // Micro-opt: If we hit the end of the chain, save it.
          DefChainEnd = WR.Result;
      }

      if (!TerminatedPaths.empty()) {
        // If we couldn't find the dominating phi/liveOnEntry in the above loop,
        // do it now.
        if (!DefChainEnd)
          for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
            DefChainEnd = MA;
        assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");

        // If any of the terminated paths don't dominate the phi we'll try to
        // optimize, we need to figure out what they are and quit.
        const BasicBlock *ChainBB = DefChainEnd->getBlock();
        for (const TerminatedPath &TP : TerminatedPaths) {
          // Because we know that DefChainEnd is as "high" as we can go, we
          // don't need local dominance checks; BB dominance is sufficient.
          if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
            Clobbers.push_back(TP);
        }
      }

      // If we have clobbers in the def chain, find the one closest to Current
      // and quit.
      if (!Clobbers.empty()) {
        MoveDominatedPathToEnd(Clobbers);
        TerminatedPath Result = Clobbers.pop_back_val();
        return {Result, std::move(Clobbers)};
      }

      assert(all_of(NewPaused,
                    [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

      // Because liveOnEntry is a clobber, this must be a phi.
      auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);

      PriorPathsSize = Paths.size();
      PausedSearches.clear();
      for (ListIndex I : NewPaused)
        addSearches(DefChainPhi, PausedSearches, I);
      NewPaused.clear();

      Current = DefChainPhi;
    }
  }

  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }

  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
  }

public:
  ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT)
      : MSSA(MSSA), AA(AA), DT(DT) {}

  AliasAnalysisType *getAA() { return &AA; }
  /// Finds the nearest clobber for the given query, optimizing phis if
  /// possible.
  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q,
                            unsigned &UpWalkLimit) {
    Query = &Q;
    UpwardWalkLimit = &UpWalkLimit;
    // Starting limit must be > 0.
    if (!UpWalkLimit)
      UpWalkLimit++;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist. If we're handed one, silently grab
    // its def. (This has the nice side-effect of ensuring we never cache uses.)
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
    // Fast path for the overly-common case (no crazy phi optimization
    // necessary).
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
      Q.AR = WalkResult.AR;
    } else {
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
      checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
#endif
    return Result;
  }
};
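
// A minimal sketch of what an upward clobber walk computes, assuming two
// noalias pointers %p and %q (IR shown informally, with MemorySSA
// annotations in the printer's style):
//
//   store i8 0, i8* %p     ; 1 = MemoryDef(liveOnEntry)
//   store i8 1, i8* %q     ; 2 = MemoryDef(1)
//   %v = load i8, i8* %p   ; MemoryUse(2)
//
// findClobber for the load's location starts at its defining access (2), sees
// that the store to %q does not alias %p, steps up to (1), and stops there
// because the store to %p is a real clobber. If the walk reaches a MemoryPhi
// instead, tryOptimizePhi above explores each incoming path.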

struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};

} // end anonymous namespace

namespace llvm {

template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase {
  ClobberWalker<AliasAnalysisType> Walker;
  MemorySSA *MSSA;

public:
  ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D)
      : Walker(*M, *A, *D), MSSA(M) {}

  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
                                              const MemoryLocation &,
                                              unsigned &);
  // The third argument (bool) defines whether the clobber search should skip
  // the original queried access. If true, there will be a follow-up query
  // searching for a clobber access past "self". Note that the Optimized access
  // is not updated if a new clobber is found by this SkipSelf search. If this
  // additional query becomes heavily used we may decide to cache the result.
  // Walker instantiations will decide how to set the SkipSelf bool.
  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool);
};

/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own, but the name has been retained for the
/// moment.
template <class AliasAnalysisType>
class MemorySSA::CachingWalker final : public MemorySSAWalker {
  ClobberWalkerBase<AliasAnalysisType> *Walker;

public:
  CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~CachingWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, UWL, false);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};

template <class AliasAnalysisType>
class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
  ClobberWalkerBase<AliasAnalysisType> *Walker;

public:
  SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~SkipSelfWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, UWL, true);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};
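
// Rough usage sketch of the two walkers, assuming an existing MemorySSA
// object MSSA and a MemoryDef MD for a store inside a loop:
// \code
//   MemoryAccess *C = MSSA.getWalker()->getClobberingMemoryAccess(MD);
//   // C may be a MemoryPhi whose value cycles back through MD itself.
//   MemoryAccess *Past =
//       MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(MD);
//   // Past continues the search past "self", per the SkipSelf flag above.
// \endcode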

} // end namespace llvm

void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
  // Pass through values to our successors.
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block.
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    if (RenameAllUses) {
      bool ReplacementDone = false;
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
        if (Phi->getIncomingBlock(I) == BB) {
          Phi->setIncomingValue(I, IncomingVal);
          ReplacementDone = true;
        }
      (void)ReplacementDone;
      assert(ReplacementDone && "Incomplete phi during partial rename");
    } else
      Phi->addIncoming(IncomingVal, BB);
  }
}

/// Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
                                     bool RenameAllUses) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
        if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
          MUD->setDefiningAccess(IncomingVal);
        if (isa<MemoryDef>(&L))
          IncomingVal = &L;
      } else {
        IncomingVal = &L;
      }
    }
  }
  return IncomingVal;
}

/// This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  assert(Root && "Trying to rename accesses in an unreachable block");

  SmallVector<RenamePassData, 32> WorkStack;
  // Skip everything if we already renamed this block and we are skipping.
  // Note: You can't sink this into the if, because we need it to occur
  // regardless of whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});

  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      // Note: You can't sink this into the if, because we need it to occur
      // regardless of whether we skip blocks or not.
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // We already visited this during our renaming, which can happen when
        // being asked to rename multiple blocks. Figure out the incoming val,
        // which is the last def.
        // Incoming value can only change if there is a block def, and in that
        // case, it's the last block def in the list.
        if (auto *BlockDefs = getWritableBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}
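
// A small worked example of the renaming pass (a sketch with made-up access
// numbers). For a diamond CFG where only the left arm stores:
//
//   entry:  br i1 %c, label %left, label %right
//   left:   store i8 0, i8* %p      ; 1 = MemoryDef(liveOnEntry)
//   right:  (no accesses)
//   merge:  ; 2 = MemoryPhi({left,1},{right,liveOnEntry})
//           %v = load i8, i8* %p    ; MemoryUse(2)
//
// Walking the dominator tree in preorder, the incoming value entering %left
// becomes the store's MemoryDef, %right passes liveOnEntry through, the
// MemoryPhi in %merge receives one incoming value per predecessor, and the
// load is renamed to use the phi.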

/// This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccess's as
/// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable. We could just disconnect these blocks from the CFG fully,
  // but we do not right now.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block.
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // If we have a phi, just remove it. We are going to replace all
    // users with live on entry.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}

MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : AA(nullptr), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr), NextID(0) {
  // Build MemorySSA using a batch alias analysis. This reuses the internal
  // state that AA collects during an alias()/getModRefInfo() call. This is
  // safe because there are no CFG changes while building MemorySSA, and it can
  // significantly reduce the time spent by the compiler in AA, because we will
  // make queries about all the instructions in the Function.
  assert(AA && "No alias analysis?");
  BatchAAResults BatchAA(*AA);
  buildMemorySSA(BatchAA);
  // Intentionally leave AA as nullptr while building so we don't accidentally
  // use non-batch AliasAnalysis.
  this->AA = AA;
  // Also create the walker here.
  getWalker();
}
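
// A minimal sketch of how this constructor is typically reached, assuming a
// legacy-PM pass with the usual analysis dependencies:
// \code
//   bool MyPass::runOnFunction(Function &F) {
//     auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
//     auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
//     MemorySSA MSSA(F, &AA, &DT); // builds accesses, phis, and the walker
//     ...
//   }
// \endcode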

MemorySSA::~MemorySSA() {
  // Drop all our references.
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}

MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<AccessList>();
  return Res.first->second.get();
}

MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<DefsList>();
  return Res.first->second.get();
}

namespace llvm {

/// This class is a batch walker of all MemoryUse's in the program, and points
/// their defining access at the thing that actually clobbers them. Because it
/// is a batch walker that touches everything, it does not operate like the
/// other walkers. This walker is basically performing a top-down SSA renaming
/// pass, where the version stack is used as the cache. This enables it to be
/// significantly more time and memory efficient than using the regular walker,
/// which is walking bottom-up.
class MemorySSA::OptimizeUses {
public:
  OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker,
               BatchAAResults *BAA, DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}

  void optimizeUses();

private:
  /// This represents where a given MemoryLocation is in the stack.
  struct MemlocStackInfo {
    // This essentially is keeping track of versions of the stack. Whenever
    // the stack changes due to pushes or pops, these versions increase.
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    // This is the lower bound of places on the stack to check. It is equal to
    // the place the last stack walk ended.
    // Note: Correctness depends on this being initialized to 0, which
    // DenseMap does.
    unsigned long LowerBound;
    const BasicBlock *LowerBoundBlock;
    // This is where the last walk for this memory location ended.
    unsigned long LastKill;
    bool LastKillValid;
    Optional<AliasResult> AR;
  };

  void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
                           SmallVectorImpl<MemoryAccess *> &,
                           DenseMap<MemoryLocOrCall, MemlocStackInfo> &);

  MemorySSA *MSSA;
  CachingWalker<BatchAAResults> *Walker;
  BatchAAResults *AA;
  DominatorTree *DT;
};

} // end namespace llvm

/// Optimize the uses in a given block. This is basically the SSA renaming
/// algorithm, with one caveat: We are able to use a single stack for all
/// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
/// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
/// going to be some position in that stack of possible ones.
///
/// We track the stack positions that each MemoryLocation needs
/// to check, and last ended at. This is because we only want to check the
/// things that changed since last time. The same MemoryLocation should
/// get clobbered by the same store (getModRefInfo does not use invariance or
/// things like this, and if they start, we can modify MemoryLocOrCall to
/// include relevant data).
void MemorySSA::OptimizeUses::optimizeUsesInBlock(
    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
    SmallVectorImpl<MemoryAccess *> &VersionStack,
    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {

  // If no accesses, nothing to do.
  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
  if (Accesses == nullptr)
    return;

  // Pop everything that doesn't dominate the current block off the stack,
  // increment the PopEpoch to account for this.
  while (true) {
    assert(
        !VersionStack.empty() &&
        "Version stack should have liveOnEntry sentinel dominating everything");
    BasicBlock *BackBlock = VersionStack.back()->getBlock();
    if (DT->dominates(BackBlock, BB))
      break;
    while (VersionStack.back()->getBlock() == BackBlock)
      VersionStack.pop_back();
    ++PopEpoch;
  }

  for (MemoryAccess &MA : *Accesses) {
    auto *MU = dyn_cast<MemoryUse>(&MA);
    if (!MU) {
      VersionStack.push_back(&MA);
      ++StackEpoch;
      continue;
    }

    if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
      MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
      continue;
    }

    MemoryLocOrCall UseMLOC(MU);
    auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, it means we've removed stuff from the top of
    // the stack due to changing blocks. We may have to reset the lower bound
    // or last kill info.
    if (LocInfo.PopEpoch != PopEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
      // If the lower bound was in something that no longer dominates us, we
      // have to reset it.
      // We can't simply track stack size, because the stack may have had
      // pushes/pops in the meantime.
      // XXX: This is non-optimal, but is only slower in cases with heavily
      // branching dominator trees. To get the optimal number of queries we
      // would make LowerBound and LastKill a per-loc stack, and pop it until
      // the top of that stack dominates us. This does not seem worth it ATM.
      // A much cheaper optimization would be to always explore the deepest
      // branch of the dominator tree first. This will guarantee this resets on
      // the smallest set of blocks.
      if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
          !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
        // Reset the lower bound of things to check.
        // TODO: Some day we should be able to reset to last kill, rather than
        // 0.
        LocInfo.LowerBound = 0;
        LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
        LocInfo.LastKillValid = false;
      }
    } else if (LocInfo.StackEpoch != StackEpoch) {
      // If all that has changed is the StackEpoch, we only have to check the
      // new things on the stack, because we've checked everything before. In
      // this case, the lower bound of things to check remains the same.
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
    }
    if (!LocInfo.LastKillValid) {
      LocInfo.LastKill = VersionStack.size() - 1;
      LocInfo.LastKillValid = true;
      LocInfo.AR = MayAlias;
    }

    // At this point, we should have corrected last kill and LowerBound to be
    // in bounds.
    assert(LocInfo.LowerBound < VersionStack.size() &&
           "Lower bound out of range");
    assert(LocInfo.LastKill < VersionStack.size() &&
           "Last kill info out of range");
    // In any case, the new upper bound is the top of the stack.
    unsigned long UpperBound = VersionStack.size() - 1;

    if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
      LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
                        << *(MU->getMemoryInst()) << ")"
                        << " because there are "
                        << UpperBound - LocInfo.LowerBound
                        << " stores to disambiguate\n");
      // Because we did not walk, LastKill is no longer valid, as this may
      // have been a kill.
      LocInfo.LastKillValid = false;
      continue;
    }
    bool FoundClobberResult = false;
    unsigned UpwardWalkLimit = MaxCheckLimit;
    while (UpperBound > LocInfo.LowerBound) {
      if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker, see where we ended up, go there.
        MemoryAccess *Result =
            Walker->getClobberingMemoryAccess(MU, UpwardWalkLimit);
        // We are guaranteed to find it or something is wrong.
        while (VersionStack[UpperBound] != Result) {
          assert(UpperBound != 0);
          --UpperBound;
        }
        FoundClobberResult = true;
        break;
      }

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
      // If the lifetime of the pointer ends at this instruction, it's live on
      // entry.
      if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
        // Reset UpperBound to liveOnEntryDef's place in the stack.
        UpperBound = 0;
        FoundClobberResult = true;
        LocInfo.AR = MustAlias;
        break;
      }
      ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
      if (CA.IsClobber) {
        FoundClobberResult = true;
        LocInfo.AR = CA.AR;
        break;
      }
      --UpperBound;
    }

    // Note: Phis always have AliasResult AR set to MayAlias ATM.

    // At the end of this loop, UpperBound is either a clobber or the lower
    // bound. PHI walking may cause it to be < LowerBound, and in fact, it may
    // even be < LastKill.
    if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      // We were last killed now by where we got to.
      if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
        LocInfo.AR = None;
      MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
      LocInfo.LastKill = UpperBound;
    } else {
      // Otherwise, we checked all the new ones, and now we know we can get to
      // LastKill.
      MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
    }
    LocInfo.LowerBound = VersionStack.size() - 1;
    LocInfo.LowerBoundBlock = BB;
  }
}

/// Optimize uses to point to their actual clobbering definitions.
void MemorySSA::OptimizeUses::optimizeUses() {
  SmallVector<MemoryAccess *, 16> VersionStack;
  DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
  VersionStack.push_back(MSSA->getLiveOnEntryDef());

  unsigned long StackEpoch = 1;
  unsigned long PopEpoch = 1;
  // We perform a non-recursive top-down dominator tree walk.
  for (const auto *DomNode : depth_first(DT->getRootNode()))
    optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
                        LocStackInfo);
}
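
// A sketch of the version stack in action (made-up accesses). While visiting
// blocks in dominator-tree preorder, two stores above a load yield:
//
//   VersionStack: [liveOnEntry, 1 = MemoryDef, 2 = MemoryDef]
//
// A MemoryUse in the current block scans down from the top (UpperBound) toward
// its per-location LowerBound. If access 2 doesn't alias the use but access 1
// does, the use's defining access becomes 1, LastKill records that position,
// and the next use of the same location only needs to examine entries pushed
// since then.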

void MemorySSA::placePHINodes(
    const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
  // Determine where our MemoryPhi's should go.
  ForwardIDFCalculator IDFs(*DT);
  IDFs.setDefiningBlocks(DefiningBlocks);
  SmallVector<BasicBlock *, 32> IDFBlocks;
  IDFs.calculate(IDFBlocks);

  // Now place MemoryPhi nodes.
  for (auto &BB : IDFBlocks)
    createMemoryPhi(BB);
}
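
// For intuition, an informal example: in a diamond CFG where only one arm
// contains a store, the iterated dominance frontier of {that arm} is the
// merge block, so exactly one MemoryPhi is created there to join the store's
// MemoryDef with liveOnEntry coming from the other arm.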

void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
  // We create an access to represent "live on entry", for things like
  // arguments or users of globals, where the memory they use is defined before
  // the beginning of the function. We do not actually insert it into the IR.
  // We do not define a live on exit for the immediate uses, and thus our
  // semantics do *not* imply that something with no immediate uses can simply
  // be removed.
  BasicBlock &StartingPoint = F.getEntryBlock();
  LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
                                     &StartingPoint, NextID++));

  // We maintain lists of memory accesses per block, trading memory for time. We
  // could just look up the memory access for every possible instruction in the
  // stream.
  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
  // Go through each block, figure out where defs occur, and chain together all
  // the accesses.
  for (BasicBlock &B : F) {
    bool InsertIntoDef = false;
    AccessList *Accesses = nullptr;
    DefsList *Defs = nullptr;
    for (Instruction &I : B) {
      MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
      if (!MUD)
        continue;

      if (!Accesses)
        Accesses = getOrCreateAccessList(&B);
      Accesses->push_back(MUD);
      if (isa<MemoryDef>(MUD)) {
        InsertIntoDef = true;
        if (!Defs)
          Defs = getOrCreateDefsList(&B);
        Defs->push_back(*MUD);
      }
    }
    if (InsertIntoDef)
      DefiningBlocks.insert(&B);
  }
  placePHINodes(DefiningBlocks);

  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
  // filled in with all blocks.
  SmallPtrSet<BasicBlock *, 16> Visited;
  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);

  ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BAA, DT);
  CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
  OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses();

  // Mark the uses in unreachable blocks as live on entry, so that they go
  // somewhere.
  for (auto &BB : F)
    if (!Visited.count(&BB))
      markUnreachableAsLiveOnEntry(&BB);
}

MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }

MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() {
  if (Walker)
    return Walker.get();

  if (!WalkerBase)
    WalkerBase =
        std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);

  Walker =
      std::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get());
  return Walker.get();
}

MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
  if (SkipWalker)
    return SkipWalker.get();

  if (!WalkerBase)
    WalkerBase =
        std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);

  SkipWalker =
      std::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get());
  return SkipWalker.get();
}

// This is a helper function used by the creation routines. It places NewAccess
// into the access and defs lists for a given basic block, at the given
// insertion point.
void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
                                        const BasicBlock *BB,
                                        InsertionPlace Point) {
  auto *Accesses = getOrCreateAccessList(BB);
  if (Point == Beginning) {
    // If it's a phi node, it goes first, otherwise, it goes after any phi
    // nodes.
    if (isa<MemoryPhi>(NewAccess)) {
      Accesses->push_front(NewAccess);
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_front(*NewAccess);
    } else {
      auto AI = find_if_not(
          *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
      Accesses->insert(AI, NewAccess);
      if (!isa<MemoryUse>(NewAccess)) {
        auto *Defs = getOrCreateDefsList(BB);
        auto DI = find_if_not(
            *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
        Defs->insert(DI, *NewAccess);
      }
    }
  } else {
    Accesses->push_back(NewAccess);
    if (!isa<MemoryUse>(NewAccess)) {
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_back(*NewAccess);
    }
  }
  BlockNumberingValid.erase(BB);
}

void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
                                      AccessList::iterator InsertPt) {
  auto *Accesses = getWritableBlockAccesses(BB);
  bool WasEnd = InsertPt == Accesses->end();
  Accesses->insert(AccessList::iterator(InsertPt), What);
  if (!isa<MemoryUse>(What)) {
    auto *Defs = getOrCreateDefsList(BB);
    // If we got asked to insert at the end, we have an easy job, just shove it
    // at the end. If we got asked to insert before an existing def, we also get
    // an iterator. If we got asked to insert before a use, we have to hunt for
    // the next def.
    if (WasEnd) {
      Defs->push_back(*What);
    } else if (isa<MemoryDef>(InsertPt)) {
      Defs->insert(InsertPt->getDefsIterator(), *What);
    } else {
      while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
        ++InsertPt;
      // Either we found a def, or we are inserting at the end.
      if (InsertPt == Accesses->end())
        Defs->push_back(*What);
      else
        Defs->insert(InsertPt->getDefsIterator(), *What);
    }
  }
  BlockNumberingValid.erase(BB);
}

void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
  // Keep it in the lookup tables, remove from the lists.
  removeFromLists(What, false);

  // Note that moving should implicitly invalidate the optimized state of a
  // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
  // MemoryDef.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    MD->resetOptimized();
  What->setBlock(BB);
}

// Move What before Where in the IR. The end result is that What will belong to
// the right lists and have the right Block set, but will not otherwise be
// correct. It will not have the right defining access, and if it is a def,
// things below it will not properly be updated.
void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                       AccessList::iterator Where) {
  prepareForMoveTo(What, BB);
  insertIntoListsBefore(What, BB, Where);
}

void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
                       InsertionPlace Point) {
  if (isa<MemoryPhi>(What)) {
    assert(Point == Beginning &&
           "Can only move a Phi at the beginning of the block");
    // Update lookup table entry.
    ValueToMemoryAccess.erase(What->getBlock());
    bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
    (void)Inserted;
    assert(Inserted && "Cannot move a Phi to a block that already has one");
  }

  prepareForMoveTo(What, BB);
  insertIntoListsForBlock(What, BB, Point);
}

MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
  MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
  // Phis are always placed at the front of the block.
  insertIntoListsForBlock(Phi, BB, Beginning);
  ValueToMemoryAccess[BB] = Phi;
  return Phi;
}

MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
                                               MemoryAccess *Definition,
                                               const MemoryUseOrDef *Template,
                                               bool CreationMustSucceed) {
  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
  MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
  if (CreationMustSucceed)
    assert(NewAccess != nullptr && "Tried to create a memory access for a "
                                   "non-memory touching instruction");
  if (NewAccess)
    NewAccess->setDefiningAccess(Definition);
  return NewAccess;
}

// Return true if the instruction has ordering constraints.
// Note specifically that this only considers stores and loads
// because others are still considered ModRef by getModRefInfo.
static inline bool isOrdered(const Instruction *I) {
  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!SI->isUnordered())
      return true;
  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isUnordered())
      return true;
  }
  return false;
}
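
// For intuition, a sketch of what counts as "ordered" (IR shown informally):
//
//   %a = load i8, i8* %p                          ; unordered -> not ordered
//   %b = load volatile i8, i8* %p                 ; volatile  -> ordered
//   %c = load atomic i8, i8* %p seq_cst, align 1  ; atomic    -> ordered
//
// Ordered loads are turned into MemoryDefs by createNewAccess below, even
// though they only read memory.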

/// Helper function to create new memory accesses.
template <typename AliasAnalysisType>
MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
                                           AliasAnalysisType *AAP,
                                           const MemoryUseOrDef *Template) {
  // The assume intrinsic has a control dependency which we model by claiming
  // that it writes arbitrarily. Debuginfo intrinsics may be considered
  // clobbers when we have a nonstandard AA pipeline. Ignore these fake memory
  // dependencies here.
  // FIXME: Replace this special casing with a more accurate modelling of
  // assume's control dependency.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    if (II->getIntrinsicID() == Intrinsic::assume)
      return nullptr;

  // Using a nonstandard AA pipeline might leave us with unexpected modref
  // results for I, so add a check to not model instructions that may not read
  // from or write to memory. This is necessary for correctness.
  if (!I->mayReadFromMemory() && !I->mayWriteToMemory())
    return nullptr;

  bool Def, Use;
  if (Template) {
    Def = dyn_cast_or_null<MemoryDef>(Template) != nullptr;
    Use = dyn_cast_or_null<MemoryUse>(Template) != nullptr;
#if !defined(NDEBUG)
    ModRefInfo ModRef = AAP->getModRefInfo(I, None);
    bool DefCheck, UseCheck;
    DefCheck = isModSet(ModRef) || isOrdered(I);
    UseCheck = isRefSet(ModRef);
    assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
#endif
  } else {
    // Find out what effect this instruction has on memory.
    ModRefInfo ModRef = AAP->getModRefInfo(I, None);
    // The isOrdered check is used to ensure that volatiles end up as defs
    // (atomics end up as ModRef right now anyway). Until we separate the
    // ordering chain from the memory chain, this enables people to see at least
    // some relative ordering to volatiles. Note that getClobberingMemoryAccess
    // will still give an answer that bypasses other volatile loads. TODO:
    // Separate memory aliasing and ordering into two different chains so that
    // we can precisely represent both "what memory will this read/write/is
    // clobbered by" and "what instructions can I move this past".
    Def = isModSet(ModRef) || isOrdered(I);
    Use = isRefSet(ModRef);
  }

  // It's possible for an instruction to not modify memory at all. During
  // construction, we ignore them.
  if (!Def && !Use)
    return nullptr;

  MemoryUseOrDef *MUD;
  if (Def)
    MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
  else
    MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
  ValueToMemoryAccess[I] = MUD;
  return MUD;
}
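
// A quick classification sketch under the rules above (assuming a standard AA
// pipeline): a plain store is Mod, so it becomes a MemoryDef; a plain load is
// Ref-only, so it becomes a MemoryUse; a call that may both read and write is
// ModRef and is modeled as a MemoryDef; and an instruction that touches no
// memory (e.g. `add`) gets no access at all.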

/// Returns true if \p Replacer dominates \p Replacee.
bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
                             const MemoryAccess *Replacee) const {
  if (isa<MemoryUseOrDef>(Replacee))
    return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
  const auto *MP = cast<MemoryPhi>(Replacee);
  // For a phi node, the use occurs in the predecessor block of the phi node.
  // Since Replacee may occur multiple times in the phi node, we have to check
  // each operand to ensure Replacer dominates each operand where Replacee
  // occurs.
  for (const Use &Arg : MP->operands()) {
    if (Arg.get() != Replacee &&
        !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
      return false;
  }
  return true;
}

/// Properly remove \p MA from all of MemorySSA's lookup tables.
void MemorySSA::removeFromLookups(MemoryAccess *MA) {
  assert(MA->use_empty() &&
         "Trying to remove memory access that still has uses");
  BlockNumbering.erase(MA);
  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MUD->setDefiningAccess(nullptr);
  // Invalidate our walker's cache if necessary.
  if (!isa<MemoryUse>(MA))
    getWalker()->invalidateInfo(MA);

  Value *MemoryInst;
  if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MemoryInst = MUD->getMemoryInst();
  else
    MemoryInst = MA->getBlock();

  auto VMA = ValueToMemoryAccess.find(MemoryInst);
  if (VMA->second == MA)
    ValueToMemoryAccess.erase(VMA);
}

/// Properly remove \p MA from all of MemorySSA's lists.
///
/// Because of the way the intrusive list and use lists work, it is important to
/// do removal in the right order.
/// ShouldDelete defaults to true, and will cause the memory access to also be
/// deleted, not just removed.
void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
  BasicBlock *BB = MA->getBlock();
  // The access list owns the reference, so we erase it from the non-owning list
  // first.
  if (!isa<MemoryUse>(MA)) {
    auto DefsIt = PerBlockDefs.find(BB);
    std::unique_ptr<DefsList> &Defs = DefsIt->second;
    Defs->remove(*MA);
    if (Defs->empty())
      PerBlockDefs.erase(DefsIt);
  }

  // The erase call here will delete it. If we don't want it deleted, we call
  // remove instead.
  auto AccessIt = PerBlockAccesses.find(BB);
  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
  if (ShouldDelete)
    Accesses->erase(MA);
  else
    Accesses->remove(MA);

  if (Accesses->empty()) {
    PerBlockAccesses.erase(AccessIt);
    BlockNumberingValid.erase(BB);
  }
}

void MemorySSA::print(raw_ostream &OS) const {
  MemorySSAAnnotatedWriter Writer(this);
  F.print(OS, &Writer);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
#endif

void MemorySSA::verifyMemorySSA() const {
  verifyDefUses(F);
  verifyDomination(F);
  verifyOrdering(F);
  verifyDominationNumbers(F);
  verifyPrevDefInPhis(F);
  // Previously, the verification used to also verify that the clobberingAccess
  // cached by MemorySSA is the same as the clobberingAccess found at a later
  // query to AA. This does not hold true in general due to the current fragility
  // of BasicAA, which has arbitrary caps on the things it analyzes before giving
  // up. As a result, transformations that are correct will lead to BasicAA
  // returning different Alias answers before and after that transformation.
  // Invalidating MemorySSA is not an option, as the results in BasicAA can be
  // so random that, in the worst case, we'd need to rebuild MemorySSA from
  // scratch after every transformation, which defeats the purpose of using it.
  // For such an example, see test4 added in D51960.
}
1888 | |||||
1889 | void MemorySSA::verifyPrevDefInPhis(Function &F) const { | ||||
1890 | #if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS) | ||||
1891 | for (const BasicBlock &BB : F) { | ||||
1892 | if (MemoryPhi *Phi = getMemoryAccess(&BB)) { | ||||
1893 | for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) { | ||||
1894 | auto *Pred = Phi->getIncomingBlock(I); | ||||
1895 | auto *IncAcc = Phi->getIncomingValue(I); | ||||
1896 | // If Pred has no unreachable predecessors, get last def looking at
1897 | // IDoms. If, while walking IDoms, any of these has an unreachable
1898 | // predecessor, then the incoming def can be any access.
1899 | if (auto *DTNode = DT->getNode(Pred)) { | ||||
1900 | while (DTNode) { | ||||
1901 | if (auto *DefList = getBlockDefs(DTNode->getBlock())) { | ||||
1902 | auto *LastAcc = &*(--DefList->end()); | ||||
1903 | assert(LastAcc == IncAcc &&
1904 | "Incorrect incoming access into phi.");
1905 | break; | ||||
1906 | } | ||||
1907 | DTNode = DTNode->getIDom(); | ||||
1908 | } | ||||
1909 | } else { | ||||
1910 | // If Pred has unreachable predecessors, but has at least a Def, the | ||||
1911 | // incoming access can be the last Def in Pred, or it could have been | ||||
1912 | // optimized to LoE. After an update, though, the LoE may have been | ||||
1913 | // replaced by another access, so IncAcc may be any access. | ||||
1914 | // If Pred has unreachable predecessors and no Defs, incoming access | ||||
1915 | // should be LoE; however, after an update, it may be any access.
1916 | } | ||||
1917 | } | ||||
1918 | } | ||||
1919 | } | ||||
1920 | #endif | ||||
1921 | } | ||||
1922 | |||||
1923 | /// Verify that all of the blocks we believe to have valid domination numbers | ||||
1924 | /// actually have valid domination numbers. | ||||
1925 | void MemorySSA::verifyDominationNumbers(const Function &F) const { | ||||
1926 | #ifndef NDEBUG | ||||
1927 | if (BlockNumberingValid.empty()) | ||||
1928 | return; | ||||
1929 | |||||
1930 | SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid; | ||||
1931 | for (const BasicBlock &BB : F) { | ||||
1932 | if (!ValidBlocks.count(&BB)) | ||||
1933 | continue; | ||||
1934 | |||||
1935 | ValidBlocks.erase(&BB); | ||||
1936 | |||||
1937 | const AccessList *Accesses = getBlockAccesses(&BB); | ||||
1938 | // It's correct to say an empty block has valid numbering. | ||||
1939 | if (!Accesses) | ||||
1940 | continue; | ||||
1941 | |||||
1942 | // Block numbering starts at 1. | ||||
1943 | unsigned long LastNumber = 0; | ||||
1944 | for (const MemoryAccess &MA : *Accesses) { | ||||
1945 | auto ThisNumberIter = BlockNumbering.find(&MA); | ||||
1946 | assert(ThisNumberIter != BlockNumbering.end() &&
1947 | "MemoryAccess has no domination number in a valid block!");
1948 | |||||
1949 | unsigned long ThisNumber = ThisNumberIter->second; | ||||
1950 | assert(ThisNumber > LastNumber &&
1951 | "Domination numbers should be strictly increasing!");
1952 | LastNumber = ThisNumber; | ||||
1953 | } | ||||
1954 | } | ||||
1955 | |||||
1956 | assert(ValidBlocks.empty() &&
1957 | "All valid BasicBlocks should exist in F -- dangling pointers?");
1958 | #endif | ||||
1959 | } | ||||
1960 | |||||
1961 | /// Verify that the order and existence of MemoryAccesses matches the | ||||
1962 | /// order and existence of memory affecting instructions. | ||||
1963 | void MemorySSA::verifyOrdering(Function &F) const { | ||||
1964 | #ifndef NDEBUG | ||||
1965 | // Walk all the blocks, comparing what the lookups think and what the access | ||||
1966 | // lists think, as well as the order in the blocks vs the order in the access | ||||
1967 | // lists. | ||||
1968 | SmallVector<MemoryAccess *, 32> ActualAccesses; | ||||
1969 | SmallVector<MemoryAccess *, 32> ActualDefs; | ||||
1970 | for (BasicBlock &B : F) { | ||||
1971 | const AccessList *AL = getBlockAccesses(&B); | ||||
1972 | const auto *DL = getBlockDefs(&B); | ||||
1973 | MemoryAccess *Phi = getMemoryAccess(&B); | ||||
1974 | if (Phi) {
1975 | ActualAccesses.push_back(Phi); | ||||
1976 | ActualDefs.push_back(Phi); | ||||
1977 | } | ||||
1978 | |||||
1979 | for (Instruction &I : B) { | ||||
1980 | MemoryAccess *MA = getMemoryAccess(&I); | ||||
1981 | assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
1982 | "We have memory affecting instructions "
1983 | "in this block but they are not in the "
1984 | "access list or defs list");
1985 | if (MA) { | ||||
1986 | ActualAccesses.push_back(MA); | ||||
1987 | if (isa<MemoryDef>(MA)) | ||||
1988 | ActualDefs.push_back(MA); | ||||
1989 | } | ||||
1990 | } | ||||
1991 | // Either we hit the assert above, we really have no accesses, or we have
1992 | // both accesses and an access list. The same holds for defs. A non-null
1993 | // defs list implies a non-null access list, since every def is an access.
1994 | if (!AL && !DL) | ||||
1995 | continue; | ||||
1996 | assert(AL && AL->size() == ActualAccesses.size() &&
1997 | "We don't have the same number of accesses in the block as on the "
1998 | "access list");
1999 | assert((DL || ActualDefs.size() == 0) &&
2000 | "Either we should have a defs list, or we should have no defs");
2001 | assert((!DL || DL->size() == ActualDefs.size()) &&
2002 | "We don't have the same number of defs in the block as on the "
2003 | "def list");
2004 | auto ALI = AL->begin(); | ||||
2005 | auto AAI = ActualAccesses.begin(); | ||||
2006 | while (ALI != AL->end() && AAI != ActualAccesses.end()) { | ||||
2007 | assert(&*ALI == *AAI && "Not the same accesses in the same order");
2008 | ++ALI; | ||||
2009 | ++AAI; | ||||
2010 | } | ||||
2011 | ActualAccesses.clear(); | ||||
2012 | if (DL) { | ||||
2013 | auto DLI = DL->begin(); | ||||
2014 | auto ADI = ActualDefs.begin(); | ||||
2015 | while (DLI != DL->end() && ADI != ActualDefs.end()) { | ||||
2016 | assert(&*DLI == *ADI && "Not the same defs in the same order");
2017 | ++DLI; | ||||
2018 | ++ADI; | ||||
2019 | } | ||||
2020 | } | ||||
2021 | ActualDefs.clear(); | ||||
2022 | } | ||||
2023 | #endif | ||||
2024 | } | ||||
2025 | |||||
2026 | /// Verify the domination properties of MemorySSA by checking that each | ||||
2027 | /// definition dominates all of its uses. | ||||
2028 | void MemorySSA::verifyDomination(Function &F) const { | ||||
2029 | #ifndef NDEBUG | ||||
2030 | for (BasicBlock &B : F) { | ||||
2031 | // Phi nodes are attached to basic blocks | ||||
2032 | if (MemoryPhi *MP = getMemoryAccess(&B)) | ||||
2033 | for (const Use &U : MP->uses()) | ||||
2034 | assert(dominates(MP, U) && "Memory PHI does not dominate its uses");
2035 | |||||
2036 | for (Instruction &I : B) { | ||||
2037 | MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I)); | ||||
2038 | if (!MD) | ||||
2039 | continue; | ||||
2040 | |||||
2041 | for (const Use &U : MD->uses()) | ||||
2042 | assert(dominates(MD, U) && "Memory Def does not dominate its uses");
2043 | } | ||||
2044 | } | ||||
2045 | #endif | ||||
2046 | } | ||||
2047 | |||||
2048 | /// Verify the def-use lists in MemorySSA, by verifying that \p Use | ||||
2049 | /// appears in the use list of \p Def. | ||||
2050 | void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const { | ||||
2051 | #ifndef NDEBUG | ||||
2052 | // The live on entry use may cause us to get a NULL def here | ||||
2053 | if (!Def) | ||||
2054 | assert(isLiveOnEntryDef(Use) &&
2055 | "Null def but use does not point to live on entry def");
2056 | else | ||||
2057 | assert(is_contained(Def->users(), Use) &&
2058 | "Did not find use in def's use list");
2059 | #endif | ||||
2060 | } | ||||
2061 | |||||
2062 | /// Verify the immediate use information, by walking all the memory | ||||
2063 | /// accesses and verifying that, for each use, it appears in the | ||||
2064 | /// appropriate def's use list | ||||
2065 | void MemorySSA::verifyDefUses(Function &F) const { | ||||
2066 | #if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS) | ||||
2067 | for (BasicBlock &B : F) { | ||||
2068 | // Phi nodes are attached to basic blocks | ||||
2069 | if (MemoryPhi *Phi = getMemoryAccess(&B)) { | ||||
2070 | assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
2071 | pred_begin(&B), pred_end(&B))) &&
2072 | "Incomplete MemoryPhi Node");
2073 | for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) { | ||||
2074 | verifyUseInDefs(Phi->getIncomingValue(I), Phi); | ||||
2075 | assert(find(predecessors(&B), Phi->getIncomingBlock(I)) !=
2076 | pred_end(&B) &&
2077 | "Incoming phi block not a block predecessor");
2078 | } | ||||
2079 | } | ||||
2080 | |||||
2081 | for (Instruction &I : B) { | ||||
2082 | if (MemoryUseOrDef *MA = getMemoryAccess(&I)) { | ||||
2083 | verifyUseInDefs(MA->getDefiningAccess(), MA); | ||||
2084 | } | ||||
2085 | } | ||||
2086 | } | ||||
2087 | #endif | ||||
2088 | } | ||||
2089 | |||||
2090 | /// Perform a local numbering on blocks so that instruction ordering can be | ||||
2091 | /// determined in constant time. | ||||
2092 | /// TODO: We currently just number in order. If we numbered by N, we could | ||||
2093 | /// allow at least N-1 sequences of insertBefore or insertAfter (and at least | ||||
2094 | /// log2(N) sequences of mixed before and after) without needing to invalidate | ||||
2095 | /// the numbering. | ||||
2096 | void MemorySSA::renumberBlock(const BasicBlock *B) const { | ||||
2097 | // The pre-increment ensures the numbers really start at 1. | ||||
2098 | unsigned long CurrentNumber = 0; | ||||
2099 | const AccessList *AL = getBlockAccesses(B); | ||||
2100 | assert(AL != nullptr && "Asking to renumber an empty block");
2101 | for (const auto &I : *AL) | ||||
2102 | BlockNumbering[&I] = ++CurrentNumber; | ||||
2103 | BlockNumberingValid.insert(B); | ||||
2104 | } | ||||
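
// --- Illustrative sketch (not part of MemorySSA.cpp) ------------------------
// The TODO above: if blocks were numbered with a stride N instead of 1, an
// insertBefore/insertAfter could usually claim an unused number between its
// neighbors instead of invalidating the block's numbering. A self-contained
// toy version of that idea (names are hypothetical, not LLVM API):

// With stride 8 (numbers 8, 16, 24, ...), up to log2(8) = 3 midpoint
// insertions fit between any two original neighbors before renumbering.
static Optional<unsigned long> numberBetween(unsigned long Prev,
                                             unsigned long Next) {
  if (Next - Prev < 2)
    return None; // gap exhausted: fall back to a full renumberBlock()
  return Prev + (Next - Prev) / 2;
}
// -----------------------------------------------------------------------------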
2105 | |||||
2106 | /// Determine, for two memory accesses in the same block, | ||||
2107 | /// whether \p Dominator dominates \p Dominatee. | ||||
2108 | /// \returns True if \p Dominator dominates \p Dominatee. | ||||
2109 | bool MemorySSA::locallyDominates(const MemoryAccess *Dominator, | ||||
2110 | const MemoryAccess *Dominatee) const { | ||||
2111 | const BasicBlock *DominatorBlock = Dominator->getBlock(); | ||||
2112 | |||||
2113 | assert((DominatorBlock == Dominatee->getBlock()) &&
2114 | "Asking for local domination when accesses are in different blocks!");
2115 | // A node dominates itself. | ||||
2116 | if (Dominatee == Dominator) | ||||
2117 | return true; | ||||
2118 | |||||
2119 | // When Dominatee is defined on function entry, it is not dominated by another | ||||
2120 | // memory access. | ||||
2121 | if (isLiveOnEntryDef(Dominatee)) | ||||
2122 | return false; | ||||
2123 | |||||
2124 | // When Dominator is defined on function entry, it dominates the other memory | ||||
2125 | // access. | ||||
2126 | if (isLiveOnEntryDef(Dominator)) | ||||
2127 | return true; | ||||
2128 | |||||
2129 | if (!BlockNumberingValid.count(DominatorBlock)) | ||||
2130 | renumberBlock(DominatorBlock); | ||||
2131 | |||||
2132 | unsigned long DominatorNum = BlockNumbering.lookup(Dominator); | ||||
2133 | // All numbers start at 1.
2134 | assert(DominatorNum != 0 && "Block was not numbered properly");
2135 | unsigned long DominateeNum = BlockNumbering.lookup(Dominatee); | ||||
2136 | assert(DominateeNum != 0 && "Block was not numbered properly");
2137 | return DominatorNum < DominateeNum; | ||||
2138 | } | ||||
2139 | |||||
2140 | bool MemorySSA::dominates(const MemoryAccess *Dominator, | ||||
2141 | const MemoryAccess *Dominatee) const { | ||||
2142 | if (Dominator == Dominatee) | ||||
2143 | return true; | ||||
2144 | |||||
2145 | if (isLiveOnEntryDef(Dominatee)) | ||||
2146 | return false; | ||||
2147 | |||||
2148 | if (Dominator->getBlock() != Dominatee->getBlock()) | ||||
2149 | return DT->dominates(Dominator->getBlock(), Dominatee->getBlock()); | ||||
2150 | return locallyDominates(Dominator, Dominatee); | ||||
2151 | } | ||||
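
// --- Illustrative sketch (not part of MemorySSA.cpp) ------------------------
// Example client query built on the overload above: does a store's MemoryDef
// dominate a load's MemoryUse? A minimal sketch, assuming both instructions
// live in the function `MSSA` was built for:

static bool storeDominatesLoad(MemorySSA &MSSA, StoreInst *SI, LoadInst *LI) {
  MemoryAccess *Def = MSSA.getMemoryAccess(SI);
  MemoryAccess *Use = MSSA.getMemoryAccess(LI);
  return Def && Use && MSSA.dominates(Def, Use);
}
// -----------------------------------------------------------------------------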
2152 | |||||
2153 | bool MemorySSA::dominates(const MemoryAccess *Dominator, | ||||
2154 | const Use &Dominatee) const { | ||||
2155 | if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) { | ||||
2156 | BasicBlock *UseBB = MP->getIncomingBlock(Dominatee); | ||||
2157 | // The def must dominate the incoming block of the phi. | ||||
2158 | if (UseBB != Dominator->getBlock()) | ||||
2159 | return DT->dominates(Dominator->getBlock(), UseBB); | ||||
2160 | // If the UseBB and the DefBB are the same, compare locally. | ||||
2161 | return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee)); | ||||
2162 | } | ||||
2163 | // If it's not a PHI node use, the normal dominates can already handle it. | ||||
2164 | return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser())); | ||||
2165 | } | ||||
2166 | |||||
2167 | const static char LiveOnEntryStr[] = "liveOnEntry"; | ||||
2168 | |||||
2169 | void MemoryAccess::print(raw_ostream &OS) const { | ||||
2170 | switch (getValueID()) { | ||||
2171 | case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS); | ||||
2172 | case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS); | ||||
2173 | case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS); | ||||
2174 | } | ||||
2175 | llvm_unreachable("invalid value id");
2176 | } | ||||
2177 | |||||
2178 | void MemoryDef::print(raw_ostream &OS) const { | ||||
2179 | MemoryAccess *UO = getDefiningAccess(); | ||||
2180 | |||||
2181 | auto printID = [&OS](MemoryAccess *A) { | ||||
2182 | if (A && A->getID()) | ||||
2183 | OS << A->getID(); | ||||
2184 | else | ||||
2185 | OS << LiveOnEntryStr; | ||||
2186 | }; | ||||
2187 | |||||
2188 | OS << getID() << " = MemoryDef("; | ||||
2189 | printID(UO); | ||||
2190 | OS << ")"; | ||||
2191 | |||||
2192 | if (isOptimized()) { | ||||
2193 | OS << "->"; | ||||
2194 | printID(getOptimized()); | ||||
2195 | |||||
2196 | if (Optional<AliasResult> AR = getOptimizedAccessType()) | ||||
2197 | OS << " " << *AR; | ||||
2198 | } | ||||
2199 | } | ||||
2200 | |||||
2201 | void MemoryPhi::print(raw_ostream &OS) const { | ||||
2202 | bool First = true; | ||||
2203 | OS << getID() << " = MemoryPhi("; | ||||
2204 | for (const auto &Op : operands()) { | ||||
2205 | BasicBlock *BB = getIncomingBlock(Op); | ||||
2206 | MemoryAccess *MA = cast<MemoryAccess>(Op); | ||||
2207 | if (!First) | ||||
2208 | OS << ','; | ||||
2209 | else | ||||
2210 | First = false; | ||||
2211 | |||||
2212 | OS << '{'; | ||||
2213 | if (BB->hasName()) | ||||
2214 | OS << BB->getName(); | ||||
2215 | else | ||||
2216 | BB->printAsOperand(OS, false); | ||||
2217 | OS << ','; | ||||
2218 | if (unsigned ID = MA->getID()) | ||||
2219 | OS << ID; | ||||
2220 | else | ||||
2221 | OS << LiveOnEntryStr; | ||||
2222 | OS << '}'; | ||||
2223 | } | ||||
2224 | OS << ')'; | ||||
2225 | } | ||||
2226 | |||||
2227 | void MemoryUse::print(raw_ostream &OS) const { | ||||
2228 | MemoryAccess *UO = getDefiningAccess(); | ||||
2229 | OS << "MemoryUse("; | ||||
2230 | if (UO && UO->getID()) | ||||
2231 | OS << UO->getID(); | ||||
2232 | else | ||||
2233 | OS << LiveOnEntryStr; | ||||
2234 | OS << ')'; | ||||
2235 | |||||
2236 | if (Optional<AliasResult> AR = getOptimizedAccessType()) | ||||
2237 | OS << " " << *AR; | ||||
2238 | } | ||||
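
// --- Illustrative note (not part of MemorySSA.cpp) ---------------------------
// Sample lines produced by the printers above (IDs, block names, and alias
// results invented for illustration):
//   1 = MemoryDef(liveOnEntry)
//   2 = MemoryPhi({entry,1},{if.then,liveOnEntry})
//   MemoryUse(2) MustAlias
// -----------------------------------------------------------------------------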
2239 | |||||
2240 | void MemoryAccess::dump() const { | ||||
2241 | // Cannot completely remove virtual function even in release mode. | ||||
2242 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | ||||
2243 | print(dbgs()); | ||||
2244 | dbgs() << "\n"; | ||||
2245 | #endif | ||||
2246 | } | ||||
2247 | |||||
2248 | char MemorySSAPrinterLegacyPass::ID = 0; | ||||
2249 | |||||
2250 | MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) { | ||||
2251 | initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry()); | ||||
2252 | } | ||||
2253 | |||||
2254 | void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const { | ||||
2255 | AU.setPreservesAll(); | ||||
2256 | AU.addRequired<MemorySSAWrapperPass>(); | ||||
2257 | } | ||||
2258 | |||||
2259 | bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) { | ||||
2260 | auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA(); | ||||
2261 | MSSA.print(dbgs()); | ||||
2262 | if (VerifyMemorySSA) | ||||
2263 | MSSA.verifyMemorySSA(); | ||||
2264 | return false; | ||||
2265 | } | ||||
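
// --- Illustrative note (not part of MemorySSA.cpp) ---------------------------
// How these printers are usually reached from the command line (flag names as
// registered for the passes elsewhere in LLVM; the invocations are a sketch):
//   opt -disable-output -print-memoryssa input.ll            ; legacy PM
//   opt -disable-output -passes='print<memoryssa>' input.ll  ; new PM, below
// -----------------------------------------------------------------------------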
2266 | |||||
2267 | AnalysisKey MemorySSAAnalysis::Key; | ||||
2268 | |||||
2269 | MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F, | ||||
2270 | FunctionAnalysisManager &AM) { | ||||
2271 | auto &DT = AM.getResult<DominatorTreeAnalysis>(F); | ||||
2272 | auto &AA = AM.getResult<AAManager>(F); | ||||
2273 | return MemorySSAAnalysis::Result(std::make_unique<MemorySSA>(F, &AA, &DT)); | ||||
2274 | } | ||||
2275 | |||||
2276 | bool MemorySSAAnalysis::Result::invalidate( | ||||
2277 | Function &F, const PreservedAnalyses &PA, | ||||
2278 | FunctionAnalysisManager::Invalidator &Inv) { | ||||
2279 | auto PAC = PA.getChecker<MemorySSAAnalysis>(); | ||||
2280 | return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || | ||||
2281 | Inv.invalidate<AAManager>(F, PA) || | ||||
2282 | Inv.invalidate<DominatorTreeAnalysis>(F, PA); | ||||
2283 | } | ||||
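
// --- Illustrative sketch (not part of MemorySSA.cpp) ------------------------
// Consequence of the invalidation logic above: a pass keeps its MemorySSA
// alive by preserving the analysis together with its inputs (the dominator
// tree and AA). A minimal new-pass-manager sketch (`MyPass` is hypothetical):

struct MyPass : PassInfoMixin<MyPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
    // ... transform F, keeping MSSA updated via MemorySSAUpdater ...
    (void)MSSA;
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();    // CFG (and dominator tree) untouched
    PA.preserve<MemorySSAAnalysis>(); // and MemorySSA was kept in sync
    return PA;
  }
};
// -----------------------------------------------------------------------------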
2284 | |||||
2285 | PreservedAnalyses MemorySSAPrinterPass::run(Function &F, | ||||
2286 | FunctionAnalysisManager &AM) { | ||||
2287 | OS << "MemorySSA for function: " << F.getName() << "\n"; | ||||
2288 | AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS); | ||||
2289 | |||||
2290 | return PreservedAnalyses::all(); | ||||
2291 | } | ||||
2292 | |||||
2293 | PreservedAnalyses MemorySSAVerifierPass::run(Function &F, | ||||
2294 | FunctionAnalysisManager &AM) { | ||||
2295 | AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA(); | ||||
2296 | |||||
2297 | return PreservedAnalyses::all(); | ||||
2298 | } | ||||
2299 | |||||
2300 | char MemorySSAWrapperPass::ID = 0; | ||||
2301 | |||||
2302 | MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) { | ||||
2303 | initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry()); | ||||
2304 | } | ||||
2305 | |||||
2306 | void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); } | ||||
2307 | |||||
2308 | void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { | ||||
2309 | AU.setPreservesAll(); | ||||
2310 | AU.addRequiredTransitive<DominatorTreeWrapperPass>(); | ||||
2311 | AU.addRequiredTransitive<AAResultsWrapperPass>(); | ||||
2312 | } | ||||
2313 | |||||
2314 | bool MemorySSAWrapperPass::runOnFunction(Function &F) { | ||||
2315 | auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); | ||||
2316 | auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults(); | ||||
2317 | MSSA.reset(new MemorySSA(F, &AA, &DT)); | ||||
2318 | return false; | ||||
2319 | } | ||||
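
// --- Illustrative sketch (not part of MemorySSA.cpp) ------------------------
// A legacy-pass-manager consumer of the wrapper pass above (`MyLegacyPass` is
// hypothetical):

struct MyLegacyPass : FunctionPass {
  static char ID;
  MyLegacyPass() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MemorySSAWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>(); // if we keep MSSA updated
  }

  bool runOnFunction(Function &F) override {
    MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
    // ... query or update MSSA ...
    (void)MSSA;
    return false; // no IR change in this sketch
  }
};
char MyLegacyPass::ID = 0;
// -----------------------------------------------------------------------------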
2320 | |||||
2321 | void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); } | ||||
2322 | |||||
2323 | void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const { | ||||
2324 | MSSA->print(OS); | ||||
2325 | } | ||||
2326 | |||||
2327 | MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {} | ||||
2328 | |||||
2329 | /// Walk the use-def chains starting at \p StartingAccess and find | ||||
2330 | /// the MemoryAccess that actually clobbers Loc. | ||||
2331 | /// | ||||
2332 | /// \returns our clobbering memory access | ||||
2333 | template <typename AliasAnalysisType> | ||||
2334 | MemoryAccess * | ||||
2335 | MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase( | ||||
2336 | MemoryAccess *StartingAccess, const MemoryLocation &Loc, | ||||
2337 | unsigned &UpwardWalkLimit) { | ||||
2338 | if (isa<MemoryPhi>(StartingAccess)) | ||||
2339 | return StartingAccess; | ||||
2340 | |||||
2341 | auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess); | ||||
2342 | if (MSSA->isLiveOnEntryDef(StartingUseOrDef)) | ||||
2343 | return StartingUseOrDef; | ||||
2344 | |||||
2345 | Instruction *I = StartingUseOrDef->getMemoryInst(); | ||||
2346 | |||||
2347 | // Conservatively, fences are always clobbers, so don't perform the walk if we | ||||
2348 | // hit a fence. | ||||
2349 | if (!isa<CallBase>(I) && I->isFenceLike()) | ||||
2350 | return StartingUseOrDef; | ||||
2351 | |||||
2352 | UpwardsMemoryQuery Q; | ||||
2353 | Q.OriginalAccess = StartingUseOrDef; | ||||
2354 | Q.StartingLoc = Loc; | ||||
2355 | Q.Inst = I; | ||||
2356 | Q.IsCall = false; | ||||
2357 | |||||
2358 | // Unlike the other function, do not walk to the def of a def, because we are | ||||
2359 | // handed something we already believe is the clobbering access. | ||||
2360 | // We never set SkipSelf to true in Q in this method. | ||||
2361 | MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef) | ||||
2362 | ? StartingUseOrDef->getDefiningAccess() | ||||
2363 | : StartingUseOrDef; | ||||
2364 | |||||
2365 | MemoryAccess *Clobber = | ||||
2366 | Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit); | ||||
2367 | LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2368 | LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
2369 | LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
2370 | LLVM_DEBUG(dbgs() << *Clobber << "\n");
2371 | return Clobber; | ||||
2372 | } | ||||
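
// --- Illustrative sketch (not part of MemorySSA.cpp) ------------------------
// Client view of the walk above, through the public MemorySSAWalker API: ask
// what clobbers a given load. A minimal sketch:

static bool isClobberedSinceEntry(MemorySSA &MSSA, LoadInst *LI) {
  MemoryAccess *Clobber = MSSA.getWalker()->getClobberingMemoryAccess(LI);
  // liveOnEntry means nothing inside this function clobbers the location.
  return !MSSA.isLiveOnEntryDef(Clobber);
}
// -----------------------------------------------------------------------------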
2373 | |||||
2374 | template <typename AliasAnalysisType> | ||||
2375 | MemoryAccess * | ||||
2376 | MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase( | ||||
2377 | MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf) { | ||||
2378 | auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA); | ||||
2379 | // If this is a MemoryPhi, we can't do anything. | ||||
2380 | if (!StartingAccess) | ||||
2381 | return MA; | ||||
2382 | |||||
2383 | bool IsOptimized = false; | ||||
2384 | |||||
2385 | // If this is an already optimized use or def, return the optimized result. | ||||
2386 | // Note: Currently, we store the optimized def result in a separate field, | ||||
2387 | // since we can't use the defining access. | ||||
2388 | if (StartingAccess->isOptimized()) { | ||||
2389 | if (!SkipSelf || !isa<MemoryDef>(StartingAccess)) | ||||
2390 | return StartingAccess->getOptimized(); | ||||
2391 | IsOptimized = true; | ||||
2392 | } | ||||
2393 | |||||
2394 | const Instruction *I = StartingAccess->getMemoryInst(); | ||||
2395 | // We can't sanely do anything with a fence, since fences conservatively
2396 | // clobber all memory and have no locations to get pointers from to try to
2397 | // disambiguate.
2398 | if (!isa<CallBase>(I) && I->isFenceLike()) | ||||
2399 | return StartingAccess; | ||||
2400 | |||||
2401 | UpwardsMemoryQuery Q(I, StartingAccess); | ||||
2402 | |||||
2403 | if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) { | ||||
2404 | MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef(); | ||||
2405 | StartingAccess->setOptimized(LiveOnEntry); | ||||
2406 | StartingAccess->setOptimizedAccessType(None); | ||||
2407 | return LiveOnEntry; | ||||
2408 | } | ||||
2409 | |||||
2410 | MemoryAccess *OptimizedAccess; | ||||
2411 | if (!IsOptimized) { | ||||
2412 | // Start with the thing we already think clobbers this location | ||||
2413 | MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess(); | ||||
2414 | |||||
2415 | // At this point, DefiningAccess may be the live on entry def. | ||||
2416 | // If it is, we will not get a better result. | ||||
2417 | if (MSSA->isLiveOnEntryDef(DefiningAccess)) { | ||||
2418 | StartingAccess->setOptimized(DefiningAccess); | ||||
2419 | StartingAccess->setOptimizedAccessType(None); | ||||
2420 | return DefiningAccess; | ||||
2421 | } | ||||
2422 | |||||
2423 | OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit); | ||||
2424 | StartingAccess->setOptimized(OptimizedAccess); | ||||
2425 | if (MSSA->isLiveOnEntryDef(OptimizedAccess)) | ||||
2426 | StartingAccess->setOptimizedAccessType(None); | ||||
2427 | else if (Q.AR == MustAlias) | ||||
2428 | StartingAccess->setOptimizedAccessType(MustAlias); | ||||
2429 | } else | ||||
2430 | OptimizedAccess = StartingAccess->getOptimized(); | ||||
2431 | |||||
2432 | LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2433 | LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
2434 | LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
2435 | LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");
2436 | |||||
2437 | MemoryAccess *Result; | ||||
2438 | if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) && | ||||
2439 | isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) { | ||||
2440 | assert(isa<MemoryDef>(Q.OriginalAccess));
2441 | Q.SkipSelfAccess = true; | ||||
2442 | Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit); | ||||
2443 | } else | ||||
2444 | Result = OptimizedAccess; | ||||
2445 | |||||
2446 | LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
2447 | LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");
2448 | |||||
2449 | return Result; | ||||
2450 | } | ||||
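
// --- Illustrative sketch (not part of MemorySSA.cpp) ------------------------
// The SkipSelf = true path is reached through MSSA.getSkipSelfWalker(): for a
// MemoryDef it answers "what clobbers this location above this def", which is
// the question dead-store-style analyses ask. A minimal sketch:

static MemoryAccess *clobberAboveStore(MemorySSA &MSSA, StoreInst *SI) {
  MemoryAccess *MA = MSSA.getMemoryAccess(SI);
  return MA ? MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(MA) : nullptr;
}
// -----------------------------------------------------------------------------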
2451 | |||||
2452 | MemoryAccess * | ||||
2453 | DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) { | ||||
2454 | if (auto *Use = dyn_cast<MemoryUseOrDef>(MA)) | ||||
2455 | return Use->getDefiningAccess(); | ||||
2456 | return MA; | ||||
2457 | } | ||||
2458 | |||||
2459 | MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess( | ||||
2460 | MemoryAccess *StartingAccess, const MemoryLocation &) { | ||||
2461 | if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess)) | ||||
2462 | return Use->getDefiningAccess(); | ||||
2463 | return StartingAccess; | ||||
2464 | } | ||||
2465 | |||||
2466 | void MemoryPhi::deleteMe(DerivedUser *Self) { | ||||
2467 | delete static_cast<MemoryPhi *>(Self); | ||||
2468 | } | ||||
2469 | |||||
2470 | void MemoryDef::deleteMe(DerivedUser *Self) { | ||||
2471 | delete static_cast<MemoryDef *>(Self); | ||||
2472 | } | ||||
2473 | |||||
2474 | void MemoryUse::deleteMe(DerivedUser *Self) { | ||||
2475 | delete static_cast<MemoryUse *>(Self); | ||||
2476 | } |
1 | //===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | /// \file |
10 | /// This file exposes an interface to building/using memory SSA to |
11 | /// walk memory instructions using a use/def graph. |
12 | /// |
13 | /// Memory SSA class builds an SSA form that links together memory access |
14 | /// instructions such as loads, stores, atomics, and calls. Additionally, it |
15 | /// does a trivial form of "heap versioning": every time the memory state changes
16 | /// in the program, we generate a new heap version. It generates
17 | /// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
18 | /// |
19 | /// As a trivial example, |
20 | /// define i32 @main() #0 { |
21 | /// entry: |
22 | /// %call = call noalias i8* @_Znwm(i64 4) #2 |
23 | /// %0 = bitcast i8* %call to i32* |
24 | /// %call1 = call noalias i8* @_Znwm(i64 4) #2 |
25 | /// %1 = bitcast i8* %call1 to i32* |
26 | /// store i32 5, i32* %0, align 4 |
27 | /// store i32 7, i32* %1, align 4 |
28 | /// %2 = load i32* %0, align 4 |
29 | /// %3 = load i32* %1, align 4 |
30 | /// %add = add nsw i32 %2, %3 |
31 | /// ret i32 %add |
32 | /// } |
33 | /// |
34 | /// Will become |
35 | /// define i32 @main() #0 { |
36 | /// entry: |
37 | /// ; 1 = MemoryDef(0) |
38 | /// %call = call noalias i8* @_Znwm(i64 4) #3 |
39 | /// %2 = bitcast i8* %call to i32* |
40 | /// ; 2 = MemoryDef(1) |
41 | /// %call1 = call noalias i8* @_Znwm(i64 4) #3 |
42 | /// %4 = bitcast i8* %call1 to i32* |
43 | /// ; 3 = MemoryDef(2) |
44 | /// store i32 5, i32* %2, align 4 |
45 | /// ; 4 = MemoryDef(3) |
46 | /// store i32 7, i32* %4, align 4 |
47 | /// ; MemoryUse(3) |
48 | /// %7 = load i32* %2, align 4 |
49 | /// ; MemoryUse(4) |
50 | /// %8 = load i32* %4, align 4 |
51 | /// %add = add nsw i32 %7, %8 |
52 | /// ret i32 %add |
53 | /// } |
54 | /// |
55 | /// Given this form, all the stores that could ever affect the load at %8 can be
56 | /// gotten by using the MemoryUse associated with it, and walking from use to |
57 | /// def until you hit the top of the function. |
58 | /// |
59 | /// Each def also has a list of users associated with it, so you can walk from |
60 | /// both def to users, and users to defs. Note that we disambiguate MemoryUses, |
61 | /// but not the RHS of MemoryDefs. You can see this above at %7, which would |
62 | /// otherwise be a MemoryUse(4). Being disambiguated means that for a given |
63 | /// store, all the MemoryUses on its use lists are may-aliases of that store |
64 | /// (but the MemoryDefs on its use list may not be). |
65 | /// |
66 | /// MemoryDefs are not disambiguated because it would require multiple reaching |
67 | /// definitions, which would require multiple phis, and multiple memoryaccesses |
68 | /// per instruction. |
69 | // |
70 | //===----------------------------------------------------------------------===// |
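
// --- Illustrative sketch (not part of MemorySSA.h) ---------------------------
// The use-to-def walk described above, written out against this header's API.
// Kept in a comment because real code cannot live before the include guard;
// assumes a MemorySSA `MSSA` built for the load `LI`'s function:
//
//   MemoryAccess *MA = MSSA.getMemoryAccess(LI); // the load's MemoryUse
//   while (MA && !MSSA.isLiveOnEntryDef(MA)) {
//     if (auto *UOD = dyn_cast<MemoryUseOrDef>(MA))
//       MA = UOD->getDefiningAccess(); // hop from the use/def to its def
//     else
//       break; // MemoryPhi: several incoming versions, one per predecessor
//   }
// -----------------------------------------------------------------------------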
71 | |
72 | #ifndef LLVM_ANALYSIS_MEMORYSSA_H |
73 | #define LLVM_ANALYSIS_MEMORYSSA_H |
74 | |
75 | #include "llvm/ADT/DenseMap.h" |
76 | #include "llvm/ADT/GraphTraits.h" |
77 | #include "llvm/ADT/SmallPtrSet.h" |
78 | #include "llvm/ADT/SmallVector.h" |
79 | #include "llvm/ADT/ilist.h" |
80 | #include "llvm/ADT/ilist_node.h" |
81 | #include "llvm/ADT/iterator.h" |
82 | #include "llvm/ADT/iterator_range.h" |
83 | #include "llvm/ADT/simple_ilist.h" |
84 | #include "llvm/Analysis/AliasAnalysis.h" |
85 | #include "llvm/Analysis/MemoryLocation.h" |
86 | #include "llvm/Analysis/PHITransAddr.h" |
87 | #include "llvm/IR/BasicBlock.h" |
88 | #include "llvm/IR/DerivedUser.h" |
89 | #include "llvm/IR/Dominators.h" |
90 | #include "llvm/IR/Module.h" |
91 | #include "llvm/IR/Type.h" |
92 | #include "llvm/IR/Use.h" |
93 | #include "llvm/IR/User.h" |
94 | #include "llvm/IR/Value.h" |
95 | #include "llvm/IR/ValueHandle.h" |
96 | #include "llvm/Pass.h" |
97 | #include "llvm/Support/Casting.h" |
98 | #include <algorithm> |
99 | #include <cassert> |
100 | #include <cstddef> |
101 | #include <iterator> |
102 | #include <memory> |
103 | #include <utility> |
104 | |
105 | namespace llvm { |
106 | |
107 | /// Enables memory ssa as a dependency for loop passes. |
108 | extern cl::opt<bool> EnableMSSALoopDependency; |
109 | |
110 | class Function; |
111 | class Instruction; |
112 | class MemoryAccess; |
113 | class MemorySSAWalker; |
114 | class LLVMContext; |
115 | class raw_ostream; |
116 | |
117 | namespace MSSAHelpers { |
118 | |
119 | struct AllAccessTag {}; |
120 | struct DefsOnlyTag {}; |
121 | |
122 | } // end namespace MSSAHelpers |
123 | |
124 | enum : unsigned { |
125 | // Used to signify what the default invalid ID is for MemoryAccess's |
126 | // getID() |
127 | INVALID_MEMORYACCESS_ID = -1U |
128 | }; |
129 | |
130 | template <class T> class memoryaccess_def_iterator_base; |
131 | using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>; |
132 | using const_memoryaccess_def_iterator = |
133 | memoryaccess_def_iterator_base<const MemoryAccess>; |
134 | |
135 | // The base for all memory accesses. All memory accesses in a block are |
136 | // linked together using an intrusive list. |
137 | class MemoryAccess |
138 | : public DerivedUser, |
139 | public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>, |
140 | public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> { |
141 | public: |
142 | using AllAccessType = |
143 | ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>; |
144 | using DefsOnlyType = |
145 | ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>; |
146 | |
147 | MemoryAccess(const MemoryAccess &) = delete; |
148 | MemoryAccess &operator=(const MemoryAccess &) = delete; |
149 | |
150 | void *operator new(size_t) = delete; |
151 | |
152 | // Methods for support type inquiry through isa, cast, and |
153 | // dyn_cast |
154 | static bool classof(const Value *V) { |
155 | unsigned ID = V->getValueID(); |
156 | return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal; |
157 | } |
158 | |
159 | BasicBlock *getBlock() const { return Block; } |
160 | |
161 | void print(raw_ostream &OS) const; |
162 | void dump() const; |
163 | |
164 | /// The user iterators for a memory access |
165 | using iterator = user_iterator; |
166 | using const_iterator = const_user_iterator; |
167 | |
168 | /// This iterator walks over all of the defs in a given |
169 | /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For |
170 | /// MemoryUse/MemoryDef, this walks the defining access. |
171 | memoryaccess_def_iterator defs_begin(); |
172 | const_memoryaccess_def_iterator defs_begin() const; |
173 | memoryaccess_def_iterator defs_end(); |
174 | const_memoryaccess_def_iterator defs_end() const; |
175 | |
176 | /// Get the iterators for the all access list and the defs only list |
177 | /// We default to the all access list. |
178 | AllAccessType::self_iterator getIterator() { |
179 | return this->AllAccessType::getIterator(); |
180 | } |
181 | AllAccessType::const_self_iterator getIterator() const { |
182 | return this->AllAccessType::getIterator(); |
183 | } |
184 | AllAccessType::reverse_self_iterator getReverseIterator() { |
185 | return this->AllAccessType::getReverseIterator(); |
186 | } |
187 | AllAccessType::const_reverse_self_iterator getReverseIterator() const { |
188 | return this->AllAccessType::getReverseIterator(); |
189 | } |
190 | DefsOnlyType::self_iterator getDefsIterator() { |
191 | return this->DefsOnlyType::getIterator(); |
192 | } |
193 | DefsOnlyType::const_self_iterator getDefsIterator() const { |
194 | return this->DefsOnlyType::getIterator(); |
195 | } |
196 | DefsOnlyType::reverse_self_iterator getReverseDefsIterator() { |
197 | return this->DefsOnlyType::getReverseIterator(); |
198 | } |
199 | DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const { |
200 | return this->DefsOnlyType::getReverseIterator(); |
201 | } |
202 | |
203 | protected: |
204 | friend class MemoryDef; |
205 | friend class MemoryPhi; |
206 | friend class MemorySSA; |
207 | friend class MemoryUse; |
208 | friend class MemoryUseOrDef; |
209 | |
210 | /// Used by MemorySSA to change the block of a MemoryAccess when it is |
211 | /// moved. |
212 | void setBlock(BasicBlock *BB) { Block = BB; } |
213 | |
214 | /// Used for debugging and tracking things about MemoryAccesses. |
215 | /// Guaranteed unique among MemoryAccesses, no guarantees otherwise. |
216 | inline unsigned getID() const; |
217 | |
218 | MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue, |
219 | BasicBlock *BB, unsigned NumOperands) |
220 | : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue), |
221 | Block(BB) {} |
222 | |
223 | // Use deleteValue() to delete a generic MemoryAccess. |
224 | ~MemoryAccess() = default; |
225 | |
226 | private: |
227 | BasicBlock *Block; |
228 | }; |
229 | |
230 | template <> |
231 | struct ilist_alloc_traits<MemoryAccess> { |
232 | static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); } |
233 | }; |
234 | |
235 | inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) { |
236 | MA.print(OS); |
237 | return OS; |
238 | } |
239 | |
240 | /// Class that has the common methods + fields of memory uses/defs. It's |
241 | /// a little awkward to have, but there are many cases where we want either a |
242 | /// use or def, and there are many cases where uses are needed (defs aren't |
243 | /// acceptable), and vice-versa. |
244 | /// |
245 | /// This class should never be instantiated directly; make a MemoryUse or |
246 | /// MemoryDef instead. |
247 | class MemoryUseOrDef : public MemoryAccess { |
248 | public: |
249 | void *operator new(size_t) = delete; |
250 | |
251 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
252 | |
253 | /// Get the instruction that this MemoryUse represents. |
254 | Instruction *getMemoryInst() const { return MemoryInstruction; } |
255 | |
256 | /// Get the access that produces the memory state used by this Use. |
257 | MemoryAccess *getDefiningAccess() const { return getOperand(0); } |
258 | |
259 | static bool classof(const Value *MA) { |
260 | return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal; |
261 | } |
262 | |
263 | // Sadly, these have to be public because they are needed in some of the |
264 | // iterators. |
265 | inline bool isOptimized() const; |
266 | inline MemoryAccess *getOptimized() const; |
267 | inline void setOptimized(MemoryAccess *); |
268 | |
269 | // Retrieve AliasResult type of the optimized access. Ideally this would be |
270 | // returned by the caching walker and may go away in the future. |
271 | Optional<AliasResult> getOptimizedAccessType() const { |
272 | return OptimizedAccessAlias; |
273 | } |
274 | |
275 | /// Reset the ID of what this MemoryUse was optimized to, causing it to |
276 | /// be rewalked by the walker if necessary. |
277 | /// This really should only be called by tests. |
278 | inline void resetOptimized(); |
279 | |
280 | protected: |
281 | friend class MemorySSA; |
282 | friend class MemorySSAUpdater; |
283 | |
284 | MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty, |
285 | DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB, |
286 | unsigned NumOperands) |
287 | : MemoryAccess(C, Vty, DeleteValue, BB, NumOperands), |
288 | MemoryInstruction(MI), OptimizedAccessAlias(MayAlias) { |
289 | setDefiningAccess(DMA); |
290 | } |
291 | |
292 | // Use deleteValue() to delete a generic MemoryUseOrDef. |
293 | ~MemoryUseOrDef() = default; |
294 | |
295 | void setOptimizedAccessType(Optional<AliasResult> AR) { |
296 | OptimizedAccessAlias = AR; |
297 | } |
298 | |
299 | void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false, |
300 | Optional<AliasResult> AR = MayAlias) { |
301 | if (!Optimized) { |
302 | setOperand(0, DMA); |
303 | return; |
304 | } |
305 | setOptimized(DMA); |
306 | setOptimizedAccessType(AR); |
307 | } |
308 | |
309 | private: |
310 | Instruction *MemoryInstruction; |
311 | Optional<AliasResult> OptimizedAccessAlias; |
312 | }; |
313 | |
314 | /// Represents read-only accesses to memory |
315 | /// |
316 | /// In particular, the set of Instructions that will be represented by |
317 | /// MemoryUse's is exactly the set of Instructions for which |
318 | /// AliasAnalysis::getModRefInfo returns "Ref". |
319 | class MemoryUse final : public MemoryUseOrDef { |
320 | public: |
321 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
322 | |
323 | MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB) |
324 | : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB, |
325 | /*NumOperands=*/1) {} |
326 | |
327 | // allocate space for exactly one operand |
328 | void *operator new(size_t s) { return User::operator new(s, 1); } |
329 | |
330 | static bool classof(const Value *MA) { |
331 | return MA->getValueID() == MemoryUseVal; |
332 | } |
333 | |
334 | void print(raw_ostream &OS) const; |
335 | |
336 | void setOptimized(MemoryAccess *DMA) { |
337 | OptimizedID = DMA->getID(); |
338 | setOperand(0, DMA); |
339 | } |
340 | |
341 | bool isOptimized() const { |
342 | return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID(); |
343 | } |
344 | |
345 | MemoryAccess *getOptimized() const { |
346 | return getDefiningAccess(); |
347 | } |
348 | |
349 | void resetOptimized() { |
350 | OptimizedID = INVALID_MEMORYACCESS_ID; |
351 | } |
352 | |
353 | protected: |
354 | friend class MemorySSA; |
355 | |
356 | private: |
357 | static void deleteMe(DerivedUser *Self); |
358 | |
359 | unsigned OptimizedID = INVALID_MEMORYACCESS_ID; |
360 | }; |
361 | |
362 | template <> |
363 | struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {}; |
364 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
365 | |
366 | /// Represents a read-write access to memory, whether it is a must-alias, |
367 | /// or a may-alias. |
368 | /// |
369 | /// In particular, the set of Instructions that will be represented by |
370 | /// MemoryDef's is exactly the set of Instructions for which |
371 | /// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef". |
372 | /// Note that, in order to provide def-def chains, all defs also have a use |
373 | /// associated with them. This use points to the nearest reaching |
374 | /// MemoryDef/MemoryPhi. |
375 | class MemoryDef final : public MemoryUseOrDef { |
376 | public: |
377 | friend class MemorySSA; |
378 | |
379 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
380 | |
381 | MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB, |
382 | unsigned Ver) |
383 | : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB, |
384 | /*NumOperands=*/2), |
385 | ID(Ver) {} |
386 | |
387 | // allocate space for exactly two operands |
388 | void *operator new(size_t s) { return User::operator new(s, 2); } |
389 | |
390 | static bool classof(const Value *MA) { |
391 | return MA->getValueID() == MemoryDefVal; |
392 | } |
393 | |
394 | void setOptimized(MemoryAccess *MA) { |
395 | setOperand(1, MA); |
396 | OptimizedID = MA->getID(); |
397 | } |
398 | |
399 | MemoryAccess *getOptimized() const { |
400 | return cast_or_null<MemoryAccess>(getOperand(1)); |
401 | } |
402 | |
403 | bool isOptimized() const { |
404 | return getOptimized() && OptimizedID == getOptimized()->getID(); |
405 | } |
406 | |
407 | void resetOptimized() { |
408 | OptimizedID = INVALID_MEMORYACCESS_ID; |
409 | setOperand(1, nullptr); |
410 | } |
411 | |
412 | void print(raw_ostream &OS) const; |
413 | |
414 | unsigned getID() const { return ID; } |
415 | |
416 | private: |
417 | static void deleteMe(DerivedUser *Self); |
418 | |
419 | const unsigned ID; |
420 | unsigned OptimizedID = INVALID_MEMORYACCESS_ID; |
421 | }; |
422 | |
423 | template <> |
424 | struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {}; |
425 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
426 | |
427 | template <> |
428 | struct OperandTraits<MemoryUseOrDef> { |
429 | static Use *op_begin(MemoryUseOrDef *MUD) { |
430 | if (auto *MU = dyn_cast<MemoryUse>(MUD)) |
431 | return OperandTraits<MemoryUse>::op_begin(MU); |
432 | return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD)); |
433 | } |
434 | |
435 | static Use *op_end(MemoryUseOrDef *MUD) { |
436 | if (auto *MU = dyn_cast<MemoryUse>(MUD)) |
437 | return OperandTraits<MemoryUse>::op_end(MU); |
438 | return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD)); |
439 | } |
440 | |
441 | static unsigned operands(const MemoryUseOrDef *MUD) { |
442 | if (const auto *MU = dyn_cast<MemoryUse>(MUD)) |
443 | return OperandTraits<MemoryUse>::operands(MU); |
444 | return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD)); |
445 | } |
446 | }; |
447 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
448 | |
449 | /// Represents phi nodes for memory accesses. |
450 | /// |
451 | /// These have the same semantics as regular phi nodes, with the exception that
452 | /// only one phi will ever exist in a given basic block. |
453 | /// Guaranteeing one phi per block means guaranteeing there is only ever one |
454 | /// valid reaching MemoryDef/MemoryPHI along each path to the phi node. |
455 | /// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or |
456 | /// a MemoryPhi's operands. |
457 | /// That is, given |
458 | /// if (a) { |
459 | /// store %a |
460 | /// store %b |
461 | /// } |
462 | /// it *must* be transformed into |
463 | /// if (a) { |
464 | /// 1 = MemoryDef(liveOnEntry) |
465 | /// store %a |
466 | /// 2 = MemoryDef(1) |
467 | /// store %b |
468 | /// } |
469 | /// and *not* |
470 | /// if (a) { |
471 | /// 1 = MemoryDef(liveOnEntry) |
472 | /// store %a |
473 | /// 2 = MemoryDef(liveOnEntry) |
474 | /// store %b |
475 | /// } |
476 | /// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the |
477 | /// end of the branch, and if there are not two phi nodes, one will be |
478 | /// disconnected completely from the SSA graph below that point. |
479 | /// Because MemoryUse's do not generate new definitions, they do not have this |
480 | /// issue. |
481 | class MemoryPhi final : public MemoryAccess { |
482 | // allocate space for exactly zero operands |
483 | void *operator new(size_t s) { return User::operator new(s); } |
484 | |
485 | public: |
486 | /// Provide fast operand accessors |
487 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
488 | |
489 | MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0) |
490 | : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver), |
491 | ReservedSpace(NumPreds) { |
492 | allocHungoffUses(ReservedSpace); |
493 | } |
494 | |
495 | // Block iterator interface. This provides access to the list of incoming |
496 | // basic blocks, which parallels the list of incoming values. |
497 | using block_iterator = BasicBlock **; |
498 | using const_block_iterator = BasicBlock *const *; |
499 | |
500 | block_iterator block_begin() { |
501 | auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace); |
502 | return reinterpret_cast<block_iterator>(Ref + 1); |
503 | } |
504 | |
505 | const_block_iterator block_begin() const { |
506 | const auto *Ref = |
507 | reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace); |
508 | return reinterpret_cast<const_block_iterator>(Ref + 1); |
509 | } |
510 | |
511 | block_iterator block_end() { return block_begin() + getNumOperands(); } |
512 | |
513 | const_block_iterator block_end() const { |
514 | return block_begin() + getNumOperands(); |
515 | } |
516 | |
517 | iterator_range<block_iterator> blocks() { |
518 | return make_range(block_begin(), block_end()); |
519 | } |
520 | |
521 | iterator_range<const_block_iterator> blocks() const { |
522 | return make_range(block_begin(), block_end()); |
523 | } |
524 | |
525 | op_range incoming_values() { return operands(); } |
526 | |
527 | const_op_range incoming_values() const { return operands(); } |
528 | |
529 | /// Return the number of incoming edges |
530 | unsigned getNumIncomingValues() const { return getNumOperands(); } |
531 | |
532 | /// Return incoming value number \p I.
533 | MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); } |
534 | void setIncomingValue(unsigned I, MemoryAccess *V) { |
535 | assert(V && "PHI node got a null value!");
536 | setOperand(I, V); |
537 | } |
538 | |
539 | static unsigned getOperandNumForIncomingValue(unsigned I) { return I; } |
540 | static unsigned getIncomingValueNumForOperand(unsigned I) { return I; } |
541 | |
542 | /// Return incoming basic block number \p I.
543 | BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; } |
544 | |
545 | /// Return incoming basic block corresponding |
546 | /// to an operand of the PHI. |
547 | BasicBlock *getIncomingBlock(const Use &U) const { |
548 | assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
549 | return getIncomingBlock(unsigned(&U - op_begin())); |
550 | } |
551 | |
552 | /// Return incoming basic block corresponding |
553 | /// to value use iterator. |
554 | BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const { |
555 | return getIncomingBlock(I.getUse()); |
556 | } |
557 | |
558 | void setIncomingBlock(unsigned I, BasicBlock *BB) { |
559 | assert(BB && "PHI node got a null basic block!");
560 | block_begin()[I] = BB; |
561 | } |
562 | |
563 | /// Add an incoming value to the end of the PHI list |
564 | void addIncoming(MemoryAccess *V, BasicBlock *BB) { |
565 | if (getNumOperands() == ReservedSpace) |
566 | growOperands(); // Get more space! |
567 | // Initialize some new operands. |
568 | setNumHungOffUseOperands(getNumOperands() + 1); |
569 | setIncomingValue(getNumOperands() - 1, V); |
570 | setIncomingBlock(getNumOperands() - 1, BB); |
571 | } |
572 | |
573 | /// Return the first index of the specified basic |
574 | /// block in the value list for this PHI. Returns -1 if no instance. |
575 | int getBasicBlockIndex(const BasicBlock *BB) const { |
576 | for (unsigned I = 0, E = getNumOperands(); I != E; ++I) |
577 | if (block_begin()[I] == BB) |
578 | return I; |
579 | return -1; |
580 | } |
581 | |
582 | MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const { |
583 | int Idx = getBasicBlockIndex(BB); |
584 | assert(Idx >= 0 && "Invalid basic block argument!");
585 | return getIncomingValue(Idx); |
586 | } |
587 | |
588 | // After deleting incoming position I, the order of the incoming entries may change.
589 | void unorderedDeleteIncoming(unsigned I) { |
590 | unsigned E = getNumOperands(); |
591 | assert(I < E && "Cannot remove out of bounds Phi entry.");
592 | // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi |
593 | // itself should be deleted. |
594 | assert(E >= 2 && "Can only remove incoming values in MemoryPhis with "
595 |                  "at least 2 values.");
596 | setIncomingValue(I, getIncomingValue(E - 1)); |
597 | setIncomingBlock(I, block_begin()[E - 1]); |
598 | setOperand(E - 1, nullptr); |
599 | block_begin()[E - 1] = nullptr; |
600 | setNumHungOffUseOperands(getNumOperands() - 1); |
601 | } |
602 | |
603 | // After deleting entries that satisfy Pred, remaining entries may have |
604 | // changed order. |
605 | template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) { |
606 | for (unsigned I = 0, E = getNumOperands(); I != E; ++I) |
607 | if (Pred(getIncomingValue(I), getIncomingBlock(I))) { |
608 | unorderedDeleteIncoming(I); |
609 | E = getNumOperands(); |
610 | --I; |
611 | } |
612 | assert(getNumOperands() >= 1 &&
613 |        "Cannot remove all incoming blocks in a MemoryPhi.");
614 | } |
615 | |
616 | // After deleting incoming block BB, the order of the incoming blocks may change.
617 | void unorderedDeleteIncomingBlock(const BasicBlock *BB) { |
618 | unorderedDeleteIncomingIf( |
619 | [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; }); |
620 | } |
621 | |
622 | // After deleting incoming memory access MA, the order of the incoming
623 | // accesses may change.
624 | void unorderedDeleteIncomingValue(const MemoryAccess *MA) { |
625 | unorderedDeleteIncomingIf( |
626 | [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; }); |
627 | } |
628 | |
629 | static bool classof(const Value *V) { |
630 | return V->getValueID() == MemoryPhiVal; |
631 | } |
632 | |
633 | void print(raw_ostream &OS) const; |
634 | |
635 | unsigned getID() const { return ID; } |
636 | |
637 | protected: |
638 | friend class MemorySSA; |
639 | |
640 | /// This is more complicated than the generic
641 | /// User::allocHungoffUses, because we have to allocate Uses for the incoming |
642 | /// values and pointers to the incoming blocks, all in one allocation. |
643 | void allocHungoffUses(unsigned N) { |
644 | User::allocHungoffUses(N, /* IsPhi */ true); |
645 | } |
646 | |
647 | private: |
648 | // For debugging only |
649 | const unsigned ID; |
650 | unsigned ReservedSpace; |
651 | |
652 | /// This grows the operand list in response to a push_back style of |
653 | /// operation, increasing the number of operands by 1.5 times.
654 | void growOperands() { |
655 | unsigned E = getNumOperands(); |
656 | // 2 op PHI nodes are VERY common, so reserve at least enough for that. |
657 | ReservedSpace = std::max(E + E / 2, 2u); |
658 | growHungoffUses(ReservedSpace, /* IsPhi */ true); |
659 | } |
660 | |
661 | static void deleteMe(DerivedUser *Self); |
662 | }; |
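
// Example (editor's sketch, not part of the original header): visiting each
// (incoming block, incoming value) pair of a MemoryPhi. `dumpPhiEdges` is a
// hypothetical helper; `Phi` would typically come from
// MemorySSA::getMemoryAccess(BB).
inline void dumpPhiEdges(const MemoryPhi *Phi, raw_ostream &OS) {
  for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
    OS << Phi->getIncomingBlock(I)->getName() << " -> ";
    Phi->getIncomingValue(I)->print(OS);
    OS << "\n";
  }
}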
663 | |
664 | inline unsigned MemoryAccess::getID() const { |
665 | assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
666 |        "only memory defs and phis have ids");
667 | if (const auto *MD = dyn_cast<MemoryDef>(this)) |
668 | return MD->getID(); |
669 | return cast<MemoryPhi>(this)->getID(); |
670 | } |
671 | |
672 | inline bool MemoryUseOrDef::isOptimized() const { |
673 | if (const auto *MD = dyn_cast<MemoryDef>(this)) |
674 | return MD->isOptimized(); |
675 | return cast<MemoryUse>(this)->isOptimized(); |
676 | } |
677 | |
678 | inline MemoryAccess *MemoryUseOrDef::getOptimized() const { |
679 | if (const auto *MD = dyn_cast<MemoryDef>(this)) |
680 | return MD->getOptimized(); |
681 | return cast<MemoryUse>(this)->getOptimized(); |
682 | } |
683 | |
684 | inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) { |
685 | if (auto *MD = dyn_cast<MemoryDef>(this)) |
686 | MD->setOptimized(MA); |
687 | else |
688 | cast<MemoryUse>(this)->setOptimized(MA); |
689 | } |
690 | |
691 | inline void MemoryUseOrDef::resetOptimized() { |
692 | if (auto *MD = dyn_cast<MemoryDef>(this)) |
693 | MD->resetOptimized(); |
694 | else |
695 | cast<MemoryUse>(this)->resetOptimized(); |
696 | } |
697 | |
698 | template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {}; |
699 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)MemoryPhi::op_iterator MemoryPhi::op_begin() { return OperandTraits <MemoryPhi>::op_begin(this); } MemoryPhi::const_op_iterator MemoryPhi::op_begin() const { return OperandTraits<MemoryPhi >::op_begin(const_cast<MemoryPhi*>(this)); } MemoryPhi ::op_iterator MemoryPhi::op_end() { return OperandTraits<MemoryPhi >::op_end(this); } MemoryPhi::const_op_iterator MemoryPhi:: op_end() const { return OperandTraits<MemoryPhi>::op_end (const_cast<MemoryPhi*>(this)); } MemoryAccess *MemoryPhi ::getOperand(unsigned i_nocapture) const { ((i_nocapture < OperandTraits<MemoryPhi>::operands(this) && "getOperand() out of range!" ) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<MemoryPhi>::operands(this) && \"getOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/include/llvm/Analysis/MemorySSA.h" , 699, __PRETTY_FUNCTION__)); return cast_or_null<MemoryAccess >( OperandTraits<MemoryPhi>::op_begin(const_cast< MemoryPhi*>(this))[i_nocapture].get()); } void MemoryPhi:: setOperand(unsigned i_nocapture, MemoryAccess *Val_nocapture) { ((i_nocapture < OperandTraits<MemoryPhi>::operands (this) && "setOperand() out of range!") ? static_cast <void> (0) : __assert_fail ("i_nocapture < OperandTraits<MemoryPhi>::operands(this) && \"setOperand() out of range!\"" , "/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/include/llvm/Analysis/MemorySSA.h" , 699, __PRETTY_FUNCTION__)); OperandTraits<MemoryPhi>:: op_begin(this)[i_nocapture] = Val_nocapture; } unsigned MemoryPhi ::getNumOperands() const { return OperandTraits<MemoryPhi> ::operands(this); } template <int Idx_nocapture> Use & MemoryPhi::Op() { return this->OpFrom<Idx_nocapture> (this); } template <int Idx_nocapture> const Use &MemoryPhi ::Op() const { return this->OpFrom<Idx_nocapture>(this ); } |
700 | |
701 | /// Encapsulates MemorySSA, including all data associated with memory |
702 | /// accesses. |
703 | class MemorySSA { |
704 | public: |
705 | MemorySSA(Function &, AliasAnalysis *, DominatorTree *); |
706 | |
707 | // MemorySSA must remain where it's constructed; Walkers it creates store |
708 | // pointers to it. |
709 | MemorySSA(MemorySSA &&) = delete; |
710 | |
711 | ~MemorySSA(); |
712 | |
713 | MemorySSAWalker *getWalker(); |
714 | MemorySSAWalker *getSkipSelfWalker(); |
715 | |
716 | /// Given a memory Mod/Ref'ing instruction, get the MemorySSA |
717 | /// access associated with it. If passed a basic block, this gets the memory
718 | /// phi node that exists for that block, if there is one. Otherwise, this will
719 | /// get a MemoryUseOrDef.
720 | MemoryUseOrDef *getMemoryAccess(const Instruction *I) const { |
721 | return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I)); |
722 | } |
723 | |
724 | MemoryPhi *getMemoryAccess(const BasicBlock *BB) const { |
725 | return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB))); |
726 | } |
727 | |
728 | void dump() const; |
729 | void print(raw_ostream &) const; |
730 | |
731 | /// Return true if \p MA represents the live on entry value |
732 | /// |
733 | /// Loads and stores from pointer arguments and other global values may be |
734 | /// defined by memory operations that do not occur in the current function, so |
735 | /// they may be live on entry to the function. MemorySSA represents such |
736 | /// memory state by the live on entry definition, which is guaranteed to occur |
737 | /// before any other memory access in the function. |
738 | inline bool isLiveOnEntryDef(const MemoryAccess *MA) const { |
739 | return MA == LiveOnEntryDef.get(); |
740 | } |
741 | |
742 | inline MemoryAccess *getLiveOnEntryDef() const { |
743 | return LiveOnEntryDef.get(); |
744 | } |
745 | |
746 | // Sadly, iplists, by default, own and delete pointers added to the
747 | // list. It's not currently possible to have two iplists for the same type, |
748 | // where one owns the pointers, and one does not. This is because the traits |
749 | // are per-type, not per-tag. If this ever changes, we should make the |
750 | // DefsList an iplist.
751 | using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>; |
752 | using DefsList = |
753 | simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>; |
754 | |
755 | /// Return the list of MemoryAccess's for a given basic block. |
756 | /// |
757 | /// This list is not modifiable by the user. |
758 | const AccessList *getBlockAccesses(const BasicBlock *BB) const { |
759 | return getWritableBlockAccesses(BB); |
760 | } |
761 | |
762 | /// Return the list of MemoryDef's and MemoryPhi's for a given basic |
763 | /// block. |
764 | /// |
765 | /// This list is not modifiable by the user. |
766 | const DefsList *getBlockDefs(const BasicBlock *BB) const { |
767 | return getWritableBlockDefs(BB); |
768 | } |
769 | |
770 | /// Given two memory accesses in the same basic block, determine |
771 | /// whether MemoryAccess \p A dominates MemoryAccess \p B. |
772 | bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const; |
773 | |
774 | /// Given two memory accesses in potentially different blocks, |
775 | /// determine whether MemoryAccess \p A dominates MemoryAccess \p B. |
776 | bool dominates(const MemoryAccess *A, const MemoryAccess *B) const; |
777 | |
778 | /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A |
779 | /// dominates Use \p B. |
780 | bool dominates(const MemoryAccess *A, const Use &B) const; |
781 | |
782 | /// Verify that MemorySSA is self consistent (IE definitions dominate |
783 | /// all uses, uses appear in the right places). This is used by unit tests. |
784 | void verifyMemorySSA() const; |
785 | |
786 | /// Used in various insertion functions to specify whether we are talking |
787 | /// about the beginning or end of a block. |
788 | enum InsertionPlace { Beginning, End }; |
789 | |
790 | protected: |
791 | // Used by Memory SSA annotator, dumpers, and wrapper pass
792 | friend class MemorySSAAnnotatedWriter; |
793 | friend class MemorySSAPrinterLegacyPass; |
794 | friend class MemorySSAUpdater; |
795 | |
796 | void verifyPrevDefInPhis(Function &F) const; |
797 | void verifyDefUses(Function &F) const; |
798 | void verifyDomination(Function &F) const; |
799 | void verifyOrdering(Function &F) const; |
800 | void verifyDominationNumbers(const Function &F) const; |
801 | |
802 | // This is used by the use optimizer and updater. |
803 | AccessList *getWritableBlockAccesses(const BasicBlock *BB) const { |
804 | auto It = PerBlockAccesses.find(BB); |
805 | return It == PerBlockAccesses.end() ? nullptr : It->second.get(); |
806 | } |
807 | |
808 | // This is used by the use optimizer and updater. |
809 | DefsList *getWritableBlockDefs(const BasicBlock *BB) const { |
810 | auto It = PerBlockDefs.find(BB); |
811 | return It == PerBlockDefs.end() ? nullptr : It->second.get(); |
812 | } |
813 | |
814 | // These are used by the updater to perform various internal MemorySSA
815 | // machinations. They do not always leave the IR in a correct state, and
816 | // rely on the updater to fix up what they break, so they are not public.
817 | |
818 | void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where); |
819 | void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point); |
820 | |
821 | // Rename the dominator tree branch rooted at BB. |
822 | void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal, |
823 | SmallPtrSetImpl<BasicBlock *> &Visited) { |
824 | renamePass(DT->getNode(BB), IncomingVal, Visited, true, true); |
825 | } |
826 | |
827 | void removeFromLookups(MemoryAccess *); |
828 | void removeFromLists(MemoryAccess *, bool ShouldDelete = true); |
829 | void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *, |
830 | InsertionPlace); |
831 | void insertIntoListsBefore(MemoryAccess *, const BasicBlock *, |
832 | AccessList::iterator); |
833 | MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *, |
834 | const MemoryUseOrDef *Template = nullptr, |
835 | bool CreationMustSucceed = true); |
836 | |
837 | private: |
838 | template <class AliasAnalysisType> class ClobberWalkerBase; |
839 | template <class AliasAnalysisType> class CachingWalker; |
840 | template <class AliasAnalysisType> class SkipSelfWalker; |
841 | class OptimizeUses; |
842 | |
843 | CachingWalker<AliasAnalysis> *getWalkerImpl(); |
844 | void buildMemorySSA(BatchAAResults &BAA); |
845 | void optimizeUses(); |
846 | |
847 | void prepareForMoveTo(MemoryAccess *, BasicBlock *); |
848 | void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const; |
849 | |
850 | using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>; |
851 | using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>; |
852 | |
853 | void |
854 | determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks); |
855 | void markUnreachableAsLiveOnEntry(BasicBlock *BB); |
856 | bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const; |
857 | MemoryPhi *createMemoryPhi(BasicBlock *BB); |
858 | template <typename AliasAnalysisType> |
859 | MemoryUseOrDef *createNewAccess(Instruction *, AliasAnalysisType *, |
860 | const MemoryUseOrDef *Template = nullptr); |
861 | MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace); |
862 | void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &); |
863 | MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool); |
864 | void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool); |
865 | void renamePass(DomTreeNode *, MemoryAccess *IncomingVal, |
866 | SmallPtrSetImpl<BasicBlock *> &Visited, |
867 | bool SkipVisited = false, bool RenameAllUses = false); |
868 | AccessList *getOrCreateAccessList(const BasicBlock *); |
869 | DefsList *getOrCreateDefsList(const BasicBlock *); |
870 | void renumberBlock(const BasicBlock *) const; |
871 | AliasAnalysis *AA; |
872 | DominatorTree *DT; |
873 | Function &F; |
874 | |
875 | // Memory SSA mappings |
876 | DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess; |
877 | |
878 | // These two mappings contain the main block to access/def mappings for |
879 | // MemorySSA. The list contained in PerBlockAccesses really owns all the |
880 | // MemoryAccesses. |
881 | // Both maps maintain the invariant that if a block is found in them, the |
882 | // corresponding list is not empty, and if a block is not found in them, the |
883 | // corresponding list is empty. |
884 | AccessMap PerBlockAccesses; |
885 | DefsMap PerBlockDefs; |
886 | std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef; |
887 | |
888 | // Domination mappings |
889 | // Note that the numbering is local to a block, even though the map is |
890 | // global. |
891 | mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid; |
892 | mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering; |
893 | |
894 | // Memory SSA building info |
895 | std::unique_ptr<ClobberWalkerBase<AliasAnalysis>> WalkerBase; |
896 | std::unique_ptr<CachingWalker<AliasAnalysis>> Walker; |
897 | std::unique_ptr<SkipSelfWalker<AliasAnalysis>> SkipWalker; |
898 | unsigned NextID; |
899 | }; |
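
// Example (editor's sketch, not part of the original header): the basic query
// pattern against MemorySSA. `readsLiveOnEntry` is a hypothetical helper; the
// MemorySSA reference would come from one of the passes declared below.
inline bool readsLiveOnEntry(MemorySSA &MSSA, const Instruction *I) {
  MemoryUseOrDef *MA = MSSA.getMemoryAccess(I);
  if (!MA) // instructions that neither read nor write memory have no access
    return false;
  // liveOnEntry means the memory state was defined outside this function.
  return MSSA.isLiveOnEntryDef(MA->getDefiningAccess());
}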
900 | |
901 | // Internal MemorySSA utils, for use by MemorySSA classes and walkers |
902 | class MemorySSAUtil { |
903 | protected: |
904 | friend class GVNHoist; |
905 | friend class MemorySSAWalker; |
906 | |
907 | // This function should not be used by new passes. |
908 | static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU, |
909 | AliasAnalysis &AA); |
910 | }; |
911 | |
912 | // This pass does eager building and then printing of MemorySSA. It is used by |
913 | // the tests to build, dump, and verify Memory SSA.
914 | class MemorySSAPrinterLegacyPass : public FunctionPass { |
915 | public: |
916 | MemorySSAPrinterLegacyPass(); |
917 | |
918 | bool runOnFunction(Function &) override; |
919 | void getAnalysisUsage(AnalysisUsage &AU) const override; |
920 | |
921 | static char ID; |
922 | }; |
923 | |
924 | /// An analysis that produces \c MemorySSA for a function. |
925 | /// |
926 | class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> { |
927 | friend AnalysisInfoMixin<MemorySSAAnalysis>; |
928 | |
929 | static AnalysisKey Key; |
930 | |
931 | public: |
932 | // Wrap MemorySSA result to ensure address stability of internal MemorySSA |
933 | // pointers after construction. Use a wrapper class instead of plain |
934 | // unique_ptr<MemorySSA> to avoid build breakage on MSVC. |
935 | struct Result { |
936 | Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {} |
937 | |
938 | MemorySSA &getMSSA() { return *MSSA.get(); } |
939 | |
940 | std::unique_ptr<MemorySSA> MSSA; |
941 | |
942 | bool invalidate(Function &F, const PreservedAnalyses &PA, |
943 | FunctionAnalysisManager::Invalidator &Inv); |
944 | }; |
945 | |
946 | Result run(Function &F, FunctionAnalysisManager &AM); |
947 | }; |
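
// Example (editor's sketch, not part of the original header): obtaining
// MemorySSA under the new pass manager. `ExamplePass` is hypothetical.
struct ExamplePass : PassInfoMixin<ExamplePass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
    MSSA.verifyMemorySSA(); // self-consistency check, mostly for tests
    return PreservedAnalyses::all();
  }
};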
948 | |
949 | /// Printer pass for \c MemorySSA. |
950 | class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> { |
951 | raw_ostream &OS; |
952 | |
953 | public: |
954 | explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {} |
955 | |
956 | PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); |
957 | }; |
958 | |
959 | /// Verifier pass for \c MemorySSA. |
960 | struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> { |
961 | PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); |
962 | }; |
963 | |
964 | /// Legacy analysis pass which computes \c MemorySSA. |
965 | class MemorySSAWrapperPass : public FunctionPass { |
966 | public: |
967 | MemorySSAWrapperPass(); |
968 | |
969 | static char ID; |
970 | |
971 | bool runOnFunction(Function &) override; |
972 | void releaseMemory() override; |
973 | MemorySSA &getMSSA() { return *MSSA; } |
974 | const MemorySSA &getMSSA() const { return *MSSA; } |
975 | |
976 | void getAnalysisUsage(AnalysisUsage &AU) const override; |
977 | |
978 | void verifyAnalysis() const override; |
979 | void print(raw_ostream &OS, const Module *M = nullptr) const override; |
980 | |
981 | private: |
982 | std::unique_ptr<MemorySSA> MSSA; |
983 | }; |
984 | |
985 | /// This is the generic walker interface for walkers of MemorySSA. |
986 | /// Walkers are used to further disambiguate the def-use chains
987 | /// MemorySSA gives you, or otherwise produce better info than MemorySSA gives |
988 | /// you. |
989 | /// In particular, while the def-use chains provide basic information, and are |
990 | /// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a |
991 | /// MemoryUse as AliasAnalysis considers it, a user may want better or other
992 | /// information. In particular, they may want to use SCEV info to further |
993 | /// disambiguate memory accesses, or they may want the nearest dominating |
994 | /// may-aliasing MemoryDef for a call or a store. This API enables a |
995 | /// standardized interface to getting and using that info. |
996 | class MemorySSAWalker { |
997 | public: |
998 | MemorySSAWalker(MemorySSA *); |
999 | virtual ~MemorySSAWalker() = default; |
1000 | |
1001 | using MemoryAccessSet = SmallVector<MemoryAccess *, 8>; |
1002 | |
1003 | /// Given a memory Mod/Ref/ModRef'ing instruction, calling this |
1004 | /// will give you the nearest dominating MemoryAccess that Mod's the location |
1005 | /// the instruction accesses (by skipping any def which AA can prove does not |
1006 | /// alias the location(s) accessed by the instruction given). |
1007 | /// |
1008 | /// Note that this will return a single access, and it must dominate the |
1009 | /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction, |
1010 | /// this will return the MemoryPhi, not the operand. This means that |
1011 | /// given: |
1012 | /// if (a) { |
1013 | /// 1 = MemoryDef(liveOnEntry) |
1014 | /// store %a |
1015 | /// } else { |
1016 | /// 2 = MemoryDef(liveOnEntry) |
1017 | /// store %b |
1018 | /// } |
1019 | /// 3 = MemoryPhi(2, 1) |
1020 | /// MemoryUse(3) |
1021 | /// load %a |
1022 | /// |
1023 | /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef |
1024 | /// in the if (a) branch. |
1025 | MemoryAccess *getClobberingMemoryAccess(const Instruction *I) { |
1026 | MemoryAccess *MA = MSSA->getMemoryAccess(I); |
1027 | assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
1028 | return getClobberingMemoryAccess(MA); |
1029 | } |
1030 | |
1031 | /// Does the same thing as getClobberingMemoryAccess(const Instruction *I), |
1032 | /// but takes a MemoryAccess instead of an Instruction. |
1033 | virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0; |
1034 | |
1035 | /// Given a potentially clobbering memory access and a new location, |
1036 | /// calling this will give you the nearest dominating clobbering MemoryAccess |
1037 | /// (by skipping non-aliasing def links). |
1038 | /// |
1039 | /// This version of the function is mainly used to disambiguate phi translated |
1040 | /// pointers, where the value of a pointer may have changed from the initial |
1041 | /// memory access. Note that this expects to be handed either a MemoryUse, |
1042 | /// or an already potentially clobbering access. Unlike the above API, if |
1043 | /// given a MemoryDef that clobbers the pointer as the starting access, it |
1044 | /// will return that MemoryDef, whereas the above would return the clobber |
1045 | /// starting from the use side of the memory def. |
1046 | virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, |
1047 | const MemoryLocation &) = 0; |
1048 | |
1049 | /// Given a memory access, invalidate anything this walker knows about |
1050 | /// that access. |
1051 | /// This API is used by walkers that store information to perform basic cache |
1052 | /// invalidation. This will be called by MemorySSA at appropriate times for |
1053 | /// the walker it uses or returns. |
1054 | virtual void invalidateInfo(MemoryAccess *) {} |
1055 | |
1056 | protected: |
1057 | friend class MemorySSA; // For updating MSSA pointer in MemorySSA move |
1058 | // constructor. |
1059 | MemorySSA *MSSA; |
1060 | }; |
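
// Example (editor's sketch, not part of the original header): asking a walker
// for the nearest dominating clobber of a load. `nearestClobber` is a
// hypothetical helper.
inline MemoryAccess *nearestClobber(MemorySSA &MSSA, Instruction *Load) {
  MemorySSAWalker *Walker = MSSA.getWalker();
  // Skips defs that AA proves cannot alias the location Load accesses; may
  // return liveOnEntry if nothing in the function clobbers that location.
  return Walker->getClobberingMemoryAccess(Load);
}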
1061 | |
1062 | /// A MemorySSAWalker that does no alias queries, or anything else. It |
1063 | /// simply returns the links as they were constructed by the builder. |
1064 | class DoNothingMemorySSAWalker final : public MemorySSAWalker { |
1065 | public: |
1066 | // Keep the overrides below from hiding the Instruction overload of |
1067 | // getClobberingMemoryAccess. |
1068 | using MemorySSAWalker::getClobberingMemoryAccess; |
1069 | |
1070 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override; |
1071 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, |
1072 | const MemoryLocation &) override; |
1073 | }; |
1074 | |
1075 | using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>; |
1076 | using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>; |
1077 | |
1078 | /// Iterator base class used to implement const and non-const iterators |
1079 | /// over the defining accesses of a MemoryAccess. |
1080 | template <class T> |
1081 | class memoryaccess_def_iterator_base |
1082 | : public iterator_facade_base<memoryaccess_def_iterator_base<T>, |
1083 | std::forward_iterator_tag, T, ptrdiff_t, T *, |
1084 | T *> { |
1085 | using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base; |
1086 | |
1087 | public: |
1088 | memoryaccess_def_iterator_base(T *Start) : Access(Start) {} |
1089 | memoryaccess_def_iterator_base() = default; |
1090 | |
1091 | bool operator==(const memoryaccess_def_iterator_base &Other) const { |
1092 | return Access == Other.Access && (!Access || ArgNo == Other.ArgNo); |
1093 | } |
1094 | |
1095 | // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the |
1096 | // block from the operand in constant time (In a PHINode, the uselist has |
1097 | // both, so it's just subtraction). We provide it as part of the |
1098 | // iterator to avoid callers having to linear walk to get the block. |
1099 | // If the operation becomes constant time on MemoryPHI's, this bit of |
1100 | // abstraction breaking should be removed. |
1101 | BasicBlock *getPhiArgBlock() const { |
1102 | MemoryPhi *MP = dyn_cast<MemoryPhi>(Access); |
1103 | assert(MP && "Tried to get phi arg block when not iterating over a PHI");
1104 | return MP->getIncomingBlock(ArgNo); |
1105 | } |
1106 | |
1107 | typename BaseT::iterator::pointer operator*() const { |
1108 | assert(Access && "Tried to access past the end of our iterator");
1109 | // Go to the first argument for phis, and the defining access for everything |
1110 | // else. |
1111 | if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) |
1112 | return MP->getIncomingValue(ArgNo); |
1113 | return cast<MemoryUseOrDef>(Access)->getDefiningAccess(); |
1114 | } |
1115 | |
1116 | using BaseT::operator++; |
1117 | memoryaccess_def_iterator_base &operator++() { |
1118 | assert(Access && "Hit end of iterator");
1119 | if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) { |
1120 | if (++ArgNo >= MP->getNumIncomingValues()) { |
1121 | ArgNo = 0; |
1122 | Access = nullptr; |
1123 | } |
1124 | } else { |
1125 | Access = nullptr; |
1126 | } |
1127 | return *this; |
1128 | } |
1129 | |
1130 | private: |
1131 | T *Access = nullptr; |
1132 | unsigned ArgNo = 0; |
1133 | }; |
1134 | |
1135 | inline memoryaccess_def_iterator MemoryAccess::defs_begin() { |
1136 | return memoryaccess_def_iterator(this); |
1137 | } |
1138 | |
1139 | inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const { |
1140 | return const_memoryaccess_def_iterator(this); |
1141 | } |
1142 | |
1143 | inline memoryaccess_def_iterator MemoryAccess::defs_end() { |
1144 | return memoryaccess_def_iterator(); |
1145 | } |
1146 | |
1147 | inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const { |
1148 | return const_memoryaccess_def_iterator(); |
1149 | } |
1150 | |
1151 | /// GraphTraits for a MemoryAccess, which walks defs in the normal case, |
1152 | /// and uses in the inverse case. |
1153 | template <> struct GraphTraits<MemoryAccess *> { |
1154 | using NodeRef = MemoryAccess *; |
1155 | using ChildIteratorType = memoryaccess_def_iterator; |
1156 | |
1157 | static NodeRef getEntryNode(NodeRef N) { return N; } |
1158 | static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); } |
1159 | static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); } |
1160 | }; |
1161 | |
1162 | template <> struct GraphTraits<Inverse<MemoryAccess *>> { |
1163 | using NodeRef = MemoryAccess *; |
1164 | using ChildIteratorType = MemoryAccess::iterator; |
1165 | |
1166 | static NodeRef getEntryNode(NodeRef N) { return N; } |
1167 | static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); } |
1168 | static ChildIteratorType child_end(NodeRef N) { return N->user_end(); } |
1169 | }; |
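
// Example (editor's sketch, not part of the original header): the traits above
// let generic graph algorithms walk MemorySSA. A depth-first walk over
// defining accesses, assuming llvm/ADT/DepthFirstIterator.h is included:
inline void visitReachingDefs(MemoryAccess *Start) {
  for (MemoryAccess *MA : depth_first(Start))
    (void)MA; // visits Start, then its defining accesses, transitively
}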
1170 | |
1171 | /// Provide an iterator that walks defs, giving both the memory access, |
1172 | /// and the current pointer location, updating the pointer location as it |
1173 | /// changes due to phi node translation. |
1174 | /// |
1175 | /// This iterator, while somewhat specialized, is what most clients actually |
1176 | /// want when walking upwards through MemorySSA def chains. It takes a pair of |
1177 | /// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the |
1178 | /// memory location through phi nodes for the user. |
1179 | class upward_defs_iterator |
1180 | : public iterator_facade_base<upward_defs_iterator, |
1181 | std::forward_iterator_tag, |
1182 | const MemoryAccessPair> { |
1183 | using BaseT = upward_defs_iterator::iterator_facade_base; |
1184 | |
1185 | public: |
1186 | upward_defs_iterator(const MemoryAccessPair &Info) |
1187 | : DefIterator(Info.first), Location(Info.second), |
1188 | OriginalAccess(Info.first) { |
1189 | CurrentPair.first = nullptr; |
1190 | |
1191 | WalkingPhi = Info.first && isa<MemoryPhi>(Info.first); |
1192 | fillInCurrentPair(); |
1193 | } |
1194 | |
1195 | upward_defs_iterator() { CurrentPair.first = nullptr; } |
1196 | |
1197 | bool operator==(const upward_defs_iterator &Other) const { |
1198 | return DefIterator == Other.DefIterator; |
1199 | } |
1200 | |
1201 | BaseT::iterator::reference operator*() const { |
1202 | assert(DefIterator != OriginalAccess->defs_end() &&
1203 |        "Tried to access past the end of our iterator");
1204 | return CurrentPair; |
1205 | } |
1206 | |
1207 | using BaseT::operator++; |
1208 | upward_defs_iterator &operator++() { |
1209 | assert(DefIterator != OriginalAccess->defs_end() &&
1210 |        "Tried to access past the end of the iterator");
1211 | ++DefIterator; |
1212 | if (DefIterator != OriginalAccess->defs_end()) |
1213 | fillInCurrentPair(); |
1214 | return *this; |
1215 | } |
1216 | |
1217 | BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); } |
1218 | |
1219 | private: |
1220 | void fillInCurrentPair() { |
1221 | CurrentPair.first = *DefIterator; |
1222 | if (WalkingPhi && Location.Ptr) { |
1223 | PHITransAddr Translator( |
1224 | const_cast<Value *>(Location.Ptr), |
1225 | OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr); |
1226 | if (!Translator.PHITranslateValue(OriginalAccess->getBlock(), |
1227 | DefIterator.getPhiArgBlock(), nullptr, |
1228 | false)) |
1229 | if (Translator.getAddr() != Location.Ptr) { |
1230 | CurrentPair.second = Location.getWithNewPtr(Translator.getAddr()); |
1231 | return; |
1232 | } |
1233 | } |
1234 | CurrentPair.second = Location; |
1235 | } |
1236 | |
1237 | MemoryAccessPair CurrentPair; |
1238 | memoryaccess_def_iterator DefIterator; |
1239 | MemoryLocation Location; |
1240 | MemoryAccess *OriginalAccess = nullptr; |
1241 | bool WalkingPhi = false; |
1242 | }; |
1243 | |
1244 | inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) { |
1245 | return upward_defs_iterator(Pair); |
1246 | } |
1247 | |
1248 | inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); } |
1249 | |
1250 | inline iterator_range<upward_defs_iterator> |
1251 | upward_defs(const MemoryAccessPair &Pair) { |
1252 | return make_range(upward_defs_begin(Pair), upward_defs_end()); |
1253 | } |
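
// Example (editor's sketch, not part of the original header): walking upward
// defs from an (access, location) pair. A MemoryUseOrDef yields its single
// defining access; a MemoryPhi yields each incoming value, with the location
// phi-translated per edge. `walkUpwardDefs` is a hypothetical helper.
inline void walkUpwardDefs(MemoryAccess *MA, const MemoryLocation &Loc) {
  for (const MemoryAccessPair &P : upward_defs({MA, Loc})) {
    MemoryAccess *Def = P.first;                    // one step up the chain
    const MemoryLocation &TranslatedLoc = P.second; // may change across a phi
    (void)Def;
    (void)TranslatedLoc;
  }
}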
1254 | |
1255 | /// Walks the defining accesses of MemoryDefs. Stops after we hit something that |
1256 | /// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when |
1257 | /// comparing against a null def_chain_iterator, this will compare equal only |
1258 | /// after walking said Phi/liveOnEntry. |
1259 | /// |
1260 | /// The UseOptimizedChain flag specifies whether to walk the clobbering |
1261 | /// access chain, or all the accesses. |
1262 | /// |
1263 | /// Normally, MemoryDefs are all just def/use linked together, so a def_chain on
1264 | /// a MemoryDef will walk all MemoryDefs above it in the program until it hits |
1265 | /// a phi node. The optimized chain walks the clobbering access of a store. |
1266 | /// So if you are just trying to find, given a store, what the next |
1267 | /// thing that would clobber the same memory is, you want the optimized chain. |
1268 | template <class T, bool UseOptimizedChain = false> |
1269 | struct def_chain_iterator |
1270 | : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>, |
1271 | std::forward_iterator_tag, MemoryAccess *> { |
1272 | def_chain_iterator() : MA(nullptr) {} |
1273 | def_chain_iterator(T MA) : MA(MA) {} |
1274 | |
1275 | T operator*() const { return MA; } |
1276 | |
1277 | def_chain_iterator &operator++() { |
1278 | // N.B. liveOnEntry has a null defining access. |
1279 | if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) { |
1280 | if (UseOptimizedChain && MUD->isOptimized()) |
1281 | MA = MUD->getOptimized(); |
1282 | else |
1283 | MA = MUD->getDefiningAccess(); |
1284 | } else { |
1285 | MA = nullptr; |
1286 | } |
1287 | |
1288 | return *this; |
1289 | } |
1290 | |
1291 | bool operator==(const def_chain_iterator &O) const { return MA == O.MA; } |
1292 | |
1293 | private: |
1294 | T MA; |
1295 | }; |
1296 | |
1297 | template <class T> |
1298 | inline iterator_range<def_chain_iterator<T>> |
1299 | def_chain(T MA, MemoryAccess *UpTo = nullptr) { |
1300 | #ifdef EXPENSIVE_CHECKS |
1301 | assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
1302 |        "UpTo isn't in the def chain!");
1303 | #endif |
1304 | return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo)); |
1305 | } |
1306 | |
1307 | template <class T> |
1308 | inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) { |
1309 | return make_range(def_chain_iterator<T, true>(MA), |
1310 | def_chain_iterator<T, true>(nullptr)); |
1311 | } |
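
// Example (editor's sketch, not part of the original header): measuring the
// length of a def chain. The walk yields MA itself, then each defining access,
// and ends after a MemoryPhi or liveOnEntry. `defChainLength` is hypothetical.
inline unsigned defChainLength(MemoryAccess *MA) {
  unsigned Len = 0;
  for (MemoryAccess *Def : def_chain(MA)) {
    (void)Def;
    ++Len;
  }
  return Len;
}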
1312 | |
1313 | } // end namespace llvm |
1314 | |
1315 | #endif // LLVM_ANALYSIS_MEMORYSSA_H |