File: build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/Analysis/MemorySSA.cpp
Warning: line 2068, column 5: Called C++ object pointer is null
//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFGPrinter.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Use.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <memory>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memoryssa"

static cl::opt<std::string>
    DotCFGMSSA("dot-cfg-mssa",
               cl::value_desc("file name for generated dot file"),
               cl::desc("file name for generated dot file"), cl::init(""));

INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)

INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
                      "Memory SSA Printer", false, false)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
                    "Memory SSA Printer", false, false)

static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));

// Always verify MemorySSA if expensive checking is enabled.
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyMemorySSA = true;
#else
bool llvm::VerifyMemorySSA = false;
#endif

static cl::opt<bool, true>
    VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
                     cl::Hidden, cl::desc("Enable verification of MemorySSA."));

const static char LiveOnEntryStr[] = "liveOnEntry";

namespace {

/// An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};
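
// Editor's note (not in the original source): with this annotator, a printed
// function interleaves IR with MemorySSA comments. A hypothetical fragment of
// such output would look like:
//
//   ; 1 = MemoryDef(liveOnEntry)
//   store i32 0, i32* %p
//   ; MemoryUse(1)
//   %v = load i32, i32* %p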

/// An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAWalkerAnnotatedWriter : public AssemblyAnnotationWriter {
  MemorySSA *MSSA;
  MemorySSAWalker *Walker;

public:
  MemorySSAWalkerAnnotatedWriter(MemorySSA *M)
      : MSSA(M), Walker(M->getWalker()) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I)) {
      MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(MA);
      OS << "; " << *MA;
      if (Clobber) {
        OS << " - clobbered by ";
        if (MSSA->isLiveOnEntryDef(Clobber))
          OS << LiveOnEntryStr;
        else
          OS << *Clobber;
      }
      OS << "\n";
    }
  }
};

} // namespace

namespace {

/// Our current alias analysis API differentiates heavily between calls and
/// non-calls, and functions called on one usually assert on the other.
/// This class encapsulates the distinction to simplify other code that wants
/// "Memory affecting instructions and related data" to use as a key.
/// For example, this class is used as a densemap key in the use optimizer.
class MemoryLocOrCall {
public:
  bool IsCall = false;

  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      Call = C;
    } else {
      IsCall = false;
      // There is no such thing as a memorylocation for a fence inst, and it is
      // unique in that regard.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}

  const CallBase *getCall() const {
    assert(IsCall);
    return Call;
  }

  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (!IsCall)
      return Loc == Other.Loc;

    if (Call->getCalledOperand() != Other.Call->getCalledOperand())
      return false;

    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }

private:
  union {
    const CallBase *Call;
    MemoryLocation Loc;
  };
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<MemoryLocOrCall> {
  static inline MemoryLocOrCall getEmptyKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
  }

  static inline MemoryLocOrCall getTombstoneKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
  }

  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (!MLOC.IsCall)
      return hash_combine(
          MLOC.IsCall,
          DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCall()->getCalledOperand()));

    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm
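
// Editor's sketch (not part of the original file): the DenseMapInfo
// specialization above is what lets MemoryLocOrCall key a DenseMap directly,
// as the use optimizer does. A minimal, hypothetical use:
//
//   DenseMap<MemoryLocOrCall, MemoryAccess *> LocToClobber;
//   LocToClobber[MemoryLocOrCall(MU)] = Clobber;
//
// Two keys compare equal only if IsCall matches, and then either the
// MemoryLocations are identical or the calls share a callee and argument list.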

/// This does one-way checks to see if Use could theoretically be hoisted above
/// MayClobber. This will not check the other way around.
///
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
/// MayClobber, with no potentially clobbering operations in between them.
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;
  // Otherwise, volatile doesn't matter here. From the language reference:
  // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'

  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
  // is weaker, it can be moved above other loads. We just need to be sure that
  // MayClobber isn't an acquire load, because loads can't be moved above
  // acquire loads.
  //
  // Note that this explicitly *does* allow the free reordering of monotonic (or
  // weaker) loads of the same address.
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}
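
// Editor's sketch (not in the original file): given hypothetical IR
//
//   %a = load atomic i32, i32* %p acquire
//   %b = load i32, i32* %q
//
// areLoadsReorderable(/*Use=*/%b, /*MayClobber=*/%a) is false, because %b must
// not be hoisted above an acquire load. With both loads unordered or
// monotonic, it returns true, even for loads of the same address.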

namespace {

struct ClobberAlias {
  bool IsClobber;
  Optional<AliasResult> AR;
};

} // end anonymous namespace

// Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
// ignored if IsClobber = false.
template <typename AliasAnalysisType>
static ClobberAlias
instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
                         const Instruction *UseInst, AliasAnalysisType &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");
  Optional<AliasResult> AR;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers, mostly.
    //
    // FIXME: We probably don't actually want MemorySSA to model these at all
    // (including creating MemoryAccesses for them): we just end up inventing
    // clobbers where they don't really exist at all. Please see D43269 for
    // context.
    switch (II->getIntrinsicID()) {
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
    case Intrinsic::experimental_noalias_scope_decl:
    case Intrinsic::pseudoprobe:
      return {false, AliasResult(AliasResult::NoAlias)};
    case Intrinsic::dbg_addr:
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_label:
    case Intrinsic::dbg_value:
      llvm_unreachable("debuginfo shouldn't have associated defs!");
    default:
      break;
    }
  }

  if (auto *CB = dyn_cast_or_null<CallBase>(UseInst)) {
    ModRefInfo I = AA.getModRefInfo(DefInst, CB);
    AR = isMustSet(I) ? AliasResult::MustAlias : AliasResult::MayAlias;
    return {isModOrRefSet(I), AR};
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast_or_null<LoadInst>(UseInst))
      return {!areLoadsReorderable(UseLoad, DefLoad),
              AliasResult(AliasResult::MayAlias)};

  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  AR = isMustSet(I) ? AliasResult::MustAlias : AliasResult::MayAlias;
  return {isModSet(I), AR};
}
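
// Editor's sketch (not in the original file; values hypothetical): for
//
//   store i32 0, i32* %p          ; DefInst
//   %v = load i32, i32* %p        ; UseInst, UseLoc = MemoryLocation for %p
//
// AA.getModRefInfo(DefInst, UseLoc) reports Mod with Must info, so the query
// returns {IsClobber = true, AR = MustAlias}. For a store to memory AA can
// prove disjoint from UseLoc, isModSet(I) is false and the returned AR is
// ignored by callers, as the comment above notes.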

template <typename AliasAnalysisType>
static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
                                             const MemoryUseOrDef *MU,
                                             const MemoryLocOrCall &UseMLOC,
                                             AliasAnalysisType &AA) {
  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
  // to exist while MemoryLocOrCall is pushed through places.
  if (UseMLOC.IsCall)
    return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
                                    AA);
  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
                                  AA);
}

// Return true when MD may alias MU, return false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                        AliasAnalysis &AA) {
  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
}

namespace {

struct UpwardsMemoryQuery {
  // True if our original query started off as a call
  bool IsCall = false;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local domination
  const MemoryAccess *OriginalAccess = nullptr;
  Optional<AliasResult> AR = AliasResult(AliasResult::MayAlias);
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};

} // end anonymous namespace

template <typename AliasAnalysisType>
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
                                                   const Instruction *I) {
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  if (auto *LI = dyn_cast<LoadInst>(I))
    return I->hasMetadata(LLVMContext::MD_invariant_load) ||
           AA.pointsToConstantMemory(MemoryLocation::get(LI));
  return false;
}
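
// Editor's sketch (not in the original file): a load marked invariant, e.g.
//
//   %v = load i32, i32* %p, !invariant.load !0
//
// can never be clobbered, so its MemoryUse is optimized straight to
// liveOnEntry with no upward walk; the same holds for loads that AA proves
// read constant memory.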

/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` can clobber `Start`.
///
/// This is meant to be as simple and self-contained as possible. Because it
/// uses no cache, etc., it can be relatively expensive.
///
/// \param Start The MemoryAccess that we want to walk from.
/// \param ClobberAt A clobber for Start.
/// \param StartLoc The MemoryLocation for Start.
/// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
/// \param Query The UpwardsMemoryQuery we used for our search.
/// \param AA The AliasAnalysis we used for our search.
/// \param AllowImpreciseClobber Always false, unless we do relaxed verify.

template <typename AliasAnalysisType>
LLVM_ATTRIBUTE_UNUSED static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, AliasAnalysisType &AA,
                   bool AllowImpreciseClobber = false) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  SmallVector<ConstMemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
  // is found, complain.
  while (!Worklist.empty()) {
    auto MAP = Worklist.pop_back_val();
    // All we care about is that nothing from Start to ClobberAt clobbers Start.
    // We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (const auto *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't essentially free, so don't use `|=`,
          // since it won't let us short-circuit.
          //
          // Also, note that this can't be hoisted out of the `Worklist` loop,
          // since MD may only act as a clobber for 1 of N MemoryLocations.
          FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
          if (!FoundClobber) {
            ClobberAlias CA =
                instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
            if (CA.IsClobber) {
              FoundClobber = true;
              // Not used: CA.AR;
            }
          }
        }
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
        // If Start is a Def, skip self.
        if (MD == Start)
          continue;

        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
                    .IsClobber &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
        (void)MU;
        assert(MU == Start &&
               "Can only find use in def chain if Start is a use");
        continue;
      }

      assert(isa<MemoryPhi>(MA));

      // Add reachable phi predecessors
      for (auto ItB = upward_defs_begin(
                    {const_cast<MemoryAccess *>(MA), MAP.second},
                    MSSA.getDomTree()),
                ItE = upward_defs_end();
           ItB != ItE; ++ItB)
        if (MSSA.getDomTree().isReachableFromEntry(ItB.getPhiArgBlock()))
          Worklist.emplace_back(*ItB);
    }
  }

  // If the verify is done following an optimization, it's possible that
  // ClobberAt was a conservative clobbering, that we can now infer is not a
  // true clobbering access. Don't fail the verify if that's the case.
  // We do have accesses that claim they're optimized, but could be optimized
  // further. Updating all these can be expensive, so allow it for now (FIXME).
  if (AllowImpreciseClobber)
    return;

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}

namespace {

/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
/// in one class.
template <class AliasAnalysisType> class ClobberWalker {
  /// Save a few bytes by using unsigned instead of size_t.
  using ListIndex = unsigned;

  /// Represents a span of contiguous MemoryDefs, potentially ending in a
  /// MemoryPhi.
  struct DefPath {
    MemoryLocation Loc;
    // Note that, because we always walk in reverse, Last will always dominate
    // First. Also note that First and Last are inclusive.
    MemoryAccess *First;
    MemoryAccess *Last;
    Optional<ListIndex> Previous;

    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            Optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}

    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            Optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
  };

  const MemorySSA &MSSA;
  AliasAnalysisType &AA;
  DominatorTree &DT;
  UpwardsMemoryQuery *Query;
  unsigned *UpwardWalkLimit;

  // Phi optimization bookkeeping:
  // List of DefPath to process during the current phi optimization walk.
  SmallVector<DefPath, 32> Paths;
  // List of visited <Access, Location> pairs; we can skip paths already
  // visited with the same memory location.
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  // Record if phi translation has been performed during the current phi
  // optimization walk, as merging alias results after phi translation can
  // yield incorrect results. Context in PR46156.
  bool PerformedPhiTranslation = false;

  /// Find the nearest def or phi that `From` can legally be optimized to.
  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
    assert(From->getNumOperands() && "Phi with no operands?");

    BasicBlock *BB = From->getBlock();
    MemoryAccess *Result = MSSA.getLiveOnEntryDef();
    DomTreeNode *Node = DT.getNode(BB);
    while ((Node = Node->getIDom())) {
      auto *Defs = MSSA.getBlockDefs(Node->getBlock());
      if (Defs)
        return &*Defs->rbegin();
    }
    return Result;
  }
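
  // Editor's sketch (not in the original file): for a hypothetical CFG
  //
  //     A (contains a store)
  //    B   C
  //     D (From: the MemoryPhi)
  //
  // where A branches to B and C, which both fall through to D, getWalkTarget
  // walks D's immediate-dominator chain (D -> A), finds that A has block defs,
  // and returns A's last MemoryDef. If no dominator block had defs, it would
  // fall back to liveOnEntry.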

  /// Result of calling walkToPhiOrClobber.
  struct UpwardsWalkResult {
    /// The "Result" of the walk. Either a clobber, the last thing we walked, or
    /// both. Include alias info when clobber found.
    MemoryAccess *Result;
    bool IsKnownClobber;
    Optional<AliasResult> AR;
  };

  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
  /// This will update Desc.Last as it walks. It will (optionally) also stop at
  /// StopAt.
  ///
  /// This does not test for whether StopAt is a clobber
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
                     const MemoryAccess *SkipStopAt = nullptr) const {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
    assert(UpwardWalkLimit && "Need a valid walk limit");
    bool LimitAlreadyReached = false;
    // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
    // it to 1. This will not do any alias() calls. It either returns in the
    // first iteration in the loop below, or is set back to 0 if all def chains
    // are free of MemoryDefs.
    if (!*UpwardWalkLimit) {
      *UpwardWalkLimit = 1;
      LimitAlreadyReached = true;
    }

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      if (Current == StopAt || Current == SkipStopAt)
        return {Current, false, AliasResult(AliasResult::MayAlias)};

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {
        if (MSSA.isLiveOnEntryDef(MD))
          return {MD, true, AliasResult(AliasResult::MustAlias)};

        if (!--*UpwardWalkLimit)
          return {Current, true, AliasResult(AliasResult::MayAlias)};

        ClobberAlias CA =
            instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
        if (CA.IsClobber)
          return {MD, true, CA.AR};
      }
    }

    if (LimitAlreadyReached)
      *UpwardWalkLimit = 0;

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false, AliasResult(AliasResult::MayAlias)};
  }
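
  // Editor's note (not in the original file): every non-liveOnEntry MemoryDef
  // inspected above costs one unit of *UpwardWalkLimit. A hypothetical caller
  // that sets the member pointer at a budget of 8:
  //
  //   unsigned Limit = 8;              // UpwardWalkLimit = &Limit
  //   UpwardsWalkResult R = walkToPhiOrClobber(Desc);
  //
  // If the walk exhausts the budget, the current def is conservatively
  // reported as a known clobber with MayAlias rather than walking further.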

  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
    auto UpwardDefsBegin = upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT,
                                             &PerformedPhiTranslation);
    auto UpwardDefs = make_range(UpwardDefsBegin, upward_defs_end());
    for (const MemoryAccessPair &P : UpwardDefs) {
      PausedSearches.push_back(Paths.size());
      Paths.emplace_back(P.second, P.first, PriorNode);
    }
  }

  /// Represents a search that terminated after finding a clobber. This clobber
  /// may or may not be present in the path of defs from LastNode..SearchStart,
  /// since it may have been retrieved from cache.
  struct TerminatedPath {
    MemoryAccess *Clobber;
    ListIndex LastNode;
  };

  /// Get an access that keeps us from optimizing to the given phi.
  ///
  /// PausedSearches is an array of indices into the Paths array. Its incoming
  /// value is the indices of searches that stopped at the last phi optimization
  /// target. It's left in an unspecified state.
  ///
  /// If this returns None, NewPaused is a vector of searches that terminated
  /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
  Optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    // BFS vs DFS really doesn't make a difference here, so just do a DFS with
    // PausedSearches as our stack.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      //
      // NOTE: That we just drop these paths on the ground makes caching
      // behavior sporadic. e.g. given a diamond:
      //  A
      // B C
      //  D
      //
      // ...If we walk D, B, A, C, we'll only cache the result of phi
      // optimization for A, B, and D; C will be skipped because it dies here.
      // This arguably isn't the worst thing ever, since:
      //   - We generally query things in a top-down order, so if we got below D
      //     without needing cache entries for {C, MemLoc}, then chances are
      //     that those cache entries would end up ultimately unused.
      //   - We still cache things for A, so C only needs to walk up a bit.
      // If this behavior becomes problematic, we can fix without a ton of extra
      // work.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second) {
        if (PerformedPhiTranslation) {
          // If visiting this path performed Phi translation, don't continue,
          // since it may not be correct to merge results from two paths if one
          // relies on the phi translation.
          TerminatedPath Term{Node.Last, PathIndex};
          return Term;
        }
        continue;
      }

      const MemoryAccess *SkipStopWhere = nullptr;
      if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
        assert(isa<MemoryDef>(Query->OriginalAccess));
        SkipStopWhere = Query->OriginalAccess;
      }

      UpwardsWalkResult Res = walkToPhiOrClobber(Node,
                                                 /*StopAt=*/StopWhere,
                                                 /*SkipStopAt=*/SkipStopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);

        // If this wasn't a cache hit, we hit a clobber when walking. That's a
        // failure.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
        // We've hit our target. Save this path off for if we want to continue
        // walking. If we are in the mode of skipping the OriginalAccess, and
        // we've reached back to the OriginalAccess, do not save path, we've
        // just looped back to self.
        if (Res.Result != SkipStopWhere)
          NewPaused.push_back(PathIndex);
        continue;
      }

      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return None;
  }

  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() = default;
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.hasValue() != O.N.hasValue())
        return false;
      return !N.hasValue() || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W = nullptr;
    Optional<ListIndex> N = None;
  };

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }

  struct OptznResult {
    /// The path that contains our result.
    TerminatedPath PrimaryClobber;
    /// The paths that we can legally cache back from, but that aren't
    /// necessarily the result of the Phi optimization.
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };

  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }

  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
  /// that act as legal clobbers. Note that this won't return *all* clobbers.
  ///
  /// Phi optimization algorithm tl;dr:
  ///   - Find the earliest def/phi, A, we can optimize to
  ///   - Find if all paths from the starting memory access ultimately reach A
  ///     - If not, optimization isn't possible.
  ///     - Otherwise, walk from A to another clobber or phi, A'.
  ///       - If A' is a def, we're done.
  ///       - If A' is a phi, try to optimize it.
  ///
  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
                             const MemoryLocation &Loc) {
    assert(Paths.empty() && VisitedPhis.empty() && !PerformedPhiTranslation &&
           "Reset the optimization state.");

    Paths.emplace_back(Loc, Start, Phi, None);
    // Stores how many "valid" optimization nodes we had prior to calling
    // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
    auto PriorPathsSize = Paths.size();

    SmallVector<ListIndex, 16> PausedSearches;
    SmallVector<ListIndex, 8> NewPaused;
    SmallVector<TerminatedPath, 4> TerminatedPaths;

    addSearches(Phi, PausedSearches, 0);

    // Moves the TerminatedPath with the "most dominated" Clobber to the end of
    // Paths.
    auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
      assert(!Paths.empty() && "Need a path to move");
      auto Dom = Paths.begin();
      for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
        if (!MSSA.dominates(I->Clobber, Dom->Clobber))
          Dom = I;
      auto Last = Paths.end() - 1;
      if (Last != Dom)
        std::iter_swap(Last, Dom);
    };

    MemoryPhi *Current = Phi;
    while (true) {
      assert(!MSSA.isLiveOnEntryDef(Current) &&
             "liveOnEntry wasn't treated as a clobber?");

      const auto *Target = getWalkTarget(Current);
      // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
      // optimization for the prior phi.
      assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
        return MSSA.dominates(P.Clobber, Target);
      }));

      // FIXME: This is broken, because the Blocker may be reported to be
      // liveOnEntry, and we'll happily wait for that to disappear (read: never)
      // For the moment, this is fine, since we do nothing with blocker info.
      if (Optional<TerminatedPath> Blocker = getBlockingAccess(
              Target, PausedSearches, NewPaused, TerminatedPaths)) {

        // Find the node we started at. We can't search based on N->Last, since
        // we may have gone around a loop with a different MemoryLocation.
        auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
          return defPathIndex(N) < PriorPathsSize;
        });
        assert(Iter != def_path_iterator());

        DefPath &CurNode = *Iter;
        assert(CurNode.Last == Current);

        // Two things:
        // A. We can't reliably cache all of NewPaused back. Consider a case
        //    where we have two paths in NewPaused; one of which can't optimize
        //    above this phi, whereas the other can. If we cache the second path
        //    back, we'll end up with suboptimal cache entries. We can handle
        //    cases like this a bit better when we either try to find all
        //    clobbers that block phi optimization, or when our cache starts
        //    supporting unfinished searches.
        // B. We can't reliably cache TerminatedPaths back here without doing
        //    extra checks; consider a case like:
        //       T
        //      D C
        //       S
        //    Where T is our target (with edges down to D and C), C is a node
        //    with a clobber on it, D is a diamond (with a clobber *only* on the
        //    left or right node, N), and S is our start. Say we walk to D,
        //    through the node opposite N (read: ignoring the clobber), and see
        //    a cache entry in the top node of D. That cache entry gets put into
        //    TerminatedPaths. We then walk up to C (N is later in our
        //    worklist), find the clobber, and quit. If we append
        //    TerminatedPaths to OtherClobbers, we'll cache the bottom part of D
        //    to the cached clobber, ignoring the clobber in N. Again, this
        //    problem goes away if we start tracking all blockers for a given
        //    phi optimization.
        TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
        return {Result, {}};
      }

      // If there's nothing left to search, then all paths led to valid clobbers
      // that we got from our cache; pick the nearest to the start, and allow
      // the rest to be cached back.
      if (NewPaused.empty()) {
        MoveDominatedPathToEnd(TerminatedPaths);
        TerminatedPath Result = TerminatedPaths.pop_back_val();
        return {Result, std::move(TerminatedPaths)};
      }

      MemoryAccess *DefChainEnd = nullptr;
      SmallVector<TerminatedPath, 4> Clobbers;
      for (ListIndex Paused : NewPaused) {
        UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
        if (WR.IsKnownClobber)
          Clobbers.push_back({WR.Result, Paused});
        else
          // Micro-opt: If we hit the end of the chain, save it.
          DefChainEnd = WR.Result;
      }

      if (!TerminatedPaths.empty()) {
        // If we couldn't find the dominating phi/liveOnEntry in the above loop,
        // do it now.
        if (!DefChainEnd)
          for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
            DefChainEnd = MA;
        assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");

        // If any of the terminated paths don't dominate the phi we'll try to
        // optimize, we need to figure out what they are and quit.
        const BasicBlock *ChainBB = DefChainEnd->getBlock();
        for (const TerminatedPath &TP : TerminatedPaths) {
          // Because we know that DefChainEnd is as "high" as we can go, we
          // don't need local dominance checks; BB dominance is sufficient.
          if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
            Clobbers.push_back(TP);
        }
      }

      // If we have clobbers in the def chain, find the one closest to Current
      // and quit.
      if (!Clobbers.empty()) {
        MoveDominatedPathToEnd(Clobbers);
        TerminatedPath Result = Clobbers.pop_back_val();
        return {Result, std::move(Clobbers)};
      }

      assert(all_of(NewPaused,
                    [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

      // Because liveOnEntry is a clobber, this must be a phi.
      auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);

      PriorPathsSize = Paths.size();
      PausedSearches.clear();
      for (ListIndex I : NewPaused)
        addSearches(DefChainPhi, PausedSearches, I);
      NewPaused.clear();

      Current = DefChainPhi;
    }
  }
954 | |||||
955 | void verifyOptResult(const OptznResult &R) const { | ||||
956 | assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) { | ||||
957 | return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber); | ||||
958 | })); | ||||
959 | } | ||||
960 | |||||
961 | void resetPhiOptznState() { | ||||
962 | Paths.clear(); | ||||
963 | VisitedPhis.clear(); | ||||
964 | PerformedPhiTranslation = false; | ||||
965 | } | ||||
966 | |||||
967 | public: | ||||
968 | ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT) | ||||
969 | : MSSA(MSSA), AA(AA), DT(DT) {} | ||||
970 | |||||
971 | AliasAnalysisType *getAA() { return &AA; } | ||||
972 | /// Finds the nearest clobber for the given query, optimizing phis if | ||||
973 | /// possible. | ||||
974 | MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q, | ||||
975 | unsigned &UpWalkLimit) { | ||||
976 | Query = &Q; | ||||
977 | UpwardWalkLimit = &UpWalkLimit; | ||||
978 | // Starting limit must be > 0. | ||||
979 | if (!UpWalkLimit) | ||||
980 | UpWalkLimit++; | ||||
981 | |||||
982 | MemoryAccess *Current = Start; | ||||
983 | // This walker pretends uses don't exist. If we're handed one, silently grab | ||||
984 | // its def. (This has the nice side-effect of ensuring we never cache uses) | ||||
985 | if (auto *MU = dyn_cast<MemoryUse>(Start)) | ||||
986 | Current = MU->getDefiningAccess(); | ||||
987 | |||||
988 | DefPath FirstDesc(Q.StartingLoc, Current, Current, None); | ||||
989 | // Fast path for the overly-common case (no crazy phi optimization | ||||
990 | // necessary) | ||||
991 | UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc); | ||||
992 | MemoryAccess *Result; | ||||
993 | if (WalkResult.IsKnownClobber) { | ||||
994 | Result = WalkResult.Result; | ||||
995 | Q.AR = WalkResult.AR; | ||||
996 | } else { | ||||
997 | OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last), | ||||
998 | Current, Q.StartingLoc); | ||||
999 | verifyOptResult(OptRes); | ||||
1000 | resetPhiOptznState(); | ||||
1001 | Result = OptRes.PrimaryClobber.Clobber; | ||||
1002 | } | ||||
1003 | |||||
1004 | #ifdef EXPENSIVE_CHECKS | ||||
1005 | if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0) | ||||
1006 | checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA); | ||||
1007 | #endif | ||||
1008 | return Result; | ||||
1009 | } | ||||
1010 | }; | ||||
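// Note on the walk-limit handling in findClobber above: a caller-supplied
// limit of 0 is bumped to 1 before walking, so the walker always gets at
// least one upward step. A minimal sketch of the clamp (values hypothetical):
//
//   unsigned UpWalkLimit = 0;  // caller passed "no budget"
//   if (!UpWalkLimit)          // 0 would forbid even the first def_chain hop
//     UpWalkLimit++;           // clamped to 1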
1011 | |||||
1012 | struct RenamePassData { | ||||
1013 | DomTreeNode *DTN; | ||||
1014 | DomTreeNode::const_iterator ChildIt; | ||||
1015 | MemoryAccess *IncomingVal; | ||||
1016 | |||||
1017 | RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It, | ||||
1018 | MemoryAccess *M) | ||||
1019 | : DTN(D), ChildIt(It), IncomingVal(M) {} | ||||
1020 | |||||
1021 | void swap(RenamePassData &RHS) { | ||||
1022 | std::swap(DTN, RHS.DTN); | ||||
1023 | std::swap(ChildIt, RHS.ChildIt); | ||||
1024 | std::swap(IncomingVal, RHS.IncomingVal); | ||||
1025 | } | ||||
1026 | }; | ||||
1027 | |||||
1028 | } // end anonymous namespace | ||||
1029 | |||||
1030 | namespace llvm { | ||||
1031 | |||||
1032 | template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase { | ||||
1033 | ClobberWalker<AliasAnalysisType> Walker; | ||||
1034 | MemorySSA *MSSA; | ||||
1035 | |||||
1036 | public: | ||||
1037 | ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D) | ||||
1038 | : Walker(*M, *A, *D), MSSA(M) {} | ||||
1039 | |||||
1040 | MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, | ||||
1041 | const MemoryLocation &, | ||||
1042 | unsigned &); | ||||
1043 | // The third argument (bool) defines whether the clobber search should skip the | ||||
1044 | // original queried access. If true, there will be a follow-up query searching | ||||
1045 | // for a clobber access past "self". Note that the Optimized access is not | ||||
1046 | // updated if a new clobber is found by this SkipSelf search. If this | ||||
1047 | // additional query becomes heavily used we may decide to cache the result. | ||||
1048 | // Walker instantiations will decide how to set the SkipSelf bool. | ||||
1049 | MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool, | ||||
1050 | bool UseInvariantGroup = true); | ||||
1051 | }; | ||||
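// The SkipSelf flag documented above is pinned differently by the two walker
// instantiations that follow; the contrast, using the calls as they appear
// later in this file:
//
//   // CachingWalker: a clobber at the queried access itself is acceptable.
//   Walker->getClobberingMemoryAccessBase(MA, UWL, /*SkipSelf=*/false);
//   // SkipSelfWalker: walk past "self" and report the next clobber above it.
//   Walker->getClobberingMemoryAccessBase(MA, UWL, /*SkipSelf=*/true);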
1052 | |||||
1053 | /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no | ||||
1054 | /// longer does caching on its own, but the name has been retained for the | ||||
1055 | /// moment. | ||||
1056 | template <class AliasAnalysisType> | ||||
1057 | class MemorySSA::CachingWalker final : public MemorySSAWalker { | ||||
1058 | ClobberWalkerBase<AliasAnalysisType> *Walker; | ||||
1059 | |||||
1060 | public: | ||||
1061 | CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W) | ||||
1062 | : MemorySSAWalker(M), Walker(W) {} | ||||
1063 | ~CachingWalker() override = default; | ||||
1064 | |||||
1065 | using MemorySSAWalker::getClobberingMemoryAccess; | ||||
1066 | |||||
1067 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) { | ||||
1068 | return Walker->getClobberingMemoryAccessBase(MA, UWL, false); | ||||
1069 | } | ||||
1070 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, | ||||
1071 | const MemoryLocation &Loc, | ||||
1072 | unsigned &UWL) { | ||||
1073 | return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL); | ||||
1074 | } | ||||
1075 | // This method is not accessible outside of this file. | ||||
1076 | MemoryAccess *getClobberingMemoryAccessWithoutInvariantGroup(MemoryAccess *MA, | ||||
1077 | unsigned &UWL) { | ||||
1078 | return Walker->getClobberingMemoryAccessBase(MA, UWL, false, false); | ||||
1079 | } | ||||
1080 | |||||
1081 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override { | ||||
1082 | unsigned UpwardWalkLimit = MaxCheckLimit; | ||||
1083 | return getClobberingMemoryAccess(MA, UpwardWalkLimit); | ||||
1084 | } | ||||
1085 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, | ||||
1086 | const MemoryLocation &Loc) override { | ||||
1087 | unsigned UpwardWalkLimit = MaxCheckLimit; | ||||
1088 | return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit); | ||||
1089 | } | ||||
1090 | |||||
1091 | void invalidateInfo(MemoryAccess *MA) override { | ||||
1092 | if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) | ||||
1093 | MUD->resetOptimized(); | ||||
1094 | } | ||||
1095 | }; | ||||
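// Hedged usage sketch for the walker interface above (assumes a Function F
// with AliasAnalysis AA and DominatorTree DT already computed; the variable
// names, including SomeLoad, are illustrative only):
//
//   MemorySSA MSSA(F, &AA, &DT);
//   MemorySSAWalker *W = MSSA.getWalker();
//   if (MemoryAccess *MA = MSSA.getMemoryAccess(&SomeLoad))
//     MemoryAccess *Clobber = W->getClobberingMemoryAccess(MA);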
1096 | |||||
1097 | template <class AliasAnalysisType> | ||||
1098 | class MemorySSA::SkipSelfWalker final : public MemorySSAWalker { | ||||
1099 | ClobberWalkerBase<AliasAnalysisType> *Walker; | ||||
1100 | |||||
1101 | public: | ||||
1102 | SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W) | ||||
1103 | : MemorySSAWalker(M), Walker(W) {} | ||||
1104 | ~SkipSelfWalker() override = default; | ||||
1105 | |||||
1106 | using MemorySSAWalker::getClobberingMemoryAccess; | ||||
1107 | |||||
1108 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) { | ||||
1109 | return Walker->getClobberingMemoryAccessBase(MA, UWL, true); | ||||
1110 | } | ||||
1111 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, | ||||
1112 | const MemoryLocation &Loc, | ||||
1113 | unsigned &UWL) { | ||||
1114 | return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL); | ||||
1115 | } | ||||
1116 | |||||
1117 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override { | ||||
1118 | unsigned UpwardWalkLimit = MaxCheckLimit; | ||||
1119 | return getClobberingMemoryAccess(MA, UpwardWalkLimit); | ||||
1120 | } | ||||
1121 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, | ||||
1122 | const MemoryLocation &Loc) override { | ||||
1123 | unsigned UpwardWalkLimit = MaxCheckLimit; | ||||
1124 | return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit); | ||||
1125 | } | ||||
1126 | |||||
1127 | void invalidateInfo(MemoryAccess *MA) override { | ||||
1128 | if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) | ||||
1129 | MUD->resetOptimized(); | ||||
1130 | } | ||||
1131 | }; | ||||
1132 | |||||
1133 | } // end namespace llvm | ||||
1134 | |||||
1135 | void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal, | ||||
1136 | bool RenameAllUses) { | ||||
1137 | // Pass through values to our successors | ||||
1138 | for (const BasicBlock *S : successors(BB)) { | ||||
1139 | auto It = PerBlockAccesses.find(S); | ||||
1140 | // Rename the phi nodes in our successor block | ||||
1141 | if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front())) | ||||
1142 | continue; | ||||
1143 | AccessList *Accesses = It->second.get(); | ||||
1144 | auto *Phi = cast<MemoryPhi>(&Accesses->front()); | ||||
1145 | if (RenameAllUses) { | ||||
1146 | bool ReplacementDone = false; | ||||
1147 | for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) | ||||
1148 | if (Phi->getIncomingBlock(I) == BB) { | ||||
1149 | Phi->setIncomingValue(I, IncomingVal); | ||||
1150 | ReplacementDone = true; | ||||
1151 | } | ||||
1152 | (void) ReplacementDone; | ||||
1153 | assert(ReplacementDone && "Incomplete phi during partial rename"); | ||||
1154 | } else | ||||
1155 | Phi->addIncoming(IncomingVal, BB); | ||||
1156 | } | ||||
1157 | } | ||||
1158 | |||||
1159 | /// Rename a single basic block into MemorySSA form. | ||||
1160 | /// Uses the standard SSA renaming algorithm. | ||||
1161 | /// \returns The new incoming value. | ||||
1162 | MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal, | ||||
1163 | bool RenameAllUses) { | ||||
1164 | auto It = PerBlockAccesses.find(BB); | ||||
1165 | // Skip most processing if the list is empty. | ||||
1166 | if (It != PerBlockAccesses.end()) { | ||||
1167 | AccessList *Accesses = It->second.get(); | ||||
1168 | for (MemoryAccess &L : *Accesses) { | ||||
1169 | if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) { | ||||
1170 | if (MUD->getDefiningAccess() == nullptr || RenameAllUses) | ||||
1171 | MUD->setDefiningAccess(IncomingVal); | ||||
1172 | if (isa<MemoryDef>(&L)) | ||||
1173 | IncomingVal = &L; | ||||
1174 | } else { | ||||
1175 | IncomingVal = &L; | ||||
1176 | } | ||||
1177 | } | ||||
1178 | } | ||||
1179 | return IncomingVal; | ||||
1180 | } | ||||
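// Worked example of the block-local rule in renameBlock (illustrative IR;
// MemorySSA annotations shown in its printed style, numbering hypothetical):
//
//   ; IncomingVal on entry: liveOnEntry
//   store i32 0, ptr %p    ; 1 = MemoryDef(liveOnEntry) -> IncomingVal = 1
//   %v = load i32, ptr %p  ; MemoryUse(1)
//   store i32 1, ptr %p    ; 2 = MemoryDef(1)           -> IncomingVal = 2
//   ; renameBlock returns 2, which feeds successor MemoryPhis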
1181 | |||||
1182 | /// This is the standard SSA renaming algorithm. | ||||
1183 | /// | ||||
1184 | /// We walk the dominator tree in preorder, renaming accesses, and then filling | ||||
1185 | /// in phi nodes in our successors. | ||||
1186 | void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal, | ||||
1187 | SmallPtrSetImpl<BasicBlock *> &Visited, | ||||
1188 | bool SkipVisited, bool RenameAllUses) { | ||||
1189 | assert(Root && "Trying to rename accesses in an unreachable block"); | ||||
1190 | |||||
1191 | SmallVector<RenamePassData, 32> WorkStack; | ||||
1192 | // Skip everything if we already renamed this block and we are skipping. | ||||
1193 | // Note: You can't sink this into the if, because we need it to occur | ||||
1194 | // regardless of whether we skip blocks or not. | ||||
1195 | bool AlreadyVisited = !Visited.insert(Root->getBlock()).second; | ||||
1196 | if (SkipVisited && AlreadyVisited) | ||||
1197 | return; | ||||
1198 | |||||
1199 | IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses); | ||||
1200 | renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses); | ||||
1201 | WorkStack.push_back({Root, Root->begin(), IncomingVal}); | ||||
1202 | |||||
1203 | while (!WorkStack.empty()) { | ||||
1204 | DomTreeNode *Node = WorkStack.back().DTN; | ||||
1205 | DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt; | ||||
1206 | IncomingVal = WorkStack.back().IncomingVal; | ||||
1207 | |||||
1208 | if (ChildIt == Node->end()) { | ||||
1209 | WorkStack.pop_back(); | ||||
1210 | } else { | ||||
1211 | DomTreeNode *Child = *ChildIt; | ||||
1212 | ++WorkStack.back().ChildIt; | ||||
1213 | BasicBlock *BB = Child->getBlock(); | ||||
1214 | // Note: You can't sink this into the if, because we need it to occur | ||||
1215 | // regardless of whether we skip blocks or not. | ||||
1216 | AlreadyVisited = !Visited.insert(BB).second; | ||||
1217 | if (SkipVisited && AlreadyVisited) { | ||||
1218 | // We already visited this during our renaming, which can happen when | ||||
1219 | // being asked to rename multiple blocks. Figure out the incoming val, | ||||
1220 | // which is the last def. | ||||
1221 | // Incoming value can only change if there is a block def, and in that | ||||
1222 | // case, it's the last block def in the list. | ||||
1223 | if (auto *BlockDefs = getWritableBlockDefs(BB)) | ||||
1224 | IncomingVal = &*BlockDefs->rbegin(); | ||||
1225 | } else | ||||
1226 | IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses); | ||||
1227 | renameSuccessorPhis(BB, IncomingVal, RenameAllUses); | ||||
1228 | WorkStack.push_back({Child, Child->begin(), IncomingVal}); | ||||
1229 | } | ||||
1230 | } | ||||
1231 | } | ||||
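// The explicit WorkStack above is the usual recursion-elimination pattern; a
// minimal recursive equivalent (sketch only, ignoring SkipVisited and the
// Visited set) would be:
//
//   void rename(DomTreeNode *N, MemoryAccess *IncomingVal) {
//     IncomingVal = renameBlock(N->getBlock(), IncomingVal, RenameAllUses);
//     renameSuccessorPhis(N->getBlock(), IncomingVal, RenameAllUses);
//     for (DomTreeNode *Child : *N)
//       rename(Child, IncomingVal);
//   }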
1232 | |||||
1233 | /// This handles unreachable block accesses by deleting phi nodes in | ||||
1234 | /// unreachable blocks, and marking all other unreachable MemoryAccess's as | ||||
1235 | /// being uses of the live on entry definition. | ||||
1236 | void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) { | ||||
1237 | assert(!DT->isReachableFromEntry(BB) && | ||||
1238 | "Reachable block found while handling unreachable blocks"); | ||||
1239 | |||||
1240 | // Make sure phi nodes in our reachable successors end up with a | ||||
1241 | // LiveOnEntryDef for our incoming edge, even though our block is forward | ||||
1242 | // unreachable. We could just disconnect these blocks from the CFG fully, | ||||
1243 | // but we do not right now. | ||||
1244 | for (const BasicBlock *S : successors(BB)) { | ||||
1245 | if (!DT->isReachableFromEntry(S)) | ||||
1246 | continue; | ||||
1247 | auto It = PerBlockAccesses.find(S); | ||||
1248 | // Rename the phi nodes in our successor block | ||||
1249 | if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front())) | ||||
1250 | continue; | ||||
1251 | AccessList *Accesses = It->second.get(); | ||||
1252 | auto *Phi = cast<MemoryPhi>(&Accesses->front()); | ||||
1253 | Phi->addIncoming(LiveOnEntryDef.get(), BB); | ||||
1254 | } | ||||
1255 | |||||
1256 | auto It = PerBlockAccesses.find(BB); | ||||
1257 | if (It == PerBlockAccesses.end()) | ||||
1258 | return; | ||||
1259 | |||||
1260 | auto &Accesses = It->second; | ||||
1261 | for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) { | ||||
1262 | auto Next = std::next(AI); | ||||
1263 | // If we have a phi, just remove it. We are going to replace all | ||||
1264 | // users with live on entry. | ||||
1265 | if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI)) | ||||
1266 | UseOrDef->setDefiningAccess(LiveOnEntryDef.get()); | ||||
1267 | else | ||||
1268 | Accesses->erase(AI); | ||||
1269 | AI = Next; | ||||
1270 | } | ||||
1271 | } | ||||
1272 | |||||
1273 | MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT) | ||||
1274 | : DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr), | ||||
1275 | SkipWalker(nullptr) { | ||||
1276 | // Build MemorySSA using a batch alias analysis. This reuses the internal | ||||
1277 | // state that AA collects during an alias()/getModRefInfo() call. This is | ||||
1278 | // safe because there are no CFG changes while building MemorySSA, and it can | ||||
1279 | // significantly reduce the time spent by the compiler in AA, because we will | ||||
1280 | // make queries about all the instructions in the Function. | ||||
1281 | assert(AA && "No alias analysis?"); | ||||
1282 | BatchAAResults BatchAA(*AA); | ||||
1283 | buildMemorySSA(BatchAA); | ||||
1284 | // Intentionally leave AA as nullptr while building so we don't accidentally | ||||
1285 | // use non-batch AliasAnalysis. | ||||
1286 | this->AA = AA; | ||||
1287 | // Also create the walker here. | ||||
1288 | getWalker(); | ||||
1289 | } | ||||
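// The batching assumption above, made explicit: BatchAAResults memoizes
// alias()/getModRefInfo() answers, which is only sound while the queried IR
// is not mutated; construction is read-only over F, so the cache stays valid:
//
//   BatchAAResults BatchAA(*AA); // caches AA answers across queries
//   buildMemorySSA(BatchAA);     // no IR/CFG mutation during the build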
1290 | |||||
1291 | MemorySSA::~MemorySSA() { | ||||
1292 | // Drop all our references | ||||
1293 | for (const auto &Pair : PerBlockAccesses) | ||||
1294 | for (MemoryAccess &MA : *Pair.second) | ||||
1295 | MA.dropAllReferences(); | ||||
1296 | } | ||||
1297 | |||||
1298 | MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) { | ||||
1299 | auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr)); | ||||
1300 | |||||
1301 | if (Res.second) | ||||
1302 | Res.first->second = std::make_unique<AccessList>(); | ||||
1303 | return Res.first->second.get(); | ||||
1304 | } | ||||
1305 | |||||
1306 | MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) { | ||||
1307 | auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr)); | ||||
1308 | |||||
1309 | if (Res.second) | ||||
1310 | Res.first->second = std::make_unique<DefsList>(); | ||||
1311 | return Res.first->second.get(); | ||||
1312 | } | ||||
1313 | |||||
1314 | namespace llvm { | ||||
1315 | |||||
1316 | /// This class is a batch walker of all MemoryUse's in the program, and points | ||||
1317 | /// their defining access at the thing that actually clobbers them. Because it | ||||
1318 | /// is a batch walker that touches everything, it does not operate like the | ||||
1319 | /// other walkers. This walker is basically performing a top-down SSA renaming | ||||
1320 | /// pass, where the version stack is used as the cache. This enables it to be | ||||
1321 | /// significantly more time and memory efficient than using the regular walker, | ||||
1322 | /// which is walking bottom-up. | ||||
1323 | class MemorySSA::OptimizeUses { | ||||
1324 | public: | ||||
1325 | OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker, | ||||
1326 | BatchAAResults *BAA, DominatorTree *DT) | ||||
1327 | : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {} | ||||
1328 | |||||
1329 | void optimizeUses(); | ||||
1330 | |||||
1331 | private: | ||||
1332 | /// This represents where a given memorylocation is in the stack. | ||||
1333 | struct MemlocStackInfo { | ||||
1334 | // This essentially is keeping track of versions of the stack. Whenever | ||||
1335 | // the stack changes due to pushes or pops, these versions increase. | ||||
1336 | unsigned long StackEpoch; | ||||
1337 | unsigned long PopEpoch; | ||||
1338 | // This is the lower bound of places on the stack to check. It is equal to | ||||
1339 | // the place the last stack walk ended. | ||||
1340 | // Note: Correctness depends on this being initialized to 0, which | ||||
1341 | // DenseMap does. | ||||
1342 | unsigned long LowerBound; | ||||
1343 | const BasicBlock *LowerBoundBlock; | ||||
1344 | // This is where the last walk for this memory location ended. | ||||
1345 | unsigned long LastKill; | ||||
1346 | bool LastKillValid; | ||||
1347 | Optional<AliasResult> AR; | ||||
1348 | }; | ||||
1349 | |||||
1350 | void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &, | ||||
1351 | SmallVectorImpl<MemoryAccess *> &, | ||||
1352 | DenseMap<MemoryLocOrCall, MemlocStackInfo> &); | ||||
1353 | |||||
1354 | MemorySSA *MSSA; | ||||
1355 | CachingWalker<BatchAAResults> *Walker; | ||||
1356 | BatchAAResults *AA; | ||||
1357 | DominatorTree *DT; | ||||
1358 | }; | ||||
1359 | |||||
1360 | } // end namespace llvm | ||||
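// Intuition for the two epochs in MemlocStackInfo above (values
// hypothetical): every push of a def or phi bumps StackEpoch; every batch of
// pops when moving to a non-dominated block bumps PopEpoch. A location whose
// recorded epochs still match the current ones knows the stack region it
// already scanned is intact, so only newly pushed entries need checking:
//
//   push MemoryDef            -> ++StackEpoch  // new entries above LowerBound
//   pop defs on block change  -> ++PopEpoch    // LowerBound/LastKill may be stale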
1361 | |||||
1362 | /// Optimize the uses in a given block. This is basically the SSA renaming | ||||
1363 | /// algorithm, with one caveat: We are able to use a single stack for all | ||||
1364 | /// MemoryUses. This is because the set of *possible* reaching MemoryDefs is | ||||
1365 | /// the same for every MemoryUse. The *actual* clobbering MemoryDef is just | ||||
1366 | /// going to be some position in that stack of possible ones. | ||||
1367 | /// | ||||
1368 | /// For each MemoryLocation, we track the stack positions it still needs | ||||
1369 | /// to check and where its last walk ended, because we only want to check | ||||
1370 | /// the things that have changed since last time. The same MemoryLocation | ||||
1371 | /// should get clobbered by the same store (getModRefInfo does not currently | ||||
1372 | /// use invariantness or the like; if it starts to, we can extend | ||||
1373 | /// MemoryLocOrCall to include the relevant data). | ||||
1374 | void MemorySSA::OptimizeUses::optimizeUsesInBlock( | ||||
1375 | const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch, | ||||
1376 | SmallVectorImpl<MemoryAccess *> &VersionStack, | ||||
1377 | DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) { | ||||
1378 | |||||
1379 | /// If no accesses, nothing to do. | ||||
1380 | MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB); | ||||
1381 | if (Accesses == nullptr) | ||||
1382 | return; | ||||
1383 | |||||
1384 | // Pop everything that doesn't dominate the current block off the stack, | ||||
1385 | // increment the PopEpoch to account for this. | ||||
1386 | while (true) { | ||||
1387 | assert( | ||||
1388 | !VersionStack.empty() && | ||||
1389 | "Version stack should have liveOnEntry sentinel dominating everything"); | ||||
1390 | BasicBlock *BackBlock = VersionStack.back()->getBlock(); | ||||
1391 | if (DT->dominates(BackBlock, BB)) | ||||
1392 | break; | ||||
1393 | while (VersionStack.back()->getBlock() == BackBlock) | ||||
1394 | VersionStack.pop_back(); | ||||
1395 | ++PopEpoch; | ||||
1396 | } | ||||
1397 | |||||
1398 | for (MemoryAccess &MA : *Accesses) { | ||||
1399 | auto *MU = dyn_cast<MemoryUse>(&MA); | ||||
1400 | if (!MU) { | ||||
1401 | VersionStack.push_back(&MA); | ||||
1402 | ++StackEpoch; | ||||
1403 | continue; | ||||
1404 | } | ||||
1405 | |||||
1406 | if (MU->isOptimized()) | ||||
1407 | continue; | ||||
1408 | |||||
1409 | if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) { | ||||
1410 | MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None); | ||||
1411 | continue; | ||||
1412 | } | ||||
1413 | |||||
1414 | MemoryLocOrCall UseMLOC(MU); | ||||
1415 | auto &LocInfo = LocStackInfo[UseMLOC]; | ||||
1416 | // If the pop epoch changed, it means we've removed stuff from top of | ||||
1417 | // stack due to changing blocks. We may have to reset the lower bound or | ||||
1418 | // last kill info. | ||||
1419 | if (LocInfo.PopEpoch != PopEpoch) { | ||||
1420 | LocInfo.PopEpoch = PopEpoch; | ||||
1421 | LocInfo.StackEpoch = StackEpoch; | ||||
1422 | // If the lower bound was in something that no longer dominates us, we | ||||
1423 | // have to reset it. | ||||
1424 | // We can't simply track stack size, because the stack may have had | ||||
1425 | // pushes/pops in the meantime. | ||||
1426 | // XXX: This is non-optimal, but is only slower in cases with heavily | ||||
1427 | // branching dominator trees. To get the optimal number of queries, we would | ||||
1428 | // make LowerBound and LastKill a per-loc stack, and pop it until | ||||
1429 | // the top of that stack dominates us. This does not seem worth it ATM. | ||||
1430 | // A much cheaper optimization would be to always explore the deepest | ||||
1431 | // branch of the dominator tree first. This will guarantee this resets on | ||||
1432 | // the smallest set of blocks. | ||||
1433 | if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB && | ||||
1434 | !DT->dominates(LocInfo.LowerBoundBlock, BB)) { | ||||
1435 | // Reset the lower bound of things to check. | ||||
1436 | // TODO: Some day we should be able to reset to last kill, rather than | ||||
1437 | // 0. | ||||
1438 | LocInfo.LowerBound = 0; | ||||
1439 | LocInfo.LowerBoundBlock = VersionStack[0]->getBlock(); | ||||
1440 | LocInfo.LastKillValid = false; | ||||
1441 | } | ||||
1442 | } else if (LocInfo.StackEpoch != StackEpoch) { | ||||
1443 | // If all that has changed is the StackEpoch, we only have to check the | ||||
1444 | // new things on the stack, because we've checked everything before. In | ||||
1445 | // this case, the lower bound of things to check remains the same. | ||||
1446 | LocInfo.PopEpoch = PopEpoch; | ||||
1447 | LocInfo.StackEpoch = StackEpoch; | ||||
1448 | } | ||||
1449 | if (!LocInfo.LastKillValid) { | ||||
1450 | LocInfo.LastKill = VersionStack.size() - 1; | ||||
1451 | LocInfo.LastKillValid = true; | ||||
1452 | LocInfo.AR = AliasResult::MayAlias; | ||||
1453 | } | ||||
1454 | |||||
1455 | // At this point, we should have corrected last kill and LowerBound to be | ||||
1456 | // in bounds. | ||||
1457 | assert(LocInfo.LowerBound < VersionStack.size() && | ||||
1458 | "Lower bound out of range"); | ||||
1459 | assert(LocInfo.LastKill < VersionStack.size() && | ||||
1460 | "Last kill info out of range"); | ||||
1461 | // In any case, the new upper bound is the top of the stack. | ||||
1462 | unsigned long UpperBound = VersionStack.size() - 1; | ||||
1463 | |||||
1464 | if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) { | ||||
1465 | LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " (" | ||||
1466 | << *(MU->getMemoryInst()) << ")" | ||||
1467 | << " because there are " | ||||
1468 | << UpperBound - LocInfo.LowerBound | ||||
1469 | << " stores to disambiguate\n"); | ||||
1470 | // Because we did not walk, LastKill is no longer valid, as this may | ||||
1471 | // have been a kill. | ||||
1472 | LocInfo.LastKillValid = false; | ||||
1473 | continue; | ||||
1474 | } | ||||
1475 | bool FoundClobberResult = false; | ||||
1476 | unsigned UpwardWalkLimit = MaxCheckLimit; | ||||
1477 | while (UpperBound > LocInfo.LowerBound) { | ||||
1478 | if (isa<MemoryPhi>(VersionStack[UpperBound])) { | ||||
1479 | // For phis, use the walker, see where we ended up, go there. | ||||
1480 | // The invariant.group handling in MemorySSA is ad-hoc and doesn't | ||||
1481 | // support updates, so don't use it to optimize uses. | ||||
1482 | MemoryAccess *Result = | ||||
1483 | Walker->getClobberingMemoryAccessWithoutInvariantGroup( | ||||
1484 | MU, UpwardWalkLimit); | ||||
1485 | // We are guaranteed to find it or something is wrong. | ||||
1486 | while (VersionStack[UpperBound] != Result) { | ||||
1487 | assert(UpperBound != 0); | ||||
1488 | --UpperBound; | ||||
1489 | } | ||||
1490 | FoundClobberResult = true; | ||||
1491 | break; | ||||
1492 | } | ||||
1493 | |||||
1494 | MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]); | ||||
1495 | ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA); | ||||
1496 | if (CA.IsClobber) { | ||||
1497 | FoundClobberResult = true; | ||||
1498 | LocInfo.AR = CA.AR; | ||||
1499 | break; | ||||
1500 | } | ||||
1501 | --UpperBound; | ||||
1502 | } | ||||
1503 | |||||
1504 | // Note: Phis always have AliasResult AR set to MayAlias ATM. | ||||
1505 | |||||
1506 | // At the end of this loop, UpperBound is either a clobber, or the lower | ||||
1507 | // bound. PHI walking may cause it to be < LowerBound, and in fact < LastKill. | ||||
1508 | if (FoundClobberResult || UpperBound < LocInfo.LastKill) { | ||||
1509 | // We were last killed now by where we got to | ||||
1510 | if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound])) | ||||
1511 | LocInfo.AR = None; | ||||
1512 | MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR); | ||||
1513 | LocInfo.LastKill = UpperBound; | ||||
1514 | } else { | ||||
1515 | // Otherwise, we checked all the new ones, and now we know we can get to | ||||
1516 | // LastKill. | ||||
1517 | MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR); | ||||
1518 | } | ||||
1519 | LocInfo.LowerBound = VersionStack.size() - 1; | ||||
1520 | LocInfo.LowerBoundBlock = BB; | ||||
1521 | } | ||||
1522 | } | ||||
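// Worked example of the bounds logic above (hypothetical stack): with
// VersionStack = [liveOnEntry, Def1, Def2, Def3] and LocInfo.LowerBound = 1,
// only indices 3 and 2 are queried for this use; index 1 and below were
// checked on an earlier visit, and matching epochs prove that region of the
// stack has not changed since.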
1523 | |||||
1524 | /// Optimize uses to point to their actual clobbering definitions. | ||||
1525 | void MemorySSA::OptimizeUses::optimizeUses() { | ||||
1526 | SmallVector<MemoryAccess *, 16> VersionStack; | ||||
1527 | DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo; | ||||
1528 | VersionStack.push_back(MSSA->getLiveOnEntryDef()); | ||||
1529 | |||||
1530 | unsigned long StackEpoch = 1; | ||||
1531 | unsigned long PopEpoch = 1; | ||||
1532 | // We perform a non-recursive top-down dominator tree walk. | ||||
1533 | for (const auto *DomNode : depth_first(DT->getRootNode())) | ||||
1534 | optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack, | ||||
1535 | LocStackInfo); | ||||
1536 | } | ||||
1537 | |||||
1538 | void MemorySSA::placePHINodes( | ||||
1539 | const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) { | ||||
1540 | // Determine where our MemoryPhi's should go | ||||
1541 | ForwardIDFCalculator IDFs(*DT); | ||||
1542 | IDFs.setDefiningBlocks(DefiningBlocks); | ||||
1543 | SmallVector<BasicBlock *, 32> IDFBlocks; | ||||
1544 | IDFs.calculate(IDFBlocks); | ||||
1545 | |||||
1546 | // Now place MemoryPhi nodes. | ||||
1547 | for (auto &BB : IDFBlocks) | ||||
1548 | createMemoryPhi(BB); | ||||
1549 | } | ||||
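// Example of the IDF placement above: if blocks A and B both contain
// MemoryDefs and both branch to C, then C lies in the iterated dominance
// frontier of {A, B} and receives a merge point (block names and access
// numbers hypothetical, shown in MemorySSA's printed style):
//
//   ; C:
//   ;   3 = MemoryPhi({A, 1}, {B, 2})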
1550 | |||||
1551 | void MemorySSA::buildMemorySSA(BatchAAResults &BAA) { | ||||
1552 | // We create an access to represent "live on entry", for things like | ||||
1553 | // arguments or users of globals, where the memory they use is defined before | ||||
1554 | // the beginning of the function. We do not actually insert it into the IR. | ||||
1555 | // We do not define a live on exit for the immediate uses, and thus our | ||||
1556 | // semantics do *not* imply that something with no immediate uses can simply | ||||
1557 | // be removed. | ||||
1558 | BasicBlock &StartingPoint = F.getEntryBlock(); | ||||
1559 | LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr, | ||||
1560 | &StartingPoint, NextID++)); | ||||
1561 | |||||
1562 | // We maintain lists of memory accesses per-block, trading memory for time. We | ||||
1563 | // could just look up the memory access for every possible instruction in the | ||||
1564 | // stream. | ||||
1565 | SmallPtrSet<BasicBlock *, 32> DefiningBlocks; | ||||
1566 | // Go through each block, figure out where defs occur, and chain together all | ||||
1567 | // the accesses. | ||||
1568 | for (BasicBlock &B : F) { | ||||
1569 | bool InsertIntoDef = false; | ||||
1570 | AccessList *Accesses = nullptr; | ||||
1571 | DefsList *Defs = nullptr; | ||||
1572 | for (Instruction &I : B) { | ||||
1573 | MemoryUseOrDef *MUD = createNewAccess(&I, &BAA); | ||||
1574 | if (!MUD) | ||||
1575 | continue; | ||||
1576 | |||||
1577 | if (!Accesses) | ||||
1578 | Accesses = getOrCreateAccessList(&B); | ||||
1579 | Accesses->push_back(MUD); | ||||
1580 | if (isa<MemoryDef>(MUD)) { | ||||
1581 | InsertIntoDef = true; | ||||
1582 | if (!Defs) | ||||
1583 | Defs = getOrCreateDefsList(&B); | ||||
1584 | Defs->push_back(*MUD); | ||||
1585 | } | ||||
1586 | } | ||||
1587 | if (InsertIntoDef) | ||||
1588 | DefiningBlocks.insert(&B); | ||||
1589 | } | ||||
1590 | placePHINodes(DefiningBlocks); | ||||
1591 | |||||
1592 | // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get | ||||
1593 | // filled in with all blocks. | ||||
1594 | SmallPtrSet<BasicBlock *, 16> Visited; | ||||
1595 | renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited); | ||||
1596 | |||||
1597 | // Mark the uses in unreachable blocks as live on entry, so that they go | ||||
1598 | // somewhere. | ||||
1599 | for (auto &BB : F) | ||||
1600 | if (!Visited.count(&BB)) | ||||
1601 | markUnreachableAsLiveOnEntry(&BB); | ||||
1602 | } | ||||
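// Recap of the build pipeline above, as comments:
//   1. Scan each block, creating a MemoryUse/MemoryDef per memory instruction
//      (createNewAccess) and recording which blocks contain defs.
//   2. Place MemoryPhis on the iterated dominance frontier (placePHINodes).
//   3. SSA-rename accesses top-down over the dominator tree (renamePass).
//   4. Point accesses in unreachable blocks at liveOnEntry.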
1603 | |||||
1604 | MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); } | ||||
1605 | |||||
1606 | MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() { | ||||
1607 | if (Walker) | ||||
1608 | return Walker.get(); | ||||
1609 | |||||
1610 | if (!WalkerBase) | ||||
1611 | WalkerBase = | ||||
1612 | std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT); | ||||
1613 | |||||
1614 | Walker = | ||||
1615 | std::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get()); | ||||
1616 | return Walker.get(); | ||||
1617 | } | ||||
1618 | |||||
1619 | MemorySSAWalker *MemorySSA::getSkipSelfWalker() { | ||||
1620 | if (SkipWalker) | ||||
1621 | return SkipWalker.get(); | ||||
1622 | |||||
1623 | if (!WalkerBase) | ||||
1624 | WalkerBase = | ||||
1625 | std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT); | ||||
1626 | |||||
1627 | SkipWalker = | ||||
1628 | std::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get()); | ||||
1629 | return SkipWalker.get(); | ||||
1630 | } | ||||
1631 | |||||
1632 | |||||
1633 | // This is a helper function used by the creation routines. It places NewAccess | ||||
1634 | // into the access and defs lists for a given basic block, at the given | ||||
1635 | // insertion point. | ||||
1636 | void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess, | ||||
1637 | const BasicBlock *BB, | ||||
1638 | InsertionPlace Point) { | ||||
1639 | auto *Accesses = getOrCreateAccessList(BB); | ||||
1640 | if (Point == Beginning) { | ||||
1641 | // If it's a phi node, it goes first, otherwise, it goes after any phi | ||||
1642 | // nodes. | ||||
1643 | if (isa<MemoryPhi>(NewAccess)) { | ||||
1644 | Accesses->push_front(NewAccess); | ||||
1645 | auto *Defs = getOrCreateDefsList(BB); | ||||
1646 | Defs->push_front(*NewAccess); | ||||
1647 | } else { | ||||
1648 | auto AI = find_if_not( | ||||
1649 | *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); }); | ||||
1650 | Accesses->insert(AI, NewAccess); | ||||
1651 | if (!isa<MemoryUse>(NewAccess)) { | ||||
1652 | auto *Defs = getOrCreateDefsList(BB); | ||||
1653 | auto DI = find_if_not( | ||||
1654 | *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); }); | ||||
1655 | Defs->insert(DI, *NewAccess); | ||||
1656 | } | ||||
1657 | } | ||||
1658 | } else { | ||||
1659 | Accesses->push_back(NewAccess); | ||||
1660 | if (!isa<MemoryUse>(NewAccess)) { | ||||
1661 | auto *Defs = getOrCreateDefsList(BB); | ||||
1662 | Defs->push_back(*NewAccess); | ||||
1663 | } | ||||
1664 | } | ||||
1665 | BlockNumberingValid.erase(BB); | ||||
1666 | } | ||||
1667 | |||||
1668 | void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB, | ||||
1669 | AccessList::iterator InsertPt) { | ||||
1670 | auto *Accesses = getWritableBlockAccesses(BB); | ||||
1671 | bool WasEnd = InsertPt == Accesses->end(); | ||||
1672 | Accesses->insert(AccessList::iterator(InsertPt), What); | ||||
1673 | if (!isa<MemoryUse>(What)) { | ||||
1674 | auto *Defs = getOrCreateDefsList(BB); | ||||
1675 | // If we got asked to insert at the end, we have an easy job: just shove it | ||||
1676 | // at the end. If we got asked to insert before an existing def, we also get | ||||
1677 | // an iterator. If we got asked to insert before a use, we have to hunt for | ||||
1678 | // the next def. | ||||
1679 | if (WasEnd) { | ||||
1680 | Defs->push_back(*What); | ||||
1681 | } else if (isa<MemoryDef>(InsertPt)) { | ||||
1682 | Defs->insert(InsertPt->getDefsIterator(), *What); | ||||
1683 | } else { | ||||
1684 | while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt)) | ||||
1685 | ++InsertPt; | ||||
1686 | // Either we found a def, or we are inserting at the end | ||||
1687 | if (InsertPt == Accesses->end()) | ||||
1688 | Defs->push_back(*What); | ||||
1689 | else | ||||
1690 | Defs->insert(InsertPt->getDefsIterator(), *What); | ||||
1691 | } | ||||
1692 | } | ||||
1693 | BlockNumberingValid.erase(BB); | ||||
1694 | } | ||||
1695 | |||||
1696 | void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) { | ||||
1697 | // Keep it in the lookup tables, remove from the lists | ||||
1698 | removeFromLists(What, false); | ||||
1699 | |||||
1700 | // Note that moving should implicitly invalidate the optimized state of a | ||||
1701 | // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a | ||||
1702 | // MemoryDef. | ||||
1703 | if (auto *MD = dyn_cast<MemoryDef>(What)) | ||||
1704 | MD->resetOptimized(); | ||||
1705 | What->setBlock(BB); | ||||
1706 | } | ||||
1707 | |||||
1708 | // Move What before Where in the IR. The end result is that What will belong to | ||||
1709 | // the right lists and have the right Block set, but will not otherwise be | ||||
1710 | // correct. It will not have the right defining access, and if it is a def, | ||||
1711 | // things below it will not properly be updated. | ||||
1712 | void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB, | ||||
1713 | AccessList::iterator Where) { | ||||
1714 | prepareForMoveTo(What, BB); | ||||
1715 | insertIntoListsBefore(What, BB, Where); | ||||
1716 | } | ||||
1717 | |||||
1718 | void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB, | ||||
1719 | InsertionPlace Point) { | ||||
1720 | if (isa<MemoryPhi>(What)) { | ||||
1721 | assert(Point == Beginning && | ||||
1722 | "Can only move a Phi at the beginning of the block"); | ||||
1723 | // Update lookup table entry | ||||
1724 | ValueToMemoryAccess.erase(What->getBlock()); | ||||
1725 | bool Inserted = ValueToMemoryAccess.insert({BB, What}).second; | ||||
1726 | (void)Inserted; | ||||
1727 | assert(Inserted && "Cannot move a Phi to a block that already has one"); | ||||
1728 | } | ||||
1729 | |||||
1730 | prepareForMoveTo(What, BB); | ||||
1731 | insertIntoListsForBlock(What, BB, Point); | ||||
1732 | } | ||||
1733 | |||||
1734 | MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) { | ||||
1735 | assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB"); | ||||
1736 | MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++); | ||||
1737 | // Phis are always placed at the front of the block. | ||||
1738 | insertIntoListsForBlock(Phi, BB, Beginning); | ||||
1739 | ValueToMemoryAccess[BB] = Phi; | ||||
1740 | return Phi; | ||||
1741 | } | ||||
1742 | |||||
1743 | MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I, | ||||
1744 | MemoryAccess *Definition, | ||||
1745 | const MemoryUseOrDef *Template, | ||||
1746 | bool CreationMustSucceed) { | ||||
1747 | assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI"); | ||||
1748 | MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template); | ||||
1749 | if (CreationMustSucceed) | ||||
1750 | assert(NewAccess != nullptr && "Tried to create a memory access for a " | ||||
1751 | "non-memory touching instruction"); | ||||
1752 | if (NewAccess) { | ||||
1753 | assert((!Definition || !isa<MemoryUse>(Definition)) && | ||||
1754 | "A use cannot be a defining access"); | ||||
1755 | NewAccess->setDefiningAccess(Definition); | ||||
1756 | } | ||||
1757 | return NewAccess; | ||||
1758 | } | ||||
1759 | |||||
1760 | // Return true if the instruction has ordering constraints. | ||||
1761 | // Note specifically that this only considers stores and loads | ||||
1762 | // because others are still considered ModRef by getModRefInfo. | ||||
1763 | static inline bool isOrdered(const Instruction *I) { | ||||
1764 | if (auto *SI = dyn_cast<StoreInst>(I)) { | ||||
1765 | if (!SI->isUnordered()) | ||||
1766 | return true; | ||||
1767 | } else if (auto *LI = dyn_cast<LoadInst>(I)) { | ||||
1768 | if (!LI->isUnordered()) | ||||
1769 | return true; | ||||
1770 | } | ||||
1771 | return false; | ||||
1772 | } | ||||
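// Examples for isOrdered above (LoadInst/StoreInst::isUnordered is false for
// volatile accesses and for any atomic ordering stronger than unordered):
//
//   %a = load i32, ptr %p                          ; unordered -> false
//   %b = load atomic i32, ptr %p seq_cst, align 4  ; atomic    -> true
//   store volatile i32 0, ptr %p                   ; volatile  -> true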
1773 | |||||
1774 | /// Helper function to create new memory accesses | ||||
1775 | template <typename AliasAnalysisType> | ||||
1776 | MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I, | ||||
1777 | AliasAnalysisType *AAP, | ||||
1778 | const MemoryUseOrDef *Template) { | ||||
1779 | // The assume intrinsic has a control dependency which we model by claiming | ||||
1780 | // that it writes arbitrarily. Debuginfo intrinsics may be considered | ||||
1781 | // clobbers when we have a nonstandard AA pipeline. Ignore these fake memory | ||||
1782 | // dependencies here. | ||||
1783 | // FIXME: Replace this special casing with a more accurate modelling of | ||||
1784 | // assume's control dependency. | ||||
1785 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { | ||||
1786 | switch (II->getIntrinsicID()) { | ||||
1787 | default: | ||||
1788 | break; | ||||
1789 | case Intrinsic::assume: | ||||
1790 | case Intrinsic::experimental_noalias_scope_decl: | ||||
1791 | case Intrinsic::pseudoprobe: | ||||
1792 | return nullptr; | ||||
1793 | } | ||||
1794 | } | ||||
1795 | |||||
1796 | // Using a nonstandard AA pipeline might leave us with unexpected modref | ||||
1797 | // results for I, so add a check to not model instructions that may not read | ||||
1798 | // from or write to memory. This is necessary for correctness. | ||||
1799 | if (!I->mayReadFromMemory() && !I->mayWriteToMemory()) | ||||
1800 | return nullptr; | ||||
1801 | |||||
1802 | bool Def, Use; | ||||
1803 | if (Template) { | ||||
1804 | Def = isa<MemoryDef>(Template); | ||||
1805 | Use = isa<MemoryUse>(Template); | ||||
1806 | #if !defined(NDEBUG) | ||||
1807 | ModRefInfo ModRef = AAP->getModRefInfo(I, None); | ||||
1808 | bool DefCheck, UseCheck; | ||||
1809 | DefCheck = isModSet(ModRef) || isOrdered(I); | ||||
1810 | UseCheck = isRefSet(ModRef); | ||||
1811 | assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template"); | ||||
1812 | #endif | ||||
1813 | } else { | ||||
1814 | // Find out what effect this instruction has on memory. | ||||
1815 | ModRefInfo ModRef = AAP->getModRefInfo(I, None); | ||||
1816 | // The isOrdered check is used to ensure that volatiles end up as defs | ||||
1817 | // (atomics end up as ModRef right now anyway). Until we separate the | ||||
1818 | // ordering chain from the memory chain, this enables people to see at least | ||||
1819 | // some relative ordering to volatiles. Note that getClobberingMemoryAccess | ||||
1820 | // will still give an answer that bypasses other volatile loads. TODO: | ||||
1821 | // Separate memory aliasing and ordering into two different chains so that | ||||
1822 | // we can precisely represent both "what memory will this read/write/is | ||||
1823 | // clobbered by" and "what instructions can I move this past". | ||||
1824 | Def = isModSet(ModRef) || isOrdered(I); | ||||
1825 | Use = isRefSet(ModRef); | ||||
1826 | } | ||||
1827 | |||||
1828 | // It's possible for an instruction to not modify memory at all. During | ||||
1829 | // construction, we ignore them. | ||||
1830 | if (!Def && !Use) | ||||
1831 | return nullptr; | ||||
1832 | |||||
1833 | MemoryUseOrDef *MUD; | ||||
1834 | if (Def) | ||||
1835 | MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++); | ||||
1836 | else | ||||
1837 | MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent()); | ||||
1838 | ValueToMemoryAccess[I] = MUD; | ||||
1839 | return MUD; | ||||
1840 | } | ||||
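// Classification examples implied by the Def/Use logic above (Def wins when
// both bits are set):
//
//   %v = load i32, ptr %p        ; Ref only           -> MemoryUse
//   store i32 0, ptr %p          ; Mod                -> MemoryDef
//   call void @f()               ; ModRef in general  -> MemoryDef
//   %c = load atomic i32, ptr %p acquire, align 4
//                                ; Ref, but isOrdered -> MemoryDef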
1841 | |||||
1842 | /// Properly remove \p MA from all of MemorySSA's lookup tables. | ||||
1843 | void MemorySSA::removeFromLookups(MemoryAccess *MA) { | ||||
1844 | assert(MA->use_empty() && | ||||
1845 | "Trying to remove memory access that still has uses"); | ||||
1846 | BlockNumbering.erase(MA); | ||||
1847 | if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) | ||||
1848 | MUD->setDefiningAccess(nullptr); | ||||
1849 | // Invalidate our walker's cache if necessary | ||||
1850 | if (!isa<MemoryUse>(MA)) | ||||
1851 | getWalker()->invalidateInfo(MA); | ||||
1852 | |||||
1853 | Value *MemoryInst; | ||||
1854 | if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) | ||||
1855 | MemoryInst = MUD->getMemoryInst(); | ||||
1856 | else | ||||
1857 | MemoryInst = MA->getBlock(); | ||||
1858 | |||||
1859 | auto VMA = ValueToMemoryAccess.find(MemoryInst); | ||||
1860 | if (VMA->second == MA) | ||||
1861 | ValueToMemoryAccess.erase(VMA); | ||||
1862 | } | ||||
1863 | |||||
1864 | /// Properly remove \p MA from all of MemorySSA's lists. | ||||
1865 | /// | ||||
1866 | /// Because of the way the intrusive list and use lists work, it is important to | ||||
1867 | /// do removal in the right order. | ||||
1868 | /// ShouldDelete defaults to true, and will cause the memory access to also be | ||||
1869 | /// deleted, not just removed. | ||||
1870 | void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) { | ||||
1871 | BasicBlock *BB = MA->getBlock(); | ||||
1872 | // The access list owns the reference, so we erase it from the non-owning list | ||||
1873 | // first. | ||||
1874 | if (!isa<MemoryUse>(MA)) { | ||||
1875 | auto DefsIt = PerBlockDefs.find(BB); | ||||
1876 | std::unique_ptr<DefsList> &Defs = DefsIt->second; | ||||
1877 | Defs->remove(*MA); | ||||
1878 | if (Defs->empty()) | ||||
1879 | PerBlockDefs.erase(DefsIt); | ||||
1880 | } | ||||
1881 | |||||
1882 | // The erase call here will delete it. If we don't want it deleted, we call | ||||
1883 | // remove instead. | ||||
1884 | auto AccessIt = PerBlockAccesses.find(BB); | ||||
1885 | std::unique_ptr<AccessList> &Accesses = AccessIt->second; | ||||
1886 | if (ShouldDelete) | ||||
1887 | Accesses->erase(MA); | ||||
1888 | else | ||||
1889 | Accesses->remove(MA); | ||||
1890 | |||||
1891 | if (Accesses->empty()) { | ||||
1892 | PerBlockAccesses.erase(AccessIt); | ||||
1893 | BlockNumberingValid.erase(BB); | ||||
1894 | } | ||||
1895 | } | ||||
1896 | |||||
1897 | void MemorySSA::print(raw_ostream &OS) const { | ||||
1898 | MemorySSAAnnotatedWriter Writer(this); | ||||
1899 | F.print(OS, &Writer); | ||||
1900 | } | ||||
1901 | |||||
1902 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | ||||
1903 | LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); } | ||||
1904 | #endif | ||||
1905 | |||||
1906 | void MemorySSA::verifyMemorySSA(VerificationLevel VL) const { | ||||
1907 | #if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS) | ||||
1908 | VL = VerificationLevel::Full; | ||||
1909 | #endif | ||||
1910 | |||||
1911 | #ifndef NDEBUG | ||||
1912 | verifyOrderingDominationAndDefUses(F, VL); | ||||
1913 | verifyDominationNumbers(F); | ||||
1914 | if (VL == VerificationLevel::Full) | ||||
1915 | verifyPrevDefInPhis(F); | ||||
1916 | #endif | ||||
1917 | // Previously, the verification also checked that the clobberingAccess | ||||
1918 | // cached by MemorySSA is the same as the clobberingAccess found at a later | ||||
1919 | // query to AA. This does not hold true in general due to the current fragility | ||||
1920 | // of BasicAA, which has arbitrary caps on the things it analyzes before giving | ||||
1921 | // up. As a result, transformations that are correct will lead to BasicAA | ||||
1922 | // returning different Alias answers before and after that transformation. | ||||
1923 | // Invalidating MemorySSA is not an option, as the results in BasicAA can be so | ||||
1924 | // random that, in the worst case, we'd need to rebuild MemorySSA from scratch after | ||||
1925 | // every transformation, which defeats the purpose of using it. For such an | ||||
1926 | // example, see test4 added in D51960. | ||||
1927 | } | ||||
1928 | |||||
1929 | void MemorySSA::verifyPrevDefInPhis(Function &F) const { | ||||
1930 | for (const BasicBlock &BB : F) { | ||||
1931 | if (MemoryPhi *Phi = getMemoryAccess(&BB)) { | ||||
1932 | for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) { | ||||
1933 | auto *Pred = Phi->getIncomingBlock(I); | ||||
1934 | auto *IncAcc = Phi->getIncomingValue(I); | ||||
1935 | // If Pred has no unreachable predecessors, get last def looking at | ||||
1936 |         // IDoms. If, while walking IDoms, any of these has an unreachable
1937 | // predecessor, then the incoming def can be any access. | ||||
1938 | if (auto *DTNode = DT->getNode(Pred)) { | ||||
1939 | while (DTNode) { | ||||
1940 | if (auto *DefList = getBlockDefs(DTNode->getBlock())) { | ||||
1941 | auto *LastAcc = &*(--DefList->end()); | ||||
1942 |           assert(LastAcc == IncAcc &&
1943 |                  "Incorrect incoming access into phi.");
1944 | (void)IncAcc; | ||||
1945 | (void)LastAcc; | ||||
1946 | break; | ||||
1947 | } | ||||
1948 | DTNode = DTNode->getIDom(); | ||||
1949 | } | ||||
1950 | } else { | ||||
1951 | // If Pred has unreachable predecessors, but has at least a Def, the | ||||
1952 | // incoming access can be the last Def in Pred, or it could have been | ||||
1953 | // optimized to LoE. After an update, though, the LoE may have been | ||||
1954 | // replaced by another access, so IncAcc may be any access. | ||||
1955 | // If Pred has unreachable predecessors and no Defs, incoming access | ||||
1956 | // should be LoE; However, after an update, it may be any access. | ||||
1957 | } | ||||
1958 | } | ||||
1959 | } | ||||
1960 | } | ||||
1961 | } | ||||
1962 | |||||
1963 | /// Verify that all of the blocks we believe to have valid domination numbers | ||||
1964 | /// actually have valid domination numbers. | ||||
1965 | void MemorySSA::verifyDominationNumbers(const Function &F) const { | ||||
1966 | if (BlockNumberingValid.empty()) | ||||
1967 | return; | ||||
1968 | |||||
1969 | SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid; | ||||
1970 | for (const BasicBlock &BB : F) { | ||||
1971 | if (!ValidBlocks.count(&BB)) | ||||
1972 | continue; | ||||
1973 | |||||
1974 | ValidBlocks.erase(&BB); | ||||
1975 | |||||
1976 | const AccessList *Accesses = getBlockAccesses(&BB); | ||||
1977 | // It's correct to say an empty block has valid numbering. | ||||
1978 | if (!Accesses) | ||||
1979 | continue; | ||||
1980 | |||||
1981 | // Block numbering starts at 1. | ||||
1982 | unsigned long LastNumber = 0; | ||||
1983 | for (const MemoryAccess &MA : *Accesses) { | ||||
1984 | auto ThisNumberIter = BlockNumbering.find(&MA); | ||||
1985 |       assert(ThisNumberIter != BlockNumbering.end() &&
1986 |              "MemoryAccess has no domination number in a valid block!");
1987 | |||||
1988 | unsigned long ThisNumber = ThisNumberIter->second; | ||||
1989 |       assert(ThisNumber > LastNumber &&
1990 |              "Domination numbers should be strictly increasing!");
1991 | (void)LastNumber; | ||||
1992 | LastNumber = ThisNumber; | ||||
1993 | } | ||||
1994 | } | ||||
1995 | |||||
1996 |   assert(ValidBlocks.empty() &&
1997 |          "All valid BasicBlocks should exist in F -- dangling pointers?");
1998 | } | ||||
1999 | |||||
2000 | /// Verify ordering: the order and existence of MemoryAccesses matches the
2001 | /// order and existence of memory-affecting instructions.
2002 | /// Verify domination: each definition dominates all of its uses.
2003 | /// Verify def-uses: the immediate use information - walk all the memory
2004 | /// accesses and verify that, for each use, it appears in the appropriate
2005 | /// def's use list.
2006 | void MemorySSA::verifyOrderingDominationAndDefUses(Function &F, | ||||
2007 | VerificationLevel VL) const { | ||||
2008 | // Walk all the blocks, comparing what the lookups think and what the access | ||||
2009 | // lists think, as well as the order in the blocks vs the order in the access | ||||
2010 | // lists. | ||||
2011 | SmallVector<MemoryAccess *, 32> ActualAccesses; | ||||
2012 | SmallVector<MemoryAccess *, 32> ActualDefs; | ||||
2013 | for (BasicBlock &B : F) { | ||||
2014 | const AccessList *AL = getBlockAccesses(&B); | ||||
2015 | const auto *DL = getBlockDefs(&B); | ||||
2016 | MemoryPhi *Phi = getMemoryAccess(&B); | ||||
2017 |     if (Phi) {
2018 | // Verify ordering. | ||||
2019 | ActualAccesses.push_back(Phi); | ||||
2020 | ActualDefs.push_back(Phi); | ||||
2021 | // Verify domination | ||||
2022 | for (const Use &U : Phi->uses()) { | ||||
2023 |         assert(dominates(Phi, U) && "Memory PHI does not dominate its uses");
2024 | (void)U; | ||||
2025 | } | ||||
2026 | // Verify def-uses for full verify. | ||||
2027 | if (VL == VerificationLevel::Full) { | ||||
2028 |         assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
2029 |                    pred_begin(&B), pred_end(&B))) &&
2030 |                "Incomplete MemoryPhi Node");
2031 | for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) { | ||||
2032 | verifyUseInDefs(Phi->getIncomingValue(I), Phi); | ||||
2033 |           assert(is_contained(predecessors(&B), Phi->getIncomingBlock(I)) &&
2034 |                  "Incoming phi block not a block predecessor");
2035 | } | ||||
2036 | } | ||||
2037 | } | ||||
2038 | |||||
2039 | for (Instruction &I : B) { | ||||
2040 | MemoryUseOrDef *MA = getMemoryAccess(&I); | ||||
2041 |       assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
2042 |              "We have memory affecting instructions "
2043 |              "in this block but they are not in the "
2044 |              "access list or defs list");
2045 | if (MA) { | ||||
2046 | // Verify ordering. | ||||
2047 | ActualAccesses.push_back(MA); | ||||
2048 | if (MemoryAccess *MD = dyn_cast<MemoryDef>(MA)) { | ||||
2049 | // Verify ordering. | ||||
2050 | ActualDefs.push_back(MA); | ||||
2051 | // Verify domination. | ||||
2052 | for (const Use &U : MD->uses()) { | ||||
2053 |           assert(dominates(MD, U) &&
2054 |                  "Memory Def does not dominate its uses");
2055 | (void)U; | ||||
2056 | } | ||||
2057 | } | ||||
2058 | // Verify def-uses for full verify. | ||||
2059 | if (VL == VerificationLevel::Full) | ||||
2060 | verifyUseInDefs(MA->getDefiningAccess(), MA); | ||||
2061 | } | ||||
2062 | } | ||||
2063 | // Either we hit the assert, really have no accesses, or we have both | ||||
2064 | // accesses and an access list. Same with defs. | ||||
2065 | if (!AL && !DL) | ||||
2066 | continue; | ||||
2067 | // Verify ordering. | ||||
// If we reach here, at least one of AL/DL is non-null. A defs list without
// an access list would make AL->size() below a null dereference (the defect
// this report flags), so make the invariant explicit:
assert(AL && "Expected an access list whenever a defs list exists");
2068 |     assert(AL->size() == ActualAccesses.size() &&
2069 |            "We don't have the same number of accesses in the block as on the "
2070 |            "access list");
2071 |     assert((DL || ActualDefs.size() == 0) &&
2072 |            "Either we should have a defs list, or we should have no defs");
2073 |     assert((!DL || DL->size() == ActualDefs.size()) &&
2074 |            "We don't have the same number of defs in the block as on the "
2075 |            "def list");
2076 | auto ALI = AL->begin(); | ||||
2077 | auto AAI = ActualAccesses.begin(); | ||||
2078 | while (ALI != AL->end() && AAI != ActualAccesses.end()) { | ||||
2079 |       assert(&*ALI == *AAI && "Not the same accesses in the same order");
2080 | ++ALI; | ||||
2081 | ++AAI; | ||||
2082 | } | ||||
2083 | ActualAccesses.clear(); | ||||
2084 | if (DL) { | ||||
2085 | auto DLI = DL->begin(); | ||||
2086 | auto ADI = ActualDefs.begin(); | ||||
2087 | while (DLI != DL->end() && ADI != ActualDefs.end()) { | ||||
2088 |         assert(&*DLI == *ADI && "Not the same defs in the same order");
2089 | ++DLI; | ||||
2090 | ++ADI; | ||||
2091 | } | ||||
2092 | } | ||||
2093 | ActualDefs.clear(); | ||||
2094 | } | ||||
2095 | } | ||||
2096 | |||||
2097 | /// Verify the def-use lists in MemorySSA, by verifying that \p Use | ||||
2098 | /// appears in the use list of \p Def. | ||||
2099 | void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const { | ||||
2100 | // The live on entry use may cause us to get a NULL def here | ||||
2101 | if (!Def) | ||||
2102 |     assert(isLiveOnEntryDef(Use) &&
2103 |            "Null def but use does not point to live on entry def");
2104 | else | ||||
2105 |     assert(is_contained(Def->users(), Use) &&
2106 |            "Did not find use in def's use list");
2107 | } | ||||
2108 | |||||
2109 | /// Perform a local numbering on blocks so that instruction ordering can be | ||||
2110 | /// determined in constant time. | ||||
2111 | /// TODO: We currently just number in order. If we numbered by N, we could | ||||
2112 | /// allow at least N-1 sequences of insertBefore or insertAfter (and at least | ||||
2113 | /// log2(N) sequences of mixed before and after) without needing to invalidate | ||||
2114 | /// the numbering. | ||||
2115 | void MemorySSA::renumberBlock(const BasicBlock *B) const { | ||||
2116 | // The pre-increment ensures the numbers really start at 1. | ||||
2117 | unsigned long CurrentNumber = 0; | ||||
2118 | const AccessList *AL = getBlockAccesses(B); | ||||
2119 |   assert(AL != nullptr && "Asking to renumber an empty block");
2120 | for (const auto &I : *AL) | ||||
2121 | BlockNumbering[&I] = ++CurrentNumber; | ||||
2122 | BlockNumberingValid.insert(B); | ||||
2123 | } | ||||
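// A sketch of the TODO above (assumed scheme, not what renumberBlock does):
// numbering with a stride leaves gaps that later insertions can claim, e.g.
//
//   BlockNumbering[A] = 10;
//   BlockNumbering[B] = 20;
//   BlockNumbering[New] = 15; // insert between A and B, no renumbering needed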
2124 | |||||
2125 | /// Determine, for two memory accesses in the same block, | ||||
2126 | /// whether \p Dominator dominates \p Dominatee. | ||||
2127 | /// \returns True if \p Dominator dominates \p Dominatee. | ||||
2128 | bool MemorySSA::locallyDominates(const MemoryAccess *Dominator, | ||||
2129 | const MemoryAccess *Dominatee) const { | ||||
2130 | const BasicBlock *DominatorBlock = Dominator->getBlock(); | ||||
2131 | |||||
2132 |   assert((DominatorBlock == Dominatee->getBlock()) &&
2133 |          "Asking for local domination when accesses are in different blocks!");
2134 | // A node dominates itself. | ||||
2135 | if (Dominatee == Dominator) | ||||
2136 | return true; | ||||
2137 | |||||
2138 | // When Dominatee is defined on function entry, it is not dominated by another | ||||
2139 | // memory access. | ||||
2140 | if (isLiveOnEntryDef(Dominatee)) | ||||
2141 | return false; | ||||
2142 | |||||
2143 | // When Dominator is defined on function entry, it dominates the other memory | ||||
2144 | // access. | ||||
2145 | if (isLiveOnEntryDef(Dominator)) | ||||
2146 | return true; | ||||
2147 | |||||
2148 | if (!BlockNumberingValid.count(DominatorBlock)) | ||||
2149 | renumberBlock(DominatorBlock); | ||||
2150 | |||||
2151 | unsigned long DominatorNum = BlockNumbering.lookup(Dominator); | ||||
2152 | // All numbers start with 1 | ||||
2153 |   assert(DominatorNum != 0 && "Block was not numbered properly");
2154 |   unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
2155 |   assert(DominateeNum != 0 && "Block was not numbered properly");
2156 | return DominatorNum < DominateeNum; | ||||
2157 | } | ||||
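// For example, in a block whose accesses are numbered on demand as
//
//   1 = MemoryDef(liveOnEntry)   ; local number 1
//   MemoryUse(1)                 ; local number 2
//
// locallyDominates(Def, Use) returns true because 1 < 2.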
2158 | |||||
2159 | bool MemorySSA::dominates(const MemoryAccess *Dominator, | ||||
2160 | const MemoryAccess *Dominatee) const { | ||||
2161 | if (Dominator == Dominatee) | ||||
2162 | return true; | ||||
2163 | |||||
2164 | if (isLiveOnEntryDef(Dominatee)) | ||||
2165 | return false; | ||||
2166 | |||||
2167 | if (Dominator->getBlock() != Dominatee->getBlock()) | ||||
2168 | return DT->dominates(Dominator->getBlock(), Dominatee->getBlock()); | ||||
2169 | return locallyDominates(Dominator, Dominatee); | ||||
2170 | } | ||||
2171 | |||||
2172 | bool MemorySSA::dominates(const MemoryAccess *Dominator, | ||||
2173 | const Use &Dominatee) const { | ||||
2174 | if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) { | ||||
2175 | BasicBlock *UseBB = MP->getIncomingBlock(Dominatee); | ||||
2176 | // The def must dominate the incoming block of the phi. | ||||
2177 | if (UseBB != Dominator->getBlock()) | ||||
2178 | return DT->dominates(Dominator->getBlock(), UseBB); | ||||
2179 | // If the UseBB and the DefBB are the same, compare locally. | ||||
2180 | return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee)); | ||||
2181 | } | ||||
2182 | // If it's not a PHI node use, the normal dominates can already handle it. | ||||
2183 | return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser())); | ||||
2184 | } | ||||
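// Example: for "3 = MemoryPhi({%left,1},{%right,2})" in block %merge, the use
// of access 1 is treated as occurring at the end of %left, so the def must
// dominate %left; dominating %merge alone is not sufficient.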
2185 | |||||
2186 | void MemorySSA::ensureOptimizedUses() { | ||||
2187 | if (IsOptimized) | ||||
2188 | return; | ||||
2189 | |||||
2190 | BatchAAResults BatchAA(*AA); | ||||
2191 | ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BatchAA, DT); | ||||
2192 | CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase); | ||||
2193 | OptimizeUses(this, &WalkerLocal, &BatchAA, DT).optimizeUses(); | ||||
2194 | IsOptimized = true; | ||||
2195 | } | ||||
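// Illustrative usage sketch (the printer passes below do exactly this):
//
//   auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
//   MSSA.ensureOptimizedUses(); // batched one-time optimization of all uses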
2196 | |||||
2197 | void MemoryAccess::print(raw_ostream &OS) const { | ||||
2198 | switch (getValueID()) { | ||||
2199 | case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS); | ||||
2200 | case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS); | ||||
2201 | case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS); | ||||
2202 | } | ||||
2203 |   llvm_unreachable("invalid value id");
2204 | } | ||||
2205 | |||||
2206 | void MemoryDef::print(raw_ostream &OS) const { | ||||
2207 | MemoryAccess *UO = getDefiningAccess(); | ||||
2208 | |||||
2209 | auto printID = [&OS](MemoryAccess *A) { | ||||
2210 | if (A && A->getID()) | ||||
2211 | OS << A->getID(); | ||||
2212 | else | ||||
2213 | OS << LiveOnEntryStr; | ||||
2214 | }; | ||||
2215 | |||||
2216 | OS << getID() << " = MemoryDef("; | ||||
2217 | printID(UO); | ||||
2218 | OS << ")"; | ||||
2219 | |||||
2220 | if (isOptimized()) { | ||||
2221 | OS << "->"; | ||||
2222 | printID(getOptimized()); | ||||
2223 | |||||
2224 | if (Optional<AliasResult> AR = getOptimizedAccessType()) | ||||
2225 | OS << " " << *AR; | ||||
2226 | } | ||||
2227 | } | ||||
2228 | |||||
2229 | void MemoryPhi::print(raw_ostream &OS) const { | ||||
2230 | ListSeparator LS(","); | ||||
2231 | OS << getID() << " = MemoryPhi("; | ||||
2232 | for (const auto &Op : operands()) { | ||||
2233 | BasicBlock *BB = getIncomingBlock(Op); | ||||
2234 | MemoryAccess *MA = cast<MemoryAccess>(Op); | ||||
2235 | |||||
2236 | OS << LS << '{'; | ||||
2237 | if (BB->hasName()) | ||||
2238 | OS << BB->getName(); | ||||
2239 | else | ||||
2240 | BB->printAsOperand(OS, false); | ||||
2241 | OS << ','; | ||||
2242 | if (unsigned ID = MA->getID()) | ||||
2243 | OS << ID; | ||||
2244 | else | ||||
2245 | OS << LiveOnEntryStr; | ||||
2246 | OS << '}'; | ||||
2247 | } | ||||
2248 | OS << ')'; | ||||
2249 | } | ||||
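// Example output for a phi merging two predecessors (hypothetical IDs and
// block names):
//
//   3 = MemoryPhi({entry,1},{if.then,2})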
2250 | |||||
2251 | void MemoryUse::print(raw_ostream &OS) const { | ||||
2252 | MemoryAccess *UO = getDefiningAccess(); | ||||
2253 | OS << "MemoryUse("; | ||||
2254 | if (UO && UO->getID()) | ||||
2255 | OS << UO->getID(); | ||||
2256 | else | ||||
2257 | OS << LiveOnEntryStr; | ||||
2258 | OS << ')'; | ||||
2259 | |||||
2260 | if (Optional<AliasResult> AR = getOptimizedAccessType()) | ||||
2261 | OS << " " << *AR; | ||||
2262 | } | ||||
2263 | |||||
2264 | void MemoryAccess::dump() const { | ||||
2265 | // Cannot completely remove virtual function even in release mode. | ||||
2266 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | ||||
2267 | print(dbgs()); | ||||
2268 | dbgs() << "\n"; | ||||
2269 | #endif | ||||
2270 | } | ||||
2271 | |||||
2272 | char MemorySSAPrinterLegacyPass::ID = 0; | ||||
2273 | |||||
2274 | MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) { | ||||
2275 | initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry()); | ||||
2276 | } | ||||
2277 | |||||
2278 | void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const { | ||||
2279 | AU.setPreservesAll(); | ||||
2280 | AU.addRequired<MemorySSAWrapperPass>(); | ||||
2281 | } | ||||
2282 | |||||
2283 | class DOTFuncMSSAInfo { | ||||
2284 | private: | ||||
2285 | const Function &F; | ||||
2286 | MemorySSAAnnotatedWriter MSSAWriter; | ||||
2287 | |||||
2288 | public: | ||||
2289 | DOTFuncMSSAInfo(const Function &F, MemorySSA &MSSA) | ||||
2290 | : F(F), MSSAWriter(&MSSA) {} | ||||
2291 | |||||
2292 | const Function *getFunction() { return &F; } | ||||
2293 | MemorySSAAnnotatedWriter &getWriter() { return MSSAWriter; } | ||||
2294 | }; | ||||
2295 | |||||
2296 | namespace llvm { | ||||
2297 | |||||
2298 | template <> | ||||
2299 | struct GraphTraits<DOTFuncMSSAInfo *> : public GraphTraits<const BasicBlock *> { | ||||
2300 | static NodeRef getEntryNode(DOTFuncMSSAInfo *CFGInfo) { | ||||
2301 | return &(CFGInfo->getFunction()->getEntryBlock()); | ||||
2302 | } | ||||
2303 | |||||
2304 | // nodes_iterator/begin/end - Allow iteration over all nodes in the graph | ||||
2305 | using nodes_iterator = pointer_iterator<Function::const_iterator>; | ||||
2306 | |||||
2307 | static nodes_iterator nodes_begin(DOTFuncMSSAInfo *CFGInfo) { | ||||
2308 | return nodes_iterator(CFGInfo->getFunction()->begin()); | ||||
2309 | } | ||||
2310 | |||||
2311 | static nodes_iterator nodes_end(DOTFuncMSSAInfo *CFGInfo) { | ||||
2312 | return nodes_iterator(CFGInfo->getFunction()->end()); | ||||
2313 | } | ||||
2314 | |||||
2315 | static size_t size(DOTFuncMSSAInfo *CFGInfo) { | ||||
2316 | return CFGInfo->getFunction()->size(); | ||||
2317 | } | ||||
2318 | }; | ||||
2319 | |||||
2320 | template <> | ||||
2321 | struct DOTGraphTraits<DOTFuncMSSAInfo *> : public DefaultDOTGraphTraits { | ||||
2322 | |||||
2323 | DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {} | ||||
2324 | |||||
2325 | static std::string getGraphName(DOTFuncMSSAInfo *CFGInfo) { | ||||
2326 | return "MSSA CFG for '" + CFGInfo->getFunction()->getName().str() + | ||||
2327 | "' function"; | ||||
2328 | } | ||||
2329 | |||||
2330 | std::string getNodeLabel(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo) { | ||||
2331 | return DOTGraphTraits<DOTFuncInfo *>::getCompleteNodeLabel( | ||||
2332 | Node, nullptr, | ||||
2333 | [CFGInfo](raw_string_ostream &OS, const BasicBlock &BB) -> void { | ||||
2334 | BB.print(OS, &CFGInfo->getWriter(), true, true); | ||||
2335 | }, | ||||
2336 | [](std::string &S, unsigned &I, unsigned Idx) -> void { | ||||
2337 | std::string Str = S.substr(I, Idx - I); | ||||
2338 | StringRef SR = Str; | ||||
2339 | if (SR.count(" = MemoryDef(") || SR.count(" = MemoryPhi(") || | ||||
2340 | SR.count("MemoryUse(")) | ||||
2341 | return; | ||||
2342 | DOTGraphTraits<DOTFuncInfo *>::eraseComment(S, I, Idx); | ||||
2343 | }); | ||||
2344 | } | ||||
2345 | |||||
2346 | static std::string getEdgeSourceLabel(const BasicBlock *Node, | ||||
2347 | const_succ_iterator I) { | ||||
2348 | return DOTGraphTraits<DOTFuncInfo *>::getEdgeSourceLabel(Node, I); | ||||
2349 | } | ||||
2350 | |||||
2351 | /// Display the raw branch weights from PGO. | ||||
2352 | std::string getEdgeAttributes(const BasicBlock *Node, const_succ_iterator I, | ||||
2353 | DOTFuncMSSAInfo *CFGInfo) { | ||||
2354 | return ""; | ||||
2355 | } | ||||
2356 | |||||
2357 | std::string getNodeAttributes(const BasicBlock *Node, | ||||
2358 | DOTFuncMSSAInfo *CFGInfo) { | ||||
2359 | return getNodeLabel(Node, CFGInfo).find(';') != std::string::npos | ||||
2360 | ? "style=filled, fillcolor=lightpink" | ||||
2361 | : ""; | ||||
2362 | } | ||||
2363 | }; | ||||
2364 | |||||
2365 | } // namespace llvm | ||||
2366 | |||||
2367 | bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) { | ||||
2368 | auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA(); | ||||
2369 | MSSA.ensureOptimizedUses(); | ||||
2370 | if (DotCFGMSSA != "") { | ||||
2371 | DOTFuncMSSAInfo CFGInfo(F, MSSA); | ||||
2372 | WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA); | ||||
2373 | } else | ||||
2374 | MSSA.print(dbgs()); | ||||
2375 | |||||
2376 | if (VerifyMemorySSA) | ||||
2377 | MSSA.verifyMemorySSA(); | ||||
2378 | return false; | ||||
2379 | } | ||||
2380 | |||||
2381 | AnalysisKey MemorySSAAnalysis::Key; | ||||
2382 | |||||
2383 | MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F, | ||||
2384 | FunctionAnalysisManager &AM) { | ||||
2385 | auto &DT = AM.getResult<DominatorTreeAnalysis>(F); | ||||
2386 | auto &AA = AM.getResult<AAManager>(F); | ||||
2387 | return MemorySSAAnalysis::Result(std::make_unique<MemorySSA>(F, &AA, &DT)); | ||||
2388 | } | ||||
2389 | |||||
2390 | bool MemorySSAAnalysis::Result::invalidate( | ||||
2391 | Function &F, const PreservedAnalyses &PA, | ||||
2392 | FunctionAnalysisManager::Invalidator &Inv) { | ||||
2393 | auto PAC = PA.getChecker<MemorySSAAnalysis>(); | ||||
2394 | return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || | ||||
2395 | Inv.invalidate<AAManager>(F, PA) || | ||||
2396 | Inv.invalidate<DominatorTreeAnalysis>(F, PA); | ||||
2397 | } | ||||
2398 | |||||
2399 | PreservedAnalyses MemorySSAPrinterPass::run(Function &F, | ||||
2400 | FunctionAnalysisManager &AM) { | ||||
2401 | auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA(); | ||||
2402 | MSSA.ensureOptimizedUses(); | ||||
2403 | if (DotCFGMSSA != "") { | ||||
2404 | DOTFuncMSSAInfo CFGInfo(F, MSSA); | ||||
2405 | WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA); | ||||
2406 | } else { | ||||
2407 | OS << "MemorySSA for function: " << F.getName() << "\n"; | ||||
2408 | MSSA.print(OS); | ||||
2409 | } | ||||
2410 | |||||
2411 | return PreservedAnalyses::all(); | ||||
2412 | } | ||||
2413 | |||||
2414 | PreservedAnalyses MemorySSAWalkerPrinterPass::run(Function &F, | ||||
2415 | FunctionAnalysisManager &AM) { | ||||
2416 | auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA(); | ||||
2417 | OS << "MemorySSA (walker) for function: " << F.getName() << "\n"; | ||||
2418 | MemorySSAWalkerAnnotatedWriter Writer(&MSSA); | ||||
2419 | F.print(OS, &Writer); | ||||
2420 | |||||
2421 | return PreservedAnalyses::all(); | ||||
2422 | } | ||||
2423 | |||||
2424 | PreservedAnalyses MemorySSAVerifierPass::run(Function &F, | ||||
2425 | FunctionAnalysisManager &AM) { | ||||
2426 | AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA(); | ||||
2427 | |||||
2428 | return PreservedAnalyses::all(); | ||||
2429 | } | ||||
2430 | |||||
2431 | char MemorySSAWrapperPass::ID = 0; | ||||
2432 | |||||
2433 | MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) { | ||||
2434 | initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry()); | ||||
2435 | } | ||||
2436 | |||||
2437 | void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); } | ||||
2438 | |||||
2439 | void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { | ||||
2440 | AU.setPreservesAll(); | ||||
2441 | AU.addRequiredTransitive<DominatorTreeWrapperPass>(); | ||||
2442 | AU.addRequiredTransitive<AAResultsWrapperPass>(); | ||||
2443 | } | ||||
2444 | |||||
2445 | bool MemorySSAWrapperPass::runOnFunction(Function &F) { | ||||
2446 | auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); | ||||
2447 | auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults(); | ||||
2448 | MSSA.reset(new MemorySSA(F, &AA, &DT)); | ||||
2449 | return false; | ||||
2450 | } | ||||
2451 | |||||
2452 | void MemorySSAWrapperPass::verifyAnalysis() const { | ||||
2453 | if (VerifyMemorySSA) | ||||
| |||||
2454 | MSSA->verifyMemorySSA(); | ||||
2455 | } | ||||
2456 | |||||
2457 | void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const { | ||||
2458 | MSSA->print(OS); | ||||
2459 | } | ||||
2460 | |||||
2461 | MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {} | ||||
2462 | |||||
2463 | /// Walk the use-def chains starting at \p StartingAccess and find | ||||
2464 | /// the MemoryAccess that actually clobbers Loc. | ||||
2465 | /// | ||||
2466 | /// \returns our clobbering memory access | ||||
2467 | template <typename AliasAnalysisType> | ||||
2468 | MemoryAccess * | ||||
2469 | MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase( | ||||
2470 | MemoryAccess *StartingAccess, const MemoryLocation &Loc, | ||||
2471 | unsigned &UpwardWalkLimit) { | ||||
2472 |   assert(!isa<MemoryUse>(StartingAccess) && "Use cannot be defining access");
2473 | |||||
2474 | Instruction *I = nullptr; | ||||
2475 | if (auto *StartingUseOrDef = dyn_cast<MemoryUseOrDef>(StartingAccess)) { | ||||
2476 | if (MSSA->isLiveOnEntryDef(StartingUseOrDef)) | ||||
2477 | return StartingUseOrDef; | ||||
2478 | |||||
2479 | I = StartingUseOrDef->getMemoryInst(); | ||||
2480 | |||||
2481 | // Conservatively, fences are always clobbers, so don't perform the walk if | ||||
2482 | // we hit a fence. | ||||
2483 | if (!isa<CallBase>(I) && I->isFenceLike()) | ||||
2484 | return StartingUseOrDef; | ||||
2485 | } | ||||
2486 | |||||
2487 | UpwardsMemoryQuery Q; | ||||
2488 | Q.OriginalAccess = StartingAccess; | ||||
2489 | Q.StartingLoc = Loc; | ||||
2490 | Q.Inst = nullptr; | ||||
2491 | Q.IsCall = false; | ||||
2492 | |||||
2493 | // Unlike the other function, do not walk to the def of a def, because we are | ||||
2494 | // handed something we already believe is the clobbering access. | ||||
2495 | // We never set SkipSelf to true in Q in this method. | ||||
2496 | MemoryAccess *Clobber = | ||||
2497 | Walker.findClobber(StartingAccess, Q, UpwardWalkLimit); | ||||
2498 |   LLVM_DEBUG({
2499 |     dbgs() << "Clobber starting at access " << *StartingAccess << "\n";
2500 |     if (I)
2501 |       dbgs() << "  for instruction " << *I << "\n";
2502 |     dbgs() << "  is " << *Clobber << "\n";
2503 |   });
2504 | return Clobber; | ||||
2505 | } | ||||
2506 | |||||
2507 | static const Instruction * | ||||
2508 | getInvariantGroupClobberingInstruction(Instruction &I, DominatorTree &DT) { | ||||
2509 | if (!I.hasMetadata(LLVMContext::MD_invariant_group) || I.isVolatile()) | ||||
2510 | return nullptr; | ||||
2511 | |||||
2512 | // We consider bitcasts and zero GEPs to be the same pointer value. Start by | ||||
2513 | // stripping bitcasts and zero GEPs, then we will recursively look at loads | ||||
2514 | // and stores through bitcasts and zero GEPs. | ||||
2515 | Value *PointerOperand = getLoadStorePointerOperand(&I)->stripPointerCasts(); | ||||
2516 | |||||
2517 | // It's not safe to walk the use list of a global value because function | ||||
2518 | // passes aren't allowed to look outside their functions. | ||||
2519 | // FIXME: this could be fixed by filtering instructions from outside of | ||||
2520 | // current function. | ||||
2521 | if (isa<Constant>(PointerOperand)) | ||||
2522 | return nullptr; | ||||
2523 | |||||
2524 | // Queue to process all pointers that are equivalent to load operand. | ||||
2525 | SmallVector<const Value *, 8> PointerUsesQueue; | ||||
2526 | PointerUsesQueue.push_back(PointerOperand); | ||||
2527 | |||||
2528 | const Instruction *MostDominatingInstruction = &I; | ||||
2529 | |||||
2530 | // FIXME: This loop is O(n^2) because dominates can be O(n) and in worst case | ||||
2531 | // we will see all the instructions. It may not matter in practice. If it | ||||
2532 | // does, we will have to support MemorySSA construction and updates. | ||||
2533 | while (!PointerUsesQueue.empty()) { | ||||
2534 | const Value *Ptr = PointerUsesQueue.pop_back_val(); | ||||
2535 |     assert(Ptr && !isa<GlobalValue>(Ptr) &&
2536 |            "Null or GlobalValue should not be inserted");
2537 | |||||
2538 | for (const User *Us : Ptr->users()) { | ||||
2539 | auto *U = dyn_cast<Instruction>(Us); | ||||
2540 | if (!U || U == &I || !DT.dominates(U, MostDominatingInstruction)) | ||||
2541 | continue; | ||||
2542 | |||||
2543 | // Add bitcasts and zero GEPs to queue. | ||||
2544 | if (isa<BitCastInst>(U)) { | ||||
2545 | PointerUsesQueue.push_back(U); | ||||
2546 | continue; | ||||
2547 | } | ||||
2548 | if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) { | ||||
2549 | if (GEP->hasAllZeroIndices()) | ||||
2550 | PointerUsesQueue.push_back(U); | ||||
2551 | continue; | ||||
2552 | } | ||||
2553 | |||||
2554 |       // If we hit a load/store with invariant.group metadata and the same
2555 |       // pointer operand, we can assume that the value pointed to by the
2556 |       // pointer operand didn't change.
2557 | if (U->hasMetadata(LLVMContext::MD_invariant_group) && | ||||
2558 | getLoadStorePointerOperand(U) == Ptr && !U->isVolatile()) { | ||||
2559 | MostDominatingInstruction = U; | ||||
2560 | } | ||||
2561 | } | ||||
2562 | } | ||||
2563 | return MostDominatingInstruction == &I ? nullptr : MostDominatingInstruction; | ||||
2564 | } | ||||
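// Illustrative IR sketch (assumed example): both accesses below carry
// invariant.group metadata on the same pointer %p, so the walker may report
// the store as the load's clobber and skip the intervening call:
//
//   store i32 1, i32* %p, align 4, !invariant.group !0
//   call void @f(i32* %p)
//   %v = load i32, i32* %p, align 4, !invariant.group !0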
2565 | |||||
2566 | template <typename AliasAnalysisType> | ||||
2567 | MemoryAccess * | ||||
2568 | MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase( | ||||
2569 | MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf, | ||||
2570 | bool UseInvariantGroup) { | ||||
2571 | auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA); | ||||
2572 | // If this is a MemoryPhi, we can't do anything. | ||||
2573 | if (!StartingAccess) | ||||
2574 | return MA; | ||||
2575 | |||||
2576 | if (UseInvariantGroup) { | ||||
2577 | if (auto *I = getInvariantGroupClobberingInstruction( | ||||
2578 | *StartingAccess->getMemoryInst(), MSSA->getDomTree())) { | ||||
2579 |       assert(isa<LoadInst>(I) || isa<StoreInst>(I));
2580 | |||||
2581 | auto *ClobberMA = MSSA->getMemoryAccess(I); | ||||
2582 |       assert(ClobberMA);
2583 | if (isa<MemoryUse>(ClobberMA)) | ||||
2584 | return ClobberMA->getDefiningAccess(); | ||||
2585 | return ClobberMA; | ||||
2586 | } | ||||
2587 | } | ||||
2588 | |||||
2589 | bool IsOptimized = false; | ||||
2590 | |||||
2591 | // If this is an already optimized use or def, return the optimized result. | ||||
2592 | // Note: Currently, we store the optimized def result in a separate field, | ||||
2593 | // since we can't use the defining access. | ||||
2594 | if (StartingAccess->isOptimized()) { | ||||
2595 | if (!SkipSelf || !isa<MemoryDef>(StartingAccess)) | ||||
2596 | return StartingAccess->getOptimized(); | ||||
2597 | IsOptimized = true; | ||||
2598 | } | ||||
2599 | |||||
2600 | const Instruction *I = StartingAccess->getMemoryInst(); | ||||
2601 | // We can't sanely do anything with a fence, since they conservatively clobber | ||||
2602 | // all memory, and have no locations to get pointers from to try to | ||||
2603 | // disambiguate. | ||||
2604 | if (!isa<CallBase>(I) && I->isFenceLike()) | ||||
2605 | return StartingAccess; | ||||
2606 | |||||
2607 | UpwardsMemoryQuery Q(I, StartingAccess); | ||||
2608 | |||||
2609 | if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) { | ||||
2610 | MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef(); | ||||
2611 | StartingAccess->setOptimized(LiveOnEntry); | ||||
2612 | StartingAccess->setOptimizedAccessType(None); | ||||
2613 | return LiveOnEntry; | ||||
2614 | } | ||||
2615 | |||||
2616 | MemoryAccess *OptimizedAccess; | ||||
2617 | if (!IsOptimized) { | ||||
2618 | // Start with the thing we already think clobbers this location | ||||
2619 | MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess(); | ||||
2620 | |||||
2621 | // At this point, DefiningAccess may be the live on entry def. | ||||
2622 | // If it is, we will not get a better result. | ||||
2623 | if (MSSA->isLiveOnEntryDef(DefiningAccess)) { | ||||
2624 | StartingAccess->setOptimized(DefiningAccess); | ||||
2625 | StartingAccess->setOptimizedAccessType(None); | ||||
2626 | return DefiningAccess; | ||||
2627 | } | ||||
2628 | |||||
2629 | OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit); | ||||
2630 | StartingAccess->setOptimized(OptimizedAccess); | ||||
2631 | if (MSSA->isLiveOnEntryDef(OptimizedAccess)) | ||||
2632 | StartingAccess->setOptimizedAccessType(None); | ||||
2633 | else if (Q.AR && *Q.AR == AliasResult::MustAlias) | ||||
2634 | StartingAccess->setOptimizedAccessType( | ||||
2635 | AliasResult(AliasResult::MustAlias)); | ||||
2636 | } else | ||||
2637 | OptimizedAccess = StartingAccess->getOptimized(); | ||||
2638 | |||||
2639 | LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << "Starting Memory SSA clobber for " << *I << " is "; } } while (false); | ||||
2640 | LLVM_DEBUG(dbgs() << *StartingAccess << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << *StartingAccess << "\n" ; } } while (false); | ||||
2641 | LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << "Optimized Memory SSA clobber for " << *I << " is "; } } while (false); | ||||
2642 | LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << *OptimizedAccess << "\n" ; } } while (false); | ||||
2643 | |||||
2644 | MemoryAccess *Result; | ||||
2645 | if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) && | ||||
2646 | isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) { | ||||
2647 |     assert(isa<MemoryDef>(Q.OriginalAccess));
2648 | Q.SkipSelfAccess = true; | ||||
2649 | Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit); | ||||
2650 | } else | ||||
2651 | Result = OptimizedAccess; | ||||
2652 | |||||
2653 | LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf; } } while (false); | ||||
2654 | LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << "] for " << *I << " is " << *Result << "\n"; } } while (false); | ||||
2655 | |||||
2656 | return Result; | ||||
2657 | } | ||||
2658 | |||||
2659 | MemoryAccess * | ||||
2660 | DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) { | ||||
2661 | if (auto *Use = dyn_cast<MemoryUseOrDef>(MA)) | ||||
2662 | return Use->getDefiningAccess(); | ||||
2663 | return MA; | ||||
2664 | } | ||||
2665 | |||||
2666 | MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess( | ||||
2667 | MemoryAccess *StartingAccess, const MemoryLocation &) { | ||||
2668 | if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess)) | ||||
2669 | return Use->getDefiningAccess(); | ||||
2670 | return StartingAccess; | ||||
2671 | } | ||||
2672 | |||||
2673 | void MemoryPhi::deleteMe(DerivedUser *Self) { | ||||
2674 | delete static_cast<MemoryPhi *>(Self); | ||||
2675 | } | ||||
2676 | |||||
2677 | void MemoryDef::deleteMe(DerivedUser *Self) { | ||||
2678 | delete static_cast<MemoryDef *>(Self); | ||||
2679 | } | ||||
2680 | |||||
2681 | void MemoryUse::deleteMe(DerivedUser *Self) { | ||||
2682 | delete static_cast<MemoryUse *>(Self); | ||||
2683 | } | ||||
2684 | |||||
2685 | bool upward_defs_iterator::IsGuaranteedLoopInvariant(Value *Ptr) const { | ||||
2686 | auto IsGuaranteedLoopInvariantBase = [](Value *Ptr) { | ||||
2687 | Ptr = Ptr->stripPointerCasts(); | ||||
2688 | if (!isa<Instruction>(Ptr)) | ||||
2689 | return true; | ||||
2690 | return isa<AllocaInst>(Ptr); | ||||
2691 | }; | ||||
2692 | |||||
2693 | Ptr = Ptr->stripPointerCasts(); | ||||
2694 | if (auto *I = dyn_cast<Instruction>(Ptr)) { | ||||
2695 | if (I->getParent()->isEntryBlock()) | ||||
2696 | return true; | ||||
2697 | } | ||||
2698 | if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) { | ||||
2699 | return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) && | ||||
2700 | GEP->hasAllConstantIndices(); | ||||
2701 | } | ||||
2702 | return IsGuaranteedLoopInvariantBase(Ptr); | ||||
2703 | } |
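// For example (assumed IR), an alloca and a GEP on it with all-constant
// indices are guaranteed loop-invariant, while a GEP with a loop-varying
// index is not:
//
//   %a = alloca [4 x i32]
//   %g = getelementptr [4 x i32], [4 x i32]* %a, i64 0, i64 1   ; invariant
//   %h = getelementptr [4 x i32], [4 x i32]* %a, i64 0, i64 %i  ; not invariant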
1 | //===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | /// \file |
10 | /// This file exposes an interface to building/using memory SSA to |
11 | /// walk memory instructions using a use/def graph. |
12 | /// |
13 | /// Memory SSA class builds an SSA form that links together memory access |
14 | /// instructions such as loads, stores, atomics, and calls. Additionally, it |
15 | /// does a trivial form of "heap versioning": every time the memory state changes
16 | /// in the program, we generate a new heap version. It generates
17 | /// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
18 | /// |
19 | /// As a trivial example, |
20 | /// define i32 @main() #0 { |
21 | /// entry: |
22 | /// %call = call noalias i8* @_Znwm(i64 4) #2 |
23 | /// %0 = bitcast i8* %call to i32* |
24 | /// %call1 = call noalias i8* @_Znwm(i64 4) #2 |
25 | /// %1 = bitcast i8* %call1 to i32* |
26 | /// store i32 5, i32* %0, align 4 |
27 | /// store i32 7, i32* %1, align 4 |
28 | /// %2 = load i32* %0, align 4 |
29 | /// %3 = load i32* %1, align 4 |
30 | /// %add = add nsw i32 %2, %3 |
31 | /// ret i32 %add |
32 | /// } |
33 | /// |
34 | /// Will become |
35 | /// define i32 @main() #0 { |
36 | /// entry: |
37 | /// ; 1 = MemoryDef(0) |
38 | /// %call = call noalias i8* @_Znwm(i64 4) #3 |
39 | /// %2 = bitcast i8* %call to i32* |
40 | /// ; 2 = MemoryDef(1) |
41 | /// %call1 = call noalias i8* @_Znwm(i64 4) #3 |
42 | /// %4 = bitcast i8* %call1 to i32* |
43 | /// ; 3 = MemoryDef(2) |
44 | /// store i32 5, i32* %2, align 4 |
45 | /// ; 4 = MemoryDef(3) |
46 | /// store i32 7, i32* %4, align 4 |
47 | /// ; MemoryUse(3) |
48 | /// %7 = load i32* %2, align 4 |
49 | /// ; MemoryUse(4) |
50 | /// %8 = load i32* %4, align 4 |
51 | /// %add = add nsw i32 %7, %8 |
52 | /// ret i32 %add |
53 | /// } |
54 | /// |
55 | /// Given this form, all the stores that could ever affect the load at %8 can be
56 | /// found by using the MemoryUse associated with it, and walking from use to
57 | /// def until you hit the top of the function.
58 | /// |
59 | /// Each def also has a list of users associated with it, so you can walk from |
60 | /// both def to users, and users to defs. Note that we disambiguate MemoryUses, |
61 | /// but not the RHS of MemoryDefs. You can see this above at %7, which would |
62 | /// otherwise be a MemoryUse(4). Being disambiguated means that for a given |
63 | /// store, all the MemoryUses on its use lists are may-aliases of that store |
64 | /// (but the MemoryDefs on its use list may not be). |
65 | /// |
66 | /// MemoryDefs are not disambiguated because it would require multiple reaching |
67 | /// definitions, which would require multiple phis, and multiple memoryaccesses |
68 | /// per instruction. |
69 | /// |
70 | /// In addition to the def/use graph described above, MemoryDefs also contain |
71 | /// an "optimized" definition use. The "optimized" use points to some def |
72 | /// reachable through the memory def chain. The optimized def *may* (but is |
73 | /// not required to) alias the original MemoryDef, but no def *closer* to the |
74 | /// source def may alias it. As the name implies, the purpose of the optimized |
75 | /// use is to allow caching of clobber searches for memory defs. The optimized |
76 | /// def may be nullptr, in which case clients must walk the defining access |
77 | /// chain. |
78 | /// |
79 | /// When iterating the uses of a MemoryDef, both defining uses and optimized |
80 | /// uses will be encountered. If only one type is needed, the client must |
81 | /// filter the use walk. |
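82 | /// For example, if "1 = MemoryDef(liveOnEntry)" is followed by "2 =
83 | /// MemoryDef(1)" writing a provably non-aliasing location, 2's defining
84 | /// access remains 1 while its optimized use may point at liveOnEntry.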
82 | // |
83 | //===----------------------------------------------------------------------===// |
84 | |
85 | #ifndef LLVM_ANALYSIS_MEMORYSSA_H |
86 | #define LLVM_ANALYSIS_MEMORYSSA_H |
87 | |
88 | #include "llvm/ADT/DenseMap.h" |
89 | #include "llvm/ADT/SmallPtrSet.h" |
90 | #include "llvm/ADT/SmallVector.h" |
91 | #include "llvm/ADT/ilist_node.h" |
92 | #include "llvm/ADT/iterator_range.h" |
93 | #include "llvm/Analysis/AliasAnalysis.h" |
94 | #include "llvm/Analysis/MemoryLocation.h" |
95 | #include "llvm/Analysis/PHITransAddr.h" |
96 | #include "llvm/IR/DerivedUser.h" |
97 | #include "llvm/IR/Dominators.h" |
98 | #include "llvm/IR/Type.h" |
99 | #include "llvm/IR/User.h" |
100 | #include "llvm/Pass.h" |
101 | #include <algorithm> |
102 | #include <cassert> |
103 | #include <cstddef> |
104 | #include <iterator> |
105 | #include <memory> |
106 | #include <utility> |
107 | |
108 | namespace llvm { |
109 | |
110 | template <class GraphType> struct GraphTraits; |
111 | class BasicBlock; |
112 | class Function; |
113 | class Instruction; |
114 | class LLVMContext; |
115 | class MemoryAccess; |
116 | class MemorySSAWalker; |
117 | class Module; |
118 | class Use; |
119 | class Value; |
120 | class raw_ostream; |
121 | |
122 | namespace MSSAHelpers { |
123 | |
124 | struct AllAccessTag {}; |
125 | struct DefsOnlyTag {}; |
126 | |
127 | } // end namespace MSSAHelpers |
128 | |
129 | enum : unsigned { |
130 | // Used to signify what the default invalid ID is for MemoryAccess's |
131 | // getID() |
132 | INVALID_MEMORYACCESS_ID = -1U |
133 | }; |
134 | |
135 | template <class T> class memoryaccess_def_iterator_base; |
136 | using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>; |
137 | using const_memoryaccess_def_iterator = |
138 | memoryaccess_def_iterator_base<const MemoryAccess>; |
139 | |
140 | // The base for all memory accesses. All memory accesses in a block are |
141 | // linked together using an intrusive list. |
142 | class MemoryAccess |
143 | : public DerivedUser, |
144 | public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>, |
145 | public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> { |
146 | public: |
147 | using AllAccessType = |
148 | ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>; |
149 | using DefsOnlyType = |
150 | ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>; |
151 | |
152 | MemoryAccess(const MemoryAccess &) = delete; |
153 | MemoryAccess &operator=(const MemoryAccess &) = delete; |
154 | |
155 | void *operator new(size_t) = delete; |
156 | |
157 | // Methods for support type inquiry through isa, cast, and |
158 | // dyn_cast |
159 | static bool classof(const Value *V) { |
160 | unsigned ID = V->getValueID(); |
161 | return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal; |
162 | } |
163 | |
164 | BasicBlock *getBlock() const { return Block; } |
165 | |
166 | void print(raw_ostream &OS) const; |
167 | void dump() const; |
168 | |
169 | /// The user iterators for a memory access |
170 | using iterator = user_iterator; |
171 | using const_iterator = const_user_iterator; |
172 | |
173 | /// This iterator walks over all of the defs in a given |
174 | /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For |
175 | /// MemoryUse/MemoryDef, this walks the defining access. |
176 | memoryaccess_def_iterator defs_begin(); |
177 | const_memoryaccess_def_iterator defs_begin() const; |
178 | memoryaccess_def_iterator defs_end(); |
179 | const_memoryaccess_def_iterator defs_end() const; |
180 | |
181 | /// Get the iterators for the all access list and the defs only list |
182 | /// We default to the all access list. |
183 | AllAccessType::self_iterator getIterator() { |
184 | return this->AllAccessType::getIterator(); |
185 | } |
186 | AllAccessType::const_self_iterator getIterator() const { |
187 | return this->AllAccessType::getIterator(); |
188 | } |
189 | AllAccessType::reverse_self_iterator getReverseIterator() { |
190 | return this->AllAccessType::getReverseIterator(); |
191 | } |
192 | AllAccessType::const_reverse_self_iterator getReverseIterator() const { |
193 | return this->AllAccessType::getReverseIterator(); |
194 | } |
195 | DefsOnlyType::self_iterator getDefsIterator() { |
196 | return this->DefsOnlyType::getIterator(); |
197 | } |
198 | DefsOnlyType::const_self_iterator getDefsIterator() const { |
199 | return this->DefsOnlyType::getIterator(); |
200 | } |
201 | DefsOnlyType::reverse_self_iterator getReverseDefsIterator() { |
202 | return this->DefsOnlyType::getReverseIterator(); |
203 | } |
204 | DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const { |
205 | return this->DefsOnlyType::getReverseIterator(); |
206 | } |
207 | |
208 | protected: |
209 | friend class MemoryDef; |
210 | friend class MemoryPhi; |
211 | friend class MemorySSA; |
212 | friend class MemoryUse; |
213 | friend class MemoryUseOrDef; |
214 | |
215 | /// Used by MemorySSA to change the block of a MemoryAccess when it is |
216 | /// moved. |
217 | void setBlock(BasicBlock *BB) { Block = BB; } |
218 | |
219 | /// Used for debugging and tracking things about MemoryAccesses. |
220 | /// Guaranteed unique among MemoryAccesses, no guarantees otherwise. |
221 | inline unsigned getID() const; |
222 | |
223 | MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue, |
224 | BasicBlock *BB, unsigned NumOperands) |
225 | : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue), |
226 | Block(BB) {} |
227 | |
228 | // Use deleteValue() to delete a generic MemoryAccess. |
229 | ~MemoryAccess() = default; |
230 | |
231 | private: |
232 | BasicBlock *Block; |
233 | }; |
234 | |
235 | template <> |
236 | struct ilist_alloc_traits<MemoryAccess> { |
237 | static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); } |
238 | }; |
239 | |
240 | inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) { |
241 | MA.print(OS); |
242 | return OS; |
243 | } |
244 | |
245 | /// Class that has the common methods + fields of memory uses/defs. It's |
246 | /// a little awkward to have, but there are many cases where we want either a |
247 | /// use or def, and there are many cases where uses are needed (defs aren't |
248 | /// acceptable), and vice-versa. |
249 | /// |
250 | /// This class should never be instantiated directly; make a MemoryUse or |
251 | /// MemoryDef instead. |
252 | class MemoryUseOrDef : public MemoryAccess { |
253 | public: |
254 | void *operator new(size_t) = delete; |
255 | |
256 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
257 | |
258 | /// Get the instruction that this MemoryUse represents. |
259 | Instruction *getMemoryInst() const { return MemoryInstruction; } |
260 | |
261 | /// Get the access that produces the memory state used by this Use. |
262 | MemoryAccess *getDefiningAccess() const { return getOperand(0); } |
263 | |
264 | static bool classof(const Value *MA) { |
265 | return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal; |
266 | } |
267 | |
268 | /// Do we have an optimized use? |
269 | inline bool isOptimized() const; |
270 | /// Return the MemoryAccess associated with the optimized use, or nullptr. |
271 | inline MemoryAccess *getOptimized() const; |
272 | /// Sets the optimized use for a MemoryDef. |
273 | inline void setOptimized(MemoryAccess *); |
274 | |
275 | // Retrieve AliasResult type of the optimized access. Ideally this would be |
276 | // returned by the caching walker and may go away in the future. |
277 | Optional<AliasResult> getOptimizedAccessType() const { |
278 | return isOptimized() ? OptimizedAccessAlias : None; |
279 | } |
280 | |
281 | /// Reset the ID of what this MemoryUse was optimized to, causing it to |
282 | /// be rewalked by the walker if necessary. |
283 | /// This really should only be called by tests. |
284 | inline void resetOptimized(); |
285 | |
286 | protected: |
287 | friend class MemorySSA; |
288 | friend class MemorySSAUpdater; |
289 | |
290 | MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty, |
291 | DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB, |
292 | unsigned NumOperands) |
293 | : MemoryAccess(C, Vty, DeleteValue, BB, NumOperands), |
294 | MemoryInstruction(MI), OptimizedAccessAlias(AliasResult::MayAlias) { |
295 | setDefiningAccess(DMA); |
296 | } |
297 | |
298 | // Use deleteValue() to delete a generic MemoryUseOrDef. |
299 | ~MemoryUseOrDef() = default; |
300 | |
301 | void setOptimizedAccessType(Optional<AliasResult> AR) { |
302 | OptimizedAccessAlias = AR; |
303 | } |
304 | |
305 | void setDefiningAccess( |
306 | MemoryAccess *DMA, bool Optimized = false, |
307 | Optional<AliasResult> AR = AliasResult(AliasResult::MayAlias)) { |
308 | if (!Optimized) { |
309 | setOperand(0, DMA); |
310 | return; |
311 | } |
312 | setOptimized(DMA); |
313 | setOptimizedAccessType(AR); |
314 | } |
315 | |
316 | private: |
317 | Instruction *MemoryInstruction; |
318 | Optional<AliasResult> OptimizedAccessAlias; |
319 | }; |
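// Illustrative sketch (not part of this header): preferring the cached
// "optimized" access over the plain defining access, per the API above.
inline MemoryAccess *cachedOrDefiningAccess(MemoryUseOrDef *MUD) {
  if (MUD->isOptimized())
    return MUD->getOptimized();    // Cached clobber; no walk needed.
  return MUD->getDefiningAccess(); // Caller must walk the def chain itself.
}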
320 | |
321 | /// Represents read-only accesses to memory |
322 | /// |
323 | /// In particular, the set of Instructions that will be represented by |
324 | /// MemoryUse's is exactly the set of Instructions for which |
325 | /// AliasAnalysis::getModRefInfo returns "Ref". |
326 | class MemoryUse final : public MemoryUseOrDef { |
327 | public: |
328 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
329 | |
330 | MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB) |
331 | : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB, |
332 | /*NumOperands=*/1) {} |
333 | |
334 | // allocate space for exactly one operand |
335 | void *operator new(size_t S) { return User::operator new(S, 1); } |
336 | void operator delete(void *Ptr) { User::operator delete(Ptr); } |
337 | |
338 | static bool classof(const Value *MA) { |
339 | return MA->getValueID() == MemoryUseVal; |
340 | } |
341 | |
342 | void print(raw_ostream &OS) const; |
343 | |
344 | void setOptimized(MemoryAccess *DMA) { |
345 | OptimizedID = DMA->getID(); |
346 | setOperand(0, DMA); |
347 | } |
348 | |
349 | /// Whether the MemoryUse is optimized. If ensureOptimizedUses() was called, |
350 | /// uses will usually be optimized, but this is not guaranteed (e.g. due to |
351 | /// invalidation and optimization limits).
352 | bool isOptimized() const { |
353 | return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID(); |
354 | } |
355 | |
356 | MemoryAccess *getOptimized() const { |
357 | return getDefiningAccess(); |
358 | } |
359 | |
360 | void resetOptimized() { |
361 | OptimizedID = INVALID_MEMORYACCESS_ID; |
362 | } |
363 | |
364 | protected: |
365 | friend class MemorySSA; |
366 | |
367 | private: |
368 | static void deleteMe(DerivedUser *Self); |
369 | |
370 | unsigned OptimizedID = INVALID_MEMORYACCESS_ID; |
371 | }; |
372 | |
373 | template <> |
374 | struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {}; |
375 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
376 | |
377 | /// Represents a read-write access to memory, whether it is a must-alias, |
378 | /// or a may-alias. |
379 | /// |
380 | /// In particular, the set of Instructions that will be represented by |
381 | /// MemoryDef's is exactly the set of Instructions for which |
382 | /// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef". |
383 | /// Note that, in order to provide def-def chains, all defs also have a use |
384 | /// associated with them. This use points to the nearest reaching |
385 | /// MemoryDef/MemoryPhi. |
386 | class MemoryDef final : public MemoryUseOrDef { |
387 | public: |
388 | friend class MemorySSA; |
389 | |
390 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
391 | |
392 | MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB, |
393 | unsigned Ver) |
394 | : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB, |
395 | /*NumOperands=*/2), |
396 | ID(Ver) {} |
397 | |
398 | // allocate space for exactly two operands |
399 | void *operator new(size_t S) { return User::operator new(S, 2); } |
400 | void operator delete(void *Ptr) { User::operator delete(Ptr); } |
401 | |
402 | static bool classof(const Value *MA) { |
403 | return MA->getValueID() == MemoryDefVal; |
404 | } |
405 | |
406 | void setOptimized(MemoryAccess *MA) { |
407 | setOperand(1, MA); |
408 | OptimizedID = MA->getID(); |
409 | } |
410 | |
411 | MemoryAccess *getOptimized() const { |
412 | return cast_or_null<MemoryAccess>(getOperand(1)); |
413 | } |
414 | |
415 | bool isOptimized() const { |
416 | return getOptimized() && OptimizedID == getOptimized()->getID(); |
417 | } |
418 | |
419 | void resetOptimized() { |
420 | OptimizedID = INVALID_MEMORYACCESS_ID; |
421 | setOperand(1, nullptr); |
422 | } |
423 | |
424 | void print(raw_ostream &OS) const; |
425 | |
426 | unsigned getID() const { return ID; } |
427 | |
428 | private: |
429 | static void deleteMe(DerivedUser *Self); |
430 | |
431 | const unsigned ID; |
432 | unsigned OptimizedID = INVALID_MEMORYACCESS_ID; |
433 | }; |
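// Illustrative sketch (not part of this header): unlike MemoryUse, a
// MemoryDef keeps its optimized access in operand 1, separate from the
// defining access in operand 0, so clearing the cache leaves the def chain
// intact.
inline void dropDefCache(MemoryDef *MD) {
  MemoryAccess *Defining = MD->getDefiningAccess(); // operand 0
  MD->resetOptimized();                             // clears operand 1 + ID
  assert(MD->getDefiningAccess() == Defining && "def chain must be intact");
  (void)Defining;
}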
434 | |
435 | template <> |
436 | struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {}; |
437 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
438 | |
439 | template <> |
440 | struct OperandTraits<MemoryUseOrDef> { |
441 | static Use *op_begin(MemoryUseOrDef *MUD) { |
442 | if (auto *MU = dyn_cast<MemoryUse>(MUD)) |
443 | return OperandTraits<MemoryUse>::op_begin(MU); |
444 | return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD)); |
445 | } |
446 | |
447 | static Use *op_end(MemoryUseOrDef *MUD) { |
448 | if (auto *MU = dyn_cast<MemoryUse>(MUD)) |
449 | return OperandTraits<MemoryUse>::op_end(MU); |
450 | return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD)); |
451 | } |
452 | |
453 | static unsigned operands(const MemoryUseOrDef *MUD) { |
454 | if (const auto *MU = dyn_cast<MemoryUse>(MUD)) |
455 | return OperandTraits<MemoryUse>::operands(MU); |
456 | return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD)); |
457 | } |
458 | }; |
459 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
460 | |
461 | /// Represents phi nodes for memory accesses. |
462 | /// |
463 | /// These have the same semantic as regular phi nodes, with the exception that |
464 | /// only one phi will ever exist in a given basic block. |
465 | /// Guaranteeing one phi per block means guaranteeing there is only ever one |
466 | /// valid reaching MemoryDef/MemoryPHI along each path to the phi node. |
467 | /// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or |
468 | /// a MemoryPhi's operands. |
469 | /// That is, given |
470 | /// if (a) { |
471 | /// store %a |
472 | /// store %b |
473 | /// } |
474 | /// it *must* be transformed into |
475 | /// if (a) { |
476 | /// 1 = MemoryDef(liveOnEntry) |
477 | /// store %a |
478 | /// 2 = MemoryDef(1) |
479 | /// store %b |
480 | /// } |
481 | /// and *not* |
482 | /// if (a) { |
483 | /// 1 = MemoryDef(liveOnEntry) |
484 | /// store %a |
485 | /// 2 = MemoryDef(liveOnEntry) |
486 | /// store %b |
487 | /// } |
488 | /// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the |
489 | /// end of the branch, and if there are not two phi nodes, one will be |
490 | /// disconnected completely from the SSA graph below that point. |
491 | /// Because MemoryUse's do not generate new definitions, they do not have this |
492 | /// issue. |
493 | class MemoryPhi final : public MemoryAccess { |
494 | // allocate space for exactly zero operands |
495 | void *operator new(size_t S) { return User::operator new(S); } |
496 | |
497 | public: |
498 | void operator delete(void *Ptr) { User::operator delete(Ptr); } |
499 | |
500 | /// Provide fast operand accessors |
501 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
502 | |
503 | MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0) |
504 | : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver), |
505 | ReservedSpace(NumPreds) { |
506 | allocHungoffUses(ReservedSpace); |
507 | } |
508 | |
509 | // Block iterator interface. This provides access to the list of incoming |
510 | // basic blocks, which parallels the list of incoming values. |
511 | using block_iterator = BasicBlock **; |
512 | using const_block_iterator = BasicBlock *const *; |
513 | |
514 | block_iterator block_begin() { |
515 | return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace); |
516 | } |
517 | |
518 | const_block_iterator block_begin() const { |
519 | return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace); |
520 | } |
521 | |
522 | block_iterator block_end() { return block_begin() + getNumOperands(); } |
523 | |
524 | const_block_iterator block_end() const { |
525 | return block_begin() + getNumOperands(); |
526 | } |
527 | |
528 | iterator_range<block_iterator> blocks() { |
529 | return make_range(block_begin(), block_end()); |
530 | } |
531 | |
532 | iterator_range<const_block_iterator> blocks() const { |
533 | return make_range(block_begin(), block_end()); |
534 | } |
535 | |
536 | op_range incoming_values() { return operands(); } |
537 | |
538 | const_op_range incoming_values() const { return operands(); } |
539 | |
540 | /// Return the number of incoming edges |
541 | unsigned getNumIncomingValues() const { return getNumOperands(); } |
542 | |
543 | /// Return incoming value number x |
544 | MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); } |
545 | void setIncomingValue(unsigned I, MemoryAccess *V) { |
546 | assert(V && "PHI node got a null value!");
547 | setOperand(I, V); |
548 | } |
549 | |
550 | static unsigned getOperandNumForIncomingValue(unsigned I) { return I; } |
551 | static unsigned getIncomingValueNumForOperand(unsigned I) { return I; } |
552 | |
553 | /// Return incoming basic block number @p i. |
554 | BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; } |
555 | |
556 | /// Return incoming basic block corresponding |
557 | /// to an operand of the PHI. |
558 | BasicBlock *getIncomingBlock(const Use &U) const { |
559 | assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
560 | return getIncomingBlock(unsigned(&U - op_begin())); |
561 | } |
562 | |
563 | /// Return incoming basic block corresponding |
564 | /// to value use iterator. |
565 | BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const { |
566 | return getIncomingBlock(I.getUse()); |
567 | } |
568 | |
569 | void setIncomingBlock(unsigned I, BasicBlock *BB) { |
570 | assert(BB && "PHI node got a null basic block!");
571 | block_begin()[I] = BB; |
572 | } |
573 | |
574 | /// Add an incoming value to the end of the PHI list |
575 | void addIncoming(MemoryAccess *V, BasicBlock *BB) { |
576 | if (getNumOperands() == ReservedSpace) |
577 | growOperands(); // Get more space! |
578 | // Initialize some new operands. |
579 | setNumHungOffUseOperands(getNumOperands() + 1); |
580 | setIncomingValue(getNumOperands() - 1, V); |
581 | setIncomingBlock(getNumOperands() - 1, BB); |
582 | } |
583 | |
584 | /// Return the first index of the specified basic |
585 | /// block in the value list for this PHI. Returns -1 if no instance. |
586 | int getBasicBlockIndex(const BasicBlock *BB) const { |
587 | for (unsigned I = 0, E = getNumOperands(); I != E; ++I) |
588 | if (block_begin()[I] == BB) |
589 | return I; |
590 | return -1; |
591 | } |
592 | |
593 | MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const { |
594 | int Idx = getBasicBlockIndex(BB); |
595 | assert(Idx >= 0 && "Invalid basic block argument!");
596 | return getIncomingValue(Idx); |
597 | } |
598 | |
599 | // After deleting incoming position I, the order of the incoming values may change.
600 | void unorderedDeleteIncoming(unsigned I) { |
601 | unsigned E = getNumOperands(); |
602 | assert(I < E && "Cannot remove out of bounds Phi entry.");
603 | // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi |
604 | // itself should be deleted. |
605 | assert(E >= 2 && "Can only remove incoming values in MemoryPhis with "
606 |        "at least 2 values.");
607 | setIncomingValue(I, getIncomingValue(E - 1)); |
608 | setIncomingBlock(I, block_begin()[E - 1]); |
609 | setOperand(E - 1, nullptr); |
610 | block_begin()[E - 1] = nullptr; |
611 | setNumHungOffUseOperands(getNumOperands() - 1); |
612 | } |
613 | |
614 | // After deleting entries that satisfy Pred, remaining entries may have |
615 | // changed order. |
616 | template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) { |
617 | for (unsigned I = 0, E = getNumOperands(); I != E; ++I) |
618 | if (Pred(getIncomingValue(I), getIncomingBlock(I))) { |
619 | unorderedDeleteIncoming(I); |
620 | E = getNumOperands(); |
621 | --I; |
622 | } |
623 | assert(getNumOperands() >= 1 &&
624 |        "Cannot remove all incoming blocks in a MemoryPhi.");
625 | } |
626 | |
627 | // After deleting incoming block BB, the order of the incoming blocks may change.
628 | void unorderedDeleteIncomingBlock(const BasicBlock *BB) { |
629 | unorderedDeleteIncomingIf( |
630 | [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; }); |
631 | } |
632 | |
633 | // After deleting incoming memory access MA, the order of the incoming
634 | // accesses may change.
635 | void unorderedDeleteIncomingValue(const MemoryAccess *MA) { |
636 | unorderedDeleteIncomingIf( |
637 | [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; }); |
638 | } |
639 | |
640 | static bool classof(const Value *V) { |
641 | return V->getValueID() == MemoryPhiVal; |
642 | } |
643 | |
644 | void print(raw_ostream &OS) const; |
645 | |
646 | unsigned getID() const { return ID; } |
647 | |
648 | protected: |
649 | friend class MemorySSA; |
650 | |
651 | /// This is more complicated than the generic
652 | /// User::allocHungoffUses, because we have to allocate Uses for the incoming |
653 | /// values and pointers to the incoming blocks, all in one allocation. |
654 | void allocHungoffUses(unsigned N) { |
655 | User::allocHungoffUses(N, /* IsPhi */ true); |
656 | } |
657 | |
658 | private: |
659 | // For debugging only |
660 | const unsigned ID; |
661 | unsigned ReservedSpace; |
662 | |
663 | /// This grows the operand list in response to a push_back style of |
664 | /// operation. This grows the number of ops by 1.5 times. |
665 | void growOperands() { |
666 | unsigned E = getNumOperands(); |
667 | // 2 op PHI nodes are VERY common, so reserve at least enough for that. |
668 | ReservedSpace = std::max(E + E / 2, 2u); |
669 | growHungoffUses(ReservedSpace, /* IsPhi */ true); |
670 | } |
671 | |
672 | static void deleteMe(DerivedUser *Self); |
673 | }; |
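// Illustrative sketch (not part of this header): visiting each incoming
// (value, block) pair of a MemoryPhi, mirroring PHINode's interface.
inline void visitPhiIncoming(const MemoryPhi *Phi) {
  for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
    MemoryAccess *IncomingMA = Phi->getIncomingValue(I);
    BasicBlock *IncomingBB = Phi->getIncomingBlock(I);
    (void)IncomingMA; // A real client would inspect the pair here.
    (void)IncomingBB;
  }
}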
674 | |
675 | inline unsigned MemoryAccess::getID() const { |
676 | assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
677 |        "only memory defs and phis have ids");
678 | if (const auto *MD = dyn_cast<MemoryDef>(this)) |
679 | return MD->getID(); |
680 | return cast<MemoryPhi>(this)->getID(); |
681 | } |
682 | |
683 | inline bool MemoryUseOrDef::isOptimized() const { |
684 | if (const auto *MD = dyn_cast<MemoryDef>(this)) |
685 | return MD->isOptimized(); |
686 | return cast<MemoryUse>(this)->isOptimized(); |
687 | } |
688 | |
689 | inline MemoryAccess *MemoryUseOrDef::getOptimized() const { |
690 | if (const auto *MD = dyn_cast<MemoryDef>(this)) |
691 | return MD->getOptimized(); |
692 | return cast<MemoryUse>(this)->getOptimized(); |
693 | } |
694 | |
695 | inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) { |
696 | if (auto *MD = dyn_cast<MemoryDef>(this)) |
697 | MD->setOptimized(MA); |
698 | else |
699 | cast<MemoryUse>(this)->setOptimized(MA); |
700 | } |
701 | |
702 | inline void MemoryUseOrDef::resetOptimized() { |
703 | if (auto *MD = dyn_cast<MemoryDef>(this)) |
704 | MD->resetOptimized(); |
705 | else |
706 | cast<MemoryUse>(this)->resetOptimized(); |
707 | } |
708 | |
709 | template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {}; |
710 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
711 | |
712 | /// Encapsulates MemorySSA, including all data associated with memory |
713 | /// accesses. |
714 | class MemorySSA { |
715 | public: |
716 | MemorySSA(Function &, AliasAnalysis *, DominatorTree *); |
717 | |
718 | // MemorySSA must remain where it's constructed; Walkers it creates store |
719 | // pointers to it. |
720 | MemorySSA(MemorySSA &&) = delete; |
721 | |
722 | ~MemorySSA(); |
723 | |
724 | MemorySSAWalker *getWalker(); |
725 | MemorySSAWalker *getSkipSelfWalker(); |
726 | |
727 | /// Given a memory Mod/Ref'ing instruction, get the MemorySSA |
728 | /// access associated with it. If passed a basic block, gets the memory phi
729 | /// node that exists for that block, if there is one. Otherwise, this will get |
730 | /// a MemoryUseOrDef. |
731 | MemoryUseOrDef *getMemoryAccess(const Instruction *I) const { |
732 | return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I)); |
733 | } |
734 | |
735 | MemoryPhi *getMemoryAccess(const BasicBlock *BB) const { |
736 | return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB))); |
737 | } |
738 | |
739 | DominatorTree &getDomTree() const { return *DT; } |
740 | |
741 | void dump() const; |
742 | void print(raw_ostream &) const; |
743 | |
744 | /// Return true if \p MA represents the live on entry value |
745 | /// |
746 | /// Loads and stores from pointer arguments and other global values may be |
747 | /// defined by memory operations that do not occur in the current function, so |
748 | /// they may be live on entry to the function. MemorySSA represents such |
749 | /// memory state by the live on entry definition, which is guaranteed to occur |
750 | /// before any other memory access in the function. |
751 | inline bool isLiveOnEntryDef(const MemoryAccess *MA) const { |
752 | return MA == LiveOnEntryDef.get(); |
753 | } |
754 | |
755 | inline MemoryAccess *getLiveOnEntryDef() const { |
756 | return LiveOnEntryDef.get(); |
757 | } |
758 | |
759 | // Sadly, iplists, by default, own and delete pointers added to the
760 | // list. It's not currently possible to have two iplists for the same type, |
761 | // where one owns the pointers, and one does not. This is because the traits |
762 | // are per-type, not per-tag. If this ever changes, we should make the |
763 | // DefList an iplist. |
764 | using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>; |
765 | using DefsList = |
766 | simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>; |
767 | |
768 | /// Return the list of MemoryAccess's for a given basic block. |
769 | /// |
770 | /// This list is not modifiable by the user. |
771 | const AccessList *getBlockAccesses(const BasicBlock *BB) const { |
772 | return getWritableBlockAccesses(BB); |
773 | } |
774 | |
775 | /// Return the list of MemoryDef's and MemoryPhi's for a given basic |
776 | /// block. |
777 | /// |
778 | /// This list is not modifiable by the user. |
779 | const DefsList *getBlockDefs(const BasicBlock *BB) const { |
780 | return getWritableBlockDefs(BB); |
781 | } |
782 | |
783 | /// Given two memory accesses in the same basic block, determine |
784 | /// whether MemoryAccess \p A dominates MemoryAccess \p B. |
785 | bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const; |
786 | |
787 | /// Given two memory accesses in potentially different blocks, |
788 | /// determine whether MemoryAccess \p A dominates MemoryAccess \p B. |
789 | bool dominates(const MemoryAccess *A, const MemoryAccess *B) const; |
790 | |
791 | /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A |
792 | /// dominates Use \p B. |
793 | bool dominates(const MemoryAccess *A, const Use &B) const; |
794 | |
795 | enum class VerificationLevel { Fast, Full }; |
796 | /// Verify that MemorySSA is self consistent (IE definitions dominate |
797 | /// all uses, uses appear in the right places). This is used by unit tests. |
798 | void verifyMemorySSA(VerificationLevel = VerificationLevel::Fast) const; |
799 | |
800 | /// Used in various insertion functions to specify whether we are talking |
801 | /// about the beginning or end of a block. |
802 | enum InsertionPlace { Beginning, End, BeforeTerminator }; |
803 | |
804 | /// By default, uses are *not* optimized during MemorySSA construction. |
805 | /// Calling this method will attempt to optimize all MemoryUses, if this has |
806 | /// not happened yet for this MemorySSA instance. This should be done if you |
807 | /// plan to query the clobbering access for most uses, or if you walk the |
808 | /// def-use chain of uses. |
809 | void ensureOptimizedUses(); |
810 | |
811 | protected: |
812 | // Used by Memory SSA dumpers and wrapper pass |
813 | friend class MemorySSAPrinterLegacyPass; |
814 | friend class MemorySSAUpdater; |
815 | |
816 | void verifyOrderingDominationAndDefUses( |
817 | Function &F, VerificationLevel = VerificationLevel::Fast) const; |
818 | void verifyDominationNumbers(const Function &F) const; |
819 | void verifyPrevDefInPhis(Function &F) const; |
820 | |
821 | // This is used by the use optimizer and updater. |
822 | AccessList *getWritableBlockAccesses(const BasicBlock *BB) const { |
823 | auto It = PerBlockAccesses.find(BB); |
824 | return It == PerBlockAccesses.end() ? nullptr : It->second.get(); |
825 | } |
826 | |
827 | // This is used by the use optimizer and updater. |
828 | DefsList *getWritableBlockDefs(const BasicBlock *BB) const { |
829 | auto It = PerBlockDefs.find(BB); |
830 | return It == PerBlockDefs.end() ? nullptr : It->second.get(); |
831 | } |
832 | |
833 | // These are used by the updater to perform various internal MemorySSA
834 | // machinations. They do not always leave the IR in a correct state, and
835 | // rely on the updater to fix up what they break, so they are not public.
836 | |
837 | void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where); |
838 | void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point); |
839 | |
840 | // Rename the dominator tree branch rooted at BB. |
841 | void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal, |
842 | SmallPtrSetImpl<BasicBlock *> &Visited) { |
843 | renamePass(DT->getNode(BB), IncomingVal, Visited, true, true); |
844 | } |
845 | |
846 | void removeFromLookups(MemoryAccess *); |
847 | void removeFromLists(MemoryAccess *, bool ShouldDelete = true); |
848 | void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *, |
849 | InsertionPlace); |
850 | void insertIntoListsBefore(MemoryAccess *, const BasicBlock *, |
851 | AccessList::iterator); |
852 | MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *, |
853 | const MemoryUseOrDef *Template = nullptr, |
854 | bool CreationMustSucceed = true); |
855 | |
856 | private: |
857 | template <class AliasAnalysisType> class ClobberWalkerBase; |
858 | template <class AliasAnalysisType> class CachingWalker; |
859 | template <class AliasAnalysisType> class SkipSelfWalker; |
860 | class OptimizeUses; |
861 | |
862 | CachingWalker<AliasAnalysis> *getWalkerImpl(); |
863 | void buildMemorySSA(BatchAAResults &BAA); |
864 | |
865 | void prepareForMoveTo(MemoryAccess *, BasicBlock *); |
866 | void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const; |
867 | |
868 | using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>; |
869 | using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>; |
870 | |
871 | void markUnreachableAsLiveOnEntry(BasicBlock *BB); |
872 | MemoryPhi *createMemoryPhi(BasicBlock *BB); |
873 | template <typename AliasAnalysisType> |
874 | MemoryUseOrDef *createNewAccess(Instruction *, AliasAnalysisType *, |
875 | const MemoryUseOrDef *Template = nullptr); |
876 | void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &); |
877 | MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool); |
878 | void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool); |
879 | void renamePass(DomTreeNode *, MemoryAccess *IncomingVal, |
880 | SmallPtrSetImpl<BasicBlock *> &Visited, |
881 | bool SkipVisited = false, bool RenameAllUses = false); |
882 | AccessList *getOrCreateAccessList(const BasicBlock *); |
883 | DefsList *getOrCreateDefsList(const BasicBlock *); |
884 | void renumberBlock(const BasicBlock *) const; |
885 | AliasAnalysis *AA = nullptr; |
886 | DominatorTree *DT; |
887 | Function &F; |
888 | |
889 | // Memory SSA mappings |
890 | DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess; |
891 | |
892 | // These two mappings contain the main block to access/def mappings for |
893 | // MemorySSA. The list contained in PerBlockAccesses really owns all the |
894 | // MemoryAccesses. |
895 | // Both maps maintain the invariant that if a block is found in them, the |
896 | // corresponding list is not empty, and if a block is not found in them, the |
897 | // corresponding list is empty. |
898 | AccessMap PerBlockAccesses; |
899 | DefsMap PerBlockDefs; |
900 | std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef; |
901 | |
902 | // Domination mappings |
903 | // Note that the numbering is local to a block, even though the map is |
904 | // global. |
905 | mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid; |
906 | mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering; |
907 | |
908 | // Memory SSA building info |
909 | std::unique_ptr<ClobberWalkerBase<AliasAnalysis>> WalkerBase; |
910 | std::unique_ptr<CachingWalker<AliasAnalysis>> Walker; |
911 | std::unique_ptr<SkipSelfWalker<AliasAnalysis>> SkipWalker; |
912 | unsigned NextID = 0; |
913 | bool IsOptimized = false; |
914 | }; |
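// Illustrative sketch (not part of this header): typical read-only queries
// against a built MemorySSA. Calling ensureOptimizedUses() first follows the
// recommendation above when most uses will be inspected.
inline void dumpBlockMemoryState(MemorySSA &MSSA, const BasicBlock *BB,
                                 raw_ostream &OS) {
  MSSA.ensureOptimizedUses();
  if (MemoryPhi *Phi = MSSA.getMemoryAccess(BB))
    Phi->print(OS); // The block's single MemoryPhi, if any.
  if (const MemorySSA::AccessList *Accesses = MSSA.getBlockAccesses(BB))
    for (const MemoryAccess &MA : *Accesses)
      MA.print(OS); // Uses, defs, and the phi, in block order.
}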
915 | |
916 | /// Enables verification of MemorySSA. |
917 | /// |
918 | /// The checks which this flag enables are expensive and disabled by default
919 | /// unless `EXPENSIVE_CHECKS` is defined. The flag `-verify-memoryssa` can be |
920 | /// used to selectively enable the verification without re-compilation. |
921 | extern bool VerifyMemorySSA; |
922 | |
923 | // Internal MemorySSA utils, for use by MemorySSA classes and walkers |
924 | class MemorySSAUtil { |
925 | protected: |
926 | friend class GVNHoist; |
927 | friend class MemorySSAWalker; |
928 | |
929 | // This function should not be used by new passes. |
930 | static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU, |
931 | AliasAnalysis &AA); |
932 | }; |
933 | |
934 | // This pass does eager building and then printing of MemorySSA. It is used by |
935 | // the tests to be able to build, dump, and verify Memory SSA. |
936 | class MemorySSAPrinterLegacyPass : public FunctionPass { |
937 | public: |
938 | MemorySSAPrinterLegacyPass(); |
939 | |
940 | bool runOnFunction(Function &) override; |
941 | void getAnalysisUsage(AnalysisUsage &AU) const override; |
942 | |
943 | static char ID; |
944 | }; |
945 | |
946 | /// An analysis that produces \c MemorySSA for a function. |
947 | /// |
948 | class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> { |
949 | friend AnalysisInfoMixin<MemorySSAAnalysis>; |
950 | |
951 | static AnalysisKey Key; |
952 | |
953 | public: |
954 | // Wrap MemorySSA result to ensure address stability of internal MemorySSA |
955 | // pointers after construction. Use a wrapper class instead of plain |
956 | // unique_ptr<MemorySSA> to avoid build breakage on MSVC. |
957 | struct Result { |
958 | Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {} |
959 | |
960 | MemorySSA &getMSSA() { return *MSSA.get(); } |
961 | |
962 | std::unique_ptr<MemorySSA> MSSA; |
963 | |
964 | bool invalidate(Function &F, const PreservedAnalyses &PA, |
965 | FunctionAnalysisManager::Invalidator &Inv); |
966 | }; |
967 | |
968 | Result run(Function &F, FunctionAnalysisManager &AM); |
969 | }; |
970 | |
971 | /// Printer pass for \c MemorySSA. |
972 | class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> { |
973 | raw_ostream &OS; |
974 | |
975 | public: |
976 | explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {} |
977 | |
978 | PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); |
979 | }; |
980 | |
981 | /// Printer pass for \c MemorySSA via the walker. |
982 | class MemorySSAWalkerPrinterPass |
983 | : public PassInfoMixin<MemorySSAWalkerPrinterPass> { |
984 | raw_ostream &OS; |
985 | |
986 | public: |
987 | explicit MemorySSAWalkerPrinterPass(raw_ostream &OS) : OS(OS) {} |
988 | |
989 | PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); |
990 | }; |
991 | |
992 | /// Verifier pass for \c MemorySSA. |
993 | struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> { |
994 | PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); |
995 | }; |
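// Illustrative usage note (not part of this header): under the new pass
// manager, the printer pass above is reachable from `opt` as
// `print<memoryssa>`, and the expensive verification can be toggled with the
// `-verify-memoryssa` flag mentioned above, e.g.:
//   opt -passes='print<memoryssa>' -verify-memoryssa -disable-output in.ll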
996 | |
997 | /// Legacy analysis pass which computes \c MemorySSA. |
998 | class MemorySSAWrapperPass : public FunctionPass { |
999 | public: |
1000 | MemorySSAWrapperPass(); |
1001 | |
1002 | static char ID; |
1003 | |
1004 | bool runOnFunction(Function &) override; |
1005 | void releaseMemory() override; |
1006 | MemorySSA &getMSSA() { return *MSSA; } |
1007 | const MemorySSA &getMSSA() const { return *MSSA; } |
1008 | |
1009 | void getAnalysisUsage(AnalysisUsage &AU) const override; |
1010 | |
1011 | void verifyAnalysis() const override; |
1012 | void print(raw_ostream &OS, const Module *M = nullptr) const override; |
1013 | |
1014 | private: |
1015 | std::unique_ptr<MemorySSA> MSSA; |
1016 | }; |
1017 | |
1018 | /// This is the generic walker interface for walkers of MemorySSA. |
1019 | /// Walkers are used to further disambiguate the def-use chains MemorySSA
1020 | /// gives you, or to otherwise produce better info than MemorySSA
1021 | /// provides.
1022 | /// In particular, while the def-use chains provide basic information, and are |
1023 | /// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a |
1024 | /// MemoryUse as AliasAnalysis considers it, a user may want better or other
1025 | /// information. For example, they may want to use SCEV info to further
1026 | /// disambiguate memory accesses, or they may want the nearest dominating |
1027 | /// may-aliasing MemoryDef for a call or a store. This API enables a |
1028 | /// standardized interface to getting and using that info. |
1029 | class MemorySSAWalker { |
1030 | public: |
1031 | MemorySSAWalker(MemorySSA *); |
1032 | virtual ~MemorySSAWalker() = default; |
1033 | |
1034 | using MemoryAccessSet = SmallVector<MemoryAccess *, 8>; |
1035 | |
1036 | /// Given a memory Mod/Ref/ModRef'ing instruction, calling this |
1037 | /// will give you the nearest dominating MemoryAccess that Mod's the location |
1038 | /// the instruction accesses (by skipping any def which AA can prove does not |
1039 | /// alias the location(s) accessed by the instruction given). |
1040 | /// |
1041 | /// Note that this will return a single access, and it must dominate the |
1042 | /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction, |
1043 | /// this will return the MemoryPhi, not the operand. This means that |
1044 | /// given: |
1045 | /// if (a) { |
1046 | /// 1 = MemoryDef(liveOnEntry) |
1047 | /// store %a |
1048 | /// } else { |
1049 | /// 2 = MemoryDef(liveOnEntry) |
1050 | /// store %b |
1051 | /// } |
1052 | /// 3 = MemoryPhi(2, 1) |
1053 | /// MemoryUse(3) |
1054 | /// load %a |
1055 | /// |
1056 | /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef |
1057 | /// in the if (a) branch. |
1058 | MemoryAccess *getClobberingMemoryAccess(const Instruction *I) { |
1059 | MemoryAccess *MA = MSSA->getMemoryAccess(I); |
1060 | assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
1061 | return getClobberingMemoryAccess(MA); |
1062 | } |
1063 | |
1064 | /// Does the same thing as getClobberingMemoryAccess(const Instruction *I), |
1065 | /// but takes a MemoryAccess instead of an Instruction. |
1066 | virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0; |
1067 | |
1068 | /// Given a potentially clobbering memory access and a new location, |
1069 | /// calling this will give you the nearest dominating clobbering MemoryAccess |
1070 | /// (by skipping non-aliasing def links). |
1071 | /// |
1072 | /// This version of the function is mainly used to disambiguate phi translated |
1073 | /// pointers, where the value of a pointer may have changed from the initial |
1074 | /// memory access. Note that this expects to be handed either a MemoryUse, |
1075 | /// or an already potentially clobbering access. Unlike the above API, if |
1076 | /// given a MemoryDef that clobbers the pointer as the starting access, it |
1077 | /// will return that MemoryDef, whereas the above would return the clobber |
1078 | /// starting from the use side of the memory def. |
1079 | virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, |
1080 | const MemoryLocation &) = 0; |
1081 | |
1082 | /// Given a memory access, invalidate anything this walker knows about |
1083 | /// that access. |
1084 | /// This API is used by walkers that store information to perform basic cache |
1085 | /// invalidation. This will be called by MemorySSA at appropriate times for |
1086 | /// the walker it uses or returns. |
1087 | virtual void invalidateInfo(MemoryAccess *) {} |
1088 | |
1089 | protected: |
1090 | friend class MemorySSA; // For updating MSSA pointer in MemorySSA move |
1091 | // constructor. |
1092 | MemorySSA *MSSA; |
1093 | }; |
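// Illustrative sketch (not part of this header): asking the walker for the
// nearest dominating clobber of a load, per the contract documented above.
inline MemoryAccess *nearestClobber(MemorySSA &MSSA, Instruction *Load) {
  MemorySSAWalker *Walker = MSSA.getWalker();
  // May return a MemoryDef or a MemoryPhi; see the if/else example above.
  return Walker->getClobberingMemoryAccess(Load);
}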
1094 | |
1095 | /// A MemorySSAWalker that does no alias queries, or anything else. It |
1096 | /// simply returns the links as they were constructed by the builder. |
1097 | class DoNothingMemorySSAWalker final : public MemorySSAWalker { |
1098 | public: |
1099 | // Keep the overrides below from hiding the Instruction overload of |
1100 | // getClobberingMemoryAccess. |
1101 | using MemorySSAWalker::getClobberingMemoryAccess; |
1102 | |
1103 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override; |
1104 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, |
1105 | const MemoryLocation &) override; |
1106 | }; |
1107 | |
1108 | using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>; |
1109 | using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>; |
1110 | |
1111 | /// Iterator base class used to implement const and non-const iterators |
1112 | /// over the defining accesses of a MemoryAccess. |
1113 | template <class T> |
1114 | class memoryaccess_def_iterator_base |
1115 | : public iterator_facade_base<memoryaccess_def_iterator_base<T>, |
1116 | std::forward_iterator_tag, T, ptrdiff_t, T *, |
1117 | T *> { |
1118 | using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base; |
1119 | |
1120 | public: |
1121 | memoryaccess_def_iterator_base(T *Start) : Access(Start) {} |
1122 | memoryaccess_def_iterator_base() = default; |
1123 | |
1124 | bool operator==(const memoryaccess_def_iterator_base &Other) const { |
1125 | return Access == Other.Access && (!Access || ArgNo == Other.ArgNo); |
1126 | } |
1127 | |
1128 | // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the |
1129 | // block from the operand in constant time (In a PHINode, the uselist has |
1130 | // both, so it's just subtraction). We provide it as part of the |
1131 | // iterator to avoid callers having to linear walk to get the block. |
1132 | // If the operation becomes constant time on MemoryPHI's, this bit of |
1133 | // abstraction breaking should be removed. |
1134 | BasicBlock *getPhiArgBlock() const { |
1135 | MemoryPhi *MP = dyn_cast<MemoryPhi>(Access); |
1136 | assert(MP && "Tried to get phi arg block when not iterating over a PHI");
1137 | return MP->getIncomingBlock(ArgNo); |
1138 | } |
1139 | |
1140 | typename std::iterator_traits<BaseT>::pointer operator*() const { |
1141 | assert(Access && "Tried to access past the end of our iterator");
1142 | // Go to the first argument for phis, and the defining access for everything |
1143 | // else. |
1144 | if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) |
1145 | return MP->getIncomingValue(ArgNo); |
1146 | return cast<MemoryUseOrDef>(Access)->getDefiningAccess(); |
1147 | } |
1148 | |
1149 | using BaseT::operator++; |
1150 | memoryaccess_def_iterator_base &operator++() { |
1151 | assert(Access && "Hit end of iterator");
1152 | if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) { |
1153 | if (++ArgNo >= MP->getNumIncomingValues()) { |
1154 | ArgNo = 0; |
1155 | Access = nullptr; |
1156 | } |
1157 | } else { |
1158 | Access = nullptr; |
1159 | } |
1160 | return *this; |
1161 | } |
1162 | |
1163 | private: |
1164 | T *Access = nullptr; |
1165 | unsigned ArgNo = 0; |
1166 | }; |
1167 | |
1168 | inline memoryaccess_def_iterator MemoryAccess::defs_begin() { |
1169 | return memoryaccess_def_iterator(this); |
1170 | } |
1171 | |
1172 | inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const { |
1173 | return const_memoryaccess_def_iterator(this); |
1174 | } |
1175 | |
1176 | inline memoryaccess_def_iterator MemoryAccess::defs_end() { |
1177 | return memoryaccess_def_iterator(); |
1178 | } |
1179 | |
1180 | inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const { |
1181 | return const_memoryaccess_def_iterator(); |
1182 | } |
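// Illustrative sketch (not part of this header): walking the defs of one
// access with the iterators above; a phi yields each incoming value, a
// use/def yields its single defining access.
inline void visitDefs(MemoryAccess *MA) {
  for (memoryaccess_def_iterator It = MA->defs_begin(), E = MA->defs_end();
       It != E; ++It)
    (void)*It; // *It is a MemoryAccess defining this position.
}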
1183 | |
1184 | /// GraphTraits for a MemoryAccess, which walks defs in the normal case, |
1185 | /// and uses in the inverse case. |
1186 | template <> struct GraphTraits<MemoryAccess *> { |
1187 | using NodeRef = MemoryAccess *; |
1188 | using ChildIteratorType = memoryaccess_def_iterator; |
1189 | |
1190 | static NodeRef getEntryNode(NodeRef N) { return N; } |
1191 | static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); } |
1192 | static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); } |
1193 | }; |
1194 | |
1195 | template <> struct GraphTraits<Inverse<MemoryAccess *>> { |
1196 | using NodeRef = MemoryAccess *; |
1197 | using ChildIteratorType = MemoryAccess::iterator; |
1198 | |
1199 | static NodeRef getEntryNode(NodeRef N) { return N; } |
1200 | static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); } |
1201 | static ChildIteratorType child_end(NodeRef N) { return N->user_end(); } |
1202 | }; |
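| 
| // Minimal sketch (illustrative, not from the original header): these traits
| // let generic graph utilities walk MemorySSA. Assuming llvm/ADT/GraphTraits.h
| // is available, children<>() enumerates a node's defining accesses; the
| // helper name `printDirectDefs` is hypothetical.
| static inline void printDirectDefs(MemoryAccess *MA, raw_ostream &OS) {
|   // A MemoryPhi yields each incoming access; a MemoryUseOrDef yields its
|   // single defining access. liveOnEntry yields a null child, so guard it.
|   for (MemoryAccess *Child : children<MemoryAccess *>(MA))
|     if (Child)
|       Child->print(OS);
| }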
1203 | |
1204 | /// Provide an iterator that walks defs, giving both the memory access and
1205 | /// the current pointer location, updating the pointer location as it
1206 | /// changes due to phi node translation.
1207 | ///
1208 | /// This iterator, while somewhat specialized, is what most clients actually
1209 | /// want when walking upwards through MemorySSA def chains. It takes a pair of
1210 | /// <MemoryAccess, MemoryLocation> and walks defs, properly translating the
1211 | /// memory location through phi nodes for the user.
1212 | class upward_defs_iterator |
1213 | : public iterator_facade_base<upward_defs_iterator, |
1214 | std::forward_iterator_tag, |
1215 | const MemoryAccessPair> { |
1216 | using BaseT = upward_defs_iterator::iterator_facade_base; |
1217 | |
1218 | public: |
1219 | upward_defs_iterator(const MemoryAccessPair &Info, DominatorTree *DT, |
1220 | bool *PerformedPhiTranslation = nullptr) |
1221 | : DefIterator(Info.first), Location(Info.second), |
1222 | OriginalAccess(Info.first), DT(DT), |
1223 | PerformedPhiTranslation(PerformedPhiTranslation) { |
1224 | CurrentPair.first = nullptr; |
1225 | |
1226 | WalkingPhi = Info.first && isa<MemoryPhi>(Info.first); |
1227 | fillInCurrentPair(); |
1228 | } |
1229 | |
1230 | upward_defs_iterator() { CurrentPair.first = nullptr; } |
1231 | |
1232 | bool operator==(const upward_defs_iterator &Other) const { |
1233 | return DefIterator == Other.DefIterator; |
1234 | } |
1235 | |
1236 | typename std::iterator_traits<BaseT>::reference operator*() const { |
1237 | assert(DefIterator != OriginalAccess->defs_end() &&
1238 |        "Tried to access past the end of our iterator");
1239 | return CurrentPair; |
1240 | } |
1241 | |
1242 | using BaseT::operator++; |
1243 | upward_defs_iterator &operator++() { |
1244 | assert(DefIterator != OriginalAccess->defs_end() &&
1245 |        "Tried to access past the end of the iterator");
1246 | ++DefIterator; |
1247 | if (DefIterator != OriginalAccess->defs_end()) |
1248 | fillInCurrentPair(); |
1249 | return *this; |
1250 | } |
1251 | |
1252 | BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); } |
1253 | |
1254 | private: |
1255 | /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible |
1256 | /// loop. In particular, this guarantees that it only references a single |
1257 | /// MemoryLocation during execution of the containing function. |
1258 | bool IsGuaranteedLoopInvariant(Value *Ptr) const; |
1259 | |
1260 | void fillInCurrentPair() { |
1261 | CurrentPair.first = *DefIterator; |
1262 | CurrentPair.second = Location; |
1263 | if (WalkingPhi && Location.Ptr) { |
1264 | // Mark the size as unknown if the location is not guaranteed to be
1265 | // loop-invariant for any possible loop in the function. Setting the size
1266 | // to unknown guarantees that any memory accesses to locations after the
1267 | // pointer are treated as clobbers, which is important for catching
1268 | // loop-carried dependences.
1269 | if (Location.Ptr && |
1270 | !IsGuaranteedLoopInvariant(const_cast<Value *>(Location.Ptr))) |
1271 | CurrentPair.second = |
1272 | Location.getWithNewSize(LocationSize::beforeOrAfterPointer()); |
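| // Try to translate the pointer across the edge into the phi argument's
| // predecessor block. Note that PHITranslateValue returns false on success;
| // getAddr() then holds the (possibly rewritten) address valid in that
| // predecessor.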
1273 | PHITransAddr Translator( |
1274 | const_cast<Value *>(Location.Ptr), |
1275 | OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr); |
1276 | |
1277 | if (!Translator.PHITranslateValue(OriginalAccess->getBlock(), |
1278 | DefIterator.getPhiArgBlock(), DT, |
1279 | true)) { |
1280 | Value *TransAddr = Translator.getAddr(); |
1281 | if (TransAddr != Location.Ptr) { |
1282 | CurrentPair.second = CurrentPair.second.getWithNewPtr(TransAddr); |
1283 | |
1284 | if (TransAddr && |
1285 | !IsGuaranteedLoopInvariant(const_cast<Value *>(TransAddr))) |
1286 | CurrentPair.second = CurrentPair.second.getWithNewSize( |
1287 | LocationSize::beforeOrAfterPointer()); |
1288 | |
1289 | if (PerformedPhiTranslation) |
1290 | *PerformedPhiTranslation = true; |
1291 | } |
1292 | } |
1293 | } |
1294 | } |
1295 | |
1296 | MemoryAccessPair CurrentPair; |
1297 | memoryaccess_def_iterator DefIterator; |
1298 | MemoryLocation Location; |
1299 | MemoryAccess *OriginalAccess = nullptr; |
1300 | DominatorTree *DT = nullptr; |
1301 | bool WalkingPhi = false; |
1302 | bool *PerformedPhiTranslation = nullptr; |
1303 | }; |
1304 | |
1305 | inline upward_defs_iterator |
1306 | upward_defs_begin(const MemoryAccessPair &Pair, DominatorTree &DT, |
1307 | bool *PerformedPhiTranslation = nullptr) { |
1308 | return upward_defs_iterator(Pair, &DT, PerformedPhiTranslation); |
1309 | } |
1310 | |
1311 | inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); } |
1312 | |
1313 | inline iterator_range<upward_defs_iterator> |
1314 | upward_defs(const MemoryAccessPair &Pair, DominatorTree &DT) { |
1315 | return make_range(upward_defs_begin(Pair, DT), upward_defs_end()); |
1316 | } |
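| 
| // A hedged usage sketch (not part of the original header): starting from a
| // load's defining access, walk upwards while the iterator phi-translates the
| // location across MemoryPhi edges. `walkUpwardDefs` is a hypothetical name;
| // assumes llvm/IR/Instructions.h for LoadInst and that MSSA is built for the
| // containing function.
| static inline void walkUpwardDefs(MemorySSA &MSSA, LoadInst *LI,
|                                   DominatorTree &DT) {
|   MemoryUseOrDef *MU = MSSA.getMemoryAccess(LI);
|   MemoryAccessPair Start = {MU->getDefiningAccess(), MemoryLocation::get(LI)};
|   for (const MemoryAccessPair &P : upward_defs(Start, DT)) {
|     MemoryAccess *Def = P.first;          // candidate clobbering access
|     const MemoryLocation &Loc = P.second; // location, possibly phi-translated
|     (void)Def;
|     (void)Loc;
|   }
| }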
1317 | |
1318 | /// Walks the defining accesses of MemoryDefs. Stops after we hit something
1319 | /// that has no defining access (e.g. a MemoryPhi or liveOnEntry). Note that,
1320 | /// when comparing against a null def_chain_iterator, this will compare equal
1321 | /// only after walking said Phi/liveOnEntry.
1322 | ///
1323 | /// The UseOptimizedChain flag specifies whether to walk the clobbering
1324 | /// access chain or all the accesses.
1325 | ///
1326 | /// Normally, MemoryDefs are all just def/use linked together, so a def_chain
1327 | /// on a MemoryDef will walk all MemoryDefs above it in the program until it
1328 | /// hits a phi node. The optimized chain instead walks the clobbering access
1329 | /// of a store, so if you just want to know, given a store, what would next
1330 | /// clobber the same memory, you want the optimized chain.
1331 | template <class T, bool UseOptimizedChain = false> |
1332 | struct def_chain_iterator |
1333 | : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>, |
1334 | std::forward_iterator_tag, MemoryAccess *> { |
1335 | def_chain_iterator() : MA(nullptr) {} |
1336 | def_chain_iterator(T MA) : MA(MA) {} |
1337 | |
1338 | T operator*() const { return MA; } |
1339 | |
1340 | def_chain_iterator &operator++() { |
1341 | // N.B. liveOnEntry has a null defining access. |
1342 | if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) { |
1343 | if (UseOptimizedChain && MUD->isOptimized()) |
1344 | MA = MUD->getOptimized(); |
1345 | else |
1346 | MA = MUD->getDefiningAccess(); |
1347 | } else { |
1348 | MA = nullptr; |
1349 | } |
1350 | |
1351 | return *this; |
1352 | } |
1353 | |
1354 | bool operator==(const def_chain_iterator &O) const { return MA == O.MA; } |
1355 | |
1356 | private: |
1357 | T MA; |
1358 | }; |
1359 | |
1360 | template <class T> |
1361 | inline iterator_range<def_chain_iterator<T>> |
1362 | def_chain(T MA, MemoryAccess *UpTo = nullptr) { |
1363 | #ifdef EXPENSIVE_CHECKS |
1364 | assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
1365 |        "UpTo isn't in the def chain!");
1366 | #endif |
1367 | return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo)); |
1368 | } |
1369 | |
1370 | template <class T> |
1371 | inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) { |
1372 | return make_range(def_chain_iterator<T, true>(MA), |
1373 | def_chain_iterator<T, true>(nullptr)); |
1374 | } |
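| 
| // Illustrative sketch (hypothetical helper, not from the original header):
| // count the MemoryDefs crossed before the walk terminates at a MemoryPhi or
| // liveOnEntry. T is deduced as MemoryAccess * so the assignment from
| // getDefiningAccess() in operator++ is well-typed. Swapping in
| // optimized_def_chain(Start) would instead hop through optimized
| // (clobbering) accesses.
| static inline unsigned countDefsOnChain(MemoryAccess *Start) {
|   unsigned NumDefs = 0;
|   for (MemoryAccess *MA : def_chain(Start))
|     if (isa<MemoryDef>(MA))
|       ++NumDefs;
|   return NumDefs;
| }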
1375 | |
1376 | } // end namespace llvm |
1377 | |
1378 | #endif // LLVM_ANALYSIS_MEMORYSSA_H |