File: build/source/llvm/lib/Analysis/MemorySSA.cpp
Warning: line 2024, column 5: Called C++ object pointer is null
//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFGPrinter.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Use.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <memory>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memoryssa"

static cl::opt<std::string>
    DotCFGMSSA("dot-cfg-mssa",
               cl::value_desc("file name for generated dot file"),
               cl::desc("file name for generated dot file"), cl::init(""));

INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)

INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
                      "Memory SSA Printer", false, false)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
                    "Memory SSA Printer", false, false)

static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));

// Always verify MemorySSA if expensive checking is enabled.
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyMemorySSA = true;
#else
bool llvm::VerifyMemorySSA = false;
#endif

static cl::opt<bool, true>
    VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
                     cl::Hidden, cl::desc("Enable verification of MemorySSA."));

const static char LiveOnEntryStr[] = "liveOnEntry";

namespace {

/// An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};
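
// Illustrative sketch (not from the original source): with the annotator
// above, printed IR gains a comment per memory instruction showing its
// MemoryAccess, e.g.
//   ; 1 = MemoryDef(liveOnEntry)
//   store i32 0, ptr %p
//   ; MemoryUse(1)
//   %v = load i32, ptr %p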

/// An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAWalkerAnnotatedWriter : public AssemblyAnnotationWriter {
  MemorySSA *MSSA;
  MemorySSAWalker *Walker;
  BatchAAResults BAA;

public:
  MemorySSAWalkerAnnotatedWriter(MemorySSA *M)
      : MSSA(M), Walker(M->getWalker()), BAA(M->getAA()) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I)) {
      MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(MA, BAA);
      OS << "; " << *MA;
      if (Clobber) {
        OS << " - clobbered by ";
        if (MSSA->isLiveOnEntryDef(Clobber))
          OS << LiveOnEntryStr;
        else
          OS << *Clobber;
      }
      OS << "\n";
    }
  }
};

} // namespace

namespace {

/// Our current alias analysis API differentiates heavily between calls and
/// non-calls, and functions called on one usually assert on the other.
/// This class encapsulates the distinction to simplify other code that wants
/// "Memory affecting instructions and related data" to use as a key.
/// For example, this class is used as a densemap key in the use optimizer.
class MemoryLocOrCall {
public:
  bool IsCall = false;

  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      Call = C;
    } else {
      IsCall = false;
      // There is no such thing as a memorylocation for a fence inst, and it is
      // unique in that regard.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}

  const CallBase *getCall() const {
    assert(IsCall);
    return Call;
  }

  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (!IsCall)
      return Loc == Other.Loc;

    if (Call->getCalledOperand() != Other.Call->getCalledOperand())
      return false;

    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }

private:
  union {
    const CallBase *Call;
    MemoryLocation Loc;
  };
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<MemoryLocOrCall> {
  static inline MemoryLocOrCall getEmptyKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
  }

  static inline MemoryLocOrCall getTombstoneKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
  }

  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (!MLOC.IsCall)
      return hash_combine(
          MLOC.IsCall,
          DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCall()->getCalledOperand()));

    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm
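
// Usage sketch (hypothetical names, for illustration only): the DenseMapInfo
// specialization above lets calls and plain memory locations share one hash
// table, e.g.
//   DenseMap<MemoryLocOrCall, MemoryAccess *> LastClobber;
//   LastClobber[MemoryLocOrCall(MUD)] = SomeDef;
// Two calls hash/compare equal only if callee and all arguments match; two
// non-calls only if their MemoryLocations match.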

/// This does one-way checks to see if Use could theoretically be hoisted above
/// MayClobber. This will not check the other way around.
///
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
/// MayClobber, with no potentially clobbering operations in between them.
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;
  // Otherwise, volatile doesn't matter here. From the language reference:
  // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'

  // If a load is seq_cst, it cannot be moved above other loads. If its
  // ordering is weaker, it can be moved above other loads. We just need to be
  // sure that MayClobber isn't an acquire load, because loads can't be moved
  // above acquire loads.
  //
  // Note that this explicitly *does* allow the free reordering of monotonic
  // (or weaker) loads of the same address.
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}
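
// Illustrative example (not from the original source): given IR like
//   %mc = load atomic i32, ptr %p acquire   ; MayClobber
//   %u  = load i32, ptr %q                  ; Use
// areLoadsReorderable(%u, %mc) returns false, because nothing may be hoisted
// above an acquire load. With both loads monotonic or unordered it would
// return true, so MemorySSA need not treat %mc as clobbering %u.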

template <typename AliasAnalysisType>
static bool
instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
                         const Instruction *UseInst, AliasAnalysisType &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers, mostly.
    //
    // FIXME: We probably don't actually want MemorySSA to model these at all
    // (including creating MemoryAccesses for them): we just end up inventing
    // clobbers where they don't really exist at all. Please see D43269 for
    // context.
    switch (II->getIntrinsicID()) {
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
    case Intrinsic::experimental_noalias_scope_decl:
    case Intrinsic::pseudoprobe:
      return false;
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_label:
    case Intrinsic::dbg_value:
      llvm_unreachable("debuginfo shouldn't have associated defs!");
    default:
      break;
    }
  }

  if (auto *CB = dyn_cast_or_null<CallBase>(UseInst)) {
    ModRefInfo I = AA.getModRefInfo(DefInst, CB);
    return isModOrRefSet(I);
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast_or_null<LoadInst>(UseInst))
      return !areLoadsReorderable(UseLoad, DefLoad);

  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  return isModSet(I);
}
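
// Example of the decision logic above (illustrative only): for a def
// `store i32 0, ptr %p` queried against a UseLoc covering %p, getModRefInfo
// reports Mod and the def counts as a clobber. If the use is a call, any Mod
// *or* Ref relationship counts, since the call may both read and write the
// memory the def touches.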

template <typename AliasAnalysisType>
static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUseOrDef *MU,
                                     const MemoryLocOrCall &UseMLOC,
                                     AliasAnalysisType &AA) {
  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
  // to exist while MemoryLocOrCall is pushed through places.
  if (UseMLOC.IsCall)
    return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
                                    AA);
  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
                                  AA);
}

// Return true when MD may alias MU, return false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                        AliasAnalysis &AA) {
  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA);
}

namespace {

struct UpwardsMemoryQuery {
  // True if our original query started off as a call
  bool IsCall = false;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local domination
  const MemoryAccess *OriginalAccess = nullptr;
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};

} // end anonymous namespace

static bool isUseTriviallyOptimizableToLiveOnEntry(BatchAAResults &AA,
                                                   const Instruction *I) {
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    return I->hasMetadata(LLVMContext::MD_invariant_load) ||
           !isModSet(AA.getModRefInfoMask(MemoryLocation::get(LI)));
  }
  return false;
}
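
// For instance (illustrative): a load tagged with !invariant.load metadata,
// or a load whose location getModRefInfoMask proves is never modified (e.g.
// a `constant` global), can have its defining access set straight to
// liveOnEntry without walking any defs.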

/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` can clobber `Start`.
///
/// This is meant to be as simple and self-contained as possible. Because it
/// uses no cache, etc., it can be relatively expensive.
///
/// \param Start The MemoryAccess that we want to walk from.
/// \param ClobberAt A clobber for Start.
/// \param StartLoc The MemoryLocation for Start.
/// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
/// \param Query The UpwardsMemoryQuery we used for our search.
/// \param AA The AliasAnalysis we used for our search.
/// \param AllowImpreciseClobber Always false, unless we do relaxed verify.

LLVM_ATTRIBUTE_UNUSED static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, BatchAAResults &AA,
                   bool AllowImpreciseClobber = false) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  SmallVector<ConstMemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
  // is found, complain.
  while (!Worklist.empty()) {
    auto MAP = Worklist.pop_back_val();
    // All we care about is that nothing from Start to ClobberAt clobbers
    // Start. We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (const auto *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't essentially free, so don't use `|=`,
          // since it won't let us short-circuit.
          //
          // Also, note that this can't be hoisted out of the `Worklist` loop,
          // since MD may only act as a clobber for 1 of N MemoryLocations.
          FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
          if (!FoundClobber) {
            if (instructionClobbersQuery(MD, MAP.second, Query.Inst, AA))
              FoundClobber = true;
          }
        }
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
        // If Start is a Def, skip self.
        if (MD == Start)
          continue;

        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
        (void)MU;
        assert(MU == Start &&
               "Can only find use in def chain if Start is a use");
        continue;
      }

      assert(isa<MemoryPhi>(MA));

      // Add reachable phi predecessors
      for (auto ItB = upward_defs_begin(
                    {const_cast<MemoryAccess *>(MA), MAP.second},
                    MSSA.getDomTree()),
                ItE = upward_defs_end();
           ItB != ItE; ++ItB)
        if (MSSA.getDomTree().isReachableFromEntry(ItB.getPhiArgBlock()))
          Worklist.emplace_back(*ItB);
    }
  }

  // If the verify is done following an optimization, it's possible that
  // ClobberAt was a conservative clobbering, that we can now infer is not a
  // true clobbering access. Don't fail the verify if that's the case.
  // We do have accesses that claim they're optimized, but could be optimized
  // further. Updating all these can be expensive, so allow it for now (FIXME).
  if (AllowImpreciseClobber)
    return;

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}

namespace {

/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
/// in one class.
class ClobberWalker {
  /// Save a few bytes by using unsigned instead of size_t.
  using ListIndex = unsigned;

  /// Represents a span of contiguous MemoryDefs, potentially ending in a
  /// MemoryPhi.
  struct DefPath {
    MemoryLocation Loc;
    // Note that, because we always walk in reverse, Last will always dominate
    // First. Also note that First and Last are inclusive.
    MemoryAccess *First;
    MemoryAccess *Last;
    std::optional<ListIndex> Previous;

    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            std::optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}

    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            std::optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
  };

  const MemorySSA &MSSA;
  DominatorTree &DT;
  BatchAAResults *AA;
  UpwardsMemoryQuery *Query;
  unsigned *UpwardWalkLimit;

  // Phi optimization bookkeeping:
  // List of DefPath to process during the current phi optimization walk.
  SmallVector<DefPath, 32> Paths;
  // List of visited <Access, Location> pairs; we can skip paths already
  // visited with the same memory location.
  DenseSet<ConstMemoryAccessPair> VisitedPhis;

  /// Find the nearest def or phi that `From` can legally be optimized to.
  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
    assert(From->getNumOperands() && "Phi with no operands?");

    BasicBlock *BB = From->getBlock();
    MemoryAccess *Result = MSSA.getLiveOnEntryDef();
    DomTreeNode *Node = DT.getNode(BB);
    while ((Node = Node->getIDom())) {
      auto *Defs = MSSA.getBlockDefs(Node->getBlock());
      if (Defs)
        return &*Defs->rbegin();
    }
    return Result;
  }
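
  // In other words (illustrative restatement): walk up the immediate-dominator
  // chain from the phi's block and return the last def of the first dominating
  // block that has any; if no dominator has defs, the phi can legally be
  // optimized all the way to liveOnEntry.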

  /// Result of calling walkToPhiOrClobber.
  struct UpwardsWalkResult {
    /// The "Result" of the walk. Either a clobber, the last thing we walked,
    /// or both. Includes alias info when a clobber is found.
    MemoryAccess *Result;
    bool IsKnownClobber;
  };

  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
  /// This will update Desc.Last as it walks. It will (optionally) also stop at
  /// StopAt.
  ///
  /// This does not test for whether StopAt is a clobber.
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
                     const MemoryAccess *SkipStopAt = nullptr) const {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
    assert(UpwardWalkLimit && "Need a valid walk limit");
    bool LimitAlreadyReached = false;
    // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
    // it to 1. This will not do any alias() calls. It either returns in the
    // first iteration in the loop below, or is set back to 0 if all def chains
    // are free of MemoryDefs.
    if (!*UpwardWalkLimit) {
      *UpwardWalkLimit = 1;
      LimitAlreadyReached = true;
    }

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      if (Current == StopAt || Current == SkipStopAt)
        return {Current, false};

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {
        if (MSSA.isLiveOnEntryDef(MD))
          return {MD, true};

        if (!--*UpwardWalkLimit)
          return {Current, true};

        if (instructionClobbersQuery(MD, Desc.Loc, Query->Inst, *AA))
          return {MD, true};
      }
    }

    if (LimitAlreadyReached)
      *UpwardWalkLimit = 0;

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false};
  }
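
  // Note on the budget (illustrative): when *UpwardWalkLimit hits zero
  // mid-walk, the current MemoryDef is conservatively reported as a known
  // clobber without an alias query. That is always safe: the answer may be
  // imprecise, but never wrong.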

  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
    auto UpwardDefsBegin = upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT);
    auto UpwardDefs = make_range(UpwardDefsBegin, upward_defs_end());
    for (const MemoryAccessPair &P : UpwardDefs) {
      PausedSearches.push_back(Paths.size());
      Paths.emplace_back(P.second, P.first, PriorNode);
    }
  }

  /// Represents a search that terminated after finding a clobber. This clobber
  /// may or may not be present in the path of defs from LastNode..SearchStart,
  /// since it may have been retrieved from cache.
  struct TerminatedPath {
    MemoryAccess *Clobber;
    ListIndex LastNode;
  };

  /// Get an access that keeps us from optimizing to the given phi.
  ///
  /// PausedSearches is an array of indices into the Paths array. Its incoming
  /// value is the indices of searches that stopped at the last phi optimization
  /// target. It's left in an unspecified state.
  ///
  /// If this returns std::nullopt, NewPaused is a vector of searches that
  /// terminated at StopWhere. Otherwise, NewPaused is left in an unspecified
  /// state.
  std::optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    // BFS vs DFS really doesn't make a difference here, so just do a DFS with
    // PausedSearches as our stack.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      //
      // NOTE: That we just drop these paths on the ground makes caching
      // behavior sporadic. e.g. given a diamond:
      //  A
      // B C
      //  D
      //
      // ...If we walk D, B, A, C, we'll only cache the result of phi
      // optimization for A, B, and D; C will be skipped because it dies here.
      // This arguably isn't the worst thing ever, since:
      //   - We generally query things in a top-down order, so if we got below D
      //     without needing cache entries for {C, MemLoc}, then chances are
      //     that those cache entries would end up ultimately unused.
      //   - We still cache things for A, so C only needs to walk up a bit.
      // If this behavior becomes problematic, we can fix without a ton of extra
      // work.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
        continue;

      const MemoryAccess *SkipStopWhere = nullptr;
      if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
        assert(isa<MemoryDef>(Query->OriginalAccess));
        SkipStopWhere = Query->OriginalAccess;
      }

      UpwardsWalkResult Res = walkToPhiOrClobber(Node,
                                                 /*StopAt=*/StopWhere,
                                                 /*SkipStopAt=*/SkipStopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);

        // If this wasn't a cache hit, we hit a clobber when walking. That's a
        // failure.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
        // We've hit our target. Save this path off for if we want to continue
        // walking. If we are in the mode of skipping the OriginalAccess, and
        // we've reached back to the OriginalAccess, do not save path, we've
        // just looped back to self.
        if (Res.Result != SkipStopWhere)
          NewPaused.push_back(PathIndex);
        continue;
      }

      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return std::nullopt;
  }

  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() = default;
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.has_value() != O.N.has_value())
        return false;
      return !N || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W = nullptr;
    std::optional<ListIndex> N;
  };

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }

  struct OptznResult {
    /// The path that contains our result.
    TerminatedPath PrimaryClobber;
    /// The paths that we can legally cache back from, but that aren't
    /// necessarily the result of the Phi optimization.
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };

  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }

  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
  /// that act as legal clobbers. Note that this won't return *all* clobbers.
  ///
  /// Phi optimization algorithm tl;dr:
  ///   - Find the earliest def/phi, A, we can optimize to
  ///   - Find if all paths from the starting memory access ultimately reach A
  ///   - If not, optimization isn't possible.
  ///   - Otherwise, walk from A to another clobber or phi, A'.
  ///   - If A' is a def, we're done.
  ///   - If A' is a phi, try to optimize it.
  ///
  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
                             const MemoryLocation &Loc) {
    assert(Paths.empty() && VisitedPhis.empty() &&
           "Reset the optimization state.");

    Paths.emplace_back(Loc, Start, Phi, std::nullopt);
    // Stores how many "valid" optimization nodes we had prior to calling
    // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
    auto PriorPathsSize = Paths.size();

    SmallVector<ListIndex, 16> PausedSearches;
    SmallVector<ListIndex, 8> NewPaused;
    SmallVector<TerminatedPath, 4> TerminatedPaths;

    addSearches(Phi, PausedSearches, 0);

    // Moves the TerminatedPath with the "most dominated" Clobber to the end of
    // Paths.
    auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
      assert(!Paths.empty() && "Need a path to move");
      auto Dom = Paths.begin();
      for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
        if (!MSSA.dominates(I->Clobber, Dom->Clobber))
          Dom = I;
      auto Last = Paths.end() - 1;
      if (Last != Dom)
        std::iter_swap(Last, Dom);
    };

    MemoryPhi *Current = Phi;
    while (true) {
      assert(!MSSA.isLiveOnEntryDef(Current) &&
             "liveOnEntry wasn't treated as a clobber?");

      const auto *Target = getWalkTarget(Current);
      // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
      // optimization for the prior phi.
      assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
        return MSSA.dominates(P.Clobber, Target);
      }));

      // FIXME: This is broken, because the Blocker may be reported to be
      // liveOnEntry, and we'll happily wait for that to disappear (read: never)
      // For the moment, this is fine, since we do nothing with blocker info.
      if (std::optional<TerminatedPath> Blocker = getBlockingAccess(
              Target, PausedSearches, NewPaused, TerminatedPaths)) {

        // Find the node we started at. We can't search based on N->Last, since
        // we may have gone around a loop with a different MemoryLocation.
        auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
          return defPathIndex(N) < PriorPathsSize;
        });
        assert(Iter != def_path_iterator());

        DefPath &CurNode = *Iter;
        assert(CurNode.Last == Current);

        // Two things:
        // A. We can't reliably cache all of NewPaused back. Consider a case
        //    where we have two paths in NewPaused; one of which can't optimize
        //    above this phi, whereas the other can. If we cache the second path
        //    back, we'll end up with suboptimal cache entries. We can handle
        //    cases like this a bit better when we either try to find all
        //    clobbers that block phi optimization, or when our cache starts
        //    supporting unfinished searches.
        // B. We can't reliably cache TerminatedPaths back here without doing
        //    extra checks; consider a case like:
        //       T
        //      / \
        //     D   C
        //      \ /
        //       S
        //    Where T is our target, C is a node with a clobber on it, D is a
        //    diamond (with a clobber *only* on the left or right node, N), and
        //    S is our start. Say we walk to D, through the node opposite N
        //    (read: ignoring the clobber), and see a cache entry in the top
        //    node of D. That cache entry gets put into TerminatedPaths. We then
        //    walk up to C (N is later in our worklist), find the clobber, and
        //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
        //    the bottom part of D to the cached clobber, ignoring the clobber
        //    in N. Again, this problem goes away if we start tracking all
        //    blockers for a given phi optimization.
        TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
        return {Result, {}};
      }

      // If there's nothing left to search, then all paths led to valid clobbers
      // that we got from our cache; pick the nearest to the start, and allow
      // the rest to be cached back.
      if (NewPaused.empty()) {
        MoveDominatedPathToEnd(TerminatedPaths);
        TerminatedPath Result = TerminatedPaths.pop_back_val();
        return {Result, std::move(TerminatedPaths)};
      }

      MemoryAccess *DefChainEnd = nullptr;
      SmallVector<TerminatedPath, 4> Clobbers;
      for (ListIndex Paused : NewPaused) {
        UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
        if (WR.IsKnownClobber)
          Clobbers.push_back({WR.Result, Paused});
        else
          // Micro-opt: If we hit the end of the chain, save it.
          DefChainEnd = WR.Result;
      }

      if (!TerminatedPaths.empty()) {
        // If we couldn't find the dominating phi/liveOnEntry in the above loop,
        // do it now.
        if (!DefChainEnd)
          for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
            DefChainEnd = MA;
        assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");

        // If any of the terminated paths don't dominate the phi we'll try to
        // optimize, we need to figure out what they are and quit.
        const BasicBlock *ChainBB = DefChainEnd->getBlock();
        for (const TerminatedPath &TP : TerminatedPaths) {
          // Because we know that DefChainEnd is as "high" as we can go, we
          // don't need local dominance checks; BB dominance is sufficient.
          if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
            Clobbers.push_back(TP);
        }
      }

      // If we have clobbers in the def chain, find the one closest to Current
      // and quit.
      if (!Clobbers.empty()) {
        MoveDominatedPathToEnd(Clobbers);
        TerminatedPath Result = Clobbers.pop_back_val();
        return {Result, std::move(Clobbers)};
      }

      assert(all_of(NewPaused,
                    [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

      // Because liveOnEntry is a clobber, this must be a phi.
      auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);

      PriorPathsSize = Paths.size();
      PausedSearches.clear();
      for (ListIndex I : NewPaused)
        addSearches(DefChainPhi, PausedSearches, I);
      NewPaused.clear();

      Current = DefChainPhi;
    }
  }

915 | void verifyOptResult(const OptznResult &R) const { | ||||
916 | assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {(static_cast <bool> (all_of(R.OtherClobbers, [&](const TerminatedPath &P) { return MSSA.dominates(P.Clobber, R. PrimaryClobber.Clobber); })) ? void (0) : __assert_fail ("all_of(R.OtherClobbers, [&](const TerminatedPath &P) { return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber); })" , "llvm/lib/Analysis/MemorySSA.cpp", 918, __extension__ __PRETTY_FUNCTION__ )) | ||||
917 | return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);(static_cast <bool> (all_of(R.OtherClobbers, [&](const TerminatedPath &P) { return MSSA.dominates(P.Clobber, R. PrimaryClobber.Clobber); })) ? void (0) : __assert_fail ("all_of(R.OtherClobbers, [&](const TerminatedPath &P) { return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber); })" , "llvm/lib/Analysis/MemorySSA.cpp", 918, __extension__ __PRETTY_FUNCTION__ )) | ||||
918 | }))(static_cast <bool> (all_of(R.OtherClobbers, [&](const TerminatedPath &P) { return MSSA.dominates(P.Clobber, R. PrimaryClobber.Clobber); })) ? void (0) : __assert_fail ("all_of(R.OtherClobbers, [&](const TerminatedPath &P) { return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber); })" , "llvm/lib/Analysis/MemorySSA.cpp", 918, __extension__ __PRETTY_FUNCTION__ )); | ||||
919 | } | ||||
920 | |||||
921 | void resetPhiOptznState() { | ||||
922 | Paths.clear(); | ||||
923 | VisitedPhis.clear(); | ||||
924 | } | ||||
925 | |||||
926 | public: | ||||
927 | ClobberWalker(const MemorySSA &MSSA, DominatorTree &DT) | ||||
928 | : MSSA(MSSA), DT(DT) {} | ||||
929 | |||||
930 | /// Finds the nearest clobber for the given query, optimizing phis if | ||||
931 | /// possible. | ||||
932 | MemoryAccess *findClobber(BatchAAResults &BAA, MemoryAccess *Start, | ||||
933 | UpwardsMemoryQuery &Q, unsigned &UpWalkLimit) { | ||||
934 | AA = &BAA; | ||||
935 | Query = &Q; | ||||
936 | UpwardWalkLimit = &UpWalkLimit; | ||||
937 | // Starting limit must be > 0. | ||||
938 | if (!UpWalkLimit) | ||||
939 | UpWalkLimit++; | ||||
940 | |||||
941 | MemoryAccess *Current = Start; | ||||
942 | // This walker pretends uses don't exist. If we're handed one, silently grab | ||||
943 | // its def. (This has the nice side-effect of ensuring we never cache uses) | ||||
944 | if (auto *MU = dyn_cast<MemoryUse>(Start)) | ||||
945 | Current = MU->getDefiningAccess(); | ||||
946 | |||||
947 | DefPath FirstDesc(Q.StartingLoc, Current, Current, std::nullopt); | ||||
948 | // Fast path for the overly-common case (no crazy phi optimization | ||||
949 | // necessary) | ||||
950 | UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc); | ||||
951 | MemoryAccess *Result; | ||||
952 | if (WalkResult.IsKnownClobber) { | ||||
953 | Result = WalkResult.Result; | ||||
954 | } else { | ||||
955 | OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last), | ||||
956 | Current, Q.StartingLoc); | ||||
957 | verifyOptResult(OptRes); | ||||
958 | resetPhiOptznState(); | ||||
959 | Result = OptRes.PrimaryClobber.Clobber; | ||||
960 | } | ||||
961 | |||||
962 | #ifdef EXPENSIVE_CHECKS | ||||
963 | if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0) | ||||
964 | checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, BAA); | ||||
965 | #endif | ||||
966 | return Result; | ||||
967 | } | ||||
968 | }; | ||||
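// Editorial sketch (not in the original source): a hypothetical caller-side
// view of findClobber, assuming an existing ClobberWalker `W`, a
// BatchAAResults `BAA`, and a MemoryDef `MD` whose clobber we want:
//
//   UpwardsMemoryQuery Q(MD->getMemoryInst(), MD);
//   unsigned Limit = 100;  // walk budget; findClobber bumps a 0 up to 1
//   MemoryAccess *Clobber = W.findClobber(BAA, MD, Q, Limit);
//
// On return, Clobber is liveOnEntry, a clobbering MemoryDef, or the MemoryPhi
// the (possibly limit-capped) walk could not look through.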
969 | |||||
970 | struct RenamePassData { | ||||
971 | DomTreeNode *DTN; | ||||
972 | DomTreeNode::const_iterator ChildIt; | ||||
973 | MemoryAccess *IncomingVal; | ||||
974 | |||||
975 | RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It, | ||||
976 | MemoryAccess *M) | ||||
977 | : DTN(D), ChildIt(It), IncomingVal(M) {} | ||||
978 | |||||
979 | void swap(RenamePassData &RHS) { | ||||
980 | std::swap(DTN, RHS.DTN); | ||||
981 | std::swap(ChildIt, RHS.ChildIt); | ||||
982 | std::swap(IncomingVal, RHS.IncomingVal); | ||||
983 | } | ||||
984 | }; | ||||
985 | |||||
986 | } // end anonymous namespace | ||||
987 | |||||
988 | namespace llvm { | ||||
989 | |||||
990 | class MemorySSA::ClobberWalkerBase { | ||||
991 | ClobberWalker Walker; | ||||
992 | MemorySSA *MSSA; | ||||
993 | |||||
994 | public: | ||||
995 | ClobberWalkerBase(MemorySSA *M, DominatorTree *D) : Walker(*M, *D), MSSA(M) {} | ||||
996 | |||||
997 | MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, | ||||
998 | const MemoryLocation &, | ||||
999 | BatchAAResults &, unsigned &); | ||||
1000 | // The third argument (bool) defines whether the clobber search should skip | ||||
1001 | // the original queried access. If true, there will be a follow-up query | ||||
1002 | // searching for a clobber access past "self". Note that the Optimized access | ||||
1003 | // is not updated if a new clobber is found by this SkipSelf search. If this | ||||
1004 | // additional query becomes heavily used, we may decide to cache the result. | ||||
1005 | // Walker instantiations will decide how to set the SkipSelf bool. | ||||
1006 | MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, BatchAAResults &, | ||||
1007 | unsigned &, bool, | ||||
1008 | bool UseInvariantGroup = true); | ||||
1009 | }; | ||||
1010 | |||||
1011 | /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no | ||||
1012 | /// longer does caching on its own, but the name has been retained for the | ||||
1013 | /// moment. | ||||
1014 | class MemorySSA::CachingWalker final : public MemorySSAWalker { | ||||
1015 | ClobberWalkerBase *Walker; | ||||
1016 | |||||
1017 | public: | ||||
1018 | CachingWalker(MemorySSA *M, ClobberWalkerBase *W) | ||||
1019 | : MemorySSAWalker(M), Walker(W) {} | ||||
1020 | ~CachingWalker() override = default; | ||||
1021 | |||||
1022 | using MemorySSAWalker::getClobberingMemoryAccess; | ||||
1023 | |||||
1024 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA, | ||||
1025 | unsigned &UWL) { | ||||
1026 | return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false); | ||||
1027 | } | ||||
1028 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, | ||||
1029 | const MemoryLocation &Loc, | ||||
1030 | BatchAAResults &BAA, unsigned &UWL) { | ||||
1031 | return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL); | ||||
1032 | } | ||||
1033 | // This method is not accessible outside of this file. | ||||
1034 | MemoryAccess *getClobberingMemoryAccessWithoutInvariantGroup( | ||||
1035 | MemoryAccess *MA, BatchAAResults &BAA, unsigned &UWL) { | ||||
1036 | return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false, false); | ||||
1037 | } | ||||
1038 | |||||
1039 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, | ||||
1040 | BatchAAResults &BAA) override { | ||||
1041 | unsigned UpwardWalkLimit = MaxCheckLimit; | ||||
1042 | return getClobberingMemoryAccess(MA, BAA, UpwardWalkLimit); | ||||
1043 | } | ||||
1044 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, | ||||
1045 | const MemoryLocation &Loc, | ||||
1046 | BatchAAResults &BAA) override { | ||||
1047 | unsigned UpwardWalkLimit = MaxCheckLimit; | ||||
1048 | return getClobberingMemoryAccess(MA, Loc, BAA, UpwardWalkLimit); | ||||
1049 | } | ||||
1050 | |||||
1051 | void invalidateInfo(MemoryAccess *MA) override { | ||||
1052 | if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) | ||||
1053 | MUD->resetOptimized(); | ||||
1054 | } | ||||
1055 | }; | ||||
1056 | |||||
1057 | class MemorySSA::SkipSelfWalker final : public MemorySSAWalker { | ||||
1058 | ClobberWalkerBase *Walker; | ||||
1059 | |||||
1060 | public: | ||||
1061 | SkipSelfWalker(MemorySSA *M, ClobberWalkerBase *W) | ||||
1062 | : MemorySSAWalker(M), Walker(W) {} | ||||
1063 | ~SkipSelfWalker() override = default; | ||||
1064 | |||||
1065 | using MemorySSAWalker::getClobberingMemoryAccess; | ||||
1066 | |||||
1067 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA, | ||||
1068 | unsigned &UWL) { | ||||
1069 | return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, true); | ||||
1070 | } | ||||
1071 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, | ||||
1072 | const MemoryLocation &Loc, | ||||
1073 | BatchAAResults &BAA, unsigned &UWL) { | ||||
1074 | return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL); | ||||
1075 | } | ||||
1076 | |||||
1077 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, | ||||
1078 | BatchAAResults &BAA) override { | ||||
1079 | unsigned UpwardWalkLimit = MaxCheckLimit; | ||||
1080 | return getClobberingMemoryAccess(MA, BAA, UpwardWalkLimit); | ||||
1081 | } | ||||
1082 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, | ||||
1083 | const MemoryLocation &Loc, | ||||
1084 | BatchAAResults &BAA) override { | ||||
1085 | unsigned UpwardWalkLimit = MaxCheckLimit; | ||||
1086 | return getClobberingMemoryAccess(MA, Loc, BAA, UpwardWalkLimit); | ||||
1087 | } | ||||
1088 | |||||
1089 | void invalidateInfo(MemoryAccess *MA) override { | ||||
1090 | if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) | ||||
1091 | MUD->resetOptimized(); | ||||
1092 | } | ||||
1093 | }; | ||||
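// Editorial note: SkipSelfWalker differs from CachingWalker only in the
// SkipSelf bool it forwards (true here, false above). Per the comment on
// getClobberingMemoryAccessBase, that requests a follow-up search for a
// clobber past "self", and the cached Optimized access is deliberately not
// updated with that result.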
1094 | |||||
1095 | } // end namespace llvm | ||||
1096 | |||||
1097 | void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal, | ||||
1098 | bool RenameAllUses) { | ||||
1099 | // Pass through values to our successors | ||||
1100 | for (const BasicBlock *S : successors(BB)) { | ||||
1101 | auto It = PerBlockAccesses.find(S); | ||||
1102 | // Rename the phi nodes in our successor block | ||||
1103 | if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front())) | ||||
1104 | continue; | ||||
1105 | AccessList *Accesses = It->second.get(); | ||||
1106 | auto *Phi = cast<MemoryPhi>(&Accesses->front()); | ||||
1107 | if (RenameAllUses) { | ||||
1108 | bool ReplacementDone = false; | ||||
1109 | for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) | ||||
1110 | if (Phi->getIncomingBlock(I) == BB) { | ||||
1111 | Phi->setIncomingValue(I, IncomingVal); | ||||
1112 | ReplacementDone = true; | ||||
1113 | } | ||||
1114 | (void) ReplacementDone; | ||||
1115 | assert(ReplacementDone && "Incomplete phi during partial rename"); | ||||
1116 | } else | ||||
1117 | Phi->addIncoming(IncomingVal, BB); | ||||
1118 | } | ||||
1119 | } | ||||
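// Illustration (added for exposition, using MemorySSA's printed form): if
// BB's renamed state ends at "2 = MemoryDef(1)" and a successor S starts with
// a MemoryPhi, the loop above either appends the operand {BB, 2} or, when
// RenameAllUses is set, overwrites the existing operand for BB, e.g. yielding
// "3 = MemoryPhi({BB,2},{Other,1})".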
1120 | |||||
1121 | /// Rename a single basic block into MemorySSA form. | ||||
1122 | /// Uses the standard SSA renaming algorithm. | ||||
1123 | /// \returns The new incoming value. | ||||
1124 | MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal, | ||||
1125 | bool RenameAllUses) { | ||||
1126 | auto It = PerBlockAccesses.find(BB); | ||||
1127 | // Skip most processing if the list is empty. | ||||
1128 | if (It != PerBlockAccesses.end()) { | ||||
1129 | AccessList *Accesses = It->second.get(); | ||||
1130 | for (MemoryAccess &L : *Accesses) { | ||||
1131 | if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) { | ||||
1132 | if (MUD->getDefiningAccess() == nullptr || RenameAllUses) | ||||
1133 | MUD->setDefiningAccess(IncomingVal); | ||||
1134 | if (isa<MemoryDef>(&L)) | ||||
1135 | IncomingVal = &L; | ||||
1136 | } else { | ||||
1137 | IncomingVal = &L; | ||||
1138 | } | ||||
1139 | } | ||||
1140 | } | ||||
1141 | return IncomingVal; | ||||
1142 | } | ||||
1143 | |||||
1144 | /// This is the standard SSA renaming algorithm. | ||||
1145 | /// | ||||
1146 | /// We walk the dominator tree in preorder, renaming accesses, and then filling | ||||
1147 | /// in phi nodes in our successors. | ||||
1148 | void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal, | ||||
1149 | SmallPtrSetImpl<BasicBlock *> &Visited, | ||||
1150 | bool SkipVisited, bool RenameAllUses) { | ||||
1151 | assert(Root && "Trying to rename accesses in an unreachable block"); | ||||
1152 | |||||
1153 | SmallVector<RenamePassData, 32> WorkStack; | ||||
1154 | // Skip everything if we already renamed this block and we are skipping. | ||||
1155 | // Note: You can't sink this into the if, because we need it to occur | ||||
1156 | // regardless of whether we skip blocks or not. | ||||
1157 | bool AlreadyVisited = !Visited.insert(Root->getBlock()).second; | ||||
1158 | if (SkipVisited && AlreadyVisited) | ||||
1159 | return; | ||||
1160 | |||||
1161 | IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses); | ||||
1162 | renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses); | ||||
1163 | WorkStack.push_back({Root, Root->begin(), IncomingVal}); | ||||
1164 | |||||
1165 | while (!WorkStack.empty()) { | ||||
1166 | DomTreeNode *Node = WorkStack.back().DTN; | ||||
1167 | DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt; | ||||
1168 | IncomingVal = WorkStack.back().IncomingVal; | ||||
1169 | |||||
1170 | if (ChildIt == Node->end()) { | ||||
1171 | WorkStack.pop_back(); | ||||
1172 | } else { | ||||
1173 | DomTreeNode *Child = *ChildIt; | ||||
1174 | ++WorkStack.back().ChildIt; | ||||
1175 | BasicBlock *BB = Child->getBlock(); | ||||
1176 | // Note: You can't sink this into the if, because we need it to occur | ||||
1177 | // regardless of whether we skip blocks or not. | ||||
1178 | AlreadyVisited = !Visited.insert(BB).second; | ||||
1179 | if (SkipVisited && AlreadyVisited) { | ||||
1180 | // We already visited this during our renaming, which can happen when | ||||
1181 | // being asked to rename multiple blocks. Figure out the incoming val, | ||||
1182 | // which is the last def. | ||||
1183 | // Incoming value can only change if there is a block def, and in that | ||||
1184 | // case, it's the last block def in the list. | ||||
1185 | if (auto *BlockDefs = getWritableBlockDefs(BB)) | ||||
1186 | IncomingVal = &*BlockDefs->rbegin(); | ||||
1187 | } else | ||||
1188 | IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses); | ||||
1189 | renameSuccessorPhis(BB, IncomingVal, RenameAllUses); | ||||
1190 | WorkStack.push_back({Child, Child->begin(), IncomingVal}); | ||||
1191 | } | ||||
1192 | } | ||||
1193 | } | ||||
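// Worked example (editorial, not in the source): for a diamond CFG
// entry -> {L, R} -> join where only L contains a store, preorder renaming
// gives entry the liveOnEntry value, creates "1 = MemoryDef(liveOnEntry)" in
// L, and fills join's phi as "2 = MemoryPhi({L,1},{R,liveOnEntry})"; accesses
// in join are then renamed with 2 as the incoming value.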
1194 | |||||
1195 | /// This handles unreachable block accesses by deleting phi nodes in | ||||
1196 | /// unreachable blocks, and marking all other unreachable MemoryAccess's as | ||||
1197 | /// being uses of the live on entry definition. | ||||
1198 | void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) { | ||||
1199 | assert(!DT->isReachableFromEntry(BB) && | ||||
1200 | "Reachable block found while handling unreachable blocks"); | ||||
1201 | |||||
1202 | // Make sure phi nodes in our reachable successors end up with a | ||||
1203 | // LiveOnEntryDef for our incoming edge, even though our block is forward | ||||
1204 | // unreachable. We could just disconnect these blocks from the CFG fully, | ||||
1205 | // but we do not right now. | ||||
1206 | for (const BasicBlock *S : successors(BB)) { | ||||
1207 | if (!DT->isReachableFromEntry(S)) | ||||
1208 | continue; | ||||
1209 | auto It = PerBlockAccesses.find(S); | ||||
1210 | // Rename the phi nodes in our successor block | ||||
1211 | if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front())) | ||||
1212 | continue; | ||||
1213 | AccessList *Accesses = It->second.get(); | ||||
1214 | auto *Phi = cast<MemoryPhi>(&Accesses->front()); | ||||
1215 | Phi->addIncoming(LiveOnEntryDef.get(), BB); | ||||
1216 | } | ||||
1217 | |||||
1218 | auto It = PerBlockAccesses.find(BB); | ||||
1219 | if (It == PerBlockAccesses.end()) | ||||
1220 | return; | ||||
1221 | |||||
1222 | auto &Accesses = It->second; | ||||
1223 | for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) { | ||||
1224 | auto Next = std::next(AI); | ||||
1225 | // If we have a phi, just remove it. We are going to replace all | ||||
1226 | // users with live on entry. | ||||
1227 | if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI)) | ||||
1228 | UseOrDef->setDefiningAccess(LiveOnEntryDef.get()); | ||||
1229 | else | ||||
1230 | Accesses->erase(AI); | ||||
1231 | AI = Next; | ||||
1232 | } | ||||
1233 | } | ||||
1234 | |||||
1235 | MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT) | ||||
1236 | : DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr), | ||||
1237 | SkipWalker(nullptr) { | ||||
1238 | // Build MemorySSA using a batch alias analysis. This reuses the internal | ||||
1239 | // state that AA collects during an alias()/getModRefInfo() call. This is | ||||
1240 | // safe because there are no CFG changes while building MemorySSA, and it can | ||||
1241 | // significantly reduce the time the compiler spends in AA, because we will | ||||
1242 | // make queries about all the instructions in the Function. | ||||
1243 | assert(AA && "No alias analysis?"); | ||||
1244 | BatchAAResults BatchAA(*AA); | ||||
1245 | buildMemorySSA(BatchAA); | ||||
1246 | // Intentionally leave AA as nullptr while building so we don't accidentally | ||||
1247 | // use non-batch AliasAnalysis. | ||||
1248 | this->AA = AA; | ||||
1249 | // Also create the walker here. | ||||
1250 | getWalker(); | ||||
1251 | } | ||||
1252 | |||||
1253 | MemorySSA::~MemorySSA() { | ||||
1254 | // Drop all our references | ||||
1255 | for (const auto &Pair : PerBlockAccesses) | ||||
1256 | for (MemoryAccess &MA : *Pair.second) | ||||
1257 | MA.dropAllReferences(); | ||||
1258 | } | ||||
1259 | |||||
1260 | MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) { | ||||
1261 | auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr)); | ||||
1262 | |||||
1263 | if (Res.second) | ||||
1264 | Res.first->second = std::make_unique<AccessList>(); | ||||
1265 | return Res.first->second.get(); | ||||
1266 | } | ||||
1267 | |||||
1268 | MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) { | ||||
1269 | auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr)); | ||||
1270 | |||||
1271 | if (Res.second) | ||||
1272 | Res.first->second = std::make_unique<DefsList>(); | ||||
1273 | return Res.first->second.get(); | ||||
1274 | } | ||||
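// Editorial note: both helpers above use DenseMap's insert-then-fill idiom:
// insert a null unique_ptr first, and allocate the list only when the key was
// actually new (Res.second), so each call costs a single hash lookup.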
1275 | |||||
1276 | namespace llvm { | ||||
1277 | |||||
1278 | /// This class is a batch walker of all MemoryUse's in the program, and points | ||||
1279 | /// their defining access at the thing that actually clobbers them. Because it | ||||
1280 | /// is a batch walker that touches everything, it does not operate like the | ||||
1281 | /// other walkers. This walker is basically performing a top-down SSA renaming | ||||
1282 | /// pass, where the version stack is used as the cache. This enables it to be | ||||
1283 | /// significantly more time and memory efficient than using the regular walker, | ||||
1284 | /// which is walking bottom-up. | ||||
1285 | class MemorySSA::OptimizeUses { | ||||
1286 | public: | ||||
1287 | OptimizeUses(MemorySSA *MSSA, CachingWalker *Walker, BatchAAResults *BAA, | ||||
1288 | DominatorTree *DT) | ||||
1289 | : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {} | ||||
1290 | |||||
1291 | void optimizeUses(); | ||||
1292 | |||||
1293 | private: | ||||
1294 | /// This represents where a given MemoryLocation is in the stack. | ||||
1295 | struct MemlocStackInfo { | ||||
1296 | // This essentially is keeping track of versions of the stack. Whenever | ||||
1297 | // the stack changes due to pushes or pops, these versions increase. | ||||
1298 | unsigned long StackEpoch; | ||||
1299 | unsigned long PopEpoch; | ||||
1300 | // This is the lower bound of places on the stack to check. It is equal to | ||||
1301 | // the place the last stack walk ended. | ||||
1302 | // Note: Correctness depends on this being initialized to 0, which DenseMap | ||||
1303 | // does. | ||||
1304 | unsigned long LowerBound; | ||||
1305 | const BasicBlock *LowerBoundBlock; | ||||
1306 | // This is where the last walk for this memory location ended. | ||||
1307 | unsigned long LastKill; | ||||
1308 | bool LastKillValid; | ||||
1309 | }; | ||||
1310 | |||||
1311 | void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &, | ||||
1312 | SmallVectorImpl<MemoryAccess *> &, | ||||
1313 | DenseMap<MemoryLocOrCall, MemlocStackInfo> &); | ||||
1314 | |||||
1315 | MemorySSA *MSSA; | ||||
1316 | CachingWalker *Walker; | ||||
1317 | BatchAAResults *AA; | ||||
1318 | DominatorTree *DT; | ||||
1319 | }; | ||||
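// Illustration of the version-stack renaming (editorial, not in the source):
// walking a single block containing
//   store i32 0, ptr %a      ; 1 = MemoryDef(liveOnEntry)
//   store i32 0, ptr %b      ; 2 = MemoryDef(1)
//   %x = load i32, ptr %a    ; MemoryUse(2) before optimization
// the stack is [liveOnEntry, 1, 2]. Scanning down from the top, 2 (the store
// to %b) does not clobber %a but 1 does, so the use is re-pointed:
//   %x = load i32, ptr %a    ; MemoryUse(1)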
1320 | |||||
1321 | } // end namespace llvm | ||||
1322 | |||||
1323 | /// Optimize the uses in a given block. This is basically the SSA renaming | ||||
1324 | /// algorithm, with one caveat: We are able to use a single stack for all | ||||
1325 | /// MemoryUses. This is because the set of *possible* reaching MemoryDefs is | ||||
1326 | /// the same for every MemoryUse. The *actual* clobbering MemoryDef is just | ||||
1327 | /// going to be some position in that stack of possible ones. | ||||
1328 | /// | ||||
1329 | /// For each MemoryLocation, we track the stack position it needs to check | ||||
1330 | /// from and where its last walk ended, because we only want to check the | ||||
1331 | /// things that changed since last time. The same MemoryLocation should | ||||
1332 | /// get clobbered by the same store (getModRefInfo does not use invariantness | ||||
1333 | /// or things like this; if it starts to, we can modify MemoryLocOrCall to | ||||
1334 | /// include the relevant data). | ||||
1335 | void MemorySSA::OptimizeUses::optimizeUsesInBlock( | ||||
1336 | const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch, | ||||
1337 | SmallVectorImpl<MemoryAccess *> &VersionStack, | ||||
1338 | DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) { | ||||
1339 | |||||
1340 | // If no accesses, nothing to do. | ||||
1341 | MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB); | ||||
1342 | if (Accesses == nullptr) | ||||
1343 | return; | ||||
1344 | |||||
1345 | // Pop everything that doesn't dominate the current block off the stack, and | ||||
1346 | // increment the PopEpoch to account for this. | ||||
1347 | while (true) { | ||||
1348 | assert( | ||||
1349 | !VersionStack.empty() && | ||||
1350 | "Version stack should have liveOnEntry sentinel dominating everything"); | ||||
1351 | BasicBlock *BackBlock = VersionStack.back()->getBlock(); | ||||
1352 | if (DT->dominates(BackBlock, BB)) | ||||
1353 | break; | ||||
1354 | while (VersionStack.back()->getBlock() == BackBlock) | ||||
1355 | VersionStack.pop_back(); | ||||
1356 | ++PopEpoch; | ||||
1357 | } | ||||
1358 | |||||
1359 | for (MemoryAccess &MA : *Accesses) { | ||||
1360 | auto *MU = dyn_cast<MemoryUse>(&MA); | ||||
1361 | if (!MU) { | ||||
1362 | VersionStack.push_back(&MA); | ||||
1363 | ++StackEpoch; | ||||
1364 | continue; | ||||
1365 | } | ||||
1366 | |||||
1367 | if (MU->isOptimized()) | ||||
1368 | continue; | ||||
1369 | |||||
1370 | if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) { | ||||
1371 | MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true); | ||||
1372 | continue; | ||||
1373 | } | ||||
1374 | |||||
1375 | MemoryLocOrCall UseMLOC(MU); | ||||
1376 | auto &LocInfo = LocStackInfo[UseMLOC]; | ||||
1377 | // If the pop epoch changed, it means we've removed stuff from the top of | ||||
1378 | // the stack due to changing blocks. We may have to reset the lower bound | ||||
1379 | // or last kill info. | ||||
1380 | if (LocInfo.PopEpoch != PopEpoch) { | ||||
1381 | LocInfo.PopEpoch = PopEpoch; | ||||
1382 | LocInfo.StackEpoch = StackEpoch; | ||||
1383 | // If the lower bound was in something that no longer dominates us, we | ||||
1384 | // have to reset it. | ||||
1385 | // We can't simply track stack size, because the stack may have had | ||||
1386 | // pushes/pops in the meantime. | ||||
1387 | // XXX: This is non-optimal, but is only slower in cases with heavily | ||||
1388 | // branching dominator trees. Getting the optimal number of queries would | ||||
1389 | // require making LowerBound and LastKill per-location stacks, popped | ||||
1390 | // until the top of the stack dominates us. This does not seem worth it | ||||
1391 | // ATM. A much cheaper optimization would be to always explore the | ||||
1392 | // deepest branch of the dominator tree first. This would guarantee the | ||||
1393 | // reset happens on the smallest set of blocks. | ||||
1394 | if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB && | ||||
1395 | !DT->dominates(LocInfo.LowerBoundBlock, BB)) { | ||||
1396 | // Reset the lower bound of things to check. | ||||
1397 | // TODO: Some day we should be able to reset to last kill, rather than | ||||
1398 | // 0. | ||||
1399 | LocInfo.LowerBound = 0; | ||||
1400 | LocInfo.LowerBoundBlock = VersionStack[0]->getBlock(); | ||||
1401 | LocInfo.LastKillValid = false; | ||||
1402 | } | ||||
1403 | } else if (LocInfo.StackEpoch != StackEpoch) { | ||||
1404 | // If all that has changed is the StackEpoch, we only have to check the | ||||
1405 | // new things on the stack, because we've checked everything before. In | ||||
1406 | // this case, the lower bound of things to check remains the same. | ||||
1407 | LocInfo.PopEpoch = PopEpoch; | ||||
1408 | LocInfo.StackEpoch = StackEpoch; | ||||
1409 | } | ||||
1410 | if (!LocInfo.LastKillValid) { | ||||
1411 | LocInfo.LastKill = VersionStack.size() - 1; | ||||
1412 | LocInfo.LastKillValid = true; | ||||
1413 | } | ||||
1414 | |||||
1415 | // At this point, we should have corrected LastKill and LowerBound to be | ||||
1416 | // in bounds. | ||||
1417 | assert(LocInfo.LowerBound < VersionStack.size() && | ||||
1418 | "Lower bound out of range"); | ||||
1419 | assert(LocInfo.LastKill < VersionStack.size() && | ||||
1420 | "Last kill info out of range"); | ||||
1421 | // In any case, the new upper bound is the top of the stack. | ||||
1422 | unsigned long UpperBound = VersionStack.size() - 1; | ||||
1423 | |||||
1424 | if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) { | ||||
1425 | LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " (" | ||||
1426 | << *(MU->getMemoryInst()) << ")" | ||||
1427 | << " because there are " | ||||
1428 | << UpperBound - LocInfo.LowerBound | ||||
1429 | << " stores to disambiguate\n"); | ||||
1430 | // Because we did not walk, LastKill is no longer valid, as this may | ||||
1431 | // have been a kill. | ||||
1432 | LocInfo.LastKillValid = false; | ||||
1433 | continue; | ||||
1434 | } | ||||
1435 | bool FoundClobberResult = false; | ||||
1436 | unsigned UpwardWalkLimit = MaxCheckLimit; | ||||
1437 | while (UpperBound > LocInfo.LowerBound) { | ||||
1438 | if (isa<MemoryPhi>(VersionStack[UpperBound])) { | ||||
1439 | // For phis, use the walker, see where we ended up, go there. | ||||
1440 | // The invariant.group handling in MemorySSA is ad-hoc and doesn't | ||||
1441 | // support updates, so don't use it to optimize uses. | ||||
1442 | MemoryAccess *Result = | ||||
1443 | Walker->getClobberingMemoryAccessWithoutInvariantGroup( | ||||
1444 | MU, *AA, UpwardWalkLimit); | ||||
1445 | // We are guaranteed to find it or something is wrong. | ||||
1446 | while (VersionStack[UpperBound] != Result) { | ||||
1447 | assert(UpperBound != 0); | ||||
1448 | --UpperBound; | ||||
1449 | } | ||||
1450 | FoundClobberResult = true; | ||||
1451 | break; | ||||
1452 | } | ||||
1453 | |||||
1454 | MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]); | ||||
1455 | if (instructionClobbersQuery(MD, MU, UseMLOC, *AA)) { | ||||
1456 | FoundClobberResult = true; | ||||
1457 | break; | ||||
1458 | } | ||||
1459 | --UpperBound; | ||||
1460 | } | ||||
1461 | |||||
1462 | // At the end of this loop, UpperBound is either a clobber or the lower | ||||
1463 | // bound. PHI walking may cause it to be < LowerBound, and in fact < LastKill. | ||||
1464 | if (FoundClobberResult || UpperBound < LocInfo.LastKill) { | ||||
1465 | MU->setDefiningAccess(VersionStack[UpperBound], true); | ||||
1466 | LocInfo.LastKill = UpperBound; | ||||
1467 | } else { | ||||
1468 | // Otherwise, we checked all the new ones, and now we know we can get to | ||||
1469 | // LastKill. | ||||
1470 | MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true); | ||||
1471 | } | ||||
1472 | LocInfo.LowerBound = VersionStack.size() - 1; | ||||
1473 | LocInfo.LowerBoundBlock = BB; | ||||
1474 | } | ||||
1475 | } | ||||
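// Editorial example of the caching above: a second load of the same location
// in the same block with no intervening MemoryDef sees unchanged StackEpoch
// and PopEpoch, so UpperBound == LocInfo.LowerBound, the while loop is
// skipped, and the use is pointed at VersionStack[LocInfo.LastKill] without
// issuing any new AA queries.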
1476 | |||||
1477 | /// Optimize uses to point to their actual clobbering definitions. | ||||
1478 | void MemorySSA::OptimizeUses::optimizeUses() { | ||||
1479 | SmallVector<MemoryAccess *, 16> VersionStack; | ||||
1480 | DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo; | ||||
1481 | VersionStack.push_back(MSSA->getLiveOnEntryDef()); | ||||
1482 | |||||
1483 | unsigned long StackEpoch = 1; | ||||
1484 | unsigned long PopEpoch = 1; | ||||
1485 | // We perform a non-recursive top-down dominator tree walk. | ||||
1486 | for (const auto *DomNode : depth_first(DT->getRootNode())) | ||||
1487 | optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack, | ||||
1488 | LocStackInfo); | ||||
1489 | } | ||||
1490 | |||||
1491 | void MemorySSA::placePHINodes( | ||||
1492 | const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) { | ||||
1493 | // Determine where our MemoryPhi's should go | ||||
1494 | ForwardIDFCalculator IDFs(*DT); | ||||
1495 | IDFs.setDefiningBlocks(DefiningBlocks); | ||||
1496 | SmallVector<BasicBlock *, 32> IDFBlocks; | ||||
1497 | IDFs.calculate(IDFBlocks); | ||||
1498 | |||||
1499 | // Now place MemoryPhi nodes. | ||||
1500 | for (auto &BB : IDFBlocks) | ||||
1501 | createMemoryPhi(BB); | ||||
1502 | } | ||||
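// Intuition (editorial): with defining blocks {L, R} in a diamond
// entry -> {L, R} -> join, the forward iterated dominance frontier is {join},
// so exactly one MemoryPhi is created, at the front of join.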
1503 | |||||
1504 | void MemorySSA::buildMemorySSA(BatchAAResults &BAA) { | ||||
1505 | // We create an access to represent "live on entry", for things like | ||||
1506 | // arguments or users of globals, where the memory they use is defined before | ||||
1507 | // the beginning of the function. We do not actually insert it into the IR. | ||||
1508 | // We do not define a live on exit for the immediate uses, and thus our | ||||
1509 | // semantics do *not* imply that something with no immediate uses can simply | ||||
1510 | // be removed. | ||||
1511 | BasicBlock &StartingPoint = F.getEntryBlock(); | ||||
1512 | LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr, | ||||
1513 | &StartingPoint, NextID++)); | ||||
1514 | |||||
1515 | // We maintain lists of memory accesses per block, trading memory for time. | ||||
1516 | // Alternatively, we could look up the memory access for every possible | ||||
1517 | // instruction in the stream. | ||||
1518 | SmallPtrSet<BasicBlock *, 32> DefiningBlocks; | ||||
1519 | // Go through each block, figure out where defs occur, and chain together all | ||||
1520 | // the accesses. | ||||
1521 | for (BasicBlock &B : F) { | ||||
1522 | bool InsertIntoDef = false; | ||||
1523 | AccessList *Accesses = nullptr; | ||||
1524 | DefsList *Defs = nullptr; | ||||
1525 | for (Instruction &I : B) { | ||||
1526 | MemoryUseOrDef *MUD = createNewAccess(&I, &BAA); | ||||
1527 | if (!MUD) | ||||
1528 | continue; | ||||
1529 | |||||
1530 | if (!Accesses) | ||||
1531 | Accesses = getOrCreateAccessList(&B); | ||||
1532 | Accesses->push_back(MUD); | ||||
1533 | if (isa<MemoryDef>(MUD)) { | ||||
1534 | InsertIntoDef = true; | ||||
1535 | if (!Defs) | ||||
1536 | Defs = getOrCreateDefsList(&B); | ||||
1537 | Defs->push_back(*MUD); | ||||
1538 | } | ||||
1539 | } | ||||
1540 | if (InsertIntoDef) | ||||
1541 | DefiningBlocks.insert(&B); | ||||
1542 | } | ||||
1543 | placePHINodes(DefiningBlocks); | ||||
1544 | |||||
1545 | // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get | ||||
1546 | // filled in with all blocks. | ||||
1547 | SmallPtrSet<BasicBlock *, 16> Visited; | ||||
1548 | renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited); | ||||
1549 | |||||
1550 | // Mark the uses in unreachable blocks as live on entry, so that they go | ||||
1551 | // somewhere. | ||||
1552 | for (auto &BB : F) | ||||
1553 | if (!Visited.count(&BB)) | ||||
1554 | markUnreachableAsLiveOnEntry(&BB); | ||||
1555 | } | ||||
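// Editorial summary: this is the classic three-phase SSA construction:
// (1) build per-block access/def lists, (2) place MemoryPhis on the IDF of
// the defining blocks, and (3) rename along the dominator tree. Forward-
// unreachable blocks are patched to liveOnEntry afterwards instead of being
// renamed.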
1556 | |||||
1557 | MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); } | ||||
1558 | |||||
1559 | MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() { | ||||
1560 | if (Walker) | ||||
1561 | return Walker.get(); | ||||
1562 | |||||
1563 | if (!WalkerBase) | ||||
1564 | WalkerBase = std::make_unique<ClobberWalkerBase>(this, DT); | ||||
1565 | |||||
1566 | Walker = std::make_unique<CachingWalker>(this, WalkerBase.get()); | ||||
1567 | return Walker.get(); | ||||
1568 | } | ||||
1569 | |||||
1570 | MemorySSAWalker *MemorySSA::getSkipSelfWalker() { | ||||
1571 | if (SkipWalker) | ||||
1572 | return SkipWalker.get(); | ||||
1573 | |||||
1574 | if (!WalkerBase) | ||||
1575 | WalkerBase = std::make_unique<ClobberWalkerBase>(this, DT); | ||||
1576 | |||||
1577 | SkipWalker = std::make_unique<SkipSelfWalker>(this, WalkerBase.get()); | ||||
1578 | return SkipWalker.get(); | ||||
1579 | } | ||||
1580 | |||||
1581 | |||||
1582 | // This is a helper function used by the creation routines. It places NewAccess | ||||
1583 | // into the access and defs lists for a given basic block, at the given | ||||
1584 | // insertion point. | ||||
1585 | void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess, | ||||
1586 | const BasicBlock *BB, | ||||
1587 | InsertionPlace Point) { | ||||
1588 | auto *Accesses = getOrCreateAccessList(BB); | ||||
1589 | if (Point == Beginning) { | ||||
1590 | // If it's a phi node, it goes first; otherwise, it goes after any phi | ||||
1591 | // nodes. | ||||
1592 | if (isa<MemoryPhi>(NewAccess)) { | ||||
1593 | Accesses->push_front(NewAccess); | ||||
1594 | auto *Defs = getOrCreateDefsList(BB); | ||||
1595 | Defs->push_front(*NewAccess); | ||||
1596 | } else { | ||||
1597 | auto AI = find_if_not( | ||||
1598 | *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); }); | ||||
1599 | Accesses->insert(AI, NewAccess); | ||||
1600 | if (!isa<MemoryUse>(NewAccess)) { | ||||
1601 | auto *Defs = getOrCreateDefsList(BB); | ||||
1602 | auto DI = find_if_not( | ||||
1603 | *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); }); | ||||
1604 | Defs->insert(DI, *NewAccess); | ||||
1605 | } | ||||
1606 | } | ||||
1607 | } else { | ||||
1608 | Accesses->push_back(NewAccess); | ||||
1609 | if (!isa<MemoryUse>(NewAccess)) { | ||||
1610 | auto *Defs = getOrCreateDefsList(BB); | ||||
1611 | Defs->push_back(*NewAccess); | ||||
1612 | } | ||||
1613 | } | ||||
1614 | BlockNumberingValid.erase(BB); | ||||
1615 | } | ||||
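// Example (editorial): createMemoryPhi below calls
// insertIntoListsForBlock(Phi, BB, Beginning) and takes the isa<MemoryPhi>
// branch, so the phi lands at the front of both the access list and the defs
// list of BB; a new MemoryDef inserted at Beginning would instead be placed
// just after any existing phi.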
1616 | |||||
1617 | void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB, | ||||
1618 | AccessList::iterator InsertPt) { | ||||
1619 | auto *Accesses = getWritableBlockAccesses(BB); | ||||
1620 | bool WasEnd = InsertPt == Accesses->end(); | ||||
1621 | Accesses->insert(AccessList::iterator(InsertPt), What); | ||||
1622 | if (!isa<MemoryUse>(What)) { | ||||
1623 | auto *Defs = getOrCreateDefsList(BB); | ||||
1624 | // If we got asked to insert at the end, we have an easy job, just shove it | ||||
1625 | // at the end. If we got asked to insert before an existing def, we also get | ||||
1626 | // an iterator. If we got asked to insert before a use, we have to hunt for | ||||
1627 | // the next def. | ||||
1628 | if (WasEnd) { | ||||
1629 | Defs->push_back(*What); | ||||
1630 | } else if (isa<MemoryDef>(InsertPt)) { | ||||
1631 | Defs->insert(InsertPt->getDefsIterator(), *What); | ||||
1632 | } else { | ||||
1633 | while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt)) | ||||
1634 | ++InsertPt; | ||||
1635 | // Either we found a def, or we are inserting at the end | ||||
1636 | if (InsertPt == Accesses->end()) | ||||
1637 | Defs->push_back(*What); | ||||
1638 | else | ||||
1639 | Defs->insert(InsertPt->getDefsIterator(), *What); | ||||
1640 | } | ||||
1641 | } | ||||
1642 | BlockNumberingValid.erase(BB); | ||||
1643 | } | ||||
1644 | |||||
1645 | void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) { | ||||
1646 | // Keep it in the lookup tables, remove from the lists | ||||
1647 | removeFromLists(What, false); | ||||
1648 | |||||
1649 | // Note that moving should implicitly invalidate the optimized state of a | ||||
1650 | // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a | ||||
1651 | // MemoryDef. | ||||
1652 | if (auto *MD = dyn_cast<MemoryDef>(What)) | ||||
1653 | MD->resetOptimized(); | ||||
1654 | What->setBlock(BB); | ||||
1655 | } | ||||
1656 | |||||
1657 | // Move What before Where in the IR. The end result is that What will belong to | ||||
1658 | // the right lists and have the right Block set, but will not otherwise be | ||||
1659 | // correct. It will not have the right defining access, and if it is a def, | ||||
1660 | // things below it will not properly be updated. | ||||
1661 | void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB, | ||||
1662 | AccessList::iterator Where) { | ||||
1663 | prepareForMoveTo(What, BB); | ||||
1664 | insertIntoListsBefore(What, BB, Where); | ||||
1665 | } | ||||
1666 | |||||
1667 | void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB, | ||||
1668 | InsertionPlace Point) { | ||||
1669 | if (isa<MemoryPhi>(What)) { | ||||
1670 | assert(Point == Beginning && | ||||
1671 | "Can only move a Phi at the beginning of the block"); | ||||
1672 | // Update lookup table entry | ||||
1673 | ValueToMemoryAccess.erase(What->getBlock()); | ||||
1674 | bool Inserted = ValueToMemoryAccess.insert({BB, What}).second; | ||||
1675 | (void)Inserted; | ||||
1676 | assert(Inserted && "Cannot move a Phi to a block that already has one"); | ||||
1677 | } | ||||
1678 | |||||
1679 | prepareForMoveTo(What, BB); | ||||
1680 | insertIntoListsForBlock(What, BB, Point); | ||||
1681 | } | ||||
1682 | |||||
1683 | MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) { | ||||
1684 | assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB"); | ||||
1685 | MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++); | ||||
1686 | // Phis are always placed at the front of the block. | ||||
1687 | insertIntoListsForBlock(Phi, BB, Beginning); | ||||
1688 | ValueToMemoryAccess[BB] = Phi; | ||||
1689 | return Phi; | ||||
1690 | } | ||||
1691 | |||||
1692 | MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I, | ||||
1693 | MemoryAccess *Definition, | ||||
1694 | const MemoryUseOrDef *Template, | ||||
1695 | bool CreationMustSucceed) { | ||||
1696 | assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI"); | ||||
1697 | MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template); | ||||
1698 | if (CreationMustSucceed) | ||||
1699 | assert(NewAccess != nullptr && "Tried to create a memory access for a " | ||||
1700 | "non-memory touching instruction"); | ||||
1701 | if (NewAccess) { | ||||
1702 | assert((!Definition || !isa<MemoryUse>(Definition)) && | ||||
1703 | "A use cannot be a defining access"); | ||||
1704 | NewAccess->setDefiningAccess(Definition); | ||||
1705 | } | ||||
1706 | return NewAccess; | ||||
1707 | } | ||||
1708 | |||||
1709 | // Return true if the instruction has ordering constraints. | ||||
1710 | // Note specifically that this only considers stores and loads | ||||
1711 | // because others are still considered ModRef by getModRefInfo. | ||||
1712 | static inline bool isOrdered(const Instruction *I) { | ||||
1713 | if (auto *SI = dyn_cast<StoreInst>(I)) { | ||||
1714 | if (!SI->isUnordered()) | ||||
1715 | return true; | ||||
1716 | } else if (auto *LI = dyn_cast<LoadInst>(I)) { | ||||
1717 | if (!LI->isUnordered()) | ||||
1718 | return true; | ||||
1719 | } | ||||
1720 | return false; | ||||
1721 | } | ||||
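// Editorial examples, assuming standard LLVM IR semantics: isOrdered returns
// true for "store atomic i32 0, ptr %p seq_cst, align 4" and for
// "%v = load volatile i32, ptr %p" (volatile accesses are not unordered),
// and false for a plain load or store.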
1722 | |||||
1723 | /// Helper function to create new memory accesses | ||||
1724 | template <typename AliasAnalysisType> | ||||
1725 | MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I, | ||||
1726 | AliasAnalysisType *AAP, | ||||
1727 | const MemoryUseOrDef *Template) { | ||||
1728 | // The assume intrinsic has a control dependency which we model by claiming | ||||
1729 | // that it writes arbitrarily. Debuginfo intrinsics may be considered | ||||
1730 | // clobbers when we have a nonstandard AA pipeline. Ignore these fake memory | ||||
1731 | // dependencies here. | ||||
1732 | // FIXME: Replace this special casing with a more accurate modelling of | ||||
1733 | // assume's control dependency. | ||||
1734 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { | ||||
1735 | switch (II->getIntrinsicID()) { | ||||
1736 | default: | ||||
1737 | break; | ||||
1738 | case Intrinsic::assume: | ||||
1739 | case Intrinsic::experimental_noalias_scope_decl: | ||||
1740 | case Intrinsic::pseudoprobe: | ||||
1741 | return nullptr; | ||||
1742 | } | ||||
1743 | } | ||||
1744 | |||||
1745 | // Using a nonstandard AA pipeline might leave us with unexpected modref | ||||
1746 | // results for I, so add a check to not model instructions that may not read | ||||
1747 | // from or write to memory. This is necessary for correctness. | ||||
1748 | if (!I->mayReadFromMemory() && !I->mayWriteToMemory()) | ||||
1749 | return nullptr; | ||||
1750 | |||||
1751 | bool Def, Use; | ||||
1752 | if (Template) { | ||||
1753 | Def = isa<MemoryDef>(Template); | ||||
1754 | Use = isa<MemoryUse>(Template); | ||||
1755 | #if !defined(NDEBUG) | ||||
1756 | ModRefInfo ModRef = AAP->getModRefInfo(I, std::nullopt); | ||||
1757 | bool DefCheck, UseCheck; | ||||
1758 | DefCheck = isModSet(ModRef) || isOrdered(I); | ||||
1759 | UseCheck = isRefSet(ModRef); | ||||
1760 | // Memory accesses should only be reduced, and cannot be increased, since | ||||
1761 | // AA might return better results after some transformations. | ||||
1762 | assert((Def == DefCheck || !DefCheck) && | ||||
1763 | "Memory accesses should only be reduced"); | ||||
1764 | if (!Def && Use != UseCheck) { | ||||
1765 | // New Access should not have more power than template access | ||||
1766 | assert(!UseCheck && "Invalid template"); | ||||
1767 | } | ||||
1768 | #endif | ||||
1769 | } else { | ||||
1770 | // Find out what effect this instruction has on memory. | ||||
1771 | ModRefInfo ModRef = AAP->getModRefInfo(I, std::nullopt); | ||||
1772 | // The isOrdered check is used to ensure that volatiles end up as defs | ||||
1773 | // (atomics end up as ModRef right now anyway). Until we separate the | ||||
1774 | // ordering chain from the memory chain, this enables people to see at least | ||||
1775 | // some relative ordering to volatiles. Note that getClobberingMemoryAccess | ||||
1776 | // will still give an answer that bypasses other volatile loads. TODO: | ||||
1777 | // Separate memory aliasing and ordering into two different chains so that | ||||
1778 | // we can precisely represent both "what memory will this read/write/is | ||||
1779 | // clobbered by" and "what instructions can I move this past". | ||||
1780 | Def = isModSet(ModRef) || isOrdered(I); | ||||
1781 | Use = isRefSet(ModRef); | ||||
1782 | } | ||||
1783 | |||||
1784 | // It's possible for an instruction to not access memory at all. During | ||||
1785 | // construction, we ignore such instructions. | ||||
1786 | if (!Def && !Use) | ||||
1787 | return nullptr; | ||||
1788 | |||||
1789 | MemoryUseOrDef *MUD; | ||||
1790 | if (Def) | ||||
1791 | MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++); | ||||
1792 | else | ||||
1793 | MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent()); | ||||
1794 | ValueToMemoryAccess[I] = MUD; | ||||
1795 | return MUD; | ||||
1796 | } | ||||
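// Illustration (editorial): for "store i32 0, ptr %p" getModRefInfo reports
// Mod, so a MemoryDef is created; "%v = load i32, ptr %p" reports Ref and
// yields a MemoryUse; "call void @llvm.assume(i1 %c)" returns nullptr up
// front despite its conservative ModRef modeling.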
1797 | |||||
1798 | /// Properly remove \p MA from all of MemorySSA's lookup tables. | ||||
1799 | void MemorySSA::removeFromLookups(MemoryAccess *MA) { | ||||
1800 | assert(MA->use_empty() && | ||||
1801 | "Trying to remove memory access that still has uses"); | ||||
1802 | BlockNumbering.erase(MA); | ||||
1803 | if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) | ||||
1804 | MUD->setDefiningAccess(nullptr); | ||||
1805 | // Invalidate our walker's cache if necessary | ||||
1806 | if (!isa<MemoryUse>(MA)) | ||||
1807 | getWalker()->invalidateInfo(MA); | ||||
1808 | |||||
1809 | Value *MemoryInst; | ||||
1810 | if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) | ||||
1811 | MemoryInst = MUD->getMemoryInst(); | ||||
1812 | else | ||||
1813 | MemoryInst = MA->getBlock(); | ||||
1814 | |||||
1815 | auto VMA = ValueToMemoryAccess.find(MemoryInst); | ||||
1816 | if (VMA->second == MA) | ||||
1817 | ValueToMemoryAccess.erase(VMA); | ||||
1818 | } | ||||
1819 | |||||
1820 | /// Properly remove \p MA from all of MemorySSA's lists. | ||||
1821 | /// | ||||
1822 | /// Because of the way the intrusive list and use lists work, it is important to | ||||
1823 | /// do removal in the right order. | ||||
1824 | /// ShouldDelete defaults to true, and will cause the memory access to also be | ||||
1825 | /// deleted, not just removed. | ||||
1826 | void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) { | ||||
1827 | BasicBlock *BB = MA->getBlock(); | ||||
1828 | // The access list owns the reference, so we erase it from the non-owning list | ||||
1829 | // first. | ||||
1830 | if (!isa<MemoryUse>(MA)) { | ||||
1831 | auto DefsIt = PerBlockDefs.find(BB); | ||||
1832 | std::unique_ptr<DefsList> &Defs = DefsIt->second; | ||||
1833 | Defs->remove(*MA); | ||||
1834 | if (Defs->empty()) | ||||
1835 | PerBlockDefs.erase(DefsIt); | ||||
1836 | } | ||||
1837 | |||||
1838 | // The erase call here will delete it. If we don't want it deleted, we call | ||||
1839 | // remove instead. | ||||
1840 | auto AccessIt = PerBlockAccesses.find(BB); | ||||
1841 | std::unique_ptr<AccessList> &Accesses = AccessIt->second; | ||||
1842 | if (ShouldDelete) | ||||
1843 | Accesses->erase(MA); | ||||
1844 | else | ||||
1845 | Accesses->remove(MA); | ||||
1846 | |||||
1847 | if (Accesses->empty()) { | ||||
1848 | PerBlockAccesses.erase(AccessIt); | ||||
1849 | BlockNumberingValid.erase(BB); | ||||
1850 | } | ||||
1851 | } | ||||
1852 | |||||
1853 | void MemorySSA::print(raw_ostream &OS) const { | ||||
1854 | MemorySSAAnnotatedWriter Writer(this); | ||||
1855 | F.print(OS, &Writer); | ||||
1856 | } | ||||
1857 | |||||
1858 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | ||||
1859 | LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); } | ||||
1860 | #endif | ||||
1861 | |||||
1862 | void MemorySSA::verifyMemorySSA(VerificationLevel VL) const { | ||||
1863 | #if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS) | ||||
1864 | VL = VerificationLevel::Full; | ||||
1865 | #endif | ||||
1866 | |||||
1867 | #ifndef NDEBUG | ||||
1868 | verifyOrderingDominationAndDefUses(F, VL); | ||||
1869 | verifyDominationNumbers(F); | ||||
1870 | if (VL == VerificationLevel::Full) | ||||
1871 | verifyPrevDefInPhis(F); | ||||
1872 | #endif | ||||
1873 | // Previously, the verification also checked that the clobberingAccess | ||||
1874 | // cached by MemorySSA is the same as the clobberingAccess found by a later | ||||
1875 | // query to AA. This does not hold true in general due to the current | ||||
1876 | // fragility of BasicAA, which has arbitrary caps on the things it analyzes | ||||
1877 | // before giving up. As a result, transformations that are correct will lead | ||||
1878 | // to BasicAA returning different alias answers before and after the | ||||
1879 | // transformation. Invalidating MemorySSA is not an option, as BasicAA's | ||||
1880 | // results can be so unstable that, in the worst case, we'd need to rebuild | ||||
1881 | // MemorySSA from scratch after every transformation, which defeats the | ||||
1882 | // purpose of using it. For such an example, see test4 added in D51960. | ||||
1883 | } | ||||
1884 | |||||
1885 | void MemorySSA::verifyPrevDefInPhis(Function &F) const { | ||||
1886 | for (const BasicBlock &BB : F) { | ||||
1887 | if (MemoryPhi *Phi = getMemoryAccess(&BB)) { | ||||
1888 | for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) { | ||||
1889 | auto *Pred = Phi->getIncomingBlock(I); | ||||
1890 | auto *IncAcc = Phi->getIncomingValue(I); | ||||
1891 | // If Pred has no unreachable predecessors, get the last def by | ||||
1892 | // looking at IDoms. If, while walking IDoms, any of these has an | ||||
1893 | // unreachable predecessor, then the incoming def can be any access. | ||||
1894 | if (auto *DTNode = DT->getNode(Pred)) { | ||||
1895 | while (DTNode) { | ||||
1896 | if (auto *DefList = getBlockDefs(DTNode->getBlock())) { | ||||
1897 | auto *LastAcc = &*(--DefList->end()); | ||||
1898 | assert(LastAcc == IncAcc && | ||||
1899 | "Incorrect incoming access into phi."); | ||||
1900 | (void)IncAcc; | ||||
1901 | (void)LastAcc; | ||||
1902 | break; | ||||
1903 | } | ||||
1904 | DTNode = DTNode->getIDom(); | ||||
1905 | } | ||||
1906 | } else { | ||||
1907 | // If Pred has unreachable predecessors, but has at least a Def, the | ||||
1908 | // incoming access can be the last Def in Pred, or it could have been | ||||
1909 | // optimized to LoE. After an update, though, the LoE may have been | ||||
1910 | // replaced by another access, so IncAcc may be any access. | ||||
1911 | // If Pred has unreachable predecessors and no Defs, incoming access | ||||
1912 | // should be LoE; However, after an update, it may be any access. | ||||
1913 | } | ||||
1914 | } | ||||
1915 | } | ||||
1916 | } | ||||
1917 | } | ||||
1918 | |||||
1919 | /// Verify that all of the blocks we believe to have valid domination numbers | ||||
1920 | /// actually have valid domination numbers. | ||||
1921 | void MemorySSA::verifyDominationNumbers(const Function &F) const { | ||||
1922 | if (BlockNumberingValid.empty()) | ||||
1923 | return; | ||||
1924 | |||||
1925 | SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid; | ||||
1926 | for (const BasicBlock &BB : F) { | ||||
1927 | if (!ValidBlocks.count(&BB)) | ||||
1928 | continue; | ||||
1929 | |||||
1930 | ValidBlocks.erase(&BB); | ||||
1931 | |||||
1932 | const AccessList *Accesses = getBlockAccesses(&BB); | ||||
1933 | // It's correct to say an empty block has valid numbering. | ||||
1934 | if (!Accesses) | ||||
1935 | continue; | ||||
1936 | |||||
1937 | // Block numbering starts at 1. | ||||
1938 | unsigned long LastNumber = 0; | ||||
1939 | for (const MemoryAccess &MA : *Accesses) { | ||||
1940 | auto ThisNumberIter = BlockNumbering.find(&MA); | ||||
1941 | assert(ThisNumberIter != BlockNumbering.end() &&
1942 |        "MemoryAccess has no domination number in a valid block!");
1943 | |||||
1944 | unsigned long ThisNumber = ThisNumberIter->second; | ||||
1945 | assert(ThisNumber > LastNumber &&
1946 |        "Domination numbers should be strictly increasing!");
1947 | (void)LastNumber; | ||||
1948 | LastNumber = ThisNumber; | ||||
1949 | } | ||||
1950 | } | ||||
1951 | |||||
1952 | assert(ValidBlocks.empty() &&
1953 |        "All valid BasicBlocks should exist in F -- dangling pointers?");
1954 | } | ||||
1955 | |||||
1956 | /// Verify ordering: the order and existence of MemoryAccesses matches the | ||||
1957 | /// order and existence of memory affecting instructions. | ||||
1958 | /// Verify domination: each definition dominates all of its uses. | ||||
1959 | /// Verify def-uses: the immediate use information - walk all the memory
1960 | /// accesses and verify that, for each use, it appears in the appropriate
1961 | /// def's use list.
1962 | void MemorySSA::verifyOrderingDominationAndDefUses(Function &F, | ||||
1963 | VerificationLevel VL) const { | ||||
1964 | // Walk all the blocks, comparing what the lookups think and what the access | ||||
1965 | // lists think, as well as the order in the blocks vs the order in the access | ||||
1966 | // lists. | ||||
1967 | SmallVector<MemoryAccess *, 32> ActualAccesses; | ||||
1968 | SmallVector<MemoryAccess *, 32> ActualDefs; | ||||
1969 | for (BasicBlock &B : F) { | ||||
1970 | const AccessList *AL = getBlockAccesses(&B); | ||||
1971 | const auto *DL = getBlockDefs(&B); | ||||
1972 | MemoryPhi *Phi = getMemoryAccess(&B); | ||||
1973 | if (Phi) {
1974 | // Verify ordering. | ||||
1975 | ActualAccesses.push_back(Phi); | ||||
1976 | ActualDefs.push_back(Phi); | ||||
1977 | // Verify domination | ||||
1978 | for (const Use &U : Phi->uses()) { | ||||
1979 | assert(dominates(Phi, U) && "Memory PHI does not dominate its uses");
1980 | (void)U; | ||||
1981 | } | ||||
1982 | // Verify def-uses for full verify. | ||||
1983 | if (VL == VerificationLevel::Full) { | ||||
1984 | assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
1985 |            pred_begin(&B), pred_end(&B))) &&
1986 |        "Incomplete MemoryPhi Node");
1987 | for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) { | ||||
1988 | verifyUseInDefs(Phi->getIncomingValue(I), Phi); | ||||
1989 | assert(is_contained(predecessors(&B), Phi->getIncomingBlock(I)) &&
1990 |        "Incoming phi block not a block predecessor");
1991 | } | ||||
1992 | } | ||||
1993 | } | ||||
1994 | |||||
1995 | for (Instruction &I : B) { | ||||
1996 | MemoryUseOrDef *MA = getMemoryAccess(&I); | ||||
1997 | assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
1998 |        "We have memory affecting instructions "
1999 |        "in this block but they are not in the "
2000 |        "access list or defs list");
2001 | if (MA) { | ||||
2002 | // Verify ordering. | ||||
2003 | ActualAccesses.push_back(MA); | ||||
2004 | if (MemoryAccess *MD = dyn_cast<MemoryDef>(MA)) { | ||||
2005 | // Verify ordering. | ||||
2006 | ActualDefs.push_back(MA); | ||||
2007 | // Verify domination. | ||||
2008 | for (const Use &U : MD->uses()) { | ||||
2009 | assert(dominates(MD, U) &&
2010 |        "Memory Def does not dominate its uses");
2011 | (void)U; | ||||
2012 | } | ||||
2013 | } | ||||
2014 | // Verify def-uses for full verify. | ||||
2015 | if (VL == VerificationLevel::Full) | ||||
2016 | verifyUseInDefs(MA->getDefiningAccess(), MA); | ||||
2017 | } | ||||
2018 | } | ||||
2019 | // Either we hit the assert, really have no accesses, or we have both | ||||
2020 | // accesses and an access list. Same with defs. | ||||
2021 | if (!AL && !DL) | ||||
2022 | continue; | ||||
2023 | // Verify ordering. | ||||
2024 | assert(AL->size() == ActualAccesses.size() &&
2025 |        "We don't have the same number of accesses in the block as on the "
2026 |        "access list");
2027 | assert((DL || ActualDefs.size() == 0) &&
2028 |        "Either we should have a defs list, or we should have no defs");
2029 | assert((!DL || DL->size() == ActualDefs.size()) &&
2030 |        "We don't have the same number of defs in the block as on the "
2031 |        "def list");
2032 | auto ALI = AL->begin(); | ||||
2033 | auto AAI = ActualAccesses.begin(); | ||||
2034 | while (ALI != AL->end() && AAI != ActualAccesses.end()) { | ||||
2035 | assert(&*ALI == *AAI && "Not the same accesses in the same order");
2036 | ++ALI; | ||||
2037 | ++AAI; | ||||
2038 | } | ||||
2039 | ActualAccesses.clear(); | ||||
2040 | if (DL) { | ||||
2041 | auto DLI = DL->begin(); | ||||
2042 | auto ADI = ActualDefs.begin(); | ||||
2043 | while (DLI != DL->end() && ADI != ActualDefs.end()) { | ||||
2044 | assert(&*DLI == *ADI && "Not the same defs in the same order");
2045 | ++DLI; | ||||
2046 | ++ADI; | ||||
2047 | } | ||||
2048 | } | ||||
2049 | ActualDefs.clear(); | ||||
2050 | } | ||||
2051 | } | ||||
2052 | |||||
2053 | /// Verify the def-use lists in MemorySSA, by verifying that \p Use | ||||
2054 | /// appears in the use list of \p Def. | ||||
2055 | void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const { | ||||
2056 | // The live on entry use may cause us to get a NULL def here | ||||
2057 | if (!Def) | ||||
2058 | assert(isLiveOnEntryDef(Use) &&
2059 |        "Null def but use does not point to live on entry def");
2060 | else | ||||
2061 | assert(is_contained(Def->users(), Use) &&
2062 |        "Did not find use in def's use list");
2063 | } | ||||
2064 | |||||
2065 | /// Perform a local numbering on blocks so that instruction ordering can be | ||||
2066 | /// determined in constant time. | ||||
2067 | /// TODO: We currently just number in order. If we numbered by N, we could | ||||
2068 | /// allow at least N-1 sequences of insertBefore or insertAfter (and at least | ||||
2069 | /// log2(N) sequences of mixed before and after) without needing to invalidate | ||||
2070 | /// the numbering. | ||||
2071 | void MemorySSA::renumberBlock(const BasicBlock *B) const { | ||||
2072 | // The pre-increment ensures the numbers really start at 1. | ||||
2073 | unsigned long CurrentNumber = 0; | ||||
2074 | const AccessList *AL = getBlockAccesses(B); | ||||
2075 | assert(AL != nullptr && "Asking to renumber an empty block");
2076 | for (const auto &I : *AL) | ||||
2077 | BlockNumbering[&I] = ++CurrentNumber; | ||||
2078 | BlockNumberingValid.insert(B); | ||||
2079 | } | ||||
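// A sketch of the gap-based numbering the TODO above describes; Stride is a
// hypothetical parameter, not part of MemorySSA. Numbering by Stride leaves
// Stride - 1 free slots between adjacent accesses, so an insertion can take a
// midpoint number without invalidating the whole block's numbering:
//
//   unsigned long Stride = 8, CurrentNumber = 0;
//   for (const auto &I : *AL)
//     BlockNumbering[&I] = (CurrentNumber += Stride);
//   // Inserting New between A and B stays valid while a free number exists:
//   //   BlockNumbering[New] = (BlockNumbering[A] + BlockNumbering[B]) / 2;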
2080 | |||||
2081 | /// Determine, for two memory accesses in the same block, | ||||
2082 | /// whether \p Dominator dominates \p Dominatee. | ||||
2083 | /// \returns True if \p Dominator dominates \p Dominatee. | ||||
2084 | bool MemorySSA::locallyDominates(const MemoryAccess *Dominator, | ||||
2085 | const MemoryAccess *Dominatee) const { | ||||
2086 | const BasicBlock *DominatorBlock = Dominator->getBlock(); | ||||
2087 | |||||
2088 | assert((DominatorBlock == Dominatee->getBlock()) &&
2089 |        "Asking for local domination when accesses are in different blocks!");
2090 | // A node dominates itself. | ||||
2091 | if (Dominatee == Dominator) | ||||
2092 | return true; | ||||
2093 | |||||
2094 | // When Dominatee is defined on function entry, it is not dominated by another | ||||
2095 | // memory access. | ||||
2096 | if (isLiveOnEntryDef(Dominatee)) | ||||
2097 | return false; | ||||
2098 | |||||
2099 | // When Dominator is defined on function entry, it dominates the other memory | ||||
2100 | // access. | ||||
2101 | if (isLiveOnEntryDef(Dominator)) | ||||
2102 | return true; | ||||
2103 | |||||
2104 | if (!BlockNumberingValid.count(DominatorBlock)) | ||||
2105 | renumberBlock(DominatorBlock); | ||||
2106 | |||||
2107 | unsigned long DominatorNum = BlockNumbering.lookup(Dominator); | ||||
2108 | // All numbers start with 1 | ||||
2109 | assert(DominatorNum != 0 && "Block was not numbered properly");
2110 | unsigned long DominateeNum = BlockNumbering.lookup(Dominatee); | ||||
2111 | assert(DominateeNum != 0 && "Block was not numbered properly");
2112 | return DominatorNum < DominateeNum; | ||||
2113 | } | ||||
2114 | |||||
2115 | bool MemorySSA::dominates(const MemoryAccess *Dominator, | ||||
2116 | const MemoryAccess *Dominatee) const { | ||||
2117 | if (Dominator == Dominatee) | ||||
2118 | return true; | ||||
2119 | |||||
2120 | if (isLiveOnEntryDef(Dominatee)) | ||||
2121 | return false; | ||||
2122 | |||||
2123 | if (Dominator->getBlock() != Dominatee->getBlock()) | ||||
2124 | return DT->dominates(Dominator->getBlock(), Dominatee->getBlock()); | ||||
2125 | return locallyDominates(Dominator, Dominatee); | ||||
2126 | } | ||||
2127 | |||||
2128 | bool MemorySSA::dominates(const MemoryAccess *Dominator, | ||||
2129 | const Use &Dominatee) const { | ||||
2130 | if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) { | ||||
2131 | BasicBlock *UseBB = MP->getIncomingBlock(Dominatee); | ||||
2132 | // The def must dominate the incoming block of the phi. | ||||
2133 | if (UseBB != Dominator->getBlock()) | ||||
2134 | return DT->dominates(Dominator->getBlock(), UseBB); | ||||
2135 | // If the UseBB and the DefBB are the same, compare locally. | ||||
2136 | return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee)); | ||||
2137 | } | ||||
2138 | // If it's not a PHI node use, the normal dominates can already handle it. | ||||
2139 | return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser())); | ||||
2140 | } | ||||
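// Minimal usage sketch for the overloads above (an MSSA instance and a def D
// are assumed): for a use sitting on a MemoryPhi, domination is checked
// against the phi's incoming block rather than the phi's own block, which the
// Use overload handles transparently:
//
//   for (const Use &U : D->uses())
//     assert(MSSA.dominates(D, U) && "defs must dominate their uses");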
2141 | |||||
2142 | void MemorySSA::ensureOptimizedUses() { | ||||
2143 | if (IsOptimized) | ||||
2144 | return; | ||||
2145 | |||||
2146 | BatchAAResults BatchAA(*AA); | ||||
2147 | ClobberWalkerBase WalkerBase(this, DT); | ||||
2148 | CachingWalker WalkerLocal(this, &WalkerBase); | ||||
2149 | OptimizeUses(this, &WalkerLocal, &BatchAA, DT).optimizeUses(); | ||||
2150 | IsOptimized = true; | ||||
2151 | } | ||||
2152 | |||||
2153 | void MemoryAccess::print(raw_ostream &OS) const { | ||||
2154 | switch (getValueID()) { | ||||
2155 | case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS); | ||||
2156 | case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS); | ||||
2157 | case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS); | ||||
2158 | } | ||||
2159 | llvm_unreachable("invalid value id");
2160 | } | ||||
2161 | |||||
2162 | void MemoryDef::print(raw_ostream &OS) const { | ||||
2163 | MemoryAccess *UO = getDefiningAccess(); | ||||
2164 | |||||
2165 | auto printID = [&OS](MemoryAccess *A) { | ||||
2166 | if (A && A->getID()) | ||||
2167 | OS << A->getID(); | ||||
2168 | else | ||||
2169 | OS << LiveOnEntryStr; | ||||
2170 | }; | ||||
2171 | |||||
2172 | OS << getID() << " = MemoryDef("; | ||||
2173 | printID(UO); | ||||
2174 | OS << ")"; | ||||
2175 | |||||
2176 | if (isOptimized()) { | ||||
2177 | OS << "->"; | ||||
2178 | printID(getOptimized()); | ||||
2179 | } | ||||
2180 | } | ||||
2181 | |||||
2182 | void MemoryPhi::print(raw_ostream &OS) const { | ||||
2183 | ListSeparator LS(","); | ||||
2184 | OS << getID() << " = MemoryPhi("; | ||||
2185 | for (const auto &Op : operands()) { | ||||
2186 | BasicBlock *BB = getIncomingBlock(Op); | ||||
2187 | MemoryAccess *MA = cast<MemoryAccess>(Op); | ||||
2188 | |||||
2189 | OS << LS << '{'; | ||||
2190 | if (BB->hasName()) | ||||
2191 | OS << BB->getName(); | ||||
2192 | else | ||||
2193 | BB->printAsOperand(OS, false); | ||||
2194 | OS << ','; | ||||
2195 | if (unsigned ID = MA->getID()) | ||||
2196 | OS << ID; | ||||
2197 | else | ||||
2198 | OS << LiveOnEntryStr; | ||||
2199 | OS << '}'; | ||||
2200 | } | ||||
2201 | OS << ')'; | ||||
2202 | } | ||||
2203 | |||||
2204 | void MemoryUse::print(raw_ostream &OS) const { | ||||
2205 | MemoryAccess *UO = getDefiningAccess(); | ||||
2206 | OS << "MemoryUse("; | ||||
2207 | if (UO && UO->getID()) | ||||
2208 | OS << UO->getID(); | ||||
2209 | else | ||||
2210 | OS << LiveOnEntryStr; | ||||
2211 | OS << ')'; | ||||
2212 | } | ||||
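// Sample output from the three printers above (IDs are illustrative):
//
//   2 = MemoryDef(1)                        ; def whose defining access is 1
//   2 = MemoryDef(1)->liveOnEntry           ; same def with a cached clobber
//   3 = MemoryPhi({entry,1},{if.then,2})
//   MemoryUse(liveOnEntry)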
2213 | |||||
2214 | void MemoryAccess::dump() const { | ||||
2215 | // Cannot completely remove virtual function even in release mode. | ||||
2216 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | ||||
2217 | print(dbgs()); | ||||
2218 | dbgs() << "\n"; | ||||
2219 | #endif | ||||
2220 | } | ||||
2221 | |||||
2222 | char MemorySSAPrinterLegacyPass::ID = 0; | ||||
2223 | |||||
2224 | MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) { | ||||
2225 | initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry()); | ||||
2226 | } | ||||
2227 | |||||
2228 | void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const { | ||||
2229 | AU.setPreservesAll(); | ||||
2230 | AU.addRequired<MemorySSAWrapperPass>(); | ||||
2231 | } | ||||
2232 | |||||
2233 | class DOTFuncMSSAInfo { | ||||
2234 | private: | ||||
2235 | const Function &F; | ||||
2236 | MemorySSAAnnotatedWriter MSSAWriter; | ||||
2237 | |||||
2238 | public: | ||||
2239 | DOTFuncMSSAInfo(const Function &F, MemorySSA &MSSA) | ||||
2240 | : F(F), MSSAWriter(&MSSA) {} | ||||
2241 | |||||
2242 | const Function *getFunction() { return &F; } | ||||
2243 | MemorySSAAnnotatedWriter &getWriter() { return MSSAWriter; } | ||||
2244 | }; | ||||
2245 | |||||
2246 | namespace llvm { | ||||
2247 | |||||
2248 | template <> | ||||
2249 | struct GraphTraits<DOTFuncMSSAInfo *> : public GraphTraits<const BasicBlock *> { | ||||
2250 | static NodeRef getEntryNode(DOTFuncMSSAInfo *CFGInfo) { | ||||
2251 | return &(CFGInfo->getFunction()->getEntryBlock()); | ||||
2252 | } | ||||
2253 | |||||
2254 | // nodes_iterator/begin/end - Allow iteration over all nodes in the graph | ||||
2255 | using nodes_iterator = pointer_iterator<Function::const_iterator>; | ||||
2256 | |||||
2257 | static nodes_iterator nodes_begin(DOTFuncMSSAInfo *CFGInfo) { | ||||
2258 | return nodes_iterator(CFGInfo->getFunction()->begin()); | ||||
2259 | } | ||||
2260 | |||||
2261 | static nodes_iterator nodes_end(DOTFuncMSSAInfo *CFGInfo) { | ||||
2262 | return nodes_iterator(CFGInfo->getFunction()->end()); | ||||
2263 | } | ||||
2264 | |||||
2265 | static size_t size(DOTFuncMSSAInfo *CFGInfo) { | ||||
2266 | return CFGInfo->getFunction()->size(); | ||||
2267 | } | ||||
2268 | }; | ||||
2269 | |||||
2270 | template <> | ||||
2271 | struct DOTGraphTraits<DOTFuncMSSAInfo *> : public DefaultDOTGraphTraits { | ||||
2272 | |||||
2273 | DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {} | ||||
2274 | |||||
2275 | static std::string getGraphName(DOTFuncMSSAInfo *CFGInfo) { | ||||
2276 | return "MSSA CFG for '" + CFGInfo->getFunction()->getName().str() + | ||||
2277 | "' function"; | ||||
2278 | } | ||||
2279 | |||||
2280 | std::string getNodeLabel(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo) { | ||||
2281 | return DOTGraphTraits<DOTFuncInfo *>::getCompleteNodeLabel( | ||||
2282 | Node, nullptr, | ||||
2283 | [CFGInfo](raw_string_ostream &OS, const BasicBlock &BB) -> void { | ||||
2284 | BB.print(OS, &CFGInfo->getWriter(), true, true); | ||||
2285 | }, | ||||
2286 | [](std::string &S, unsigned &I, unsigned Idx) -> void { | ||||
2287 | std::string Str = S.substr(I, Idx - I); | ||||
2288 | StringRef SR = Str; | ||||
2289 | if (SR.count(" = MemoryDef(") || SR.count(" = MemoryPhi(") || | ||||
2290 | SR.count("MemoryUse(")) | ||||
2291 | return; | ||||
2292 | DOTGraphTraits<DOTFuncInfo *>::eraseComment(S, I, Idx); | ||||
2293 | }); | ||||
2294 | } | ||||
2295 | |||||
2296 | static std::string getEdgeSourceLabel(const BasicBlock *Node, | ||||
2297 | const_succ_iterator I) { | ||||
2298 | return DOTGraphTraits<DOTFuncInfo *>::getEdgeSourceLabel(Node, I); | ||||
2299 | } | ||||
2300 | |||||
2301 | /// Edge attributes (such as raw PGO branch weights) are intentionally omitted.
2302 | std::string getEdgeAttributes(const BasicBlock *Node, const_succ_iterator I, | ||||
2303 | DOTFuncMSSAInfo *CFGInfo) { | ||||
2304 | return ""; | ||||
2305 | } | ||||
2306 | |||||
2307 | std::string getNodeAttributes(const BasicBlock *Node, | ||||
2308 | DOTFuncMSSAInfo *CFGInfo) { | ||||
2309 | return getNodeLabel(Node, CFGInfo).find(';') != std::string::npos | ||||
2310 | ? "style=filled, fillcolor=lightpink" | ||||
2311 | : ""; | ||||
2312 | } | ||||
2313 | }; | ||||
2314 | |||||
2315 | } // namespace llvm | ||||
2316 | |||||
2317 | bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) { | ||||
2318 | auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA(); | ||||
2319 | MSSA.ensureOptimizedUses(); | ||||
2320 | if (DotCFGMSSA != "") { | ||||
2321 | DOTFuncMSSAInfo CFGInfo(F, MSSA); | ||||
2322 | WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA); | ||||
2323 | } else | ||||
2324 | MSSA.print(dbgs()); | ||||
2325 | |||||
2326 | if (VerifyMemorySSA) | ||||
2327 | MSSA.verifyMemorySSA(); | ||||
2328 | return false; | ||||
2329 | } | ||||
2330 | |||||
2331 | AnalysisKey MemorySSAAnalysis::Key; | ||||
2332 | |||||
2333 | MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F, | ||||
2334 | FunctionAnalysisManager &AM) { | ||||
2335 | auto &DT = AM.getResult<DominatorTreeAnalysis>(F); | ||||
2336 | auto &AA = AM.getResult<AAManager>(F); | ||||
2337 | return MemorySSAAnalysis::Result(std::make_unique<MemorySSA>(F, &AA, &DT)); | ||||
2338 | } | ||||
2339 | |||||
2340 | bool MemorySSAAnalysis::Result::invalidate( | ||||
2341 | Function &F, const PreservedAnalyses &PA, | ||||
2342 | FunctionAnalysisManager::Invalidator &Inv) { | ||||
2343 | auto PAC = PA.getChecker<MemorySSAAnalysis>(); | ||||
2344 | return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || | ||||
2345 | Inv.invalidate<AAManager>(F, PA) || | ||||
2346 | Inv.invalidate<DominatorTreeAnalysis>(F, PA); | ||||
2347 | } | ||||
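// Consequence of the rule above: a transformation that wants to keep this
// analysis alive must preserve MemorySSA *and* the analyses it depends on. A
// minimal sketch of such a pass's return value (assuming nothing else was
// touched):
//
//   PreservedAnalyses PA;
//   PA.preserve<MemorySSAAnalysis>();
//   PA.preserve<DominatorTreeAnalysis>();
//   PA.preserve<AAManager>();
//   return PA;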
2348 | |||||
2349 | PreservedAnalyses MemorySSAPrinterPass::run(Function &F, | ||||
2350 | FunctionAnalysisManager &AM) { | ||||
2351 | auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA(); | ||||
2352 | MSSA.ensureOptimizedUses(); | ||||
2353 | if (DotCFGMSSA != "") { | ||||
2354 | DOTFuncMSSAInfo CFGInfo(F, MSSA); | ||||
2355 | WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA); | ||||
2356 | } else { | ||||
2357 | OS << "MemorySSA for function: " << F.getName() << "\n"; | ||||
2358 | MSSA.print(OS); | ||||
2359 | } | ||||
2360 | |||||
2361 | return PreservedAnalyses::all(); | ||||
2362 | } | ||||
2363 | |||||
2364 | PreservedAnalyses MemorySSAWalkerPrinterPass::run(Function &F, | ||||
2365 | FunctionAnalysisManager &AM) { | ||||
2366 | auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA(); | ||||
2367 | OS << "MemorySSA (walker) for function: " << F.getName() << "\n"; | ||||
2368 | MemorySSAWalkerAnnotatedWriter Writer(&MSSA); | ||||
2369 | F.print(OS, &Writer); | ||||
2370 | |||||
2371 | return PreservedAnalyses::all(); | ||||
2372 | } | ||||
2373 | |||||
2374 | PreservedAnalyses MemorySSAVerifierPass::run(Function &F, | ||||
2375 | FunctionAnalysisManager &AM) { | ||||
2376 | AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA(); | ||||
2377 | |||||
2378 | return PreservedAnalyses::all(); | ||||
2379 | } | ||||
2380 | |||||
2381 | char MemorySSAWrapperPass::ID = 0; | ||||
2382 | |||||
2383 | MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) { | ||||
2384 | initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry()); | ||||
2385 | } | ||||
2386 | |||||
2387 | void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); } | ||||
2388 | |||||
2389 | void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { | ||||
2390 | AU.setPreservesAll(); | ||||
2391 | AU.addRequiredTransitive<DominatorTreeWrapperPass>(); | ||||
2392 | AU.addRequiredTransitive<AAResultsWrapperPass>(); | ||||
2393 | } | ||||
2394 | |||||
2395 | bool MemorySSAWrapperPass::runOnFunction(Function &F) { | ||||
2396 | auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); | ||||
2397 | auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults(); | ||||
2398 | MSSA.reset(new MemorySSA(F, &AA, &DT)); | ||||
2399 | return false; | ||||
2400 | } | ||||
2401 | |||||
2402 | void MemorySSAWrapperPass::verifyAnalysis() const { | ||||
2403 | if (VerifyMemorySSA) | ||||
2404 | MSSA->verifyMemorySSA(); | ||||
2405 | } | ||||
2406 | |||||
2407 | void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const { | ||||
2408 | MSSA->print(OS); | ||||
2409 | } | ||||
2410 | |||||
2411 | MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {} | ||||
2412 | |||||
2413 | /// Walk the use-def chains starting at \p StartingAccess and find | ||||
2414 | /// the MemoryAccess that actually clobbers Loc. | ||||
2415 | /// | ||||
2416 | /// \returns our clobbering memory access | ||||
2417 | MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase( | ||||
2418 | MemoryAccess *StartingAccess, const MemoryLocation &Loc, | ||||
2419 | BatchAAResults &BAA, unsigned &UpwardWalkLimit) { | ||||
2420 | assert(!isa<MemoryUse>(StartingAccess) && "Use cannot be defining access");
2421 | |||||
2422 | Instruction *I = nullptr; | ||||
2423 | if (auto *StartingUseOrDef = dyn_cast<MemoryUseOrDef>(StartingAccess)) { | ||||
2424 | if (MSSA->isLiveOnEntryDef(StartingUseOrDef)) | ||||
2425 | return StartingUseOrDef; | ||||
2426 | |||||
2427 | I = StartingUseOrDef->getMemoryInst(); | ||||
2428 | |||||
2429 | // Conservatively, fences are always clobbers, so don't perform the walk if | ||||
2430 | // we hit a fence. | ||||
2431 | if (!isa<CallBase>(I) && I->isFenceLike()) | ||||
2432 | return StartingUseOrDef; | ||||
2433 | } | ||||
2434 | |||||
2435 | UpwardsMemoryQuery Q; | ||||
2436 | Q.OriginalAccess = StartingAccess; | ||||
2437 | Q.StartingLoc = Loc; | ||||
2438 | Q.Inst = nullptr; | ||||
2439 | Q.IsCall = false; | ||||
2440 | |||||
2441 | // Unlike the other function, do not walk to the def of a def, because we are | ||||
2442 | // handed something we already believe is the clobbering access. | ||||
2443 | // We never set SkipSelf to true in Q in this method. | ||||
2444 | MemoryAccess *Clobber = | ||||
2445 | Walker.findClobber(BAA, StartingAccess, Q, UpwardWalkLimit); | ||||
2446 | LLVM_DEBUG({
2447 |   dbgs() << "Clobber starting at access " << *StartingAccess << "\n";
2448 |   if (I)
2449 |     dbgs() << " for instruction " << *I << "\n";
2450 |   dbgs() << " is " << *Clobber << "\n";
2451 | });
2452 | return Clobber; | ||||
2453 | } | ||||
2454 | |||||
2455 | static const Instruction * | ||||
2456 | getInvariantGroupClobberingInstruction(Instruction &I, DominatorTree &DT) { | ||||
2457 | if (!I.hasMetadata(LLVMContext::MD_invariant_group) || I.isVolatile()) | ||||
2458 | return nullptr; | ||||
2459 | |||||
2460 | // We consider bitcasts and zero GEPs to be the same pointer value. Start by | ||||
2461 | // stripping bitcasts and zero GEPs, then we will recursively look at loads | ||||
2462 | // and stores through bitcasts and zero GEPs. | ||||
2463 | Value *PointerOperand = getLoadStorePointerOperand(&I)->stripPointerCasts(); | ||||
2464 | |||||
2465 | // It's not safe to walk the use list of a global value because function | ||||
2466 | // passes aren't allowed to look outside their functions. | ||||
2467 | // FIXME: this could be fixed by filtering instructions from outside of | ||||
2468 | // current function. | ||||
2469 | if (isa<Constant>(PointerOperand)) | ||||
2470 | return nullptr; | ||||
2471 | |||||
2472 | // Queue to process all pointers that are equivalent to load operand. | ||||
2473 | SmallVector<const Value *, 8> PointerUsesQueue; | ||||
2474 | PointerUsesQueue.push_back(PointerOperand); | ||||
2475 | |||||
2476 | const Instruction *MostDominatingInstruction = &I; | ||||
2477 | |||||
2478 | // FIXME: This loop is O(n^2) because dominates can be O(n) and in worst case | ||||
2479 | // we will see all the instructions. It may not matter in practice. If it | ||||
2480 | // does, we will have to support MemorySSA construction and updates. | ||||
2481 | while (!PointerUsesQueue.empty()) { | ||||
2482 | const Value *Ptr = PointerUsesQueue.pop_back_val(); | ||||
2483 | assert(Ptr && !isa<GlobalValue>(Ptr) &&
2484 |        "Null or GlobalValue should not be inserted");
2485 | |||||
2486 | for (const User *Us : Ptr->users()) { | ||||
2487 | auto *U = dyn_cast<Instruction>(Us); | ||||
2488 | if (!U || U == &I || !DT.dominates(U, MostDominatingInstruction)) | ||||
2489 | continue; | ||||
2490 | |||||
2491 | // Add bitcasts and zero GEPs to queue. | ||||
2492 | if (isa<BitCastInst>(U)) { | ||||
2493 | PointerUsesQueue.push_back(U); | ||||
2494 | continue; | ||||
2495 | } | ||||
2496 | if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) { | ||||
2497 | if (GEP->hasAllZeroIndices()) | ||||
2498 | PointerUsesQueue.push_back(U); | ||||
2499 | continue; | ||||
2500 | } | ||||
2501 | |||||
2502 | // If we hit a load/store with an invariant.group metadata and the same | ||||
2503 | // pointer operand, we can assume that the value pointed to by the pointer
2504 | // operand didn't change. | ||||
2505 | if (U->hasMetadata(LLVMContext::MD_invariant_group) && | ||||
2506 | getLoadStorePointerOperand(U) == Ptr && !U->isVolatile()) { | ||||
2507 | MostDominatingInstruction = U; | ||||
2508 | } | ||||
2509 | } | ||||
2510 | } | ||||
2511 | return MostDominatingInstruction == &I ? nullptr : MostDominatingInstruction; | ||||
2512 | } | ||||
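// Illustrative IR for the walk above (hypothetical): both accesses carry
// !invariant.group on the same pointer, so the store is returned as the most
// dominating clobbering instruction for the load, even across the call:
//
//   store i32 5, ptr %p, !invariant.group !0
//   call void @f(ptr %p)
//   %v = load i32, ptr %p, !invariant.group !0   ; clobber: the store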
2513 | |||||
2514 | MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase( | ||||
2515 | MemoryAccess *MA, BatchAAResults &BAA, unsigned &UpwardWalkLimit, | ||||
2516 | bool SkipSelf, bool UseInvariantGroup) { | ||||
2517 | auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA); | ||||
2518 | // If this is a MemoryPhi, we can't do anything. | ||||
2519 | if (!StartingAccess) | ||||
2520 | return MA; | ||||
2521 | |||||
2522 | if (UseInvariantGroup) { | ||||
2523 | if (auto *I = getInvariantGroupClobberingInstruction( | ||||
2524 | *StartingAccess->getMemoryInst(), MSSA->getDomTree())) { | ||||
2525 | assert(isa<LoadInst>(I) || isa<StoreInst>(I));
2526 | |||||
2527 | auto *ClobberMA = MSSA->getMemoryAccess(I); | ||||
2528 | assert(ClobberMA);
2529 | if (isa<MemoryUse>(ClobberMA)) | ||||
2530 | return ClobberMA->getDefiningAccess(); | ||||
2531 | return ClobberMA; | ||||
2532 | } | ||||
2533 | } | ||||
2534 | |||||
2535 | bool IsOptimized = false; | ||||
2536 | |||||
2537 | // If this is an already optimized use or def, return the optimized result. | ||||
2538 | // Note: Currently, we store the optimized def result in a separate field, | ||||
2539 | // since we can't use the defining access. | ||||
2540 | if (StartingAccess->isOptimized()) { | ||||
2541 | if (!SkipSelf || !isa<MemoryDef>(StartingAccess)) | ||||
2542 | return StartingAccess->getOptimized(); | ||||
2543 | IsOptimized = true; | ||||
2544 | } | ||||
2545 | |||||
2546 | const Instruction *I = StartingAccess->getMemoryInst(); | ||||
2547 | // We can't sanely do anything with a fence, since fences conservatively
2548 | // clobber all memory and have no locations to get pointers from to try to
2549 | // disambiguate.
2550 | if (!isa<CallBase>(I) && I->isFenceLike()) | ||||
2551 | return StartingAccess; | ||||
2552 | |||||
2553 | UpwardsMemoryQuery Q(I, StartingAccess); | ||||
2554 | |||||
2555 | if (isUseTriviallyOptimizableToLiveOnEntry(BAA, I)) { | ||||
2556 | MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef(); | ||||
2557 | StartingAccess->setOptimized(LiveOnEntry); | ||||
2558 | return LiveOnEntry; | ||||
2559 | } | ||||
2560 | |||||
2561 | MemoryAccess *OptimizedAccess; | ||||
2562 | if (!IsOptimized) { | ||||
2563 | // Start with the thing we already think clobbers this location | ||||
2564 | MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess(); | ||||
2565 | |||||
2566 | // At this point, DefiningAccess may be the live on entry def. | ||||
2567 | // If it is, we will not get a better result. | ||||
2568 | if (MSSA->isLiveOnEntryDef(DefiningAccess)) { | ||||
2569 | StartingAccess->setOptimized(DefiningAccess); | ||||
2570 | return DefiningAccess; | ||||
2571 | } | ||||
2572 | |||||
2573 | OptimizedAccess = | ||||
2574 | Walker.findClobber(BAA, DefiningAccess, Q, UpwardWalkLimit); | ||||
2575 | StartingAccess->setOptimized(OptimizedAccess); | ||||
2576 | } else | ||||
2577 | OptimizedAccess = StartingAccess->getOptimized(); | ||||
2578 | |||||
2579 | LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << "Starting Memory SSA clobber for " << *I << " is "; } } while (false); | ||||
2580 | LLVM_DEBUG(dbgs() << *StartingAccess << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << *StartingAccess << "\n" ; } } while (false); | ||||
2581 | LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << "Optimized Memory SSA clobber for " << *I << " is "; } } while (false); | ||||
2582 | LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << *OptimizedAccess << "\n" ; } } while (false); | ||||
2583 | |||||
2584 | MemoryAccess *Result; | ||||
2585 | if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) && | ||||
2586 | isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) { | ||||
2587 | assert(isa<MemoryDef>(Q.OriginalAccess));
2588 | Q.SkipSelfAccess = true; | ||||
2589 | Result = Walker.findClobber(BAA, OptimizedAccess, Q, UpwardWalkLimit); | ||||
2590 | } else | ||||
2591 | Result = OptimizedAccess; | ||||
2592 | |||||
2593 | LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf; } } while (false); | ||||
2594 | LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << "] for " << *I << " is " << *Result << "\n"; } } while (false); | ||||
2595 | |||||
2596 | return Result; | ||||
2597 | } | ||||
2598 | |||||
2599 | MemoryAccess * | ||||
2600 | DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA, | ||||
2601 | BatchAAResults &) { | ||||
2602 | if (auto *Use = dyn_cast<MemoryUseOrDef>(MA)) | ||||
2603 | return Use->getDefiningAccess(); | ||||
2604 | return MA; | ||||
2605 | } | ||||
2606 | |||||
2607 | MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess( | ||||
2608 | MemoryAccess *StartingAccess, const MemoryLocation &, BatchAAResults &) { | ||||
2609 | if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess)) | ||||
2610 | return Use->getDefiningAccess(); | ||||
2611 | return StartingAccess; | ||||
2612 | } | ||||
2613 | |||||
2614 | void MemoryPhi::deleteMe(DerivedUser *Self) { | ||||
2615 | delete static_cast<MemoryPhi *>(Self); | ||||
2616 | } | ||||
2617 | |||||
2618 | void MemoryDef::deleteMe(DerivedUser *Self) { | ||||
2619 | delete static_cast<MemoryDef *>(Self); | ||||
2620 | } | ||||
2621 | |||||
2622 | void MemoryUse::deleteMe(DerivedUser *Self) { | ||||
2623 | delete static_cast<MemoryUse *>(Self); | ||||
2624 | } | ||||
2625 | |||||
2626 | bool upward_defs_iterator::IsGuaranteedLoopInvariant(const Value *Ptr) const { | ||||
2627 | auto IsGuaranteedLoopInvariantBase = [](const Value *Ptr) { | ||||
2628 | Ptr = Ptr->stripPointerCasts(); | ||||
2629 | if (!isa<Instruction>(Ptr)) | ||||
2630 | return true; | ||||
2631 | return isa<AllocaInst>(Ptr); | ||||
2632 | }; | ||||
2633 | |||||
2634 | Ptr = Ptr->stripPointerCasts(); | ||||
2635 | if (auto *I = dyn_cast<Instruction>(Ptr)) { | ||||
2636 | if (I->getParent()->isEntryBlock()) | ||||
2637 | return true; | ||||
2638 | } | ||||
2639 | if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) { | ||||
2640 | return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) && | ||||
2641 | GEP->hasAllConstantIndices(); | ||||
2642 | } | ||||
2643 | return IsGuaranteedLoopInvariantBase(Ptr); | ||||
2644 | } |
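// Examples of pointers the check above accepts as guaranteed loop-invariant
// (illustrative): allocas, non-instructions such as globals and arguments,
// instructions in the entry block, and GEPs with all-constant indices on such
// bases. A GEP with a loop-varying index fails the check.

File: | llvm/include/llvm/Analysis/MemorySSA.h |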
1 | //===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | /// \file |
10 | /// This file exposes an interface to building/using memory SSA to |
11 | /// walk memory instructions using a use/def graph. |
12 | /// |
13 | /// Memory SSA class builds an SSA form that links together memory access |
14 | /// instructions such as loads, stores, atomics, and calls. Additionally, it |
15 | /// does a trivial form of "heap versioning": every time the memory state
16 | /// changes in the program, we generate a new heap version. It generates
17 | /// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
18 | /// |
19 | /// As a trivial example, |
20 | /// define i32 @main() #0 { |
21 | /// entry: |
22 | /// %call = call noalias i8* @_Znwm(i64 4) #2 |
23 | /// %0 = bitcast i8* %call to i32* |
24 | /// %call1 = call noalias i8* @_Znwm(i64 4) #2 |
25 | /// %1 = bitcast i8* %call1 to i32* |
26 | /// store i32 5, i32* %0, align 4 |
27 | /// store i32 7, i32* %1, align 4 |
28 | /// %2 = load i32* %0, align 4 |
29 | /// %3 = load i32* %1, align 4 |
30 | /// %add = add nsw i32 %2, %3 |
31 | /// ret i32 %add |
32 | /// } |
33 | /// |
34 | /// Will become |
35 | /// define i32 @main() #0 { |
36 | /// entry: |
37 | /// ; 1 = MemoryDef(0) |
38 | /// %call = call noalias i8* @_Znwm(i64 4) #3 |
39 | /// %2 = bitcast i8* %call to i32* |
40 | /// ; 2 = MemoryDef(1) |
41 | /// %call1 = call noalias i8* @_Znwm(i64 4) #3 |
42 | /// %4 = bitcast i8* %call1 to i32* |
43 | /// ; 3 = MemoryDef(2) |
44 | /// store i32 5, i32* %2, align 4 |
45 | /// ; 4 = MemoryDef(3) |
46 | /// store i32 7, i32* %4, align 4 |
47 | /// ; MemoryUse(3) |
48 | /// %7 = load i32* %2, align 4 |
49 | /// ; MemoryUse(4) |
50 | /// %8 = load i32* %4, align 4 |
51 | /// %add = add nsw i32 %7, %8 |
52 | /// ret i32 %add |
53 | /// } |
54 | /// |
55 | /// Given this form, all the stores that could ever affect the load at %8 can be
56 | /// gotten by using the MemoryUse associated with it, and walking from use to |
57 | /// def until you hit the top of the function. |
58 | /// |
59 | /// Each def also has a list of users associated with it, so you can walk from |
60 | /// both def to users, and users to defs. Note that we disambiguate MemoryUses, |
61 | /// but not the RHS of MemoryDefs. You can see this above at %7, which would |
62 | /// otherwise be a MemoryUse(4). Being disambiguated means that for a given |
63 | /// store, all the MemoryUses on its use lists are may-aliases of that store |
64 | /// (but the MemoryDefs on its use list may not be). |
65 | /// |
66 | /// MemoryDefs are not disambiguated because it would require multiple reaching |
67 | /// definitions, which would require multiple phis, and multiple memoryaccesses |
68 | /// per instruction. |
69 | /// |
70 | /// In addition to the def/use graph described above, MemoryDefs also contain |
71 | /// an "optimized" definition use. The "optimized" use points to some def |
72 | /// reachable through the memory def chain. The optimized def *may* (but is |
73 | /// not required to) alias the original MemoryDef, but no def *closer* to the |
74 | /// source def may alias it. As the name implies, the purpose of the optimized |
75 | /// use is to allow caching of clobber searches for memory defs. The optimized |
76 | /// def may be nullptr, in which case clients must walk the defining access |
77 | /// chain. |
78 | /// |
79 | /// When iterating the uses of a MemoryDef, both defining uses and optimized |
80 | /// uses will be encountered. If only one type is needed, the client must |
81 | /// filter the use walk. |
82 | // |
83 | //===----------------------------------------------------------------------===// |
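//
// A minimal sketch of consuming the "optimized" use described above, assuming
// a MemoryDef *MD: fall back to walking the defining-access chain when no
// cached clobber is present.
//
//   MemoryAccess *Clobber =
//       MD->isOptimized() ? MD->getOptimized() : MD->getDefiningAccess();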
84 | |
85 | #ifndef LLVM_ANALYSIS_MEMORYSSA_H |
86 | #define LLVM_ANALYSIS_MEMORYSSA_H |
87 | |
88 | #include "llvm/ADT/DenseMap.h" |
89 | #include "llvm/ADT/SmallPtrSet.h" |
90 | #include "llvm/ADT/SmallVector.h" |
91 | #include "llvm/ADT/ilist_node.h" |
92 | #include "llvm/ADT/iterator_range.h" |
93 | #include "llvm/Analysis/AliasAnalysis.h" |
94 | #include "llvm/Analysis/MemoryLocation.h" |
95 | #include "llvm/Analysis/PHITransAddr.h" |
96 | #include "llvm/IR/DerivedUser.h" |
97 | #include "llvm/IR/Dominators.h" |
98 | #include "llvm/IR/Type.h" |
99 | #include "llvm/IR/User.h" |
100 | #include "llvm/Pass.h" |
101 | #include <algorithm> |
102 | #include <cassert> |
103 | #include <cstddef> |
104 | #include <iterator> |
105 | #include <memory> |
106 | #include <utility> |
107 | |
108 | namespace llvm { |
109 | |
110 | template <class GraphType> struct GraphTraits; |
111 | class BasicBlock; |
112 | class Function; |
113 | class Instruction; |
114 | class LLVMContext; |
115 | class MemoryAccess; |
116 | class MemorySSAWalker; |
117 | class Module; |
118 | class Use; |
119 | class Value; |
120 | class raw_ostream; |
121 | |
122 | namespace MSSAHelpers { |
123 | |
124 | struct AllAccessTag {}; |
125 | struct DefsOnlyTag {}; |
126 | |
127 | } // end namespace MSSAHelpers |
128 | |
129 | enum : unsigned { |
130 | // Used to signify what the default invalid ID is for MemoryAccess's |
131 | // getID() |
132 | INVALID_MEMORYACCESS_ID = -1U |
133 | }; |
134 | |
135 | template <class T> class memoryaccess_def_iterator_base; |
136 | using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>; |
137 | using const_memoryaccess_def_iterator = |
138 | memoryaccess_def_iterator_base<const MemoryAccess>; |
139 | |
140 | // The base for all memory accesses. All memory accesses in a block are |
141 | // linked together using an intrusive list. |
142 | class MemoryAccess |
143 | : public DerivedUser, |
144 | public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>, |
145 | public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> { |
146 | public: |
147 | using AllAccessType = |
148 | ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>; |
149 | using DefsOnlyType = |
150 | ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>; |
151 | |
152 | MemoryAccess(const MemoryAccess &) = delete; |
153 | MemoryAccess &operator=(const MemoryAccess &) = delete; |
154 | |
155 | void *operator new(size_t) = delete; |
156 | |
157 | // Methods for support type inquiry through isa, cast, and |
158 | // dyn_cast |
159 | static bool classof(const Value *V) { |
160 | unsigned ID = V->getValueID(); |
161 | return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal; |
162 | } |
163 | |
164 | BasicBlock *getBlock() const { return Block; } |
165 | |
166 | void print(raw_ostream &OS) const; |
167 | void dump() const; |
168 | |
169 | /// The user iterators for a memory access |
170 | using iterator = user_iterator; |
171 | using const_iterator = const_user_iterator; |
172 | |
173 | /// This iterator walks over all of the defs in a given |
174 | /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For |
175 | /// MemoryUse/MemoryDef, this walks the defining access. |
176 | memoryaccess_def_iterator defs_begin(); |
177 | const_memoryaccess_def_iterator defs_begin() const; |
178 | memoryaccess_def_iterator defs_end(); |
179 | const_memoryaccess_def_iterator defs_end() const; |
180 | |
181 | /// Get the iterators for the all access list and the defs only list |
182 | /// We default to the all access list. |
183 | AllAccessType::self_iterator getIterator() { |
184 | return this->AllAccessType::getIterator(); |
185 | } |
186 | AllAccessType::const_self_iterator getIterator() const { |
187 | return this->AllAccessType::getIterator(); |
188 | } |
189 | AllAccessType::reverse_self_iterator getReverseIterator() { |
190 | return this->AllAccessType::getReverseIterator(); |
191 | } |
192 | AllAccessType::const_reverse_self_iterator getReverseIterator() const { |
193 | return this->AllAccessType::getReverseIterator(); |
194 | } |
195 | DefsOnlyType::self_iterator getDefsIterator() { |
196 | return this->DefsOnlyType::getIterator(); |
197 | } |
198 | DefsOnlyType::const_self_iterator getDefsIterator() const { |
199 | return this->DefsOnlyType::getIterator(); |
200 | } |
201 | DefsOnlyType::reverse_self_iterator getReverseDefsIterator() { |
202 | return this->DefsOnlyType::getReverseIterator(); |
203 | } |
204 | DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const { |
205 | return this->DefsOnlyType::getReverseIterator(); |
206 | } |
207 | |
208 | protected: |
209 | friend class MemoryDef; |
210 | friend class MemoryPhi; |
211 | friend class MemorySSA; |
212 | friend class MemoryUse; |
213 | friend class MemoryUseOrDef; |
214 | |
215 | /// Used by MemorySSA to change the block of a MemoryAccess when it is |
216 | /// moved. |
217 | void setBlock(BasicBlock *BB) { Block = BB; } |
218 | |
219 | /// Used for debugging and tracking things about MemoryAccesses. |
220 | /// Guaranteed unique among MemoryAccesses, no guarantees otherwise. |
221 | inline unsigned getID() const; |
222 | |
223 | MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue, |
224 | BasicBlock *BB, unsigned NumOperands) |
225 | : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue), |
226 | Block(BB) {} |
227 | |
228 | // Use deleteValue() to delete a generic MemoryAccess. |
229 | ~MemoryAccess() = default; |
230 | |
231 | private: |
232 | BasicBlock *Block; |
233 | }; |
234 | |
235 | template <> |
236 | struct ilist_alloc_traits<MemoryAccess> { |
237 | static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); } |
238 | }; |
239 | |
240 | inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) { |
241 | MA.print(OS); |
242 | return OS; |
243 | } |
244 | |
245 | /// Class that has the common methods + fields of memory uses/defs. It's |
246 | /// a little awkward to have, but there are many cases where we want either a |
247 | /// use or def, and there are many cases where uses are needed (defs aren't |
248 | /// acceptable), and vice-versa. |
249 | /// |
250 | /// This class should never be instantiated directly; make a MemoryUse or |
251 | /// MemoryDef instead. |
252 | class MemoryUseOrDef : public MemoryAccess { |
253 | public: |
254 | void *operator new(size_t) = delete; |
255 | |
256 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess) |
257 | |
258 | /// Get the instruction that this MemoryUseOrDef represents. |
259 | Instruction *getMemoryInst() const { return MemoryInstruction; } |
260 | |
261 | /// Get the access that produces the memory state used by this Use. |
262 | MemoryAccess *getDefiningAccess() const { return getOperand(0); } |
263 | |
264 | static bool classof(const Value *MA) { |
265 | return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal; |
266 | } |
267 | |
268 | /// Do we have an optimized use? |
269 | inline bool isOptimized() const; |
270 | /// Return the MemoryAccess associated with the optimized use, or nullptr. |
271 | inline MemoryAccess *getOptimized() const; |
272 | /// Sets the optimized use for a MemoryDef. |
273 | inline void setOptimized(MemoryAccess *); |
274 | |
275 | /// Reset the ID of what this MemoryUseOrDef was optimized to, causing it to |
276 | /// be rewalked by the walker if necessary. |
277 | /// This really should only be called by tests. |
278 | inline void resetOptimized(); |
279 | |
280 | protected: |
281 | friend class MemorySSA; |
282 | friend class MemorySSAUpdater; |
283 | |
284 | MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty, |
285 | DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB, |
286 | unsigned NumOperands) |
287 | : MemoryAccess(C, Vty, DeleteValue, BB, NumOperands), |
288 | MemoryInstruction(MI) { |
289 | setDefiningAccess(DMA); |
290 | } |
291 | |
292 | // Use deleteValue() to delete a generic MemoryUseOrDef. |
293 | ~MemoryUseOrDef() = default; |
294 | |
295 | void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false) { |
296 | if (!Optimized) { |
297 | setOperand(0, DMA); |
298 | return; |
299 | } |
300 | setOptimized(DMA); |
301 | } |
302 | |
303 | private: |
304 | Instruction *MemoryInstruction; |
305 | }; |
306 | |
307 | /// Represents read-only accesses to memory |
308 | /// |
309 | /// In particular, the set of Instructions that will be represented by |
310 | /// MemoryUse's is exactly the set of Instructions for which |
311 | /// AliasAnalysis::getModRefInfo returns "Ref". |
312 | class MemoryUse final : public MemoryUseOrDef { |
313 | public: |
314 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess) |
315 | |
316 | MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB) |
317 | : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB, |
318 | /*NumOperands=*/1) {} |
319 | |
320 | // allocate space for exactly one operand |
321 | void *operator new(size_t S) { return User::operator new(S, 1); } |
322 | void operator delete(void *Ptr) { User::operator delete(Ptr); } |
323 | |
324 | static bool classof(const Value *MA) { |
325 | return MA->getValueID() == MemoryUseVal; |
326 | } |
327 | |
328 | void print(raw_ostream &OS) const; |
329 | |
330 | void setOptimized(MemoryAccess *DMA) { |
331 | OptimizedID = DMA->getID(); |
332 | setOperand(0, DMA); |
333 | } |
334 | |
335 | /// Whether the MemoryUse is optimized. If ensureOptimizedUses() was called, |
336 | /// uses will usually be optimized, but this is not guaranteed (e.g. due to |
337 | /// invalidation and optimization limits). |
338 | bool isOptimized() const { |
339 | return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID(); |
340 | } |
341 | |
342 | MemoryAccess *getOptimized() const { |
343 | return getDefiningAccess(); |
344 | } |
345 | |
346 | void resetOptimized() { |
347 | OptimizedID = INVALID_MEMORYACCESS_ID; |
348 | } |
349 | |
350 | protected: |
351 | friend class MemorySSA; |
352 | |
353 | private: |
354 | static void deleteMe(DerivedUser *Self); |
355 | |
356 | unsigned OptimizedID = INVALID_MEMORYACCESS_ID; |
357 | }; |
358 | |
359 | template <> |
360 | struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {}; |
361 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess) |
362 | |
363 | /// Represents a read-write access to memory, whether it is a must-alias, |
364 | /// or a may-alias. |
365 | /// |
366 | /// In particular, the set of Instructions that will be represented by |
367 | /// MemoryDef's is exactly the set of Instructions for which |
368 | /// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef". |
369 | /// Note that, in order to provide def-def chains, all defs also have a use |
370 | /// associated with them. This use points to the nearest reaching |
371 | /// MemoryDef/MemoryPhi. |
372 | class MemoryDef final : public MemoryUseOrDef { |
373 | public: |
374 | friend class MemorySSA; |
375 | |
376 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess) |
377 | |
378 | MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB, |
379 | unsigned Ver) |
380 | : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB, |
381 | /*NumOperands=*/2), |
382 | ID(Ver) {} |
383 | |
384 | // allocate space for exactly two operands |
385 | void *operator new(size_t S) { return User::operator new(S, 2); } |
386 | void operator delete(void *Ptr) { User::operator delete(Ptr); } |
387 | |
388 | static bool classof(const Value *MA) { |
389 | return MA->getValueID() == MemoryDefVal; |
390 | } |
391 | |
392 | void setOptimized(MemoryAccess *MA) { |
393 | setOperand(1, MA); |
394 | OptimizedID = MA->getID(); |
395 | } |
396 | |
397 | MemoryAccess *getOptimized() const { |
398 | return cast_or_null<MemoryAccess>(getOperand(1)); |
399 | } |
400 | |
401 | bool isOptimized() const { |
402 | return getOptimized() && OptimizedID == getOptimized()->getID(); |
403 | } |
404 | |
405 | void resetOptimized() { |
406 | OptimizedID = INVALID_MEMORYACCESS_ID; |
407 | setOperand(1, nullptr); |
408 | } |
409 | |
410 | void print(raw_ostream &OS) const; |
411 | |
412 | unsigned getID() const { return ID; } |
413 | |
414 | private: |
415 | static void deleteMe(DerivedUser *Self); |
416 | |
417 | const unsigned ID; |
418 | unsigned OptimizedID = INVALID_MEMORYACCESS_ID; |
419 | }; |
420 | |
421 | template <> |
422 | struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {}; |
423 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess) |
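| |
| // Editor's note: an editorial sketch, not part of the original header, |
| // illustrating the optimized-access protocol above: operand 1 is trusted |
| // only while the recorded ID matches the current operand's ID, so a |
| // replaced or reset operand reads as "not optimized". The helper name is |
| // hypothetical. |
| inline MemoryAccess *getCachedClobberOrNull(MemoryDef *MD) { |
|   return MD->isOptimized() ? MD->getOptimized() : nullptr; |
| } |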
424 | |
425 | template <> |
426 | struct OperandTraits<MemoryUseOrDef> { |
427 | static Use *op_begin(MemoryUseOrDef *MUD) { |
428 | if (auto *MU = dyn_cast<MemoryUse>(MUD)) |
429 | return OperandTraits<MemoryUse>::op_begin(MU); |
430 | return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD)); |
431 | } |
432 | |
433 | static Use *op_end(MemoryUseOrDef *MUD) { |
434 | if (auto *MU = dyn_cast<MemoryUse>(MUD)) |
435 | return OperandTraits<MemoryUse>::op_end(MU); |
436 | return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD)); |
437 | } |
438 | |
439 | static unsigned operands(const MemoryUseOrDef *MUD) { |
440 | if (const auto *MU = dyn_cast<MemoryUse>(MUD)) |
441 | return OperandTraits<MemoryUse>::operands(MU); |
442 | return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD)); |
443 | } |
444 | }; |
445 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess) |
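| |
| // Editor's note: an editorial sketch, not part of the original header. A |
| // common pattern: hop from MemorySSA's def-use view back to the IR by |
| // asking a defining access for its underlying instruction. The helper |
| // name is hypothetical. |
| inline Instruction *getDefiningInstructionOrNull(MemoryUseOrDef *MUD) { |
|   // A MemoryPhi has no single underlying instruction; the special |
|   // liveOnEntry def is a MemoryDef whose instruction is null. |
|   if (auto *DefMUD = dyn_cast<MemoryUseOrDef>(MUD->getDefiningAccess())) |
|     return DefMUD->getMemoryInst(); // may be null for liveOnEntry |
|   return nullptr; |
| } |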
446 | |
447 | /// Represents phi nodes for memory accesses. |
448 | /// |
449 | /// These have the same semantic as regular phi nodes, with the exception that |
450 | /// only one phi will ever exist in a given basic block. |
451 | /// Guaranteeing one phi per block means guaranteeing there is only ever one |
452 | /// valid reaching MemoryDef/MemoryPhi along each path to the phi node. |
453 | /// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or |
454 | /// a MemoryPhi's operands. |
455 | /// That is, given |
456 | /// if (a) { |
457 | /// store %a |
458 | /// store %b |
459 | /// } |
460 | /// it *must* be transformed into |
461 | /// if (a) { |
462 | /// 1 = MemoryDef(liveOnEntry) |
463 | /// store %a |
464 | /// 2 = MemoryDef(1) |
465 | /// store %b |
466 | /// } |
467 | /// and *not* |
468 | /// if (a) { |
469 | /// 1 = MemoryDef(liveOnEntry) |
470 | /// store %a |
471 | /// 2 = MemoryDef(liveOnEntry) |
472 | /// store %b |
473 | /// } |
474 | /// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the |
475 | /// end of the branch, and if there are not two phi nodes, one will be |
476 | /// disconnected completely from the SSA graph below that point. |
477 | /// Because MemoryUse's do not generate new definitions, they do not have this |
478 | /// issue. |
479 | class MemoryPhi final : public MemoryAccess { |
480 | // allocate space for exactly zero operands |
481 | void *operator new(size_t S) { return User::operator new(S); } |
482 | |
483 | public: |
484 | void operator delete(void *Ptr) { User::operator delete(Ptr); } |
485 | |
486 | /// Provide fast operand accessors |
487 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess) |
488 | |
489 | MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0) |
490 | : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver), |
491 | ReservedSpace(NumPreds) { |
492 | allocHungoffUses(ReservedSpace); |
493 | } |
494 | |
495 | // Block iterator interface. This provides access to the list of incoming |
496 | // basic blocks, which parallels the list of incoming values. |
497 | using block_iterator = BasicBlock **; |
498 | using const_block_iterator = BasicBlock *const *; |
499 | |
500 | block_iterator block_begin() { |
501 | return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace); |
502 | } |
503 | |
504 | const_block_iterator block_begin() const { |
505 | return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace); |
506 | } |
507 | |
508 | block_iterator block_end() { return block_begin() + getNumOperands(); } |
509 | |
510 | const_block_iterator block_end() const { |
511 | return block_begin() + getNumOperands(); |
512 | } |
513 | |
514 | iterator_range<block_iterator> blocks() { |
515 | return make_range(block_begin(), block_end()); |
516 | } |
517 | |
518 | iterator_range<const_block_iterator> blocks() const { |
519 | return make_range(block_begin(), block_end()); |
520 | } |
521 | |
522 | op_range incoming_values() { return operands(); } |
523 | |
524 | const_op_range incoming_values() const { return operands(); } |
525 | |
526 | /// Return the number of incoming edges |
527 | unsigned getNumIncomingValues() const { return getNumOperands(); } |
528 | |
529 | /// Return incoming value number x |
530 | MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); } |
531 | void setIncomingValue(unsigned I, MemoryAccess *V) { |
532 | assert(V && "PHI node got a null value!"); |
533 | setOperand(I, V); |
534 | } |
535 | |
536 | static unsigned getOperandNumForIncomingValue(unsigned I) { return I; } |
537 | static unsigned getIncomingValueNumForOperand(unsigned I) { return I; } |
538 | |
539 | /// Return incoming basic block number @p I. |
540 | BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; } |
541 | |
542 | /// Return incoming basic block corresponding |
543 | /// to an operand of the PHI. |
544 | BasicBlock *getIncomingBlock(const Use &U) const { |
545 | assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?"); |
546 | return getIncomingBlock(unsigned(&U - op_begin())); |
547 | } |
548 | |
549 | /// Return incoming basic block corresponding |
550 | /// to value use iterator. |
551 | BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const { |
552 | return getIncomingBlock(I.getUse()); |
553 | } |
554 | |
555 | void setIncomingBlock(unsigned I, BasicBlock *BB) { |
556 | assert(BB && "PHI node got a null basic block!"); |
557 | block_begin()[I] = BB; |
558 | } |
559 | |
560 | /// Add an incoming value to the end of the PHI list |
561 | void addIncoming(MemoryAccess *V, BasicBlock *BB) { |
562 | if (getNumOperands() == ReservedSpace) |
563 | growOperands(); // Get more space! |
564 | // Initialize some new operands. |
565 | setNumHungOffUseOperands(getNumOperands() + 1); |
566 | setIncomingValue(getNumOperands() - 1, V); |
567 | setIncomingBlock(getNumOperands() - 1, BB); |
568 | } |
569 | |
570 | /// Return the first index of the specified basic |
571 | /// block in the value list for this PHI. Returns -1 if no instance. |
572 | int getBasicBlockIndex(const BasicBlock *BB) const { |
573 | for (unsigned I = 0, E = getNumOperands(); I != E; ++I) |
574 | if (block_begin()[I] == BB) |
575 | return I; |
576 | return -1; |
577 | } |
578 | |
579 | MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const { |
580 | int Idx = getBasicBlockIndex(BB); |
581 | assert(Idx >= 0 && "Invalid basic block argument!"); |
582 | return getIncomingValue(Idx); |
583 | } |
584 | |
585 | // After deleting incoming position I, the order of incoming entries may change. |
586 | void unorderedDeleteIncoming(unsigned I) { |
587 | unsigned E = getNumOperands(); |
588 | assert(I < E && "Cannot remove out of bounds Phi entry."); |
589 | // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi |
590 | // itself should be deleted. |
591 | assert(E >= 2 && "Cannot only remove incoming values in MemoryPhis with " |
592 | "at least 2 values."); |
593 | setIncomingValue(I, getIncomingValue(E - 1)); |
594 | setIncomingBlock(I, block_begin()[E - 1]); |
595 | setOperand(E - 1, nullptr); |
596 | block_begin()[E - 1] = nullptr; |
597 | setNumHungOffUseOperands(getNumOperands() - 1); |
598 | } |
599 | |
600 | // After deleting entries that satisfy Pred, remaining entries may have |
601 | // changed order. |
602 | template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) { |
603 | for (unsigned I = 0, E = getNumOperands(); I != E; ++I) |
604 | if (Pred(getIncomingValue(I), getIncomingBlock(I))) { |
605 | unorderedDeleteIncoming(I); |
606 | E = getNumOperands(); |
607 | --I; |
608 | } |
609 | assert(getNumOperands() >= 1 && |
610 | "Cannot remove all incoming blocks in a MemoryPhi."); |
611 | } |
612 | |
613 | // After deleting incoming block BB, the order of incoming blocks may change. |
614 | void unorderedDeleteIncomingBlock(const BasicBlock *BB) { |
615 | unorderedDeleteIncomingIf( |
616 | [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; }); |
617 | } |
618 | |
619 | // After deleting incoming memory access MA, the order of incoming accesses |
620 | // may change. |
621 | void unorderedDeleteIncomingValue(const MemoryAccess *MA) { |
622 | unorderedDeleteIncomingIf( |
623 | [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; }); |
624 | } |
625 | |
626 | static bool classof(const Value *V) { |
627 | return V->getValueID() == MemoryPhiVal; |
628 | } |
629 | |
630 | void print(raw_ostream &OS) const; |
631 | |
632 | unsigned getID() const { return ID; } |
633 | |
634 | protected: |
635 | friend class MemorySSA; |
636 | |
637 | /// This is more complicated than the generic |
638 | /// User::allocHungoffUses, because we have to allocate Uses for the incoming |
639 | /// values and pointers to the incoming blocks, all in one allocation. |
640 | void allocHungoffUses(unsigned N) { |
641 | User::allocHungoffUses(N, /* IsPhi */ true); |
642 | } |
643 | |
644 | private: |
645 | // For debugging only |
646 | const unsigned ID; |
647 | unsigned ReservedSpace; |
648 | |
649 | /// This grows the operand list in response to a push_back style of |
650 | /// operation. This grows the number of ops by 1.5 times. |
651 | void growOperands() { |
652 | unsigned E = getNumOperands(); |
653 | // 2 op PHI nodes are VERY common, so reserve at least enough for that. |
654 | ReservedSpace = std::max(E + E / 2, 2u); |
655 | growHungoffUses(ReservedSpace, /* IsPhi */ true); |
656 | } |
657 | |
658 | static void deleteMe(DerivedUser *Self); |
659 | }; |
660 | |
661 | inline unsigned MemoryAccess::getID() const { |
662 | assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) && |
663 | "only memory defs and phis have ids"); |
664 | if (const auto *MD = dyn_cast<MemoryDef>(this)) |
665 | return MD->getID(); |
666 | return cast<MemoryPhi>(this)->getID(); |
667 | } |
668 | |
669 | inline bool MemoryUseOrDef::isOptimized() const { |
670 | if (const auto *MD = dyn_cast<MemoryDef>(this)) |
671 | return MD->isOptimized(); |
672 | return cast<MemoryUse>(this)->isOptimized(); |
673 | } |
674 | |
675 | inline MemoryAccess *MemoryUseOrDef::getOptimized() const { |
676 | if (const auto *MD = dyn_cast<MemoryDef>(this)) |
677 | return MD->getOptimized(); |
678 | return cast<MemoryUse>(this)->getOptimized(); |
679 | } |
680 | |
681 | inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) { |
682 | if (auto *MD = dyn_cast<MemoryDef>(this)) |
683 | MD->setOptimized(MA); |
684 | else |
685 | cast<MemoryUse>(this)->setOptimized(MA); |
686 | } |
687 | |
688 | inline void MemoryUseOrDef::resetOptimized() { |
689 | if (auto *MD = dyn_cast<MemoryDef>(this)) |
690 | MD->resetOptimized(); |
691 | else |
692 | cast<MemoryUse>(this)->resetOptimized(); |
693 | } |
694 | |
695 | template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {}; |
696 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess) |
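| |
| // Editor's note: an editorial sketch, not part of the original header, |
| // showing how a MemoryPhi's incoming values pair up with its incoming |
| // blocks. The helper name is hypothetical. |
| inline void printMemoryPhiIncoming(const MemoryPhi &Phi, raw_ostream &OS) { |
|   for (unsigned I = 0, E = Phi.getNumIncomingValues(); I != E; ++I) |
|     OS << "  [" << Phi.getIncomingBlock(I)->getName() << "] -> " |
|        << *Phi.getIncomingValue(I) << "\n"; |
| } |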
697 | |
698 | /// Encapsulates MemorySSA, including all data associated with memory |
699 | /// accesses. |
700 | class MemorySSA { |
701 | public: |
702 | MemorySSA(Function &, AliasAnalysis *, DominatorTree *); |
703 | |
704 | // MemorySSA must remain where it's constructed; Walkers it creates store |
705 | // pointers to it. |
706 | MemorySSA(MemorySSA &&) = delete; |
707 | |
708 | ~MemorySSA(); |
709 | |
710 | MemorySSAWalker *getWalker(); |
711 | MemorySSAWalker *getSkipSelfWalker(); |
712 | |
713 | /// Given a memory Mod/Ref'ing instruction, get the MemorySSA |
714 | /// access associated with it. If passed a basic block, this gets the memory |
715 | /// phi node that exists for that block, if there is one. Otherwise, this |
716 | /// will get a MemoryUseOrDef. |
717 | MemoryUseOrDef *getMemoryAccess(const Instruction *I) const { |
718 | return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I)); |
719 | } |
720 | |
721 | MemoryPhi *getMemoryAccess(const BasicBlock *BB) const { |
722 | return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB))); |
723 | } |
724 | |
725 | DominatorTree &getDomTree() const { return *DT; } |
726 | |
727 | void dump() const; |
728 | void print(raw_ostream &) const; |
729 | |
730 | /// Return true if \p MA represents the live on entry value |
731 | /// |
732 | /// Loads and stores from pointer arguments and other global values may be |
733 | /// defined by memory operations that do not occur in the current function, so |
734 | /// they may be live on entry to the function. MemorySSA represents such |
735 | /// memory state by the live on entry definition, which is guaranteed to occur |
736 | /// before any other memory access in the function. |
737 | inline bool isLiveOnEntryDef(const MemoryAccess *MA) const { |
738 | return MA == LiveOnEntryDef.get(); |
739 | } |
740 | |
741 | inline MemoryAccess *getLiveOnEntryDef() const { |
742 | return LiveOnEntryDef.get(); |
743 | } |
744 | |
745 | // Sadly, iplists, by default, own and delete pointers added to the |
746 | // list. It's not currently possible to have two iplists for the same type, |
747 | // where one owns the pointers, and one does not. This is because the traits |
748 | // are per-type, not per-tag. If this ever changes, we should make the |
749 | // DefList an iplist. |
750 | using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>; |
751 | using DefsList = |
752 | simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>; |
753 | |
754 | /// Return the list of MemoryAccess's for a given basic block. |
755 | /// |
756 | /// This list is not modifiable by the user. |
757 | const AccessList *getBlockAccesses(const BasicBlock *BB) const { |
758 | return getWritableBlockAccesses(BB); |
759 | } |
760 | |
761 | /// Return the list of MemoryDef's and MemoryPhi's for a given basic |
762 | /// block. |
763 | /// |
764 | /// This list is not modifiable by the user. |
765 | const DefsList *getBlockDefs(const BasicBlock *BB) const { |
766 | return getWritableBlockDefs(BB); |
767 | } |
768 | |
769 | /// Given two memory accesses in the same basic block, determine |
770 | /// whether MemoryAccess \p A dominates MemoryAccess \p B. |
771 | bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const; |
772 | |
773 | /// Given two memory accesses in potentially different blocks, |
774 | /// determine whether MemoryAccess \p A dominates MemoryAccess \p B. |
775 | bool dominates(const MemoryAccess *A, const MemoryAccess *B) const; |
776 | |
777 | /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A |
778 | /// dominates Use \p B. |
779 | bool dominates(const MemoryAccess *A, const Use &B) const; |
780 | |
781 | enum class VerificationLevel { Fast, Full }; |
782 | /// Verify that MemorySSA is self consistent (IE definitions dominate |
783 | /// all uses, uses appear in the right places). This is used by unit tests. |
784 | void verifyMemorySSA(VerificationLevel = VerificationLevel::Fast) const; |
785 | |
786 | /// Used in various insertion functions to specify whether we are talking |
787 | /// about the beginning or end of a block. |
788 | enum InsertionPlace { Beginning, End, BeforeTerminator }; |
789 | |
790 | /// By default, uses are *not* optimized during MemorySSA construction. |
791 | /// Calling this method will attempt to optimize all MemoryUses, if this has |
792 | /// not happened yet for this MemorySSA instance. This should be done if you |
793 | /// plan to query the clobbering access for most uses, or if you walk the |
794 | /// def-use chain of uses. |
795 | void ensureOptimizedUses(); |
796 | |
797 | AliasAnalysis &getAA() { return *AA; } |
798 | |
799 | protected: |
800 | // Used by Memory SSA dumpers and wrapper pass |
801 | friend class MemorySSAPrinterLegacyPass; |
802 | friend class MemorySSAUpdater; |
803 | |
804 | void verifyOrderingDominationAndDefUses( |
805 | Function &F, VerificationLevel = VerificationLevel::Fast) const; |
806 | void verifyDominationNumbers(const Function &F) const; |
807 | void verifyPrevDefInPhis(Function &F) const; |
808 | |
809 | // This is used by the use optimizer and updater. |
810 | AccessList *getWritableBlockAccesses(const BasicBlock *BB) const { |
811 | auto It = PerBlockAccesses.find(BB); |
812 | return It == PerBlockAccesses.end() ? nullptr : It->second.get(); |
813 | } |
814 | |
815 | // This is used by the use optimizer and updater. |
816 | DefsList *getWritableBlockDefs(const BasicBlock *BB) const { |
817 | auto It = PerBlockDefs.find(BB); |
818 | return It == PerBlockDefs.end() ? nullptr : It->second.get(); |
819 | } |
820 | |
821 | // These are used by the updater to perform various internal MemorySSA |
822 | // machinations. They do not always leave the IR in a correct state, and |
823 | // rely on the updater to fix up what they break, so they are not public. |
824 | |
825 | void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where); |
826 | void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point); |
827 | |
828 | // Rename the dominator tree branch rooted at BB. |
829 | void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal, |
830 | SmallPtrSetImpl<BasicBlock *> &Visited) { |
831 | renamePass(DT->getNode(BB), IncomingVal, Visited, true, true); |
832 | } |
833 | |
834 | void removeFromLookups(MemoryAccess *); |
835 | void removeFromLists(MemoryAccess *, bool ShouldDelete = true); |
836 | void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *, |
837 | InsertionPlace); |
838 | void insertIntoListsBefore(MemoryAccess *, const BasicBlock *, |
839 | AccessList::iterator); |
840 | MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *, |
841 | const MemoryUseOrDef *Template = nullptr, |
842 | bool CreationMustSucceed = true); |
843 | |
844 | private: |
845 | class ClobberWalkerBase; |
846 | class CachingWalker; |
847 | class SkipSelfWalker; |
848 | class OptimizeUses; |
849 | |
850 | CachingWalker *getWalkerImpl(); |
851 | void buildMemorySSA(BatchAAResults &BAA); |
852 | |
853 | void prepareForMoveTo(MemoryAccess *, BasicBlock *); |
854 | void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const; |
855 | |
856 | using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>; |
857 | using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>; |
858 | |
859 | void markUnreachableAsLiveOnEntry(BasicBlock *BB); |
860 | MemoryPhi *createMemoryPhi(BasicBlock *BB); |
861 | template <typename AliasAnalysisType> |
862 | MemoryUseOrDef *createNewAccess(Instruction *, AliasAnalysisType *, |
863 | const MemoryUseOrDef *Template = nullptr); |
864 | void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &); |
865 | MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool); |
866 | void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool); |
867 | void renamePass(DomTreeNode *, MemoryAccess *IncomingVal, |
868 | SmallPtrSetImpl<BasicBlock *> &Visited, |
869 | bool SkipVisited = false, bool RenameAllUses = false); |
870 | AccessList *getOrCreateAccessList(const BasicBlock *); |
871 | DefsList *getOrCreateDefsList(const BasicBlock *); |
872 | void renumberBlock(const BasicBlock *) const; |
873 | AliasAnalysis *AA = nullptr; |
874 | DominatorTree *DT; |
875 | Function &F; |
876 | |
877 | // Memory SSA mappings |
878 | DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess; |
879 | |
880 | // These two mappings contain the main block to access/def mappings for |
881 | // MemorySSA. The list contained in PerBlockAccesses really owns all the |
882 | // MemoryAccesses. |
883 | // Both maps maintain the invariant that if a block is found in them, the |
884 | // corresponding list is not empty, and if a block is not found in them, the |
885 | // corresponding list is empty. |
886 | AccessMap PerBlockAccesses; |
887 | DefsMap PerBlockDefs; |
888 | std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef; |
889 | |
890 | // Domination mappings |
891 | // Note that the numbering is local to a block, even though the map is |
892 | // global. |
893 | mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid; |
894 | mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering; |
895 | |
896 | // Memory SSA building info |
897 | std::unique_ptr<ClobberWalkerBase> WalkerBase; |
898 | std::unique_ptr<CachingWalker> Walker; |
899 | std::unique_ptr<SkipSelfWalker> SkipWalker; |
900 | unsigned NextID = 0; |
901 | bool IsOptimized = false; |
902 | }; |
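| |
| // Editor's note: an editorial sketch, not part of the original header, |
| // tying the lookup API together: map an instruction to its access, look |
| // at its defining access, then scan the block's ordered access list. The |
| // helper name is hypothetical. |
| inline void printMemorySSAInfoFor(MemorySSA &MSSA, const Instruction &I, |
|                                   raw_ostream &OS) { |
|   MemoryUseOrDef *MUD = MSSA.getMemoryAccess(&I); |
|   if (!MUD) // I neither reads nor writes memory. |
|     return; |
|   MemoryAccess *Def = MUD->getDefiningAccess(); |
|   OS << *MUD << "\n  defined by " << *Def |
|      << (MSSA.isLiveOnEntryDef(Def) ? " (live on entry)" : "") << "\n"; |
|   // Every MemoryAccess in MUD's block, in program order. |
|   for (const MemoryAccess &MA : *MSSA.getBlockAccesses(MUD->getBlock())) |
|     OS << "  " << MA << "\n"; |
| } |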
903 | |
904 | /// Enables verification of MemorySSA. |
905 | /// |
906 | /// The checks this flag enables are expensive and disabled by default |
907 | /// unless `EXPENSIVE_CHECKS` is defined. The flag `-verify-memoryssa` can be |
908 | /// used to selectively enable the verification without re-compilation. |
909 | extern bool VerifyMemorySSA; |
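| |
| // Editor's note: an editorial sketch, not part of the original header. |
| // Code that updates MemorySSA incrementally typically gates self-checks |
| // on this flag, so -verify-memoryssa debugging stays cheap otherwise. |
| // The helper name is hypothetical. |
| inline void verifyIfRequested(const MemorySSA &MSSA) { |
|   if (VerifyMemorySSA) |
|     MSSA.verifyMemorySSA(); // VerificationLevel::Fast by default |
| } |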
910 | |
911 | // Internal MemorySSA utils, for use by MemorySSA classes and walkers |
912 | class MemorySSAUtil { |
913 | protected: |
914 | friend class GVNHoist; |
915 | friend class MemorySSAWalker; |
916 | |
917 | // This function should not be used by new passes. |
918 | static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU, |
919 | AliasAnalysis &AA); |
920 | }; |
921 | |
922 | // This pass does eager building and then printing of MemorySSA. It is used by |
923 | // the tests to build, dump, and verify Memory SSA. |
924 | class MemorySSAPrinterLegacyPass : public FunctionPass { |
925 | public: |
926 | MemorySSAPrinterLegacyPass(); |
927 | |
928 | bool runOnFunction(Function &) override; |
929 | void getAnalysisUsage(AnalysisUsage &AU) const override; |
930 | |
931 | static char ID; |
932 | }; |
933 | |
934 | /// An analysis that produces \c MemorySSA for a function. |
935 | /// |
936 | class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> { |
937 | friend AnalysisInfoMixin<MemorySSAAnalysis>; |
938 | |
939 | static AnalysisKey Key; |
940 | |
941 | public: |
942 | // Wrap MemorySSA result to ensure address stability of internal MemorySSA |
943 | // pointers after construction. Use a wrapper class instead of plain |
944 | // unique_ptr<MemorySSA> to avoid build breakage on MSVC. |
945 | struct Result { |
946 | Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {} |
947 | |
948 | MemorySSA &getMSSA() { return *MSSA.get(); } |
949 | |
950 | std::unique_ptr<MemorySSA> MSSA; |
951 | |
952 | bool invalidate(Function &F, const PreservedAnalyses &PA, |
953 | FunctionAnalysisManager::Invalidator &Inv); |
954 | }; |
955 | |
956 | Result run(Function &F, FunctionAnalysisManager &AM); |
957 | }; |
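| |
| // Editor's note: an editorial sketch, not part of the original header, |
| // showing how a new-pass-manager pass would obtain the analysis result |
| // above and declare that it keeps it valid. ExamplePass is hypothetical. |
| struct ExamplePass : PassInfoMixin<ExamplePass> { |
|   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) { |
|     MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA(); |
|     MSSA.ensureOptimizedUses(); // we intend to query many clobbers |
|     // ... transform, keeping MSSA up to date (e.g. via MemorySSAUpdater) ... |
|     auto PA = PreservedAnalyses::none(); |
|     // MemorySSA stores a pointer to the dominator tree, so the two |
|     // analyses must be preserved together. |
|     PA.preserve<MemorySSAAnalysis>(); |
|     PA.preserve<DominatorTreeAnalysis>(); |
|     return PA; |
|   } |
| }; |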
958 | |
959 | /// Printer pass for \c MemorySSA. |
960 | class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> { |
961 | raw_ostream &OS; |
962 | |
963 | public: |
964 | explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {} |
965 | |
966 | PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); |
967 | }; |
968 | |
969 | /// Printer pass for \c MemorySSA via the walker. |
970 | class MemorySSAWalkerPrinterPass |
971 | : public PassInfoMixin<MemorySSAWalkerPrinterPass> { |
972 | raw_ostream &OS; |
973 | |
974 | public: |
975 | explicit MemorySSAWalkerPrinterPass(raw_ostream &OS) : OS(OS) {} |
976 | |
977 | PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); |
978 | }; |
979 | |
980 | /// Verifier pass for \c MemorySSA. |
981 | struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> { |
982 | PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); |
983 | }; |
984 | |
985 | /// Legacy analysis pass which computes \c MemorySSA. |
986 | class MemorySSAWrapperPass : public FunctionPass { |
987 | public: |
988 | MemorySSAWrapperPass(); |
989 | |
990 | static char ID; |
991 | |
992 | bool runOnFunction(Function &) override; |
993 | void releaseMemory() override; |
994 | MemorySSA &getMSSA() { return *MSSA; } |
995 | const MemorySSA &getMSSA() const { return *MSSA; } |
996 | |
997 | void getAnalysisUsage(AnalysisUsage &AU) const override; |
998 | |
999 | void verifyAnalysis() const override; |
1000 | void print(raw_ostream &OS, const Module *M = nullptr) const override; |
1001 | |
1002 | private: |
1003 | std::unique_ptr<MemorySSA> MSSA; |
1004 | }; |
1005 | |
1006 | /// This is the generic walker interface for walkers of MemorySSA. |
1007 | /// Walkers are used to further disambiguate the def-use chains |
1008 | /// MemorySSA gives you, or to otherwise produce better info than MemorySSA |
1009 | /// alone provides. |
1010 | /// While the def-use chains provide basic information, and are |
1011 | /// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a |
1012 | /// MemoryUse as AliasAnalysis considers it, a user may want better or other |
1013 | /// information. For example, they may want to use SCEV info to further |
1014 | /// disambiguate memory accesses, or they may want the nearest dominating |
1015 | /// may-aliasing MemoryDef for a call or a store. This API enables a |
1016 | /// standardized interface to getting and using that info. |
1017 | class MemorySSAWalker { |
1018 | public: |
1019 | MemorySSAWalker(MemorySSA *); |
1020 | virtual ~MemorySSAWalker() = default; |
1021 | |
1022 | using MemoryAccessSet = SmallVector<MemoryAccess *, 8>; |
1023 | |
1024 | /// Given a memory Mod/Ref/ModRef'ing instruction, calling this |
1025 | /// will give you the nearest dominating MemoryAccess that Mod's the location |
1026 | /// the instruction accesses (by skipping any def which AA can prove does not |
1027 | /// alias the location(s) accessed by the instruction given). |
1028 | /// |
1029 | /// Note that this will return a single access, and it must dominate the |
1030 | /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction, |
1031 | /// this will return the MemoryPhi, not the operand. This means that |
1032 | /// given: |
1033 | /// if (a) { |
1034 | /// 1 = MemoryDef(liveOnEntry) |
1035 | /// store %a |
1036 | /// } else { |
1037 | /// 2 = MemoryDef(liveOnEntry) |
1038 | /// store %b |
1039 | /// } |
1040 | /// 3 = MemoryPhi(2, 1) |
1041 | /// MemoryUse(3) |
1042 | /// load %a |
1043 | /// |
1044 | /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef |
1045 | /// in the if (a) branch. |
1046 | MemoryAccess *getClobberingMemoryAccess(const Instruction *I, |
1047 | BatchAAResults &AA) { |
1048 | MemoryAccess *MA = MSSA->getMemoryAccess(I); |
1049 | assert(MA && "Handed an instruction that MemorySSA doesn't recognize?"); |
1050 | return getClobberingMemoryAccess(MA, AA); |
1051 | } |
1052 | |
1053 | /// Does the same thing as getClobberingMemoryAccess(const Instruction *I), |
1054 | /// but takes a MemoryAccess instead of an Instruction. |
1055 | virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, |
1056 | BatchAAResults &AA) = 0; |
1057 | |
1058 | /// Given a potentially clobbering memory access and a new location, |
1059 | /// calling this will give you the nearest dominating clobbering MemoryAccess |
1060 | /// (by skipping non-aliasing def links). |
1061 | /// |
1062 | /// This version of the function is mainly used to disambiguate phi translated |
1063 | /// pointers, where the value of a pointer may have changed from the initial |
1064 | /// memory access. Note that this expects to be handed either a MemoryUse, |
1065 | /// or an already potentially clobbering access. Unlike the above API, if |
1066 | /// given a MemoryDef that clobbers the pointer as the starting access, it |
1067 | /// will return that MemoryDef, whereas the above would return the clobber |
1068 | /// starting from the use side of the memory def. |
1069 | virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, |
1070 | const MemoryLocation &, |
1071 | BatchAAResults &AA) = 0; |
1072 | |
1073 | MemoryAccess *getClobberingMemoryAccess(const Instruction *I) { |
1074 | BatchAAResults BAA(MSSA->getAA()); |
1075 | return getClobberingMemoryAccess(I, BAA); |
1076 | } |
1077 | |
1078 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) { |
1079 | BatchAAResults BAA(MSSA->getAA()); |
1080 | return getClobberingMemoryAccess(MA, BAA); |
1081 | } |
1082 | |
1083 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, |
1084 | const MemoryLocation &Loc) { |
1085 | BatchAAResults BAA(MSSA->getAA()); |
1086 | return getClobberingMemoryAccess(MA, Loc, BAA); |
1087 | } |
1088 | |
1089 | /// Given a memory access, invalidate anything this walker knows about |
1090 | /// that access. |
1091 | /// This API is used by walkers that store information to perform basic cache |
1092 | /// invalidation. This will be called by MemorySSA at appropriate times for |
1093 | /// the walker it uses or returns. |
1094 | virtual void invalidateInfo(MemoryAccess *) {} |
1095 | |
1096 | protected: |
1097 | friend class MemorySSA; // For updating MSSA pointer in MemorySSA move |
1098 | // constructor. |
1099 | MemorySSA *MSSA; |
1100 | }; |
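| |
| // Editor's note: an editorial sketch, not part of the original header, |
| // showing the typical walker query described above. The helper name is |
| // hypothetical; I must be an instruction MemorySSA has an access for. |
| inline bool isClobberedWithinFunction(MemorySSA &MSSA, const Instruction *I) { |
|   // Walks upward, skipping defs AA proves don't alias I's location(s); |
|   // this may conservatively stop at a MemoryPhi. |
|   MemoryAccess *Clobber = MSSA.getWalker()->getClobberingMemoryAccess(I); |
|   return !MSSA.isLiveOnEntryDef(Clobber); |
| } |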
1101 | |
1102 | /// A MemorySSAWalker that does no alias queries, or anything else. It |
1103 | /// simply returns the links as they were constructed by the builder. |
1104 | class DoNothingMemorySSAWalker final : public MemorySSAWalker { |
1105 | public: |
1106 | // Keep the overrides below from hiding the Instruction overload of |
1107 | // getClobberingMemoryAccess. |
1108 | using MemorySSAWalker::getClobberingMemoryAccess; |
1109 | |
1110 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, |
1111 | BatchAAResults &) override; |
1112 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, |
1113 | const MemoryLocation &, |
1114 | BatchAAResults &) override; |
1115 | }; |
1116 | |
1117 | using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>; |
1118 | using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>; |
1119 | |
1120 | /// Iterator base class used to implement const and non-const iterators |
1121 | /// over the defining accesses of a MemoryAccess. |
1122 | template <class T> |
1123 | class memoryaccess_def_iterator_base |
1124 | : public iterator_facade_base<memoryaccess_def_iterator_base<T>, |
1125 | std::forward_iterator_tag, T, ptrdiff_t, T *, |
1126 | T *> { |
1127 | using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base; |
1128 | |
1129 | public: |
1130 | memoryaccess_def_iterator_base(T *Start) : Access(Start) {} |
1131 | memoryaccess_def_iterator_base() = default; |
1132 | |
1133 | bool operator==(const memoryaccess_def_iterator_base &Other) const { |
1134 | return Access == Other.Access && (!Access || ArgNo == Other.ArgNo); |
1135 | } |
1136 | |
1137 | // This is a bit ugly, but for MemoryPhis, unlike PHINodes, you can't get the |
1138 | // block from the operand in constant time (in a PHINode, the uselist has |
1139 | // both, so it's just subtraction). We provide it as part of the |
1140 | // iterator to avoid callers having to do a linear walk to get the block. |
1141 | // If the operation becomes constant time on MemoryPhis, this bit of |
1142 | // abstraction breaking should be removed. |
1143 | BasicBlock *getPhiArgBlock() const { |
1144 | MemoryPhi *MP = dyn_cast<MemoryPhi>(Access); |
1145 | assert(MP && "Tried to get phi arg block when not iterating over a PHI"); |
1146 | return MP->getIncomingBlock(ArgNo); |
1147 | } |
1148 | |
1149 | typename std::iterator_traits<BaseT>::pointer operator*() const { |
1150 | assert(Access && "Tried to access past the end of our iterator"); |
1151 | // Go to the first argument for phis, and the defining access for everything |
1152 | // else. |
1153 | if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) |
1154 | return MP->getIncomingValue(ArgNo); |
1155 | return cast<MemoryUseOrDef>(Access)->getDefiningAccess(); |
1156 | } |
1157 | |
1158 | using BaseT::operator++; |
1159 | memoryaccess_def_iterator_base &operator++() { |
1160 | assert(Access && "Hit end of iterator"); |
1161 | if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) { |
1162 | if (++ArgNo >= MP->getNumIncomingValues()) { |
1163 | ArgNo = 0; |
1164 | Access = nullptr; |
1165 | } |
1166 | } else { |
1167 | Access = nullptr; |
1168 | } |
1169 | return *this; |
1170 | } |
1171 | |
1172 | private: |
1173 | T *Access = nullptr; |
1174 | unsigned ArgNo = 0; |
1175 | }; |
1176 | |
1177 | inline memoryaccess_def_iterator MemoryAccess::defs_begin() { |
1178 | return memoryaccess_def_iterator(this); |
1179 | } |
1180 | |
1181 | inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const { |
1182 | return const_memoryaccess_def_iterator(this); |
1183 | } |
1184 | |
1185 | inline memoryaccess_def_iterator MemoryAccess::defs_end() { |
1186 | return memoryaccess_def_iterator(); |
1187 | } |
1188 | |
1189 | inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const { |
1190 | return const_memoryaccess_def_iterator(); |
1191 | } |
1192 | |
1193 | /// GraphTraits for a MemoryAccess, which walks defs in the normal case, |
1194 | /// and uses in the inverse case. |
1195 | template <> struct GraphTraits<MemoryAccess *> { |
1196 | using NodeRef = MemoryAccess *; |
1197 | using ChildIteratorType = memoryaccess_def_iterator; |
1198 | |
1199 | static NodeRef getEntryNode(NodeRef N) { return N; } |
1200 | static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); } |
1201 | static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); } |
1202 | }; |
1203 | |
1204 | template <> struct GraphTraits<Inverse<MemoryAccess *>> { |
1205 | using NodeRef = MemoryAccess *; |
1206 | using ChildIteratorType = MemoryAccess::iterator; |
1207 | |
1208 | static NodeRef getEntryNode(NodeRef N) { return N; } |
1209 | static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); } |
1210 | static ChildIteratorType child_end(NodeRef N) { return N->user_end(); } |
1211 | }; |
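| |
| // Editor's note: an editorial sketch, not part of the original header. |
| // The GraphTraits specializations above let the generic graph utilities |
| // walk def chains; this assumes llvm/ADT/DepthFirstIterator.h has been |
| // included. The helper name is hypothetical. |
| inline unsigned countReachableDefs(MemoryAccess *Root) { |
|   unsigned N = 0; |
|   // Visits Root plus everything reachable through defining accesses and |
|   // phi operands, each node at most once. |
|   for (MemoryAccess *MA : depth_first(Root)) { |
|     (void)MA; |
|     ++N; |
|   } |
|   return N; |
| } |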
1212 | |
1213 | /// Provide an iterator that walks defs, giving both the memory access, |
1214 | /// and the current pointer location, updating the pointer location as it |
1215 | /// changes due to phi node translation. |
1216 | /// |
1217 | /// This iterator, while somewhat specialized, is what most clients actually |
1218 | /// want when walking upwards through MemorySSA def chains. It takes a pair of |
1219 | /// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the |
1220 | /// memory location through phi nodes for the user. |
1221 | class upward_defs_iterator |
1222 | : public iterator_facade_base<upward_defs_iterator, |
1223 | std::forward_iterator_tag, |
1224 | const MemoryAccessPair> { |
1225 | using BaseT = upward_defs_iterator::iterator_facade_base; |
1226 | |
1227 | public: |
1228 | upward_defs_iterator(const MemoryAccessPair &Info, DominatorTree *DT) |
1229 | : DefIterator(Info.first), Location(Info.second), |
1230 | OriginalAccess(Info.first), DT(DT) { |
1231 | CurrentPair.first = nullptr; |
1232 | |
1233 | WalkingPhi = Info.first && isa<MemoryPhi>(Info.first); |
1234 | fillInCurrentPair(); |
1235 | } |
1236 | |
1237 | upward_defs_iterator() { CurrentPair.first = nullptr; } |
1238 | |
1239 | bool operator==(const upward_defs_iterator &Other) const { |
1240 | return DefIterator == Other.DefIterator; |
1241 | } |
1242 | |
1243 | typename std::iterator_traits<BaseT>::reference operator*() const { |
1244 | assert(DefIterator != OriginalAccess->defs_end() && |
1245 | "Tried to access past the end of our iterator"); |
1246 | return CurrentPair; |
1247 | } |
1248 | |
1249 | using BaseT::operator++; |
1250 | upward_defs_iterator &operator++() { |
1251 | assert(DefIterator != OriginalAccess->defs_end() && |
1252 | "Tried to access past the end of the iterator"); |
1253 | ++DefIterator; |
1254 | if (DefIterator != OriginalAccess->defs_end()) |
1255 | fillInCurrentPair(); |
1256 | return *this; |
1257 | } |
1258 | |
1259 | BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); } |
1260 | |
1261 | private: |
1262 |   /// Returns true if \p Ptr is guaranteed to be loop-invariant for any possible |
1263 | /// loop. In particular, this guarantees that it only references a single |
1264 | /// MemoryLocation during execution of the containing function. |
1265 | bool IsGuaranteedLoopInvariant(const Value *Ptr) const; |
1266 | |
1267 | void fillInCurrentPair() { |
1268 | CurrentPair.first = *DefIterator; |
1269 | CurrentPair.second = Location; |
1270 | if (WalkingPhi && Location.Ptr) { |
1271 | PHITransAddr Translator( |
1272 | const_cast<Value *>(Location.Ptr), |
1273 | OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr); |
1274 | |
1275 | if (Value *Addr = |
1276 | Translator.translateValue(OriginalAccess->getBlock(), |
1277 | DefIterator.getPhiArgBlock(), DT, true)) |
1278 | if (Addr != CurrentPair.second.Ptr) |
1279 | CurrentPair.second = CurrentPair.second.getWithNewPtr(Addr); |
1280 | |
1281 |       // Mark the size as unknown if the location is not guaranteed to be |
1282 |       // loop-invariant for any possible loop in the function. Setting the size |
1283 |       // to unknown guarantees that any memory accesses that access locations |
1284 |       // after the pointer are considered as clobbers, which is important to |
1285 |       // catch loop-carried dependences. |
1286 | if (!IsGuaranteedLoopInvariant(CurrentPair.second.Ptr)) |
1287 | CurrentPair.second = CurrentPair.second.getWithNewSize( |
1288 | LocationSize::beforeOrAfterPointer()); |
1289 | } |
1290 | } |
1291 | |
1292 | MemoryAccessPair CurrentPair; |
1293 | memoryaccess_def_iterator DefIterator; |
1294 | MemoryLocation Location; |
1295 | MemoryAccess *OriginalAccess = nullptr; |
1296 | DominatorTree *DT = nullptr; |
1297 | bool WalkingPhi = false; |
1298 | }; |
1299 | |
1300 | inline upward_defs_iterator |
1301 | upward_defs_begin(const MemoryAccessPair &Pair, DominatorTree &DT) { |
1302 | return upward_defs_iterator(Pair, &DT); |
1303 | } |
1304 | |
1305 | inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); } |
1306 | |
1307 | inline iterator_range<upward_defs_iterator> |
1308 | upward_defs(const MemoryAccessPair &Pair, DominatorTree &DT) { |
1309 | return make_range(upward_defs_begin(Pair, DT), upward_defs_end()); |
1310 | } |
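
/// Editorial sketch (not part of the original header): a typical use of
/// upward_defs. Assume `Phi` is a MemoryPhi*, `Load` is a LoadInst* whose
/// location we are tracking, and `DT` is a valid DominatorTree; all three
/// names are illustrative. Each pair yields one incoming def of the phi
/// together with the load's location, phi-translated for that edge (and
/// widened to beforeOrAfterPointer() when loop invariance cannot be shown).
/// \code
///   MemoryLocation Loc = MemoryLocation::get(Load);
///   for (const MemoryAccessPair &P : upward_defs({Phi, Loc}, DT)) {
///     MemoryAccess *IncomingDef = P.first;
///     const MemoryLocation &TranslatedLoc = P.second;
///     // ... query alias analysis against TranslatedLoc ...
///   }
/// \endcode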
1311 | |
1312 | /// Walks the defining accesses of MemoryDefs. Stops after we hit something that |
1313 | /// has no defining access (e.g. a MemoryPhi or liveOnEntry). Note that, when |
1314 | /// comparing against a null def_chain_iterator, this will compare equal only |
1315 | /// after walking said Phi/liveOnEntry. |
1316 | /// |
1317 | /// The UseOptimizedChain flag specifies whether to walk the clobbering |
1318 | /// access chain, or all the accesses. |
1319 | /// |
1320 | /// Normally, MemoryDefs are all just def/use linked together, so a def_chain |
1321 | /// on a MemoryDef will walk all MemoryDefs above it in the program until it |
1322 | /// hits a phi node. The optimized chain instead walks the clobbering access of |
1323 | /// a store. So, given a store, if you just want to find the next access that |
1324 | /// would clobber the same memory, you want the optimized chain. |
1325 | template <class T, bool UseOptimizedChain = false> |
1326 | struct def_chain_iterator |
1327 | : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>, |
1328 | std::forward_iterator_tag, MemoryAccess *> { |
1329 | def_chain_iterator() : MA(nullptr) {} |
1330 | def_chain_iterator(T MA) : MA(MA) {} |
1331 | |
1332 | T operator*() const { return MA; } |
1333 | |
1334 | def_chain_iterator &operator++() { |
1335 | // N.B. liveOnEntry has a null defining access. |
1336 | if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) { |
1337 | if (UseOptimizedChain && MUD->isOptimized()) |
1338 | MA = MUD->getOptimized(); |
1339 | else |
1340 | MA = MUD->getDefiningAccess(); |
1341 | } else { |
1342 | MA = nullptr; |
1343 | } |
1344 | |
1345 | return *this; |
1346 | } |
1347 | |
1348 | bool operator==(const def_chain_iterator &O) const { return MA == O.MA; } |
1349 | |
1350 | private: |
1351 | T MA; |
1352 | }; |
1353 | |
1354 | template <class T> |
1355 | inline iterator_range<def_chain_iterator<T>> |
1356 | def_chain(T MA, MemoryAccess *UpTo = nullptr) { |
1357 | #ifdef EXPENSIVE_CHECKS |
1358 |   assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) && |
1359 |          "UpTo isn't in the def chain!"); |
1360 | #endif |
1361 | return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo)); |
1362 | } |
1363 | |
1364 | template <class T> |
1365 | inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) { |
1366 | return make_range(def_chain_iterator<T, true>(MA), |
1367 | def_chain_iterator<T, true>(nullptr)); |
1368 | } |
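
/// Editorial sketch (not part of the original header): counting the accesses
/// visited by def_chain. `MSSA` and `SI` (a store instruction with a memory
/// access) are assumed to exist in the caller. Swapping in
/// optimized_def_chain(Start) would follow the cached clobbering accesses
/// instead, typically taking far fewer hops.
/// \code
///   MemoryAccess *Start = MSSA.getMemoryAccess(SI);
///   unsigned Hops = 0;
///   for (MemoryAccess *MA : def_chain(Start))
///     ++Hops; // Counts Start, the defs above it, and the terminating
///             // MemoryPhi or liveOnEntry access.
/// \endcode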
1369 | |
1370 | } // end namespace llvm |
1371 | |
1372 | #endif // LLVM_ANALYSIS_MEMORYSSA_H |