// Static-analyzer (scan-build) report context for lib/Analysis/MemorySSA.cpp:
// warning at line 1725, column 5 — "Called C++ object pointer is null".
// (Viewer chrome: press '?' for keyboard shortcuts.)
//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------------===//
13 | ||||||
14 | #include "llvm/Analysis/MemorySSA.h" | |||||
15 | #include "llvm/ADT/DenseMap.h" | |||||
16 | #include "llvm/ADT/DenseMapInfo.h" | |||||
17 | #include "llvm/ADT/DenseSet.h" | |||||
18 | #include "llvm/ADT/DepthFirstIterator.h" | |||||
19 | #include "llvm/ADT/Hashing.h" | |||||
20 | #include "llvm/ADT/None.h" | |||||
21 | #include "llvm/ADT/Optional.h" | |||||
22 | #include "llvm/ADT/STLExtras.h" | |||||
23 | #include "llvm/ADT/SmallPtrSet.h" | |||||
24 | #include "llvm/ADT/SmallVector.h" | |||||
25 | #include "llvm/ADT/iterator.h" | |||||
26 | #include "llvm/ADT/iterator_range.h" | |||||
27 | #include "llvm/Analysis/AliasAnalysis.h" | |||||
28 | #include "llvm/Analysis/IteratedDominanceFrontier.h" | |||||
29 | #include "llvm/Analysis/MemoryLocation.h" | |||||
30 | #include "llvm/Config/llvm-config.h" | |||||
31 | #include "llvm/IR/AssemblyAnnotationWriter.h" | |||||
32 | #include "llvm/IR/BasicBlock.h" | |||||
33 | #include "llvm/IR/CallSite.h" | |||||
34 | #include "llvm/IR/Dominators.h" | |||||
35 | #include "llvm/IR/Function.h" | |||||
36 | #include "llvm/IR/Instruction.h" | |||||
37 | #include "llvm/IR/Instructions.h" | |||||
38 | #include "llvm/IR/IntrinsicInst.h" | |||||
39 | #include "llvm/IR/Intrinsics.h" | |||||
40 | #include "llvm/IR/LLVMContext.h" | |||||
41 | #include "llvm/IR/PassManager.h" | |||||
42 | #include "llvm/IR/Use.h" | |||||
43 | #include "llvm/Pass.h" | |||||
44 | #include "llvm/Support/AtomicOrdering.h" | |||||
45 | #include "llvm/Support/Casting.h" | |||||
46 | #include "llvm/Support/CommandLine.h" | |||||
47 | #include "llvm/Support/Compiler.h" | |||||
48 | #include "llvm/Support/Debug.h" | |||||
49 | #include "llvm/Support/ErrorHandling.h" | |||||
50 | #include "llvm/Support/FormattedStream.h" | |||||
51 | #include "llvm/Support/raw_ostream.h" | |||||
52 | #include <algorithm> | |||||
53 | #include <cassert> | |||||
54 | #include <iterator> | |||||
55 | #include <memory> | |||||
56 | #include <utility> | |||||
57 | ||||||
58 | using namespace llvm; | |||||
59 | ||||||
60 | #define DEBUG_TYPE"memoryssa" "memoryssa" | |||||
61 | ||||||
62 | INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,static void *initializeMemorySSAWrapperPassPassOnce(PassRegistry &Registry) { | |||||
63 | true)static void *initializeMemorySSAWrapperPassPassOnce(PassRegistry &Registry) { | |||||
64 | INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)initializeDominatorTreeWrapperPassPass(Registry); | |||||
65 | INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)initializeAAResultsWrapperPassPass(Registry); | |||||
66 | INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,PassInfo *PI = new PassInfo( "Memory SSA", "memoryssa", & MemorySSAWrapperPass::ID, PassInfo::NormalCtor_t(callDefaultCtor <MemorySSAWrapperPass>), false, true); Registry.registerPass (*PI, true); return PI; } static llvm::once_flag InitializeMemorySSAWrapperPassPassFlag ; void llvm::initializeMemorySSAWrapperPassPass(PassRegistry & Registry) { llvm::call_once(InitializeMemorySSAWrapperPassPassFlag , initializeMemorySSAWrapperPassPassOnce, std::ref(Registry)) ; } | |||||
67 | true)PassInfo *PI = new PassInfo( "Memory SSA", "memoryssa", & MemorySSAWrapperPass::ID, PassInfo::NormalCtor_t(callDefaultCtor <MemorySSAWrapperPass>), false, true); Registry.registerPass (*PI, true); return PI; } static llvm::once_flag InitializeMemorySSAWrapperPassPassFlag ; void llvm::initializeMemorySSAWrapperPassPass(PassRegistry & Registry) { llvm::call_once(InitializeMemorySSAWrapperPassPassFlag , initializeMemorySSAWrapperPassPassOnce, std::ref(Registry)) ; } | |||||
68 | ||||||
69 | INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",static void *initializeMemorySSAPrinterLegacyPassPassOnce(PassRegistry &Registry) { | |||||
70 | "Memory SSA Printer", false, false)static void *initializeMemorySSAPrinterLegacyPassPassOnce(PassRegistry &Registry) { | |||||
71 | INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)initializeMemorySSAWrapperPassPass(Registry); | |||||
72 | INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",PassInfo *PI = new PassInfo( "Memory SSA Printer", "print-memoryssa" , &MemorySSAPrinterLegacyPass::ID, PassInfo::NormalCtor_t (callDefaultCtor<MemorySSAPrinterLegacyPass>), false, false ); Registry.registerPass(*PI, true); return PI; } static llvm ::once_flag InitializeMemorySSAPrinterLegacyPassPassFlag; void llvm::initializeMemorySSAPrinterLegacyPassPass(PassRegistry & Registry) { llvm::call_once(InitializeMemorySSAPrinterLegacyPassPassFlag , initializeMemorySSAPrinterLegacyPassPassOnce, std::ref(Registry )); } | |||||
73 | "Memory SSA Printer", false, false)PassInfo *PI = new PassInfo( "Memory SSA Printer", "print-memoryssa" , &MemorySSAPrinterLegacyPass::ID, PassInfo::NormalCtor_t (callDefaultCtor<MemorySSAPrinterLegacyPass>), false, false ); Registry.registerPass(*PI, true); return PI; } static llvm ::once_flag InitializeMemorySSAPrinterLegacyPassPassFlag; void llvm::initializeMemorySSAPrinterLegacyPassPass(PassRegistry & Registry) { llvm::call_once(InitializeMemorySSAPrinterLegacyPassPassFlag , initializeMemorySSAPrinterLegacyPassPassOnce, std::ref(Registry )); } | |||||
74 | ||||||
75 | static cl::opt<unsigned> MaxCheckLimit( | |||||
76 | "memssa-check-limit", cl::Hidden, cl::init(100), | |||||
77 | cl::desc("The maximum number of stores/phis MemorySSA" | |||||
78 | "will consider trying to walk past (default = 100)")); | |||||
79 | ||||||
80 | static cl::opt<bool> | |||||
81 | VerifyMemorySSA("verify-memoryssa", cl::init(false), cl::Hidden, | |||||
82 | cl::desc("Verify MemorySSA in legacy printer pass.")); | |||||
83 | ||||||
84 | namespace llvm { | |||||
85 | ||||||
86 | /// An assembly annotator class to print Memory SSA information in | |||||
87 | /// comments. | |||||
88 | class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter { | |||||
89 | friend class MemorySSA; | |||||
90 | ||||||
91 | const MemorySSA *MSSA; | |||||
92 | ||||||
93 | public: | |||||
94 | MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {} | |||||
95 | ||||||
96 | void emitBasicBlockStartAnnot(const BasicBlock *BB, | |||||
97 | formatted_raw_ostream &OS) override { | |||||
98 | if (MemoryAccess *MA = MSSA->getMemoryAccess(BB)) | |||||
99 | OS << "; " << *MA << "\n"; | |||||
100 | } | |||||
101 | ||||||
102 | void emitInstructionAnnot(const Instruction *I, | |||||
103 | formatted_raw_ostream &OS) override { | |||||
104 | if (MemoryAccess *MA = MSSA->getMemoryAccess(I)) | |||||
105 | OS << "; " << *MA << "\n"; | |||||
106 | } | |||||
107 | }; | |||||
108 | ||||||
109 | } // end namespace llvm | |||||
110 | ||||||
111 | namespace { | |||||
112 | ||||||
113 | /// Our current alias analysis API differentiates heavily between calls and | |||||
114 | /// non-calls, and functions called on one usually assert on the other. | |||||
115 | /// This class encapsulates the distinction to simplify other code that wants | |||||
116 | /// "Memory affecting instructions and related data" to use as a key. | |||||
117 | /// For example, this class is used as a densemap key in the use optimizer. | |||||
118 | class MemoryLocOrCall { | |||||
119 | public: | |||||
120 | bool IsCall = false; | |||||
121 | ||||||
122 | MemoryLocOrCall() = default; | |||||
123 | MemoryLocOrCall(MemoryUseOrDef *MUD) | |||||
124 | : MemoryLocOrCall(MUD->getMemoryInst()) {} | |||||
125 | MemoryLocOrCall(const MemoryUseOrDef *MUD) | |||||
126 | : MemoryLocOrCall(MUD->getMemoryInst()) {} | |||||
127 | ||||||
128 | MemoryLocOrCall(Instruction *Inst) { | |||||
129 | if (ImmutableCallSite(Inst)) { | |||||
130 | IsCall = true; | |||||
131 | CS = ImmutableCallSite(Inst); | |||||
132 | } else { | |||||
133 | IsCall = false; | |||||
134 | // There is no such thing as a memorylocation for a fence inst, and it is | |||||
135 | // unique in that regard. | |||||
136 | if (!isa<FenceInst>(Inst)) | |||||
137 | Loc = MemoryLocation::get(Inst); | |||||
138 | } | |||||
139 | } | |||||
140 | ||||||
141 | explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {} | |||||
142 | ||||||
143 | ImmutableCallSite getCS() const { | |||||
144 | assert(IsCall)(static_cast <bool> (IsCall) ? void (0) : __assert_fail ("IsCall", "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 144, __extension__ __PRETTY_FUNCTION__)); | |||||
145 | return CS; | |||||
146 | } | |||||
147 | ||||||
148 | MemoryLocation getLoc() const { | |||||
149 | assert(!IsCall)(static_cast <bool> (!IsCall) ? void (0) : __assert_fail ("!IsCall", "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 149, __extension__ __PRETTY_FUNCTION__)); | |||||
150 | return Loc; | |||||
151 | } | |||||
152 | ||||||
153 | bool operator==(const MemoryLocOrCall &Other) const { | |||||
154 | if (IsCall != Other.IsCall) | |||||
155 | return false; | |||||
156 | ||||||
157 | if (!IsCall) | |||||
158 | return Loc == Other.Loc; | |||||
159 | ||||||
160 | if (CS.getCalledValue() != Other.CS.getCalledValue()) | |||||
161 | return false; | |||||
162 | ||||||
163 | return CS.arg_size() == Other.CS.arg_size() && | |||||
164 | std::equal(CS.arg_begin(), CS.arg_end(), Other.CS.arg_begin()); | |||||
165 | } | |||||
166 | ||||||
167 | private: | |||||
168 | union { | |||||
169 | ImmutableCallSite CS; | |||||
170 | MemoryLocation Loc; | |||||
171 | }; | |||||
172 | }; | |||||
173 | ||||||
174 | } // end anonymous namespace | |||||
175 | ||||||
176 | namespace llvm { | |||||
177 | ||||||
178 | template <> struct DenseMapInfo<MemoryLocOrCall> { | |||||
179 | static inline MemoryLocOrCall getEmptyKey() { | |||||
180 | return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey()); | |||||
181 | } | |||||
182 | ||||||
183 | static inline MemoryLocOrCall getTombstoneKey() { | |||||
184 | return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey()); | |||||
185 | } | |||||
186 | ||||||
187 | static unsigned getHashValue(const MemoryLocOrCall &MLOC) { | |||||
188 | if (!MLOC.IsCall) | |||||
189 | return hash_combine( | |||||
190 | MLOC.IsCall, | |||||
191 | DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc())); | |||||
192 | ||||||
193 | hash_code hash = | |||||
194 | hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue( | |||||
195 | MLOC.getCS().getCalledValue())); | |||||
196 | ||||||
197 | for (const Value *Arg : MLOC.getCS().args()) | |||||
198 | hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg)); | |||||
199 | return hash; | |||||
200 | } | |||||
201 | ||||||
202 | static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) { | |||||
203 | return LHS == RHS; | |||||
204 | } | |||||
205 | }; | |||||
206 | ||||||
207 | } // end namespace llvm | |||||
208 | ||||||
209 | /// This does one-way checks to see if Use could theoretically be hoisted above | |||||
210 | /// MayClobber. This will not check the other way around. | |||||
211 | /// | |||||
212 | /// This assumes that, for the purposes of MemorySSA, Use comes directly after | |||||
213 | /// MayClobber, with no potentially clobbering operations in between them. | |||||
214 | /// (Where potentially clobbering ops are memory barriers, aliased stores, etc.) | |||||
215 | static bool areLoadsReorderable(const LoadInst *Use, | |||||
216 | const LoadInst *MayClobber) { | |||||
217 | bool VolatileUse = Use->isVolatile(); | |||||
218 | bool VolatileClobber = MayClobber->isVolatile(); | |||||
219 | // Volatile operations may never be reordered with other volatile operations. | |||||
220 | if (VolatileUse && VolatileClobber) | |||||
221 | return false; | |||||
222 | // Otherwise, volatile doesn't matter here. From the language reference: | |||||
223 | // 'optimizers may change the order of volatile operations relative to | |||||
224 | // non-volatile operations.'" | |||||
225 | ||||||
226 | // If a load is seq_cst, it cannot be moved above other loads. If its ordering | |||||
227 | // is weaker, it can be moved above other loads. We just need to be sure that | |||||
228 | // MayClobber isn't an acquire load, because loads can't be moved above | |||||
229 | // acquire loads. | |||||
230 | // | |||||
231 | // Note that this explicitly *does* allow the free reordering of monotonic (or | |||||
232 | // weaker) loads of the same address. | |||||
233 | bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent; | |||||
234 | bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(), | |||||
235 | AtomicOrdering::Acquire); | |||||
236 | return !(SeqCstUse || MayClobberIsAcquire); | |||||
237 | } | |||||
238 | ||||||
239 | namespace { | |||||
240 | ||||||
241 | struct ClobberAlias { | |||||
242 | bool IsClobber; | |||||
243 | Optional<AliasResult> AR; | |||||
244 | }; | |||||
245 | ||||||
246 | } // end anonymous namespace | |||||
247 | ||||||
248 | // Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being | |||||
249 | // ignored if IsClobber = false. | |||||
250 | static ClobberAlias instructionClobbersQuery(MemoryDef *MD, | |||||
251 | const MemoryLocation &UseLoc, | |||||
252 | const Instruction *UseInst, | |||||
253 | AliasAnalysis &AA) { | |||||
254 | Instruction *DefInst = MD->getMemoryInst(); | |||||
255 | assert(DefInst && "Defining instruction not actually an instruction")(static_cast <bool> (DefInst && "Defining instruction not actually an instruction" ) ? void (0) : __assert_fail ("DefInst && \"Defining instruction not actually an instruction\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 255, __extension__ __PRETTY_FUNCTION__)); | |||||
256 | ImmutableCallSite UseCS(UseInst); | |||||
257 | Optional<AliasResult> AR; | |||||
258 | ||||||
259 | if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) { | |||||
260 | // These intrinsics will show up as affecting memory, but they are just | |||||
261 | // markers. | |||||
262 | switch (II->getIntrinsicID()) { | |||||
263 | case Intrinsic::lifetime_start: | |||||
264 | if (UseCS) | |||||
265 | return {false, NoAlias}; | |||||
266 | AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc); | |||||
267 | return {AR == MustAlias, AR}; | |||||
268 | case Intrinsic::lifetime_end: | |||||
269 | case Intrinsic::invariant_start: | |||||
270 | case Intrinsic::invariant_end: | |||||
271 | case Intrinsic::assume: | |||||
272 | return {false, NoAlias}; | |||||
273 | default: | |||||
274 | break; | |||||
275 | } | |||||
276 | } | |||||
277 | ||||||
278 | if (UseCS) { | |||||
279 | ModRefInfo I = AA.getModRefInfo(DefInst, UseCS); | |||||
280 | AR = isMustSet(I) ? MustAlias : MayAlias; | |||||
281 | return {isModOrRefSet(I), AR}; | |||||
282 | } | |||||
283 | ||||||
284 | if (auto *DefLoad = dyn_cast<LoadInst>(DefInst)) | |||||
285 | if (auto *UseLoad = dyn_cast<LoadInst>(UseInst)) | |||||
286 | return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias}; | |||||
287 | ||||||
288 | ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc); | |||||
289 | AR = isMustSet(I) ? MustAlias : MayAlias; | |||||
290 | return {isModSet(I), AR}; | |||||
291 | } | |||||
292 | ||||||
293 | static ClobberAlias instructionClobbersQuery(MemoryDef *MD, | |||||
294 | const MemoryUseOrDef *MU, | |||||
295 | const MemoryLocOrCall &UseMLOC, | |||||
296 | AliasAnalysis &AA) { | |||||
297 | // FIXME: This is a temporary hack to allow a single instructionClobbersQuery | |||||
298 | // to exist while MemoryLocOrCall is pushed through places. | |||||
299 | if (UseMLOC.IsCall) | |||||
300 | return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(), | |||||
301 | AA); | |||||
302 | return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(), | |||||
303 | AA); | |||||
304 | } | |||||
305 | ||||||
306 | // Return true when MD may alias MU, return false otherwise. | |||||
307 | bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU, | |||||
308 | AliasAnalysis &AA) { | |||||
309 | return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber; | |||||
310 | } | |||||
311 | ||||||
312 | namespace { | |||||
313 | ||||||
314 | struct UpwardsMemoryQuery { | |||||
315 | // True if our original query started off as a call | |||||
316 | bool IsCall = false; | |||||
317 | // The pointer location we started the query with. This will be empty if | |||||
318 | // IsCall is true. | |||||
319 | MemoryLocation StartingLoc; | |||||
320 | // This is the instruction we were querying about. | |||||
321 | const Instruction *Inst = nullptr; | |||||
322 | // The MemoryAccess we actually got called with, used to test local domination | |||||
323 | const MemoryAccess *OriginalAccess = nullptr; | |||||
324 | Optional<AliasResult> AR = MayAlias; | |||||
325 | ||||||
326 | UpwardsMemoryQuery() = default; | |||||
327 | ||||||
328 | UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access) | |||||
329 | : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) { | |||||
330 | if (!IsCall) | |||||
331 | StartingLoc = MemoryLocation::get(Inst); | |||||
332 | } | |||||
333 | }; | |||||
334 | ||||||
335 | } // end anonymous namespace | |||||
336 | ||||||
337 | static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc, | |||||
338 | AliasAnalysis &AA) { | |||||
339 | Instruction *Inst = MD->getMemoryInst(); | |||||
340 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { | |||||
341 | switch (II->getIntrinsicID()) { | |||||
342 | case Intrinsic::lifetime_end: | |||||
343 | return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc); | |||||
344 | default: | |||||
345 | return false; | |||||
346 | } | |||||
347 | } | |||||
348 | return false; | |||||
349 | } | |||||
350 | ||||||
351 | static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA, | |||||
352 | const Instruction *I) { | |||||
353 | // If the memory can't be changed, then loads of the memory can't be | |||||
354 | // clobbered. | |||||
355 | return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) || | |||||
356 | AA.pointsToConstantMemory(cast<LoadInst>(I)-> | |||||
357 | getPointerOperand())); | |||||
358 | } | |||||
359 | ||||||
360 | /// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing | |||||
361 | /// inbetween `Start` and `ClobberAt` can clobbers `Start`. | |||||
362 | /// | |||||
363 | /// This is meant to be as simple and self-contained as possible. Because it | |||||
364 | /// uses no cache, etc., it can be relatively expensive. | |||||
365 | /// | |||||
366 | /// \param Start The MemoryAccess that we want to walk from. | |||||
367 | /// \param ClobberAt A clobber for Start. | |||||
368 | /// \param StartLoc The MemoryLocation for Start. | |||||
369 | /// \param MSSA The MemorySSA isntance that Start and ClobberAt belong to. | |||||
370 | /// \param Query The UpwardsMemoryQuery we used for our search. | |||||
371 | /// \param AA The AliasAnalysis we used for our search. | |||||
372 | static void LLVM_ATTRIBUTE_UNUSED__attribute__((__unused__)) | |||||
373 | checkClobberSanity(MemoryAccess *Start, MemoryAccess *ClobberAt, | |||||
374 | const MemoryLocation &StartLoc, const MemorySSA &MSSA, | |||||
375 | const UpwardsMemoryQuery &Query, AliasAnalysis &AA) { | |||||
376 | assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?")(static_cast <bool> (MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?") ? void (0) : __assert_fail ("MSSA.dominates(ClobberAt, Start) && \"Clobber doesn't dominate start?\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 376, __extension__ __PRETTY_FUNCTION__)); | |||||
377 | ||||||
378 | if (MSSA.isLiveOnEntryDef(Start)) { | |||||
379 | assert(MSSA.isLiveOnEntryDef(ClobberAt) &&(static_cast <bool> (MSSA.isLiveOnEntryDef(ClobberAt) && "liveOnEntry must clobber itself") ? void (0) : __assert_fail ("MSSA.isLiveOnEntryDef(ClobberAt) && \"liveOnEntry must clobber itself\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 380, __extension__ __PRETTY_FUNCTION__)) | |||||
380 | "liveOnEntry must clobber itself")(static_cast <bool> (MSSA.isLiveOnEntryDef(ClobberAt) && "liveOnEntry must clobber itself") ? void (0) : __assert_fail ("MSSA.isLiveOnEntryDef(ClobberAt) && \"liveOnEntry must clobber itself\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 380, __extension__ __PRETTY_FUNCTION__)); | |||||
381 | return; | |||||
382 | } | |||||
383 | ||||||
384 | bool FoundClobber = false; | |||||
385 | DenseSet<MemoryAccessPair> VisitedPhis; | |||||
386 | SmallVector<MemoryAccessPair, 8> Worklist; | |||||
387 | Worklist.emplace_back(Start, StartLoc); | |||||
388 | // Walk all paths from Start to ClobberAt, while looking for clobbers. If one | |||||
389 | // is found, complain. | |||||
390 | while (!Worklist.empty()) { | |||||
391 | MemoryAccessPair MAP = Worklist.pop_back_val(); | |||||
392 | // All we care about is that nothing from Start to ClobberAt clobbers Start. | |||||
393 | // We learn nothing from revisiting nodes. | |||||
394 | if (!VisitedPhis.insert(MAP).second) | |||||
395 | continue; | |||||
396 | ||||||
397 | for (MemoryAccess *MA : def_chain(MAP.first)) { | |||||
398 | if (MA == ClobberAt) { | |||||
399 | if (auto *MD = dyn_cast<MemoryDef>(MA)) { | |||||
400 | // instructionClobbersQuery isn't essentially free, so don't use `|=`, | |||||
401 | // since it won't let us short-circuit. | |||||
402 | // | |||||
403 | // Also, note that this can't be hoisted out of the `Worklist` loop, | |||||
404 | // since MD may only act as a clobber for 1 of N MemoryLocations. | |||||
405 | FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD); | |||||
406 | if (!FoundClobber) { | |||||
407 | ClobberAlias CA = | |||||
408 | instructionClobbersQuery(MD, MAP.second, Query.Inst, AA); | |||||
409 | if (CA.IsClobber) { | |||||
410 | FoundClobber = true; | |||||
411 | // Not used: CA.AR; | |||||
412 | } | |||||
413 | } | |||||
414 | } | |||||
415 | break; | |||||
416 | } | |||||
417 | ||||||
418 | // We should never hit liveOnEntry, unless it's the clobber. | |||||
419 | assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?")(static_cast <bool> (!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?") ? void (0) : __assert_fail ("!MSSA.isLiveOnEntryDef(MA) && \"Hit liveOnEntry before clobber?\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 419, __extension__ __PRETTY_FUNCTION__)); | |||||
420 | ||||||
421 | if (auto *MD = dyn_cast<MemoryDef>(MA)) { | |||||
422 | (void)MD; | |||||
423 | assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)(static_cast <bool> (!instructionClobbersQuery(MD, MAP. second, Query.Inst, AA) .IsClobber && "Found clobber before reaching ClobberAt!" ) ? void (0) : __assert_fail ("!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) .IsClobber && \"Found clobber before reaching ClobberAt!\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 425, __extension__ __PRETTY_FUNCTION__)) | |||||
424 | .IsClobber &&(static_cast <bool> (!instructionClobbersQuery(MD, MAP. second, Query.Inst, AA) .IsClobber && "Found clobber before reaching ClobberAt!" ) ? void (0) : __assert_fail ("!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) .IsClobber && \"Found clobber before reaching ClobberAt!\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 425, __extension__ __PRETTY_FUNCTION__)) | |||||
425 | "Found clobber before reaching ClobberAt!")(static_cast <bool> (!instructionClobbersQuery(MD, MAP. second, Query.Inst, AA) .IsClobber && "Found clobber before reaching ClobberAt!" ) ? void (0) : __assert_fail ("!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) .IsClobber && \"Found clobber before reaching ClobberAt!\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 425, __extension__ __PRETTY_FUNCTION__)); | |||||
426 | continue; | |||||
427 | } | |||||
428 | ||||||
429 | assert(isa<MemoryPhi>(MA))(static_cast <bool> (isa<MemoryPhi>(MA)) ? void ( 0) : __assert_fail ("isa<MemoryPhi>(MA)", "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 429, __extension__ __PRETTY_FUNCTION__)); | |||||
430 | Worklist.append(upward_defs_begin({MA, MAP.second}), upward_defs_end()); | |||||
431 | } | |||||
432 | } | |||||
433 | ||||||
434 | // If ClobberAt is a MemoryPhi, we can assume something above it acted as a | |||||
435 | // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point. | |||||
436 | assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&(static_cast <bool> ((isa<MemoryPhi>(ClobberAt) || FoundClobber) && "ClobberAt never acted as a clobber" ) ? void (0) : __assert_fail ("(isa<MemoryPhi>(ClobberAt) || FoundClobber) && \"ClobberAt never acted as a clobber\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 437, __extension__ __PRETTY_FUNCTION__)) | |||||
437 | "ClobberAt never acted as a clobber")(static_cast <bool> ((isa<MemoryPhi>(ClobberAt) || FoundClobber) && "ClobberAt never acted as a clobber" ) ? void (0) : __assert_fail ("(isa<MemoryPhi>(ClobberAt) || FoundClobber) && \"ClobberAt never acted as a clobber\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 437, __extension__ __PRETTY_FUNCTION__)); | |||||
438 | } | |||||
439 | ||||||
440 | namespace { | |||||
441 | ||||||
442 | /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up | |||||
443 | /// in one class. | |||||
444 | class ClobberWalker { | |||||
445 | /// Save a few bytes by using unsigned instead of size_t. | |||||
446 | using ListIndex = unsigned; | |||||
447 | ||||||
448 | /// Represents a span of contiguous MemoryDefs, potentially ending in a | |||||
449 | /// MemoryPhi. | |||||
450 | struct DefPath { | |||||
451 | MemoryLocation Loc; | |||||
452 | // Note that, because we always walk in reverse, Last will always dominate | |||||
453 | // First. Also note that First and Last are inclusive. | |||||
454 | MemoryAccess *First; | |||||
455 | MemoryAccess *Last; | |||||
456 | Optional<ListIndex> Previous; | |||||
457 | ||||||
458 | DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last, | |||||
459 | Optional<ListIndex> Previous) | |||||
460 | : Loc(Loc), First(First), Last(Last), Previous(Previous) {} | |||||
461 | ||||||
462 | DefPath(const MemoryLocation &Loc, MemoryAccess *Init, | |||||
463 | Optional<ListIndex> Previous) | |||||
464 | : DefPath(Loc, Init, Init, Previous) {} | |||||
465 | }; | |||||
466 | ||||||
467 | const MemorySSA &MSSA; | |||||
468 | AliasAnalysis &AA; | |||||
469 | DominatorTree &DT; | |||||
470 | UpwardsMemoryQuery *Query; | |||||
471 | ||||||
472 | // Phi optimization bookkeeping | |||||
473 | SmallVector<DefPath, 32> Paths; | |||||
474 | DenseSet<ConstMemoryAccessPair> VisitedPhis; | |||||
475 | ||||||
476 | /// Find the nearest def or phi that `From` can legally be optimized to. | |||||
477 | const MemoryAccess *getWalkTarget(const MemoryPhi *From) const { | |||||
478 | assert(From->getNumOperands() && "Phi with no operands?")(static_cast <bool> (From->getNumOperands() && "Phi with no operands?") ? void (0) : __assert_fail ("From->getNumOperands() && \"Phi with no operands?\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 478, __extension__ __PRETTY_FUNCTION__)); | |||||
479 | ||||||
480 | BasicBlock *BB = From->getBlock(); | |||||
481 | MemoryAccess *Result = MSSA.getLiveOnEntryDef(); | |||||
482 | DomTreeNode *Node = DT.getNode(BB); | |||||
483 | while ((Node = Node->getIDom())) { | |||||
484 | auto *Defs = MSSA.getBlockDefs(Node->getBlock()); | |||||
485 | if (Defs) | |||||
486 | return &*Defs->rbegin(); | |||||
487 | } | |||||
488 | return Result; | |||||
489 | } | |||||
490 | ||||||
491 | /// Result of calling walkToPhiOrClobber. | |||||
492 | struct UpwardsWalkResult { | |||||
493 | /// The "Result" of the walk. Either a clobber, the last thing we walked, or | |||||
494 | /// both. Include alias info when clobber found. | |||||
495 | MemoryAccess *Result; | |||||
496 | bool IsKnownClobber; | |||||
497 | Optional<AliasResult> AR; | |||||
498 | }; | |||||
499 | ||||||
500 | /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last. | |||||
501 | /// This will update Desc.Last as it walks. It will (optionally) also stop at | |||||
502 | /// StopAt. | |||||
503 | /// | |||||
504 | /// This does not test for whether StopAt is a clobber | |||||
505 | UpwardsWalkResult | |||||
506 | walkToPhiOrClobber(DefPath &Desc, | |||||
507 | const MemoryAccess *StopAt = nullptr) const { | |||||
508 | assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world")(static_cast <bool> (!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world") ? void (0) : __assert_fail ( "!isa<MemoryUse>(Desc.Last) && \"Uses don't exist in my world\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 508, __extension__ __PRETTY_FUNCTION__)); | |||||
509 | ||||||
510 | for (MemoryAccess *Current : def_chain(Desc.Last)) { | |||||
511 | Desc.Last = Current; | |||||
512 | if (Current == StopAt) | |||||
513 | return {Current, false, MayAlias}; | |||||
514 | ||||||
515 | if (auto *MD = dyn_cast<MemoryDef>(Current)) { | |||||
516 | if (MSSA.isLiveOnEntryDef(MD)) | |||||
517 | return {MD, true, MustAlias}; | |||||
518 | ClobberAlias CA = | |||||
519 | instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA); | |||||
520 | if (CA.IsClobber) | |||||
521 | return {MD, true, CA.AR}; | |||||
522 | } | |||||
523 | } | |||||
524 | ||||||
525 | assert(isa<MemoryPhi>(Desc.Last) &&(static_cast <bool> (isa<MemoryPhi>(Desc.Last) && "Ended at a non-clobber that's not a phi?") ? void (0) : __assert_fail ("isa<MemoryPhi>(Desc.Last) && \"Ended at a non-clobber that's not a phi?\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 526, __extension__ __PRETTY_FUNCTION__)) | |||||
526 | "Ended at a non-clobber that's not a phi?")(static_cast <bool> (isa<MemoryPhi>(Desc.Last) && "Ended at a non-clobber that's not a phi?") ? void (0) : __assert_fail ("isa<MemoryPhi>(Desc.Last) && \"Ended at a non-clobber that's not a phi?\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 526, __extension__ __PRETTY_FUNCTION__)); | |||||
527 | return {Desc.Last, false, MayAlias}; | |||||
528 | } | |||||
529 | ||||||
530 | void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches, | |||||
531 | ListIndex PriorNode) { | |||||
532 | auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}), | |||||
533 | upward_defs_end()); | |||||
534 | for (const MemoryAccessPair &P : UpwardDefs) { | |||||
535 | PausedSearches.push_back(Paths.size()); | |||||
536 | Paths.emplace_back(P.second, P.first, PriorNode); | |||||
537 | } | |||||
538 | } | |||||
539 | ||||||
540 | /// Represents a search that terminated after finding a clobber. This clobber | |||||
541 | /// may or may not be present in the path of defs from LastNode..SearchStart, | |||||
542 | /// since it may have been retrieved from cache. | |||||
  struct TerminatedPath {
    // The access that ended this search (the clobber found, possibly cached).
    MemoryAccess *Clobber;
    // Index into Paths of the last node on the path that found Clobber.
    ListIndex LastNode;
  };
547 | ||||||
548 | /// Get an access that keeps us from optimizing to the given phi. | |||||
549 | /// | |||||
550 | /// PausedSearches is an array of indices into the Paths array. Its incoming | |||||
551 | /// value is the indices of searches that stopped at the last phi optimization | |||||
552 | /// target. It's left in an unspecified state. | |||||
553 | /// | |||||
554 | /// If this returns None, NewPaused is a vector of searches that terminated | |||||
555 | /// at StopWhere. Otherwise, NewPaused is left in an unspecified state. | |||||
  Optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    // BFS vs DFS really doesn't make a difference here, so just do a DFS with
    // PausedSearches as our stack.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      //
      // NOTE: That we just drop these paths on the ground makes caching
      // behavior sporadic. e.g. given a diamond:
      //  A
      // B C
      //  D
      //
      // ...If we walk D, B, A, C, we'll only cache the result of phi
      // optimization for A, B, and D; C will be skipped because it dies here.
      // This arguably isn't the worst thing ever, since:
      //   - We generally query things in a top-down order, so if we got below D
      //     without needing cache entries for {C, MemLoc}, then chances are
      //     that those cache entries would end up ultimately unused.
      //   - We still cache things for A, so C only needs to walk up a bit.
      // If this behavior becomes problematic, we can fix without a ton of extra
      // work.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
        continue;

      // Walk this path until it hits a clobber, a phi, or StopWhere itself.
      UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere);
        // If this wasn't a cache hit, we hit a clobber when walking. That's a
        // failure.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere) {
        // We've hit our target. Save this path off for if we want to continue
        // walking.
        NewPaused.push_back(PathIndex);
        continue;
      }

      // Only a phi can stop the walk without being a clobber or the target;
      // fan out new searches across its incoming values.
      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return None;
  }
617 | ||||||
618 | template <typename T, typename Walker> | |||||
619 | struct generic_def_path_iterator | |||||
620 | : public iterator_facade_base<generic_def_path_iterator<T, Walker>, | |||||
621 | std::forward_iterator_tag, T *> { | |||||
622 | generic_def_path_iterator() = default; | |||||
623 | generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {} | |||||
624 | ||||||
625 | T &operator*() const { return curNode(); } | |||||
626 | ||||||
627 | generic_def_path_iterator &operator++() { | |||||
628 | N = curNode().Previous; | |||||
629 | return *this; | |||||
630 | } | |||||
631 | ||||||
632 | bool operator==(const generic_def_path_iterator &O) const { | |||||
633 | if (N.hasValue() != O.N.hasValue()) | |||||
634 | return false; | |||||
635 | return !N.hasValue() || *N == *O.N; | |||||
636 | } | |||||
637 | ||||||
638 | private: | |||||
639 | T &curNode() const { return W->Paths[*N]; } | |||||
640 | ||||||
641 | Walker *W = nullptr; | |||||
642 | Optional<ListIndex> N = None; | |||||
643 | }; | |||||
644 | ||||||
  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  // Range over the chain of Previous links starting at Paths[From] and ending
  // at the path node that has no predecessor.
  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }
657 | ||||||
  /// Result of a phi-optimization attempt: the chosen clobber plus any other
  /// clobbers it is legal to cache back from.
  struct OptznResult {
    /// The path that contains our result.
    TerminatedPath PrimaryClobber;
    /// The paths that we can legally cache back from, but that aren't
    /// necessarily the result of the Phi optimization.
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };
665 | ||||||
666 | ListIndex defPathIndex(const DefPath &N) const { | |||||
667 | // The assert looks nicer if we don't need to do &N | |||||
668 | const DefPath *NP = &N; | |||||
669 | assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&(static_cast <bool> (!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() && "Out of bounds DefPath!") ? void (0) : __assert_fail ("!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() && \"Out of bounds DefPath!\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 670, __extension__ __PRETTY_FUNCTION__)) | |||||
670 | "Out of bounds DefPath!")(static_cast <bool> (!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() && "Out of bounds DefPath!") ? void (0) : __assert_fail ("!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() && \"Out of bounds DefPath!\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 670, __extension__ __PRETTY_FUNCTION__)); | |||||
671 | return NP - &Paths.front(); | |||||
672 | } | |||||
673 | ||||||
674 | /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths | |||||
675 | /// that act as legal clobbers. Note that this won't return *all* clobbers. | |||||
676 | /// | |||||
677 | /// Phi optimization algorithm tl;dr: | |||||
678 | /// - Find the earliest def/phi, A, we can optimize to | |||||
679 | /// - Find if all paths from the starting memory access ultimately reach A | |||||
680 | /// - If not, optimization isn't possible. | |||||
681 | /// - Otherwise, walk from A to another clobber or phi, A'. | |||||
682 | /// - If A' is a def, we're done. | |||||
683 | /// - If A' is a phi, try to optimize it. | |||||
684 | /// | |||||
685 | /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path | |||||
686 | /// terminates when a MemoryAccess that clobbers said MemoryLocation is found. | |||||
687 | OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start, | |||||
688 | const MemoryLocation &Loc) { | |||||
689 | assert(Paths.empty() && VisitedPhis.empty() &&(static_cast <bool> (Paths.empty() && VisitedPhis .empty() && "Reset the optimization state.") ? void ( 0) : __assert_fail ("Paths.empty() && VisitedPhis.empty() && \"Reset the optimization state.\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 690, __extension__ __PRETTY_FUNCTION__)) | |||||
690 | "Reset the optimization state.")(static_cast <bool> (Paths.empty() && VisitedPhis .empty() && "Reset the optimization state.") ? void ( 0) : __assert_fail ("Paths.empty() && VisitedPhis.empty() && \"Reset the optimization state.\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 690, __extension__ __PRETTY_FUNCTION__)); | |||||
691 | ||||||
692 | Paths.emplace_back(Loc, Start, Phi, None); | |||||
693 | // Stores how many "valid" optimization nodes we had prior to calling | |||||
694 | // addSearches/getBlockingAccess. Necessary for caching if we had a blocker. | |||||
695 | auto PriorPathsSize = Paths.size(); | |||||
696 | ||||||
697 | SmallVector<ListIndex, 16> PausedSearches; | |||||
698 | SmallVector<ListIndex, 8> NewPaused; | |||||
699 | SmallVector<TerminatedPath, 4> TerminatedPaths; | |||||
700 | ||||||
701 | addSearches(Phi, PausedSearches, 0); | |||||
702 | ||||||
703 | // Moves the TerminatedPath with the "most dominated" Clobber to the end of | |||||
704 | // Paths. | |||||
705 | auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) { | |||||
706 | assert(!Paths.empty() && "Need a path to move")(static_cast <bool> (!Paths.empty() && "Need a path to move" ) ? void (0) : __assert_fail ("!Paths.empty() && \"Need a path to move\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 706, __extension__ __PRETTY_FUNCTION__)); | |||||
707 | auto Dom = Paths.begin(); | |||||
708 | for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I) | |||||
709 | if (!MSSA.dominates(I->Clobber, Dom->Clobber)) | |||||
710 | Dom = I; | |||||
711 | auto Last = Paths.end() - 1; | |||||
712 | if (Last != Dom) | |||||
713 | std::iter_swap(Last, Dom); | |||||
714 | }; | |||||
715 | ||||||
716 | MemoryPhi *Current = Phi; | |||||
717 | while (true) { | |||||
718 | assert(!MSSA.isLiveOnEntryDef(Current) &&(static_cast <bool> (!MSSA.isLiveOnEntryDef(Current) && "liveOnEntry wasn't treated as a clobber?") ? void (0) : __assert_fail ("!MSSA.isLiveOnEntryDef(Current) && \"liveOnEntry wasn't treated as a clobber?\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 719, __extension__ __PRETTY_FUNCTION__)) | |||||
719 | "liveOnEntry wasn't treated as a clobber?")(static_cast <bool> (!MSSA.isLiveOnEntryDef(Current) && "liveOnEntry wasn't treated as a clobber?") ? void (0) : __assert_fail ("!MSSA.isLiveOnEntryDef(Current) && \"liveOnEntry wasn't treated as a clobber?\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 719, __extension__ __PRETTY_FUNCTION__)); | |||||
720 | ||||||
721 | const auto *Target = getWalkTarget(Current); | |||||
722 | // If a TerminatedPath doesn't dominate Target, then it wasn't a legal | |||||
723 | // optimization for the prior phi. | |||||
724 | assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {(static_cast <bool> (all_of(TerminatedPaths, [&](const TerminatedPath &P) { return MSSA.dominates(P.Clobber, Target ); })) ? void (0) : __assert_fail ("all_of(TerminatedPaths, [&](const TerminatedPath &P) { return MSSA.dominates(P.Clobber, Target); })" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 726, __extension__ __PRETTY_FUNCTION__)) | |||||
725 | return MSSA.dominates(P.Clobber, Target);(static_cast <bool> (all_of(TerminatedPaths, [&](const TerminatedPath &P) { return MSSA.dominates(P.Clobber, Target ); })) ? void (0) : __assert_fail ("all_of(TerminatedPaths, [&](const TerminatedPath &P) { return MSSA.dominates(P.Clobber, Target); })" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 726, __extension__ __PRETTY_FUNCTION__)) | |||||
726 | }))(static_cast <bool> (all_of(TerminatedPaths, [&](const TerminatedPath &P) { return MSSA.dominates(P.Clobber, Target ); })) ? void (0) : __assert_fail ("all_of(TerminatedPaths, [&](const TerminatedPath &P) { return MSSA.dominates(P.Clobber, Target); })" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 726, __extension__ __PRETTY_FUNCTION__)); | |||||
727 | ||||||
728 | // FIXME: This is broken, because the Blocker may be reported to be | |||||
729 | // liveOnEntry, and we'll happily wait for that to disappear (read: never) | |||||
730 | // For the moment, this is fine, since we do nothing with blocker info. | |||||
731 | if (Optional<TerminatedPath> Blocker = getBlockingAccess( | |||||
732 | Target, PausedSearches, NewPaused, TerminatedPaths)) { | |||||
733 | ||||||
734 | // Find the node we started at. We can't search based on N->Last, since | |||||
735 | // we may have gone around a loop with a different MemoryLocation. | |||||
736 | auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) { | |||||
737 | return defPathIndex(N) < PriorPathsSize; | |||||
738 | }); | |||||
739 | assert(Iter != def_path_iterator())(static_cast <bool> (Iter != def_path_iterator()) ? void (0) : __assert_fail ("Iter != def_path_iterator()", "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 739, __extension__ __PRETTY_FUNCTION__)); | |||||
740 | ||||||
741 | DefPath &CurNode = *Iter; | |||||
742 | assert(CurNode.Last == Current)(static_cast <bool> (CurNode.Last == Current) ? void (0 ) : __assert_fail ("CurNode.Last == Current", "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 742, __extension__ __PRETTY_FUNCTION__)); | |||||
743 | ||||||
744 | // Two things: | |||||
745 | // A. We can't reliably cache all of NewPaused back. Consider a case | |||||
746 | // where we have two paths in NewPaused; one of which can't optimize | |||||
747 | // above this phi, whereas the other can. If we cache the second path | |||||
748 | // back, we'll end up with suboptimal cache entries. We can handle | |||||
749 | // cases like this a bit better when we either try to find all | |||||
750 | // clobbers that block phi optimization, or when our cache starts | |||||
751 | // supporting unfinished searches. | |||||
752 | // B. We can't reliably cache TerminatedPaths back here without doing | |||||
753 | // extra checks; consider a case like: | |||||
754 | // T | |||||
755 | // / \ | |||||
756 | // D C | |||||
757 | // \ / | |||||
758 | // S | |||||
759 | // Where T is our target, C is a node with a clobber on it, D is a | |||||
760 | // diamond (with a clobber *only* on the left or right node, N), and | |||||
761 | // S is our start. Say we walk to D, through the node opposite N | |||||
762 | // (read: ignoring the clobber), and see a cache entry in the top | |||||
763 | // node of D. That cache entry gets put into TerminatedPaths. We then | |||||
764 | // walk up to C (N is later in our worklist), find the clobber, and | |||||
765 | // quit. If we append TerminatedPaths to OtherClobbers, we'll cache | |||||
766 | // the bottom part of D to the cached clobber, ignoring the clobber | |||||
767 | // in N. Again, this problem goes away if we start tracking all | |||||
768 | // blockers for a given phi optimization. | |||||
769 | TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)}; | |||||
770 | return {Result, {}}; | |||||
771 | } | |||||
772 | ||||||
773 | // If there's nothing left to search, then all paths led to valid clobbers | |||||
774 | // that we got from our cache; pick the nearest to the start, and allow | |||||
775 | // the rest to be cached back. | |||||
776 | if (NewPaused.empty()) { | |||||
777 | MoveDominatedPathToEnd(TerminatedPaths); | |||||
778 | TerminatedPath Result = TerminatedPaths.pop_back_val(); | |||||
779 | return {Result, std::move(TerminatedPaths)}; | |||||
780 | } | |||||
781 | ||||||
782 | MemoryAccess *DefChainEnd = nullptr; | |||||
783 | SmallVector<TerminatedPath, 4> Clobbers; | |||||
784 | for (ListIndex Paused : NewPaused) { | |||||
785 | UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]); | |||||
786 | if (WR.IsKnownClobber) | |||||
787 | Clobbers.push_back({WR.Result, Paused}); | |||||
788 | else | |||||
789 | // Micro-opt: If we hit the end of the chain, save it. | |||||
790 | DefChainEnd = WR.Result; | |||||
791 | } | |||||
792 | ||||||
793 | if (!TerminatedPaths.empty()) { | |||||
794 | // If we couldn't find the dominating phi/liveOnEntry in the above loop, | |||||
795 | // do it now. | |||||
796 | if (!DefChainEnd) | |||||
797 | for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target))) | |||||
798 | DefChainEnd = MA; | |||||
799 | ||||||
800 | // If any of the terminated paths don't dominate the phi we'll try to | |||||
801 | // optimize, we need to figure out what they are and quit. | |||||
802 | const BasicBlock *ChainBB = DefChainEnd->getBlock(); | |||||
803 | for (const TerminatedPath &TP : TerminatedPaths) { | |||||
804 | // Because we know that DefChainEnd is as "high" as we can go, we | |||||
805 | // don't need local dominance checks; BB dominance is sufficient. | |||||
806 | if (DT.dominates(ChainBB, TP.Clobber->getBlock())) | |||||
807 | Clobbers.push_back(TP); | |||||
808 | } | |||||
809 | } | |||||
810 | ||||||
811 | // If we have clobbers in the def chain, find the one closest to Current | |||||
812 | // and quit. | |||||
813 | if (!Clobbers.empty()) { | |||||
814 | MoveDominatedPathToEnd(Clobbers); | |||||
815 | TerminatedPath Result = Clobbers.pop_back_val(); | |||||
816 | return {Result, std::move(Clobbers)}; | |||||
817 | } | |||||
818 | ||||||
819 | assert(all_of(NewPaused,(static_cast <bool> (all_of(NewPaused, [&](ListIndex I) { return Paths[I].Last == DefChainEnd; })) ? void (0) : __assert_fail ("all_of(NewPaused, [&](ListIndex I) { return Paths[I].Last == DefChainEnd; })" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 820, __extension__ __PRETTY_FUNCTION__)) | |||||
820 | [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }))(static_cast <bool> (all_of(NewPaused, [&](ListIndex I) { return Paths[I].Last == DefChainEnd; })) ? void (0) : __assert_fail ("all_of(NewPaused, [&](ListIndex I) { return Paths[I].Last == DefChainEnd; })" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 820, __extension__ __PRETTY_FUNCTION__)); | |||||
821 | ||||||
822 | // Because liveOnEntry is a clobber, this must be a phi. | |||||
823 | auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd); | |||||
824 | ||||||
825 | PriorPathsSize = Paths.size(); | |||||
826 | PausedSearches.clear(); | |||||
827 | for (ListIndex I : NewPaused) | |||||
828 | addSearches(DefChainPhi, PausedSearches, I); | |||||
829 | NewPaused.clear(); | |||||
830 | ||||||
831 | Current = DefChainPhi; | |||||
832 | } | |||||
833 | } | |||||
834 | ||||||
  // Sanity-check an OptznResult: every secondary clobber must dominate the
  // primary one, otherwise caching back from it would be unsound.
  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }
840 | ||||||
841 | void resetPhiOptznState() { | |||||
842 | Paths.clear(); | |||||
843 | VisitedPhis.clear(); | |||||
844 | } | |||||
845 | ||||||
public:
  // Holds references to the analyses it is constructed with; callers must
  // keep MSSA, AA, and DT alive for the lifetime of this walker.
  ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT)
      : MSSA(MSSA), AA(AA), DT(DT) {}
849 | ||||||
850 | /// Finds the nearest clobber for the given query, optimizing phis if | |||||
851 | /// possible. | |||||
  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) {
    // Stash the query so walk helpers (e.g. instructionClobbersQuery callers)
    // can read it.
    Query = &Q;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist. If we're handed one, silently grab
    // its def. (This has the nice side-effect of ensuring we never cache uses)
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
    // Fast path for the overly-common case (no crazy phi optimization
    // necessary)
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
      Q.AR = WalkResult.AR;
    } else {
      // Stopped at a phi: try to look through it, then discard the
      // per-query optimization state.
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
#endif
    return Result;
  }
882 | ||||||
  // Debug hook: assert callers hand us the same MemorySSA this walker wraps.
  void verify(const MemorySSA *MSSA) { assert(MSSA == &this->MSSA); }
884 | }; | |||||
885 | ||||||
886 | struct RenamePassData { | |||||
887 | DomTreeNode *DTN; | |||||
888 | DomTreeNode::const_iterator ChildIt; | |||||
889 | MemoryAccess *IncomingVal; | |||||
890 | ||||||
891 | RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It, | |||||
892 | MemoryAccess *M) | |||||
893 | : DTN(D), ChildIt(It), IncomingVal(M) {} | |||||
894 | ||||||
895 | void swap(RenamePassData &RHS) { | |||||
896 | std::swap(DTN, RHS.DTN); | |||||
897 | std::swap(ChildIt, RHS.ChildIt); | |||||
898 | std::swap(IncomingVal, RHS.IncomingVal); | |||||
899 | } | |||||
900 | }; | |||||
901 | ||||||
902 | } // end anonymous namespace | |||||
903 | ||||||
904 | namespace llvm { | |||||
905 | ||||||
906 | /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no | |||||
907 | /// longer does caching on its own, but the name has been retained for the | |||||
908 | /// moment. | |||||
class MemorySSA::CachingWalker final : public MemorySSAWalker {
  // Performs the actual clobber walks; this class adapts it to the
  // MemorySSAWalker interface.
  ClobberWalker Walker;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);

public:
  CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
  ~CachingWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          const MemoryLocation &) override;
  void invalidateInfo(MemoryAccess *) override;

  // Verifies both the base walker state and the wrapped ClobberWalker.
  void verify(const MemorySSA *MSSA) override {
    MemorySSAWalker::verify(MSSA);
    Walker.verify(MSSA);
  }
};
930 | ||||||
931 | } // end namespace llvm | |||||
932 | ||||||
933 | void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal, | |||||
934 | bool RenameAllUses) { | |||||
935 | // Pass through values to our successors | |||||
936 | for (const BasicBlock *S : successors(BB)) { | |||||
937 | auto It = PerBlockAccesses.find(S); | |||||
938 | // Rename the phi nodes in our successor block | |||||
939 | if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front())) | |||||
940 | continue; | |||||
941 | AccessList *Accesses = It->second.get(); | |||||
942 | auto *Phi = cast<MemoryPhi>(&Accesses->front()); | |||||
943 | if (RenameAllUses) { | |||||
944 | int PhiIndex = Phi->getBasicBlockIndex(BB); | |||||
945 | assert(PhiIndex != -1 && "Incomplete phi during partial rename")(static_cast <bool> (PhiIndex != -1 && "Incomplete phi during partial rename" ) ? void (0) : __assert_fail ("PhiIndex != -1 && \"Incomplete phi during partial rename\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 945, __extension__ __PRETTY_FUNCTION__)); | |||||
946 | Phi->setIncomingValue(PhiIndex, IncomingVal); | |||||
947 | } else | |||||
948 | Phi->addIncoming(IncomingVal, BB); | |||||
949 | } | |||||
950 | } | |||||
951 | ||||||
952 | /// Rename a single basic block into MemorySSA form. | |||||
953 | /// Uses the standard SSA renaming algorithm. | |||||
954 | /// \returns The new incoming value. | |||||
955 | MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal, | |||||
956 | bool RenameAllUses) { | |||||
957 | auto It = PerBlockAccesses.find(BB); | |||||
958 | // Skip most processing if the list is empty. | |||||
959 | if (It != PerBlockAccesses.end()) { | |||||
960 | AccessList *Accesses = It->second.get(); | |||||
961 | for (MemoryAccess &L : *Accesses) { | |||||
962 | if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) { | |||||
963 | if (MUD->getDefiningAccess() == nullptr || RenameAllUses) | |||||
964 | MUD->setDefiningAccess(IncomingVal); | |||||
965 | if (isa<MemoryDef>(&L)) | |||||
966 | IncomingVal = &L; | |||||
967 | } else { | |||||
968 | IncomingVal = &L; | |||||
969 | } | |||||
970 | } | |||||
971 | } | |||||
972 | return IncomingVal; | |||||
973 | } | |||||
974 | ||||||
975 | /// This is the standard SSA renaming algorithm. | |||||
976 | /// | |||||
977 | /// We walk the dominator tree in preorder, renaming accesses, and then filling | |||||
978 | /// in phi nodes in our successors. | |||||
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  SmallVector<RenamePassData, 32> WorkStack;
  // Skip everything if we already renamed this block and we are skipping.
  // Note: You can't sink this into the if, because we need it to occur
  // regardless of whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});

  // Iterative preorder walk of the dominator tree: each stack entry tracks
  // which child to visit next and the memory state live on entry to the node.
  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      // All children of Node handled; retreat up the tree.
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      // Advance the saved iterator before pushing: push_back may reallocate
      // and invalidate references into WorkStack.
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      // Note: You can't sink this into the if, because we need it to occur
      // regardless of whether we skip blocks or not.
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // We already visited this during our renaming, which can happen when
        // being asked to rename multiple blocks. Figure out the incoming val,
        // which is the last def.
        // Incoming value can only change if there is a block def, and in that
        // case, it's the last block def in the list.
        if (auto *BlockDefs = getWritableBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}
1023 | ||||||
1024 | /// This handles unreachable block accesses by deleting phi nodes in | |||||
1025 | /// unreachable blocks, and marking all other unreachable MemoryAccess's as | |||||
1026 | /// being uses of the live on entry definition. | |||||
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable. We could just disconnect these blocks from the CFG fully,
  // but we do not right now.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    // Compute the successor first: erase(AI) below invalidates AI.
    auto Next = std::next(AI);
    // If we have a phi, just remove it. We are going to replace all
    // users with live on entry.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}
1063 | ||||||
// Construct MemorySSA for \p Func. The analysis is built eagerly here; the
// member-initializer list only wires up the analyses we depend on.
MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      NextID(0) {
  // All the real work (access creation, phi placement, renaming) lives here.
  buildMemorySSA();
}
1069 | ||||||
1070 | MemorySSA::~MemorySSA() { | |||||
1071 | // Drop all our references | |||||
1072 | for (const auto &Pair : PerBlockAccesses) | |||||
1073 | for (MemoryAccess &MA : *Pair.second) | |||||
1074 | MA.dropAllReferences(); | |||||
1075 | } | |||||
1076 | ||||||
1077 | MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) { | |||||
1078 | auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr)); | |||||
1079 | ||||||
1080 | if (Res.second) | |||||
1081 | Res.first->second = llvm::make_unique<AccessList>(); | |||||
1082 | return Res.first->second.get(); | |||||
1083 | } | |||||
1084 | ||||||
1085 | MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) { | |||||
1086 | auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr)); | |||||
1087 | ||||||
1088 | if (Res.second) | |||||
1089 | Res.first->second = llvm::make_unique<DefsList>(); | |||||
1090 | return Res.first->second.get(); | |||||
1091 | } | |||||
1092 | ||||||
1093 | namespace llvm { | |||||
1094 | ||||||
1095 | /// This class is a batch walker of all MemoryUse's in the program, and points | |||||
1096 | /// their defining access at the thing that actually clobbers them. Because it | |||||
1097 | /// is a batch walker that touches everything, it does not operate like the | |||||
1098 | /// other walkers. This walker is basically performing a top-down SSA renaming | |||||
1099 | /// pass, where the version stack is used as the cache. This enables it to be | |||||
1100 | /// significantly more time and memory efficient than using the regular walker, | |||||
1101 | /// which is walking bottom-up. | |||||
1102 | class MemorySSA::OptimizeUses { | |||||
1103 | public: | |||||
1104 | OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA, | |||||
1105 | DominatorTree *DT) | |||||
1106 | : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) { | |||||
1107 | Walker = MSSA->getWalker(); | |||||
1108 | } | |||||
1109 | ||||||
1110 | void optimizeUses(); | |||||
1111 | ||||||
1112 | private: | |||||
1113 | /// This represents where a given memorylocation is in the stack. | |||||
1114 | struct MemlocStackInfo { | |||||
1115 | // This essentially is keeping track of versions of the stack. Whenever | |||||
1116 | // the stack changes due to pushes or pops, these versions increase. | |||||
1117 | unsigned long StackEpoch; | |||||
1118 | unsigned long PopEpoch; | |||||
1119 | // This is the lower bound of places on the stack to check. It is equal to | |||||
1120 | // the place the last stack walk ended. | |||||
1121 | // Note: Correctness depends on this being initialized to 0, which densemap | |||||
1122 | // does | |||||
1123 | unsigned long LowerBound; | |||||
1124 | const BasicBlock *LowerBoundBlock; | |||||
1125 | // This is where the last walk for this memory location ended. | |||||
1126 | unsigned long LastKill; | |||||
1127 | bool LastKillValid; | |||||
1128 | Optional<AliasResult> AR; | |||||
1129 | }; | |||||
1130 | ||||||
1131 | void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &, | |||||
1132 | SmallVectorImpl<MemoryAccess *> &, | |||||
1133 | DenseMap<MemoryLocOrCall, MemlocStackInfo> &); | |||||
1134 | ||||||
1135 | MemorySSA *MSSA; | |||||
1136 | MemorySSAWalker *Walker; | |||||
1137 | AliasAnalysis *AA; | |||||
1138 | DominatorTree *DT; | |||||
1139 | }; | |||||
1140 | ||||||
1141 | } // end namespace llvm | |||||
1142 | ||||||
/// Optimize the uses in a given block This is basically the SSA renaming
/// algorithm, with one caveat: We are able to use a single stack for all
/// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
/// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
/// going to be some position in that stack of possible ones.
///
/// We track the stack positions that each MemoryLocation needs
/// to check, and last ended at. This is because we only want to check the
/// things that changed since last time. The same MemoryLocation should
/// get clobbered by the same store (getModRefInfo does not use invariantness or
/// things like this, and if they start, we can modify MemoryLocOrCall to
/// include relevant data)
void MemorySSA::OptimizeUses::optimizeUsesInBlock(
    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
    SmallVectorImpl<MemoryAccess *> &VersionStack,
    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {

  /// If no accesses, nothing to do.
  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
  if (Accesses == nullptr)
    return;

  // Pop everything that doesn't dominate the current block off the stack,
  // increment the PopEpoch to account for this.
  while (true) {
    assert(
        !VersionStack.empty() &&
        "Version stack should have liveOnEntry sentinel dominating everything");
    BasicBlock *BackBlock = VersionStack.back()->getBlock();
    if (DT->dominates(BackBlock, BB))
      break;
    // Pop every access of the non-dominating block as a unit.
    while (VersionStack.back()->getBlock() == BackBlock)
      VersionStack.pop_back();
    ++PopEpoch;
  }

  for (MemoryAccess &MA : *Accesses) {
    auto *MU = dyn_cast<MemoryUse>(&MA);
    if (!MU) {
      // Defs and phis are potential clobbers for later uses: push them.
      VersionStack.push_back(&MA);
      ++StackEpoch;
      continue;
    }

    if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
      MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
      continue;
    }

    MemoryLocOrCall UseMLOC(MU);
    auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, it means we've removed stuff from top of
    // stack due to changing blocks. We may have to reset the lower bound or
    // last kill info.
    if (LocInfo.PopEpoch != PopEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
      // If the lower bound was in something that no longer dominates us, we
      // have to reset it.
      // We can't simply track stack size, because the stack may have had
      // pushes/pops in the meantime.
      // XXX: This is non-optimal, but only is slower cases with heavily
      // branching dominator trees. To get the optimal number of queries would
      // be to make lowerbound and lastkill a per-loc stack, and pop it until
      // the top of that stack dominates us. This does not seem worth it ATM.
      // A much cheaper optimization would be to always explore the deepest
      // branch of the dominator tree first. This will guarantee this resets on
      // the smallest set of blocks.
      if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
          !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
        // Reset the lower bound of things to check.
        // TODO: Some day we should be able to reset to last kill, rather than
        // 0.
        LocInfo.LowerBound = 0;
        // VersionStack[0] is the liveOnEntry sentinel, which dominates all.
        LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
        LocInfo.LastKillValid = false;
      }
    } else if (LocInfo.StackEpoch != StackEpoch) {
      // If all that has changed is the StackEpoch, we only have to check the
      // new things on the stack, because we've checked everything before. In
      // this case, the lower bound of things to check remains the same.
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
    }
    if (!LocInfo.LastKillValid) {
      LocInfo.LastKill = VersionStack.size() - 1;
      LocInfo.LastKillValid = true;
      LocInfo.AR = MayAlias;
    }

    // At this point, we should have corrected last kill and LowerBound to be
    // in bounds.
    assert(LocInfo.LowerBound < VersionStack.size() &&
           "Lower bound out of range");
    assert(LocInfo.LastKill < VersionStack.size() &&
           "Last kill info out of range");
    // In any case, the new upper bound is the top of the stack.
    unsigned long UpperBound = VersionStack.size() - 1;

    if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
      LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
                        << *(MU->getMemoryInst()) << ")"
                        << " because there are "
                        << UpperBound - LocInfo.LowerBound
                        << " stores to disambiguate\n");
      // Because we did not walk, LastKill is no longer valid, as this may
      // have been a kill.
      LocInfo.LastKillValid = false;
      continue;
    }
    bool FoundClobberResult = false;
    while (UpperBound > LocInfo.LowerBound) {
      if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker, see where we ended up, go there
        Instruction *UseInst = MU->getMemoryInst();
        MemoryAccess *Result = Walker->getClobberingMemoryAccess(UseInst);
        // We are guaranteed to find it or something is wrong
        while (VersionStack[UpperBound] != Result) {
          assert(UpperBound != 0);
          --UpperBound;
        }
        FoundClobberResult = true;
        break;
      }

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
      // If the lifetime of the pointer ends at this instruction, it's live on
      // entry.
      if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
        // Reset UpperBound to liveOnEntryDef's place in the stack
        UpperBound = 0;
        FoundClobberResult = true;
        LocInfo.AR = MustAlias;
        break;
      }
      ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
      if (CA.IsClobber) {
        FoundClobberResult = true;
        LocInfo.AR = CA.AR;
        break;
      }
      --UpperBound;
    }

    // Note: Phis always have AliasResult AR set to MayAlias ATM.

    // At the end of this loop, UpperBound is either a clobber, or lower bound
    // PHI walking may cause it to be < LowerBound, and in fact, < LastKill.
    if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      // We were last killed now by where we got to
      if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
        LocInfo.AR = None;
      MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
      LocInfo.LastKill = UpperBound;
    } else {
      // Otherwise, we checked all the new ones, and now we know we can get to
      // LastKill.
      MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
    }
    LocInfo.LowerBound = VersionStack.size() - 1;
    LocInfo.LowerBoundBlock = BB;
  }
}
1306 | ||||||
1307 | /// Optimize uses to point to their actual clobbering definitions. | |||||
1308 | void MemorySSA::OptimizeUses::optimizeUses() { | |||||
1309 | SmallVector<MemoryAccess *, 16> VersionStack; | |||||
1310 | DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo; | |||||
1311 | VersionStack.push_back(MSSA->getLiveOnEntryDef()); | |||||
1312 | ||||||
1313 | unsigned long StackEpoch = 1; | |||||
1314 | unsigned long PopEpoch = 1; | |||||
1315 | // We perform a non-recursive top-down dominator tree walk. | |||||
1316 | for (const auto *DomNode : depth_first(DT->getRootNode())) | |||||
1317 | optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack, | |||||
1318 | LocStackInfo); | |||||
1319 | } | |||||
1320 | ||||||
1321 | void MemorySSA::placePHINodes( | |||||
1322 | const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) { | |||||
1323 | // Determine where our MemoryPhi's should go | |||||
1324 | ForwardIDFCalculator IDFs(*DT); | |||||
1325 | IDFs.setDefiningBlocks(DefiningBlocks); | |||||
1326 | SmallVector<BasicBlock *, 32> IDFBlocks; | |||||
1327 | IDFs.calculate(IDFBlocks); | |||||
1328 | ||||||
1329 | // Now place MemoryPhi nodes. | |||||
1330 | for (auto &BB : IDFBlocks) | |||||
1331 | createMemoryPhi(BB); | |||||
1332 | } | |||||
1333 | ||||||
void MemorySSA::buildMemorySSA() {
  // We create an access to represent "live on entry", for things like
  // arguments or users of globals, where the memory they use is defined before
  // the beginning of the function. We do not actually insert it into the IR.
  // We do not define a live on exit for the immediate uses, and thus our
  // semantics do *not* imply that something with no immediate uses can simply
  // be removed.
  BasicBlock &StartingPoint = F.getEntryBlock();
  LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
                                     &StartingPoint, NextID++));

  // We maintain lists of memory accesses per-block, trading memory for time. We
  // could just look up the memory access for every possible instruction in the
  // stream.
  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
  // Go through each block, figure out where defs occur, and chain together all
  // the accesses.
  for (BasicBlock &B : F) {
    bool InsertIntoDef = false;
    // Lists are created lazily so blocks with no accesses allocate nothing.
    AccessList *Accesses = nullptr;
    DefsList *Defs = nullptr;
    for (Instruction &I : B) {
      // createNewAccess returns null for instructions that don't touch memory.
      MemoryUseOrDef *MUD = createNewAccess(&I);
      if (!MUD)
        continue;

      if (!Accesses)
        Accesses = getOrCreateAccessList(&B);
      Accesses->push_back(MUD);
      if (isa<MemoryDef>(MUD)) {
        InsertIntoDef = true;
        if (!Defs)
          Defs = getOrCreateDefsList(&B);
        Defs->push_back(*MUD);
      }
    }
    if (InsertIntoDef)
      DefiningBlocks.insert(&B);
  }
  // Blocks containing defs determine where MemoryPhis are needed.
  placePHINodes(DefiningBlocks);

  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
  // filled in with all blocks.
  SmallPtrSet<BasicBlock *, 16> Visited;
  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);

  CachingWalker *Walker = getWalkerImpl();

  // Point every MemoryUse at its actual clobbering definition in one batch.
  OptimizeUses(this, Walker, AA, DT).optimizeUses();

  // Mark the uses in unreachable blocks as live on entry, so that they go
  // somewhere.
  for (auto &BB : F)
    if (!Visited.count(&BB))
      markUnreachableAsLiveOnEntry(&BB);
}
1390 | ||||||
1391 | MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); } | |||||
1392 | ||||||
1393 | MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() { | |||||
1394 | if (Walker) | |||||
1395 | return Walker.get(); | |||||
1396 | ||||||
1397 | Walker = llvm::make_unique<CachingWalker>(this, AA, DT); | |||||
1398 | return Walker.get(); | |||||
1399 | } | |||||
1400 | ||||||
1401 | // This is a helper function used by the creation routines. It places NewAccess | |||||
1402 | // into the access and defs lists for a given basic block, at the given | |||||
1403 | // insertion point. | |||||
1404 | void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess, | |||||
1405 | const BasicBlock *BB, | |||||
1406 | InsertionPlace Point) { | |||||
1407 | auto *Accesses = getOrCreateAccessList(BB); | |||||
1408 | if (Point == Beginning) { | |||||
1409 | // If it's a phi node, it goes first, otherwise, it goes after any phi | |||||
1410 | // nodes. | |||||
1411 | if (isa<MemoryPhi>(NewAccess)) { | |||||
1412 | Accesses->push_front(NewAccess); | |||||
1413 | auto *Defs = getOrCreateDefsList(BB); | |||||
1414 | Defs->push_front(*NewAccess); | |||||
1415 | } else { | |||||
1416 | auto AI = find_if_not( | |||||
1417 | *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); }); | |||||
1418 | Accesses->insert(AI, NewAccess); | |||||
1419 | if (!isa<MemoryUse>(NewAccess)) { | |||||
1420 | auto *Defs = getOrCreateDefsList(BB); | |||||
1421 | auto DI = find_if_not( | |||||
1422 | *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); }); | |||||
1423 | Defs->insert(DI, *NewAccess); | |||||
1424 | } | |||||
1425 | } | |||||
1426 | } else { | |||||
1427 | Accesses->push_back(NewAccess); | |||||
1428 | if (!isa<MemoryUse>(NewAccess)) { | |||||
1429 | auto *Defs = getOrCreateDefsList(BB); | |||||
1430 | Defs->push_back(*NewAccess); | |||||
1431 | } | |||||
1432 | } | |||||
1433 | BlockNumberingValid.erase(BB); | |||||
1434 | } | |||||
1435 | ||||||
// Splice \p What into BB's access list directly before \p InsertPt, and into
// the defs list at the corresponding position if it is a def or phi.
void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
                                      AccessList::iterator InsertPt) {
  // NOTE(review): getWritableBlockAccesses may return null for a block with no
  // accesses; this dereferences it unconditionally, so callers must only pass
  // blocks that already have a list -- confirm at call sites.
  auto *Accesses = getWritableBlockAccesses(BB);
  bool WasEnd = InsertPt == Accesses->end();
  Accesses->insert(AccessList::iterator(InsertPt), What);
  if (!isa<MemoryUse>(What)) {
    auto *Defs = getOrCreateDefsList(BB);
    // If we got asked to insert at the end, we have an easy job, just shove it
    // at the end. If we got asked to insert before an existing def, we also get
    // an iterator. If we got asked to insert before a use, we have to hunt for
    // the next def.
    if (WasEnd) {
      Defs->push_back(*What);
    } else if (isa<MemoryDef>(InsertPt)) {
      Defs->insert(InsertPt->getDefsIterator(), *What);
    } else {
      while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
        ++InsertPt;
      // Either we found a def, or we are inserting at the end
      if (InsertPt == Accesses->end())
        Defs->push_back(*What);
      else
        Defs->insert(InsertPt->getDefsIterator(), *What);
    }
  }
  // Relative numbering within the block is now stale.
  BlockNumberingValid.erase(BB);
}
1463 | ||||||
1464 | // Move What before Where in the IR. The end result is that What will belong to | |||||
1465 | // the right lists and have the right Block set, but will not otherwise be | |||||
1466 | // correct. It will not have the right defining access, and if it is a def, | |||||
1467 | // things below it will not properly be updated. | |||||
1468 | void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB, | |||||
1469 | AccessList::iterator Where) { | |||||
1470 | // Keep it in the lookup tables, remove from the lists | |||||
1471 | removeFromLists(What, false); | |||||
1472 | What->setBlock(BB); | |||||
1473 | insertIntoListsBefore(What, BB, Where); | |||||
1474 | } | |||||
1475 | ||||||
// Move \p What to \p BB at the given coarse position (Beginning/End). Like the
// iterator overload, this only fixes list membership and the block pointer.
void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
                       InsertionPlace Point) {
  if (isa<MemoryPhi>(What)) {
    assert(Point == Beginning &&
           "Can only move a Phi at the beginning of the block");
    // Update lookup table entry: a phi is keyed by its block, so moving it
    // means re-keying from the old block to the new one.
    ValueToMemoryAccess.erase(What->getBlock());
    bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
    (void)Inserted;
    assert(Inserted && "Cannot move a Phi to a block that already has one");
  }

  removeFromLists(What, false);
  What->setBlock(BB);
  insertIntoListsForBlock(What, BB, Point);
}
1492 | ||||||
// Create a new (empty) MemoryPhi for \p BB and register it in the per-block
// lists and the value lookup table. There can be at most one phi per block.
MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
  MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
  // Phi's always are placed at the front of the block.
  insertIntoListsForBlock(Phi, BB, Beginning);
  ValueToMemoryAccess[BB] = Phi;
  return Phi;
}
1501 | ||||||
// Create a MemoryUseOrDef for \p I whose defining access is already known to
// be \p Definition. \p I must actually touch memory, or the assert fires.
MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
                                               MemoryAccess *Definition) {
  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
  MemoryUseOrDef *NewAccess = createNewAccess(I);
  assert(
      NewAccess != nullptr &&
      "Tried to create a memory access for a non-memory touching instruction");
  NewAccess->setDefiningAccess(Definition);
  return NewAccess;
}
1512 | ||||||
1513 | // Return true if the instruction has ordering constraints. | |||||
1514 | // Note specifically that this only considers stores and loads | |||||
1515 | // because others are still considered ModRef by getModRefInfo. | |||||
1516 | static inline bool isOrdered(const Instruction *I) { | |||||
1517 | if (auto *SI = dyn_cast<StoreInst>(I)) { | |||||
1518 | if (!SI->isUnordered()) | |||||
1519 | return true; | |||||
1520 | } else if (auto *LI = dyn_cast<LoadInst>(I)) { | |||||
1521 | if (!LI->isUnordered()) | |||||
1522 | return true; | |||||
1523 | } | |||||
1524 | return false; | |||||
1525 | } | |||||
1526 | ||||||
1527 | /// Helper function to create new memory accesses | |||||
1528 | MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) { | |||||
1529 | // The assume intrinsic has a control dependency which we model by claiming | |||||
1530 | // that it writes arbitrarily. Ignore that fake memory dependency here. | |||||
1531 | // FIXME: Replace this special casing with a more accurate modelling of | |||||
1532 | // assume's control dependency. | |||||
1533 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) | |||||
1534 | if (II->getIntrinsicID() == Intrinsic::assume) | |||||
1535 | return nullptr; | |||||
1536 | ||||||
1537 | // Find out what affect this instruction has on memory. | |||||
1538 | ModRefInfo ModRef = AA->getModRefInfo(I, None); | |||||
1539 | // The isOrdered check is used to ensure that volatiles end up as defs | |||||
1540 | // (atomics end up as ModRef right now anyway). Until we separate the | |||||
1541 | // ordering chain from the memory chain, this enables people to see at least | |||||
1542 | // some relative ordering to volatiles. Note that getClobberingMemoryAccess | |||||
1543 | // will still give an answer that bypasses other volatile loads. TODO: | |||||
1544 | // Separate memory aliasing and ordering into two different chains so that we | |||||
1545 | // can precisely represent both "what memory will this read/write/is clobbered | |||||
1546 | // by" and "what instructions can I move this past". | |||||
1547 | bool Def = isModSet(ModRef) || isOrdered(I); | |||||
1548 | bool Use = isRefSet(ModRef); | |||||
1549 | ||||||
1550 | // It's possible for an instruction to not modify memory at all. During | |||||
1551 | // construction, we ignore them. | |||||
1552 | if (!Def && !Use) | |||||
1553 | return nullptr; | |||||
1554 | ||||||
1555 | MemoryUseOrDef *MUD; | |||||
1556 | if (Def) | |||||
1557 | MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++); | |||||
1558 | else | |||||
1559 | MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent()); | |||||
1560 | ValueToMemoryAccess[I] = MUD; | |||||
1561 | return MUD; | |||||
1562 | } | |||||
1563 | ||||||
1564 | /// Returns true if \p Replacer dominates \p Replacee . | |||||
1565 | bool MemorySSA::dominatesUse(const MemoryAccess *Replacer, | |||||
1566 | const MemoryAccess *Replacee) const { | |||||
1567 | if (isa<MemoryUseOrDef>(Replacee)) | |||||
1568 | return DT->dominates(Replacer->getBlock(), Replacee->getBlock()); | |||||
1569 | const auto *MP = cast<MemoryPhi>(Replacee); | |||||
1570 | // For a phi node, the use occurs in the predecessor block of the phi node. | |||||
1571 | // Since we may occur multiple times in the phi node, we have to check each | |||||
1572 | // operand to ensure Replacer dominates each operand where Replacee occurs. | |||||
1573 | for (const Use &Arg : MP->operands()) { | |||||
1574 | if (Arg.get() != Replacee && | |||||
1575 | !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg))) | |||||
1576 | return false; | |||||
1577 | } | |||||
1578 | return true; | |||||
1579 | } | |||||
1580 | ||||||
1581 | /// Properly remove \p MA from all of MemorySSA's lookup tables. | |||||
1582 | void MemorySSA::removeFromLookups(MemoryAccess *MA) { | |||||
1583 | assert(MA->use_empty() &&(static_cast <bool> (MA->use_empty() && "Trying to remove memory access that still has uses" ) ? void (0) : __assert_fail ("MA->use_empty() && \"Trying to remove memory access that still has uses\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1584, __extension__ __PRETTY_FUNCTION__)) | |||||
1584 | "Trying to remove memory access that still has uses")(static_cast <bool> (MA->use_empty() && "Trying to remove memory access that still has uses" ) ? void (0) : __assert_fail ("MA->use_empty() && \"Trying to remove memory access that still has uses\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1584, __extension__ __PRETTY_FUNCTION__)); | |||||
1585 | BlockNumbering.erase(MA); | |||||
1586 | if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) | |||||
1587 | MUD->setDefiningAccess(nullptr); | |||||
1588 | // Invalidate our walker's cache if necessary | |||||
1589 | if (!isa<MemoryUse>(MA)) | |||||
1590 | Walker->invalidateInfo(MA); | |||||
1591 | ||||||
1592 | Value *MemoryInst; | |||||
1593 | if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) | |||||
1594 | MemoryInst = MUD->getMemoryInst(); | |||||
1595 | else | |||||
1596 | MemoryInst = MA->getBlock(); | |||||
1597 | ||||||
1598 | auto VMA = ValueToMemoryAccess.find(MemoryInst); | |||||
1599 | if (VMA->second == MA) | |||||
1600 | ValueToMemoryAccess.erase(VMA); | |||||
1601 | } | |||||
1602 | ||||||
1603 | /// Properly remove \p MA from all of MemorySSA's lists. | |||||
1604 | /// | |||||
1605 | /// Because of the way the intrusive list and use lists work, it is important to | |||||
1606 | /// do removal in the right order. | |||||
1607 | /// ShouldDelete defaults to true, and will cause the memory access to also be | |||||
1608 | /// deleted, not just removed. | |||||
1609 | void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) { | |||||
1610 | BasicBlock *BB = MA->getBlock(); | |||||
1611 | // The access list owns the reference, so we erase it from the non-owning list | |||||
1612 | // first. | |||||
1613 | if (!isa<MemoryUse>(MA)) { | |||||
1614 | auto DefsIt = PerBlockDefs.find(BB); | |||||
1615 | std::unique_ptr<DefsList> &Defs = DefsIt->second; | |||||
1616 | Defs->remove(*MA); | |||||
1617 | if (Defs->empty()) | |||||
1618 | PerBlockDefs.erase(DefsIt); | |||||
1619 | } | |||||
1620 | ||||||
1621 | // The erase call here will delete it. If we don't want it deleted, we call | |||||
1622 | // remove instead. | |||||
1623 | auto AccessIt = PerBlockAccesses.find(BB); | |||||
1624 | std::unique_ptr<AccessList> &Accesses = AccessIt->second; | |||||
1625 | if (ShouldDelete) | |||||
1626 | Accesses->erase(MA); | |||||
1627 | else | |||||
1628 | Accesses->remove(MA); | |||||
1629 | ||||||
1630 | if (Accesses->empty()) { | |||||
1631 | PerBlockAccesses.erase(AccessIt); | |||||
1632 | BlockNumberingValid.erase(BB); | |||||
1633 | } | |||||
1634 | } | |||||
1635 | ||||||
1636 | void MemorySSA::print(raw_ostream &OS) const { | |||||
1637 | MemorySSAAnnotatedWriter Writer(this); | |||||
1638 | F.print(OS, &Writer); | |||||
1639 | } | |||||
1640 | ||||||
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debug-only convenience wrapper: dump the annotated function to dbgs().
LLVM_DUMP_METHOD__attribute__((noinline)) __attribute__((__used__)) void MemorySSA::dump() const { print(dbgs()); }
#endif
1644 | ||||||
// Run every structural self-check; each asserts internally on failure.
void MemorySSA::verifyMemorySSA() const {
  verifyDefUses(F);
  verifyDomination(F);
  verifyOrdering(F);
  verifyDominationNumbers(F);
  // The walker may keep its own caches that must remain consistent.
  Walker->verify(this);
}
1652 | ||||||
1653 | /// Verify that all of the blocks we believe to have valid domination numbers | |||||
1654 | /// actually have valid domination numbers. | |||||
1655 | void MemorySSA::verifyDominationNumbers(const Function &F) const { | |||||
1656 | #ifndef NDEBUG | |||||
1657 | if (BlockNumberingValid.empty()) | |||||
1658 | return; | |||||
1659 | ||||||
1660 | SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid; | |||||
1661 | for (const BasicBlock &BB : F) { | |||||
1662 | if (!ValidBlocks.count(&BB)) | |||||
1663 | continue; | |||||
1664 | ||||||
1665 | ValidBlocks.erase(&BB); | |||||
1666 | ||||||
1667 | const AccessList *Accesses = getBlockAccesses(&BB); | |||||
1668 | // It's correct to say an empty block has valid numbering. | |||||
1669 | if (!Accesses) | |||||
1670 | continue; | |||||
1671 | ||||||
1672 | // Block numbering starts at 1. | |||||
1673 | unsigned long LastNumber = 0; | |||||
1674 | for (const MemoryAccess &MA : *Accesses) { | |||||
1675 | auto ThisNumberIter = BlockNumbering.find(&MA); | |||||
1676 | assert(ThisNumberIter != BlockNumbering.end() &&(static_cast <bool> (ThisNumberIter != BlockNumbering.end () && "MemoryAccess has no domination number in a valid block!" ) ? void (0) : __assert_fail ("ThisNumberIter != BlockNumbering.end() && \"MemoryAccess has no domination number in a valid block!\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1677, __extension__ __PRETTY_FUNCTION__)) | |||||
1677 | "MemoryAccess has no domination number in a valid block!")(static_cast <bool> (ThisNumberIter != BlockNumbering.end () && "MemoryAccess has no domination number in a valid block!" ) ? void (0) : __assert_fail ("ThisNumberIter != BlockNumbering.end() && \"MemoryAccess has no domination number in a valid block!\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1677, __extension__ __PRETTY_FUNCTION__)); | |||||
1678 | ||||||
1679 | unsigned long ThisNumber = ThisNumberIter->second; | |||||
1680 | assert(ThisNumber > LastNumber &&(static_cast <bool> (ThisNumber > LastNumber && "Domination numbers should be strictly increasing!") ? void ( 0) : __assert_fail ("ThisNumber > LastNumber && \"Domination numbers should be strictly increasing!\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1681, __extension__ __PRETTY_FUNCTION__)) | |||||
1681 | "Domination numbers should be strictly increasing!")(static_cast <bool> (ThisNumber > LastNumber && "Domination numbers should be strictly increasing!") ? void ( 0) : __assert_fail ("ThisNumber > LastNumber && \"Domination numbers should be strictly increasing!\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1681, __extension__ __PRETTY_FUNCTION__)); | |||||
1682 | LastNumber = ThisNumber; | |||||
1683 | } | |||||
1684 | } | |||||
1685 | ||||||
1686 | assert(ValidBlocks.empty() &&(static_cast <bool> (ValidBlocks.empty() && "All valid BasicBlocks should exist in F -- dangling pointers?" ) ? void (0) : __assert_fail ("ValidBlocks.empty() && \"All valid BasicBlocks should exist in F -- dangling pointers?\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1687, __extension__ __PRETTY_FUNCTION__)) | |||||
1687 | "All valid BasicBlocks should exist in F -- dangling pointers?")(static_cast <bool> (ValidBlocks.empty() && "All valid BasicBlocks should exist in F -- dangling pointers?" ) ? void (0) : __assert_fail ("ValidBlocks.empty() && \"All valid BasicBlocks should exist in F -- dangling pointers?\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1687, __extension__ __PRETTY_FUNCTION__)); | |||||
1688 | #endif | |||||
1689 | } | |||||
1690 | ||||||
1691 | /// Verify that the order and existence of MemoryAccesses matches the | |||||
1692 | /// order and existence of memory affecting instructions. | |||||
1693 | void MemorySSA::verifyOrdering(Function &F) const { | |||||
1694 | // Walk all the blocks, comparing what the lookups think and what the access | |||||
1695 | // lists think, as well as the order in the blocks vs the order in the access | |||||
1696 | // lists. | |||||
1697 | SmallVector<MemoryAccess *, 32> ActualAccesses; | |||||
1698 | SmallVector<MemoryAccess *, 32> ActualDefs; | |||||
1699 | for (BasicBlock &B : F) { | |||||
1700 | const AccessList *AL = getBlockAccesses(&B); | |||||
1701 | const auto *DL = getBlockDefs(&B); | |||||
1702 | MemoryAccess *Phi = getMemoryAccess(&B); | |||||
1703 | if (Phi) { | |||||
1704 | ActualAccesses.push_back(Phi); | |||||
1705 | ActualDefs.push_back(Phi); | |||||
1706 | } | |||||
1707 | ||||||
1708 | for (Instruction &I : B) { | |||||
1709 | MemoryAccess *MA = getMemoryAccess(&I); | |||||
1710 | assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&(static_cast <bool> ((!MA || (AL && (isa<MemoryUse >(MA) || DL))) && "We have memory affecting instructions " "in this block but they are not in the " "access list or defs list" ) ? void (0) : __assert_fail ("(!MA || (AL && (isa<MemoryUse>(MA) || DL))) && \"We have memory affecting instructions \" \"in this block but they are not in the \" \"access list or defs list\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1713, __extension__ __PRETTY_FUNCTION__)) | |||||
1711 | "We have memory affecting instructions "(static_cast <bool> ((!MA || (AL && (isa<MemoryUse >(MA) || DL))) && "We have memory affecting instructions " "in this block but they are not in the " "access list or defs list" ) ? void (0) : __assert_fail ("(!MA || (AL && (isa<MemoryUse>(MA) || DL))) && \"We have memory affecting instructions \" \"in this block but they are not in the \" \"access list or defs list\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1713, __extension__ __PRETTY_FUNCTION__)) | |||||
1712 | "in this block but they are not in the "(static_cast <bool> ((!MA || (AL && (isa<MemoryUse >(MA) || DL))) && "We have memory affecting instructions " "in this block but they are not in the " "access list or defs list" ) ? void (0) : __assert_fail ("(!MA || (AL && (isa<MemoryUse>(MA) || DL))) && \"We have memory affecting instructions \" \"in this block but they are not in the \" \"access list or defs list\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1713, __extension__ __PRETTY_FUNCTION__)) | |||||
1713 | "access list or defs list")(static_cast <bool> ((!MA || (AL && (isa<MemoryUse >(MA) || DL))) && "We have memory affecting instructions " "in this block but they are not in the " "access list or defs list" ) ? void (0) : __assert_fail ("(!MA || (AL && (isa<MemoryUse>(MA) || DL))) && \"We have memory affecting instructions \" \"in this block but they are not in the \" \"access list or defs list\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1713, __extension__ __PRETTY_FUNCTION__)); | |||||
1714 | if (MA) { | |||||
1715 | ActualAccesses.push_back(MA); | |||||
1716 | if (isa<MemoryDef>(MA)) | |||||
1717 | ActualDefs.push_back(MA); | |||||
1718 | } | |||||
1719 | } | |||||
1720 | // Either we hit the assert, really have no accesses, or we have both | |||||
1721 | // accesses and an access list. | |||||
1722 | // Same with defs. | |||||
1723 | if (!AL && !DL) | |||||
1724 | continue; | |||||
1725 | assert(AL->size() == ActualAccesses.size() &&(static_cast <bool> (AL->size() == ActualAccesses.size () && "We don't have the same number of accesses in the block as on the " "access list") ? void (0) : __assert_fail ("AL->size() == ActualAccesses.size() && \"We don't have the same number of accesses in the block as on the \" \"access list\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1727, __extension__ __PRETTY_FUNCTION__)) | |||||
| ||||||
1726 | "We don't have the same number of accesses in the block as on the "(static_cast <bool> (AL->size() == ActualAccesses.size () && "We don't have the same number of accesses in the block as on the " "access list") ? void (0) : __assert_fail ("AL->size() == ActualAccesses.size() && \"We don't have the same number of accesses in the block as on the \" \"access list\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1727, __extension__ __PRETTY_FUNCTION__)) | |||||
1727 | "access list")(static_cast <bool> (AL->size() == ActualAccesses.size () && "We don't have the same number of accesses in the block as on the " "access list") ? void (0) : __assert_fail ("AL->size() == ActualAccesses.size() && \"We don't have the same number of accesses in the block as on the \" \"access list\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1727, __extension__ __PRETTY_FUNCTION__)); | |||||
1728 | assert((DL || ActualDefs.size() == 0) &&(static_cast <bool> ((DL || ActualDefs.size() == 0) && "Either we should have a defs list, or we should have no defs" ) ? void (0) : __assert_fail ("(DL || ActualDefs.size() == 0) && \"Either we should have a defs list, or we should have no defs\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1729, __extension__ __PRETTY_FUNCTION__)) | |||||
1729 | "Either we should have a defs list, or we should have no defs")(static_cast <bool> ((DL || ActualDefs.size() == 0) && "Either we should have a defs list, or we should have no defs" ) ? void (0) : __assert_fail ("(DL || ActualDefs.size() == 0) && \"Either we should have a defs list, or we should have no defs\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1729, __extension__ __PRETTY_FUNCTION__)); | |||||
1730 | assert((!DL || DL->size() == ActualDefs.size()) &&(static_cast <bool> ((!DL || DL->size() == ActualDefs .size()) && "We don't have the same number of defs in the block as on the " "def list") ? void (0) : __assert_fail ("(!DL || DL->size() == ActualDefs.size()) && \"We don't have the same number of defs in the block as on the \" \"def list\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1732, __extension__ __PRETTY_FUNCTION__)) | |||||
1731 | "We don't have the same number of defs in the block as on the "(static_cast <bool> ((!DL || DL->size() == ActualDefs .size()) && "We don't have the same number of defs in the block as on the " "def list") ? void (0) : __assert_fail ("(!DL || DL->size() == ActualDefs.size()) && \"We don't have the same number of defs in the block as on the \" \"def list\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1732, __extension__ __PRETTY_FUNCTION__)) | |||||
1732 | "def list")(static_cast <bool> ((!DL || DL->size() == ActualDefs .size()) && "We don't have the same number of defs in the block as on the " "def list") ? void (0) : __assert_fail ("(!DL || DL->size() == ActualDefs.size()) && \"We don't have the same number of defs in the block as on the \" \"def list\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1732, __extension__ __PRETTY_FUNCTION__)); | |||||
1733 | auto ALI = AL->begin(); | |||||
1734 | auto AAI = ActualAccesses.begin(); | |||||
1735 | while (ALI != AL->end() && AAI != ActualAccesses.end()) { | |||||
1736 | assert(&*ALI == *AAI && "Not the same accesses in the same order")(static_cast <bool> (&*ALI == *AAI && "Not the same accesses in the same order" ) ? void (0) : __assert_fail ("&*ALI == *AAI && \"Not the same accesses in the same order\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1736, __extension__ __PRETTY_FUNCTION__)); | |||||
1737 | ++ALI; | |||||
1738 | ++AAI; | |||||
1739 | } | |||||
1740 | ActualAccesses.clear(); | |||||
1741 | if (DL) { | |||||
1742 | auto DLI = DL->begin(); | |||||
1743 | auto ADI = ActualDefs.begin(); | |||||
1744 | while (DLI != DL->end() && ADI != ActualDefs.end()) { | |||||
1745 | assert(&*DLI == *ADI && "Not the same defs in the same order")(static_cast <bool> (&*DLI == *ADI && "Not the same defs in the same order" ) ? void (0) : __assert_fail ("&*DLI == *ADI && \"Not the same defs in the same order\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1745, __extension__ __PRETTY_FUNCTION__)); | |||||
1746 | ++DLI; | |||||
1747 | ++ADI; | |||||
1748 | } | |||||
1749 | } | |||||
1750 | ActualDefs.clear(); | |||||
1751 | } | |||||
1752 | } | |||||
1753 | ||||||
1754 | /// Verify the domination properties of MemorySSA by checking that each | |||||
1755 | /// definition dominates all of its uses. | |||||
1756 | void MemorySSA::verifyDomination(Function &F) const { | |||||
1757 | #ifndef NDEBUG | |||||
1758 | for (BasicBlock &B : F) { | |||||
1759 | // Phi nodes are attached to basic blocks | |||||
1760 | if (MemoryPhi *MP = getMemoryAccess(&B)) | |||||
1761 | for (const Use &U : MP->uses()) | |||||
1762 | assert(dominates(MP, U) && "Memory PHI does not dominate it's uses")(static_cast <bool> (dominates(MP, U) && "Memory PHI does not dominate it's uses" ) ? void (0) : __assert_fail ("dominates(MP, U) && \"Memory PHI does not dominate it's uses\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1762, __extension__ __PRETTY_FUNCTION__)); | |||||
1763 | ||||||
1764 | for (Instruction &I : B) { | |||||
1765 | MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I)); | |||||
1766 | if (!MD) | |||||
1767 | continue; | |||||
1768 | ||||||
1769 | for (const Use &U : MD->uses()) | |||||
1770 | assert(dominates(MD, U) && "Memory Def does not dominate it's uses")(static_cast <bool> (dominates(MD, U) && "Memory Def does not dominate it's uses" ) ? void (0) : __assert_fail ("dominates(MD, U) && \"Memory Def does not dominate it's uses\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1770, __extension__ __PRETTY_FUNCTION__)); | |||||
1771 | } | |||||
1772 | } | |||||
1773 | #endif | |||||
1774 | } | |||||
1775 | ||||||
1776 | /// Verify the def-use lists in MemorySSA, by verifying that \p Use | |||||
1777 | /// appears in the use list of \p Def. | |||||
1778 | void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const { | |||||
1779 | #ifndef NDEBUG | |||||
1780 | // The live on entry use may cause us to get a NULL def here | |||||
1781 | if (!Def) | |||||
1782 | assert(isLiveOnEntryDef(Use) &&(static_cast <bool> (isLiveOnEntryDef(Use) && "Null def but use not point to live on entry def" ) ? void (0) : __assert_fail ("isLiveOnEntryDef(Use) && \"Null def but use not point to live on entry def\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1783, __extension__ __PRETTY_FUNCTION__)) | |||||
1783 | "Null def but use not point to live on entry def")(static_cast <bool> (isLiveOnEntryDef(Use) && "Null def but use not point to live on entry def" ) ? void (0) : __assert_fail ("isLiveOnEntryDef(Use) && \"Null def but use not point to live on entry def\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1783, __extension__ __PRETTY_FUNCTION__)); | |||||
1784 | else | |||||
1785 | assert(is_contained(Def->users(), Use) &&(static_cast <bool> (is_contained(Def->users(), Use) && "Did not find use in def's use list") ? void (0) : __assert_fail ("is_contained(Def->users(), Use) && \"Did not find use in def's use list\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1786, __extension__ __PRETTY_FUNCTION__)) | |||||
1786 | "Did not find use in def's use list")(static_cast <bool> (is_contained(Def->users(), Use) && "Did not find use in def's use list") ? void (0) : __assert_fail ("is_contained(Def->users(), Use) && \"Did not find use in def's use list\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1786, __extension__ __PRETTY_FUNCTION__)); | |||||
1787 | #endif | |||||
1788 | } | |||||
1789 | ||||||
1790 | /// Verify the immediate use information, by walking all the memory | |||||
1791 | /// accesses and verifying that, for each use, it appears in the | |||||
1792 | /// appropriate def's use list | |||||
1793 | void MemorySSA::verifyDefUses(Function &F) const { | |||||
1794 | for (BasicBlock &B : F) { | |||||
1795 | // Phi nodes are attached to basic blocks | |||||
1796 | if (MemoryPhi *Phi = getMemoryAccess(&B)) { | |||||
1797 | assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance((static_cast <bool> (Phi->getNumOperands() == static_cast <unsigned>(std::distance( pred_begin(&B), pred_end( &B))) && "Incomplete MemoryPhi Node") ? void (0) : __assert_fail ("Phi->getNumOperands() == static_cast<unsigned>(std::distance( pred_begin(&B), pred_end(&B))) && \"Incomplete MemoryPhi Node\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1799, __extension__ __PRETTY_FUNCTION__)) | |||||
1798 | pred_begin(&B), pred_end(&B))) &&(static_cast <bool> (Phi->getNumOperands() == static_cast <unsigned>(std::distance( pred_begin(&B), pred_end( &B))) && "Incomplete MemoryPhi Node") ? void (0) : __assert_fail ("Phi->getNumOperands() == static_cast<unsigned>(std::distance( pred_begin(&B), pred_end(&B))) && \"Incomplete MemoryPhi Node\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1799, __extension__ __PRETTY_FUNCTION__)) | |||||
1799 | "Incomplete MemoryPhi Node")(static_cast <bool> (Phi->getNumOperands() == static_cast <unsigned>(std::distance( pred_begin(&B), pred_end( &B))) && "Incomplete MemoryPhi Node") ? void (0) : __assert_fail ("Phi->getNumOperands() == static_cast<unsigned>(std::distance( pred_begin(&B), pred_end(&B))) && \"Incomplete MemoryPhi Node\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1799, __extension__ __PRETTY_FUNCTION__)); | |||||
1800 | for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) { | |||||
1801 | verifyUseInDefs(Phi->getIncomingValue(I), Phi); | |||||
1802 | assert(find(predecessors(&B), Phi->getIncomingBlock(I)) !=(static_cast <bool> (find(predecessors(&B), Phi-> getIncomingBlock(I)) != pred_end(&B) && "Incoming phi block not a block predecessor" ) ? void (0) : __assert_fail ("find(predecessors(&B), Phi->getIncomingBlock(I)) != pred_end(&B) && \"Incoming phi block not a block predecessor\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1804, __extension__ __PRETTY_FUNCTION__)) | |||||
1803 | pred_end(&B) &&(static_cast <bool> (find(predecessors(&B), Phi-> getIncomingBlock(I)) != pred_end(&B) && "Incoming phi block not a block predecessor" ) ? void (0) : __assert_fail ("find(predecessors(&B), Phi->getIncomingBlock(I)) != pred_end(&B) && \"Incoming phi block not a block predecessor\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1804, __extension__ __PRETTY_FUNCTION__)) | |||||
1804 | "Incoming phi block not a block predecessor")(static_cast <bool> (find(predecessors(&B), Phi-> getIncomingBlock(I)) != pred_end(&B) && "Incoming phi block not a block predecessor" ) ? void (0) : __assert_fail ("find(predecessors(&B), Phi->getIncomingBlock(I)) != pred_end(&B) && \"Incoming phi block not a block predecessor\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1804, __extension__ __PRETTY_FUNCTION__)); | |||||
1805 | } | |||||
1806 | } | |||||
1807 | ||||||
1808 | for (Instruction &I : B) { | |||||
1809 | if (MemoryUseOrDef *MA = getMemoryAccess(&I)) { | |||||
1810 | verifyUseInDefs(MA->getDefiningAccess(), MA); | |||||
1811 | } | |||||
1812 | } | |||||
1813 | } | |||||
1814 | } | |||||
1815 | ||||||
/// Look up the MemoryUse or MemoryDef for \p I; returns null when the
/// instruction has no associated access (it does not touch memory).
MemoryUseOrDef *MemorySSA::getMemoryAccess(const Instruction *I) const {
  return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
}
1819 | ||||||
/// Look up the MemoryPhi attached to \p BB; returns null when the block has
/// no phi. Blocks share the ValueToMemoryAccess table with instructions.
MemoryPhi *MemorySSA::getMemoryAccess(const BasicBlock *BB) const {
  return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
}
1823 | ||||||
1824 | /// Perform a local numbering on blocks so that instruction ordering can be | |||||
1825 | /// determined in constant time. | |||||
1826 | /// TODO: We currently just number in order. If we numbered by N, we could | |||||
1827 | /// allow at least N-1 sequences of insertBefore or insertAfter (and at least | |||||
1828 | /// log2(N) sequences of mixed before and after) without needing to invalidate | |||||
1829 | /// the numbering. | |||||
1830 | void MemorySSA::renumberBlock(const BasicBlock *B) const { | |||||
1831 | // The pre-increment ensures the numbers really start at 1. | |||||
1832 | unsigned long CurrentNumber = 0; | |||||
1833 | const AccessList *AL = getBlockAccesses(B); | |||||
1834 | assert(AL != nullptr && "Asking to renumber an empty block")(static_cast <bool> (AL != nullptr && "Asking to renumber an empty block" ) ? void (0) : __assert_fail ("AL != nullptr && \"Asking to renumber an empty block\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1834, __extension__ __PRETTY_FUNCTION__)); | |||||
1835 | for (const auto &I : *AL) | |||||
1836 | BlockNumbering[&I] = ++CurrentNumber; | |||||
1837 | BlockNumberingValid.insert(B); | |||||
1838 | } | |||||
1839 | ||||||
1840 | /// Determine, for two memory accesses in the same block, | |||||
1841 | /// whether \p Dominator dominates \p Dominatee. | |||||
1842 | /// \returns True if \p Dominator dominates \p Dominatee. | |||||
1843 | bool MemorySSA::locallyDominates(const MemoryAccess *Dominator, | |||||
1844 | const MemoryAccess *Dominatee) const { | |||||
1845 | const BasicBlock *DominatorBlock = Dominator->getBlock(); | |||||
1846 | ||||||
1847 | assert((DominatorBlock == Dominatee->getBlock()) &&(static_cast <bool> ((DominatorBlock == Dominatee->getBlock ()) && "Asking for local domination when accesses are in different blocks!" ) ? void (0) : __assert_fail ("(DominatorBlock == Dominatee->getBlock()) && \"Asking for local domination when accesses are in different blocks!\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1848, __extension__ __PRETTY_FUNCTION__)) | |||||
1848 | "Asking for local domination when accesses are in different blocks!")(static_cast <bool> ((DominatorBlock == Dominatee->getBlock ()) && "Asking for local domination when accesses are in different blocks!" ) ? void (0) : __assert_fail ("(DominatorBlock == Dominatee->getBlock()) && \"Asking for local domination when accesses are in different blocks!\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1848, __extension__ __PRETTY_FUNCTION__)); | |||||
1849 | // A node dominates itself. | |||||
1850 | if (Dominatee == Dominator) | |||||
1851 | return true; | |||||
1852 | ||||||
1853 | // When Dominatee is defined on function entry, it is not dominated by another | |||||
1854 | // memory access. | |||||
1855 | if (isLiveOnEntryDef(Dominatee)) | |||||
1856 | return false; | |||||
1857 | ||||||
1858 | // When Dominator is defined on function entry, it dominates the other memory | |||||
1859 | // access. | |||||
1860 | if (isLiveOnEntryDef(Dominator)) | |||||
1861 | return true; | |||||
1862 | ||||||
1863 | if (!BlockNumberingValid.count(DominatorBlock)) | |||||
1864 | renumberBlock(DominatorBlock); | |||||
1865 | ||||||
1866 | unsigned long DominatorNum = BlockNumbering.lookup(Dominator); | |||||
1867 | // All numbers start with 1 | |||||
1868 | assert(DominatorNum != 0 && "Block was not numbered properly")(static_cast <bool> (DominatorNum != 0 && "Block was not numbered properly" ) ? void (0) : __assert_fail ("DominatorNum != 0 && \"Block was not numbered properly\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1868, __extension__ __PRETTY_FUNCTION__)); | |||||
1869 | unsigned long DominateeNum = BlockNumbering.lookup(Dominatee); | |||||
1870 | assert(DominateeNum != 0 && "Block was not numbered properly")(static_cast <bool> (DominateeNum != 0 && "Block was not numbered properly" ) ? void (0) : __assert_fail ("DominateeNum != 0 && \"Block was not numbered properly\"" , "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1870, __extension__ __PRETTY_FUNCTION__)); | |||||
1871 | return DominatorNum < DominateeNum; | |||||
1872 | } | |||||
1873 | ||||||
1874 | bool MemorySSA::dominates(const MemoryAccess *Dominator, | |||||
1875 | const MemoryAccess *Dominatee) const { | |||||
1876 | if (Dominator == Dominatee) | |||||
1877 | return true; | |||||
1878 | ||||||
1879 | if (isLiveOnEntryDef(Dominatee)) | |||||
1880 | return false; | |||||
1881 | ||||||
1882 | if (Dominator->getBlock() != Dominatee->getBlock()) | |||||
1883 | return DT->dominates(Dominator->getBlock(), Dominatee->getBlock()); | |||||
1884 | return locallyDominates(Dominator, Dominatee); | |||||
1885 | } | |||||
1886 | ||||||
1887 | bool MemorySSA::dominates(const MemoryAccess *Dominator, | |||||
1888 | const Use &Dominatee) const { | |||||
1889 | if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) { | |||||
1890 | BasicBlock *UseBB = MP->getIncomingBlock(Dominatee); | |||||
1891 | // The def must dominate the incoming block of the phi. | |||||
1892 | if (UseBB != Dominator->getBlock()) | |||||
1893 | return DT->dominates(Dominator->getBlock(), UseBB); | |||||
1894 | // If the UseBB and the DefBB are the same, compare locally. | |||||
1895 | return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee)); | |||||
1896 | } | |||||
1897 | // If it's not a PHI node use, the normal dominates can already handle it. | |||||
1898 | return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser())); | |||||
1899 | } | |||||
1900 | ||||||
// Symbolic name printed for the distinguished live-on-entry def (ID 0).
const static char LiveOnEntryStr[] = "liveOnEntry";
1902 | ||||||
1903 | void MemoryAccess::print(raw_ostream &OS) const { | |||||
1904 | switch (getValueID()) { | |||||
1905 | case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS); | |||||
1906 | case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS); | |||||
1907 | case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS); | |||||
1908 | } | |||||
1909 | llvm_unreachable("invalid value id")::llvm::llvm_unreachable_internal("invalid value id", "/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/MemorySSA.cpp" , 1909); | |||||
1910 | } | |||||
1911 | ||||||
1912 | void MemoryDef::print(raw_ostream &OS) const { | |||||
1913 | MemoryAccess *UO = getDefiningAccess(); | |||||
1914 | ||||||
1915 | auto printID = [&OS](MemoryAccess *A) { | |||||
1916 | if (A && A->getID()) | |||||
1917 | OS << A->getID(); | |||||
1918 | else | |||||
1919 | OS << LiveOnEntryStr; | |||||
1920 | }; | |||||
1921 | ||||||
1922 | OS << getID() << " = MemoryDef("; | |||||
1923 | printID(UO); | |||||
1924 | OS << ")"; | |||||
1925 | ||||||
1926 | if (isOptimized()) { | |||||
1927 | OS << "->"; | |||||
1928 | printID(getOptimized()); | |||||
1929 | ||||||
1930 | if (Optional<AliasResult> AR = getOptimizedAccessType()) | |||||
1931 | OS << " " << *AR; | |||||
1932 | } | |||||
1933 | } | |||||
1934 | ||||||
1935 | void MemoryPhi::print(raw_ostream &OS) const { | |||||
1936 | bool First = true; | |||||
1937 | OS << getID() << " = MemoryPhi("; | |||||
1938 | for (const auto &Op : operands()) { | |||||
1939 | BasicBlock *BB = getIncomingBlock(Op); | |||||
1940 | MemoryAccess *MA = cast<MemoryAccess>(Op); | |||||
1941 | if (!First) | |||||
1942 | OS << ','; | |||||
1943 | else | |||||
1944 | First = false; | |||||
1945 | ||||||
1946 | OS << '{'; | |||||
1947 | if (BB->hasName()) | |||||
1948 | OS << BB->getName(); | |||||
1949 | else | |||||
1950 | BB->printAsOperand(OS, false); | |||||
1951 | OS << ','; | |||||
1952 | if (unsigned ID = MA->getID()) | |||||
1953 | OS << ID; | |||||
1954 | else | |||||
1955 | OS << LiveOnEntryStr; | |||||
1956 | OS << '}'; | |||||
1957 | } | |||||
1958 | OS << ')'; | |||||
1959 | } | |||||
1960 | ||||||
1961 | void MemoryUse::print(raw_ostream &OS) const { | |||||
1962 | MemoryAccess *UO = getDefiningAccess(); | |||||
1963 | OS << "MemoryUse("; | |||||
1964 | if (UO && UO->getID()) | |||||
1965 | OS << UO->getID(); | |||||
1966 | else | |||||
1967 | OS << LiveOnEntryStr; | |||||
1968 | OS << ')'; | |||||
1969 | ||||||
1970 | if (Optional<AliasResult> AR = getOptimizedAccessType()) | |||||
1971 | OS << " " << *AR; | |||||
1972 | } | |||||
1973 | ||||||
/// Print this access to the debug stream followed by a newline.
/// Compiles to an empty body in release builds without LLVM_ENABLE_DUMP.
void MemoryAccess::dump() const {
  // Cannot completely remove virtual function even in release mode.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  print(dbgs());
  dbgs() << "\n";
#endif
}
1981 | ||||||
char MemorySSAPrinterLegacyPass::ID = 0; // Pass ID; the address is the identity.
1983 | ||||||
// Registers the pass with the legacy pass registry on construction.
MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
  initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
}
1987 | ||||||
// Pure analysis printer: preserves everything and only needs MemorySSA built.
void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<MemorySSAWrapperPass>();
}
1992 | ||||||
// Dumps the function's MemorySSA to the debug stream, optionally verifying
// it first. Returns false: the IR is never modified.
bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
  auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
  MSSA.print(dbgs());
  if (VerifyMemorySSA)
    MSSA.verifyMemorySSA();
  return false;
}
2000 | ||||||
AnalysisKey MemorySSAAnalysis::Key; // Unique identity for the new-PM analysis.
2002 | ||||||
// New-PM entry point: builds MemorySSA for \p F from the function's
// dominator tree and alias analysis results.
MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT));
}
2009 | ||||||
// New-PM printer pass: writes the function's MemorySSA to the pass's stream.
// Read-only, so all analyses are preserved.
PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
                                            FunctionAnalysisManager &AM) {
  OS << "MemorySSA for function: " << F.getName() << "\n";
  AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);

  return PreservedAnalyses::all();
}
2017 | ||||||
// New-PM verifier pass: asserts the MemorySSA invariants hold for \p F.
PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();

  return PreservedAnalyses::all();
}
2024 | ||||||
char MemorySSAWrapperPass::ID = 0; // Pass ID; the address is the identity.
2026 | ||||||
// Registers the wrapper pass with the legacy pass registry on construction.
MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
  initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
}
2030 | ||||||
// Drops the owned MemorySSA; leaves MSSA null until the next runOnFunction.
void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
2032 | ||||||
// Transitively requires DT and AA (MemorySSA keeps pointers into them),
// and preserves everything since building MemorySSA changes no IR.
void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
}
2038 | ||||||
// Builds (or rebuilds) the owned MemorySSA for \p F. Returns false: no IR
// modification.
bool MemorySSAWrapperPass::runOnFunction(Function &F) {
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  MSSA.reset(new MemorySSA(F, &AA, &DT));
  return false;
}
2045 | ||||||
// NOTE(review): unconditionally dereferences MSSA; assumes runOnFunction has
// run and releaseMemory has not -- confirm the pass manager guarantees this,
// otherwise this is a null dereference.
void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }
| ||||||
2047 | ||||||
// Prints the owned MemorySSA. Assumes MSSA is populated (runOnFunction ran).
void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
  MSSA->print(OS);
}
2051 | ||||||
// A walker is always bound to the MemorySSA it walks.
MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
2053 | ||||||
// Binds the caching walker to its MemorySSA and constructs the underlying
// clobber walker over the same SSA, alias analysis, and dominator tree.
MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,
                                        DominatorTree *D)
    : MemorySSAWalker(M), Walker(*M, *A, *D) {}
2057 | ||||||
2058 | void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) { | |||||
2059 | if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) | |||||
2060 | MUD->resetOptimized(); | |||||
2061 | } | |||||
2062 | ||||||
/// Walk the use-def chains starting at \p MA and find
/// the MemoryAccess that actually clobbers Loc.
///
/// \returns our clobbering memory access
/// Thin forwarder to the underlying ClobberWalker; no caching happens here.
MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) {
  return Walker.findClobber(StartingAccess, Q);
}
2071 | ||||||
2072 | MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess( | |||||
2073 | MemoryAccess *StartingAccess, const MemoryLocation &Loc) { | |||||
2074 | if (isa<MemoryPhi>(StartingAccess)) | |||||
2075 | return StartingAccess; | |||||
2076 | ||||||
2077 | auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess); | |||||
2078 | if (MSSA->isLiveOnEntryDef(StartingUseOrDef)) | |||||
2079 | return StartingUseOrDef; | |||||
2080 | ||||||
2081 | Instruction *I = StartingUseOrDef->getMemoryInst(); | |||||
2082 | ||||||
2083 | // Conservatively, fences are always clobbers, so don't perform the walk if we | |||||
2084 | // hit a fence. | |||||
2085 | if (!ImmutableCallSite(I) && I->isFenceLike()) | |||||
2086 | return StartingUseOrDef; | |||||
2087 | ||||||
2088 | UpwardsMemoryQuery Q; | |||||
2089 | Q.OriginalAccess = StartingUseOrDef; | |||||
2090 | Q.StartingLoc = Loc; | |||||
2091 | Q.Inst = I; | |||||
2092 | Q.IsCall = false; | |||||
2093 | ||||||
2094 | // Unlike the other function, do not walk to the def of a def, because we are | |||||
2095 | // handed something we already believe is the clobbering access. | |||||
2096 | MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef) | |||||
2097 | ? StartingUseOrDef->getDefiningAccess() | |||||
2098 | : StartingUseOrDef; | |||||
2099 | ||||||
2100 | MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q); | |||||
2101 | LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << "Starting Memory SSA clobber for " << *I << " is "; } } while (false); | |||||
2102 | LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << *StartingUseOrDef << "\n" ; } } while (false); | |||||
2103 | LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << "Final Memory SSA clobber for " << *I << " is "; } } while (false); | |||||
2104 | LLVM_DEBUG(dbgs() << *Clobber << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << *Clobber << "\n"; } } while (false); | |||||
2105 | return Clobber; | |||||
2106 | } | |||||
2107 | ||||||
2108 | MemoryAccess * | |||||
2109 | MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) { | |||||
2110 | auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA); | |||||
2111 | // If this is a MemoryPhi, we can't do anything. | |||||
2112 | if (!StartingAccess) | |||||
2113 | return MA; | |||||
2114 | ||||||
2115 | // If this is an already optimized use or def, return the optimized result. | |||||
2116 | // Note: Currently, we store the optimized def result in a separate field, | |||||
2117 | // since we can't use the defining access. | |||||
2118 | if (StartingAccess->isOptimized()) | |||||
2119 | return StartingAccess->getOptimized(); | |||||
2120 | ||||||
2121 | const Instruction *I = StartingAccess->getMemoryInst(); | |||||
2122 | UpwardsMemoryQuery Q(I, StartingAccess); | |||||
2123 | // We can't sanely do anything with a fence, since they conservatively clobber | |||||
2124 | // all memory, and have no locations to get pointers from to try to | |||||
2125 | // disambiguate. | |||||
2126 | if (!Q.IsCall && I->isFenceLike()) | |||||
2127 | return StartingAccess; | |||||
2128 | ||||||
2129 | if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) { | |||||
2130 | MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef(); | |||||
2131 | StartingAccess->setOptimized(LiveOnEntry); | |||||
2132 | StartingAccess->setOptimizedAccessType(None); | |||||
2133 | return LiveOnEntry; | |||||
2134 | } | |||||
2135 | ||||||
2136 | // Start with the thing we already think clobbers this location | |||||
2137 | MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess(); | |||||
2138 | ||||||
2139 | // At this point, DefiningAccess may be the live on entry def. | |||||
2140 | // If it is, we will not get a better result. | |||||
2141 | if (MSSA->isLiveOnEntryDef(DefiningAccess)) { | |||||
2142 | StartingAccess->setOptimized(DefiningAccess); | |||||
2143 | StartingAccess->setOptimizedAccessType(None); | |||||
2144 | return DefiningAccess; | |||||
2145 | } | |||||
2146 | ||||||
2147 | MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q); | |||||
2148 | LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << "Starting Memory SSA clobber for " << *I << " is "; } } while (false); | |||||
2149 | LLVM_DEBUG(dbgs() << *DefiningAccess << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << *DefiningAccess << "\n" ; } } while (false); | |||||
2150 | LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << "Final Memory SSA clobber for " << *I << " is "; } } while (false); | |||||
2151 | LLVM_DEBUG(dbgs() << *Result << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("memoryssa")) { dbgs() << *Result << "\n"; } } while (false); | |||||
2152 | ||||||
2153 | StartingAccess->setOptimized(Result); | |||||
2154 | if (MSSA->isLiveOnEntryDef(Result)) | |||||
2155 | StartingAccess->setOptimizedAccessType(None); | |||||
2156 | else if (Q.AR == MustAlias) | |||||
2157 | StartingAccess->setOptimizedAccessType(MustAlias); | |||||
2158 | ||||||
2159 | return Result; | |||||
2160 | } | |||||
2161 | ||||||
2162 | MemoryAccess * | |||||
2163 | DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) { | |||||
2164 | if (auto *Use = dyn_cast<MemoryUseOrDef>(MA)) | |||||
2165 | return Use->getDefiningAccess(); | |||||
2166 | return MA; | |||||
2167 | } | |||||
2168 | ||||||
2169 | MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess( | |||||
2170 | MemoryAccess *StartingAccess, const MemoryLocation &) { | |||||
2171 | if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess)) | |||||
2172 | return Use->getDefiningAccess(); | |||||
2173 | return StartingAccess; | |||||
2174 | } | |||||
2175 | ||||||
// Deleter callback handed to DerivedUser: casts back to the concrete type so
// the correct destructor and operator delete run.
void MemoryPhi::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryPhi *>(Self);
}
2179 | ||||||
// Deleter callback handed to DerivedUser: casts back to the concrete type so
// the correct destructor and operator delete run.
void MemoryDef::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryDef *>(Self);
}
2183 | ||||||
// Deleter callback handed to DerivedUser: casts back to the concrete type so
// the correct destructor and operator delete run.
void MemoryUse::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryUse *>(Self);
}
1 | //===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===// |
2 | // |
3 | // The LLVM Compiler Infrastructure |
4 | // |
5 | // This file is distributed under the University of Illinois Open Source |
6 | // License. See LICENSE.TXT for details. |
7 | // |
8 | //===----------------------------------------------------------------------===// |
9 | // |
10 | /// \file |
11 | /// This file exposes an interface to building/using memory SSA to |
12 | /// walk memory instructions using a use/def graph. |
13 | /// |
14 | /// Memory SSA class builds an SSA form that links together memory access |
15 | /// instructions such as loads, stores, atomics, and calls. Additionally, it |
/// does a trivial form of "heap versioning": every time the memory state changes
17 | /// in the program, we generate a new heap version. It generates |
/// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
19 | /// |
20 | /// As a trivial example, |
21 | /// define i32 @main() #0 { |
22 | /// entry: |
23 | /// %call = call noalias i8* @_Znwm(i64 4) #2 |
24 | /// %0 = bitcast i8* %call to i32* |
25 | /// %call1 = call noalias i8* @_Znwm(i64 4) #2 |
26 | /// %1 = bitcast i8* %call1 to i32* |
27 | /// store i32 5, i32* %0, align 4 |
28 | /// store i32 7, i32* %1, align 4 |
29 | /// %2 = load i32* %0, align 4 |
30 | /// %3 = load i32* %1, align 4 |
31 | /// %add = add nsw i32 %2, %3 |
32 | /// ret i32 %add |
33 | /// } |
34 | /// |
35 | /// Will become |
36 | /// define i32 @main() #0 { |
37 | /// entry: |
38 | /// ; 1 = MemoryDef(0) |
39 | /// %call = call noalias i8* @_Znwm(i64 4) #3 |
40 | /// %2 = bitcast i8* %call to i32* |
41 | /// ; 2 = MemoryDef(1) |
42 | /// %call1 = call noalias i8* @_Znwm(i64 4) #3 |
43 | /// %4 = bitcast i8* %call1 to i32* |
44 | /// ; 3 = MemoryDef(2) |
45 | /// store i32 5, i32* %2, align 4 |
46 | /// ; 4 = MemoryDef(3) |
47 | /// store i32 7, i32* %4, align 4 |
48 | /// ; MemoryUse(3) |
49 | /// %7 = load i32* %2, align 4 |
50 | /// ; MemoryUse(4) |
51 | /// %8 = load i32* %4, align 4 |
52 | /// %add = add nsw i32 %7, %8 |
53 | /// ret i32 %add |
54 | /// } |
55 | /// |
/// Given this form, all the stores that could ever affect the load at %8 can be
57 | /// gotten by using the MemoryUse associated with it, and walking from use to |
58 | /// def until you hit the top of the function. |
59 | /// |
60 | /// Each def also has a list of users associated with it, so you can walk from |
61 | /// both def to users, and users to defs. Note that we disambiguate MemoryUses, |
62 | /// but not the RHS of MemoryDefs. You can see this above at %7, which would |
63 | /// otherwise be a MemoryUse(4). Being disambiguated means that for a given |
64 | /// store, all the MemoryUses on its use lists are may-aliases of that store |
65 | /// (but the MemoryDefs on its use list may not be). |
66 | /// |
67 | /// MemoryDefs are not disambiguated because it would require multiple reaching |
68 | /// definitions, which would require multiple phis, and multiple memoryaccesses |
69 | /// per instruction. |
70 | // |
71 | //===----------------------------------------------------------------------===// |
72 | |
73 | #ifndef LLVM_ANALYSIS_MEMORYSSA_H |
74 | #define LLVM_ANALYSIS_MEMORYSSA_H |
75 | |
76 | #include "llvm/ADT/DenseMap.h" |
77 | #include "llvm/ADT/GraphTraits.h" |
78 | #include "llvm/ADT/SmallPtrSet.h" |
79 | #include "llvm/ADT/SmallVector.h" |
80 | #include "llvm/ADT/ilist.h" |
81 | #include "llvm/ADT/ilist_node.h" |
82 | #include "llvm/ADT/iterator.h" |
83 | #include "llvm/ADT/iterator_range.h" |
84 | #include "llvm/ADT/simple_ilist.h" |
85 | #include "llvm/Analysis/AliasAnalysis.h" |
86 | #include "llvm/Analysis/MemoryLocation.h" |
87 | #include "llvm/Analysis/PHITransAddr.h" |
88 | #include "llvm/IR/BasicBlock.h" |
89 | #include "llvm/IR/DerivedUser.h" |
90 | #include "llvm/IR/Dominators.h" |
91 | #include "llvm/IR/Module.h" |
92 | #include "llvm/IR/Type.h" |
93 | #include "llvm/IR/Use.h" |
94 | #include "llvm/IR/User.h" |
95 | #include "llvm/IR/Value.h" |
96 | #include "llvm/IR/ValueHandle.h" |
97 | #include "llvm/Pass.h" |
98 | #include "llvm/Support/Casting.h" |
99 | #include <algorithm> |
100 | #include <cassert> |
101 | #include <cstddef> |
102 | #include <iterator> |
103 | #include <memory> |
104 | #include <utility> |
105 | |
106 | namespace llvm { |
107 | |
108 | class Function; |
109 | class Instruction; |
110 | class MemoryAccess; |
111 | class MemorySSAWalker; |
112 | class LLVMContext; |
113 | class raw_ostream; |
114 | |
namespace MSSAHelpers {

// Empty tag types distinguishing the two intrusive lists a MemoryAccess is
// simultaneously a node of (see the ilist_node bases of MemoryAccess).
struct AllAccessTag {};
struct DefsOnlyTag {};

} // end namespace MSSAHelpers
121 | |
enum : unsigned {
  // Used to signify what the default invalid ID is for MemoryAccess's
  // getID() — i.e. the "no ID assigned" sentinel.
  INVALID_MEMORYACCESS_ID = -1U
};
127 | |
128 | template <class T> class memoryaccess_def_iterator_base; |
129 | using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>; |
130 | using const_memoryaccess_def_iterator = |
131 | memoryaccess_def_iterator_base<const MemoryAccess>; |
132 | |
// The base for all memory accesses. All memory accesses in a block are
// linked together using an intrusive list. Each access is a node of TWO
// intrusive lists at once, distinguished by tag type: one holding every
// access, one holding only defs (and phis).
class MemoryAccess
    : public DerivedUser,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
public:
  using AllAccessType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsOnlyType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  MemoryAccess(const MemoryAccess &) = delete;
  MemoryAccess &operator=(const MemoryAccess &) = delete;

  // Allocation happens only through the concrete subclasses.
  void *operator new(size_t) = delete;

  // Methods for support type inquiry through isa, cast, and
  // dyn_cast
  static bool classof(const Value *V) {
    unsigned ID = V->getValueID();
    return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
  }

  /// The basic block this access belongs to.
  BasicBlock *getBlock() const { return Block; }

  void print(raw_ostream &OS) const;
  void dump() const;

  /// The user iterators for a memory access
  using iterator = user_iterator;
  using const_iterator = const_user_iterator;

  /// This iterator walks over all of the defs in a given
  /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
  /// MemoryUse/MemoryDef, this walks the defining access.
  memoryaccess_def_iterator defs_begin();
  const_memoryaccess_def_iterator defs_begin() const;
  memoryaccess_def_iterator defs_end();
  const_memoryaccess_def_iterator defs_end() const;

  /// Get the iterators for the all access list and the defs only list
  /// We default to the all access list.
  AllAccessType::self_iterator getIterator() {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::const_self_iterator getIterator() const {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::reverse_self_iterator getReverseIterator() {
    return this->AllAccessType::getReverseIterator();
  }
  AllAccessType::const_reverse_self_iterator getReverseIterator() const {
    return this->AllAccessType::getReverseIterator();
  }
  DefsOnlyType::self_iterator getDefsIterator() {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::const_self_iterator getDefsIterator() const {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
    return this->DefsOnlyType::getReverseIterator();
  }
  DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
    return this->DefsOnlyType::getReverseIterator();
  }

protected:
  friend class MemoryDef;
  friend class MemoryPhi;
  friend class MemorySSA;
  friend class MemoryUse;
  friend class MemoryUseOrDef;

  /// Used by MemorySSA to change the block of a MemoryAccess when it is
  /// moved.
  void setBlock(BasicBlock *BB) { Block = BB; }

  /// Used for debugging and tracking things about MemoryAccesses.
  /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
  inline unsigned getID() const;

  // Accesses are void-typed Values; deletion is routed through the
  // DeleteValue callback supplied by the concrete subclass.
  MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
               BasicBlock *BB, unsigned NumOperands)
      : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
        Block(BB) {}

  // Use deleteValue() to delete a generic MemoryAccess.
  ~MemoryAccess() = default;

private:
  BasicBlock *Block;
};
227 | |
// Intrusive-list nodes are destroyed via deleteValue() (Value-style
// deletion) rather than plain delete, matching the deleteMe callbacks.
template <>
struct ilist_alloc_traits<MemoryAccess> {
  static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); }
};
232 | |
// Stream insertion delegates to MemoryAccess::print.
inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
  MA.print(OS);
  return OS;
}
237 | |
238 | /// Class that has the common methods + fields of memory uses/defs. It's |
239 | /// a little awkward to have, but there are many cases where we want either a |
240 | /// use or def, and there are many cases where uses are needed (defs aren't |
241 | /// acceptable), and vice-versa. |
242 | /// |
243 | /// This class should never be instantiated directly; make a MemoryUse or |
244 | /// MemoryDef instead. |
245 | class MemoryUseOrDef : public MemoryAccess { |
246 | public: |
247 | void *operator new(size_t) = delete; |
248 | |
249 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)public: inline MemoryAccess *getOperand(unsigned) const; inline void setOperand(unsigned, MemoryAccess*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const ; protected: template <int> inline Use &Op(); template <int> inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
250 | |
251 | /// Get the instruction that this MemoryUse represents. |
252 | Instruction *getMemoryInst() const { return MemoryInstruction; } |
253 | |
254 | /// Get the access that produces the memory state used by this Use. |
255 | MemoryAccess *getDefiningAccess() const { return getOperand(0); } |
256 | |
257 | static bool classof(const Value *MA) { |
258 | return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal; |
259 | } |
260 | |
261 | // Sadly, these have to be public because they are needed in some of the |
262 | // iterators. |
263 | inline bool isOptimized() const; |
264 | inline MemoryAccess *getOptimized() const; |
265 | inline void setOptimized(MemoryAccess *); |
266 | |
267 | // Retrieve AliasResult type of the optimized access. Ideally this would be |
268 | // returned by the caching walker and may go away in the future. |
269 | Optional<AliasResult> getOptimizedAccessType() const { |
270 | return OptimizedAccessAlias; |
271 | } |
272 | |
273 | /// Reset the ID of what this MemoryUse was optimized to, causing it to |
274 | /// be rewalked by the walker if necessary. |
275 | /// This really should only be called by tests. |
276 | inline void resetOptimized(); |
277 | |
278 | protected: |
279 | friend class MemorySSA; |
280 | friend class MemorySSAUpdater; |
281 | |
282 | MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty, |
283 | DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB) |
284 | : MemoryAccess(C, Vty, DeleteValue, BB, 1), MemoryInstruction(MI), |
285 | OptimizedAccessAlias(MayAlias) { |
286 | setDefiningAccess(DMA); |
287 | } |
288 | |
289 | // Use deleteValue() to delete a generic MemoryUseOrDef. |
290 | ~MemoryUseOrDef() = default; |
291 | |
292 | void setOptimizedAccessType(Optional<AliasResult> AR) { |
293 | OptimizedAccessAlias = AR; |
294 | } |
295 | |
296 | void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false, |
297 | Optional<AliasResult> AR = MayAlias) { |
298 | if (!Optimized) { |
299 | setOperand(0, DMA); |
300 | return; |
301 | } |
302 | setOptimized(DMA); |
303 | setOptimizedAccessType(AR); |
304 | } |
305 | |
306 | private: |
307 | Instruction *MemoryInstruction; |
308 | Optional<AliasResult> OptimizedAccessAlias; |
309 | }; |
310 | |
// Operand-trait glue: a MemoryUseOrDef has exactly one operand (its defining
// access).  The macro emits the out-of-line operand accessor boilerplate
// declared inside the class.
template <>
struct OperandTraits<MemoryUseOrDef>
    : public FixedNumOperandTraits<MemoryUseOrDef, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
315 | |
/// Represents read-only accesses to memory
///
/// In particular, the set of Instructions that will be represented by
/// MemoryUse's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Ref".
class MemoryUse final : public MemoryUseOrDef {
public:
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)

  MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
      : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB) {}

  // allocate space for exactly one operand
  void *operator new(size_t s) { return User::operator new(s, 1); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal;
  }

  void print(raw_ostream &OS) const;

  /// For a MemoryUse the optimized access *is* the defining access: record
  /// \p DMA's ID and rewrite operand 0 to point at it.
  void setOptimized(MemoryAccess *DMA) {
    OptimizedID = DMA->getID();
    setOperand(0, DMA);
  }

  /// The cached ID doubles as a validity check: if the defining access has
  /// been replaced since setOptimized ran, the IDs no longer match.
  bool isOptimized() const {
    return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
  }

  MemoryAccess *getOptimized() const {
    return getDefiningAccess();
  }

  void resetOptimized() {
    OptimizedID = INVALID_MEMORYACCESS_ID;
  }

protected:
  friend class MemorySSA;

private:
  static void deleteMe(DerivedUser *Self);

  // ID of the access this use was optimized to; INVALID_MEMORYACCESS_ID
  // when the use is not (or no longer) optimized.
  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
};
362 | |
// Operand-trait glue: a MemoryUse has exactly one operand (its defining
// access).  The macro emits the operand accessor boilerplate.
template <>
struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
366 | |
367 | /// Represents a read-write access to memory, whether it is a must-alias, |
368 | /// or a may-alias. |
369 | /// |
370 | /// In particular, the set of Instructions that will be represented by |
371 | /// MemoryDef's is exactly the set of Instructions for which |
372 | /// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef". |
373 | /// Note that, in order to provide def-def chains, all defs also have a use |
374 | /// associated with them. This use points to the nearest reaching |
375 | /// MemoryDef/MemoryPhi. |
376 | class MemoryDef final : public MemoryUseOrDef { |
377 | public: |
378 | friend class MemorySSA; |
379 | |
380 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)public: inline MemoryAccess *getOperand(unsigned) const; inline void setOperand(unsigned, MemoryAccess*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const ; protected: template <int> inline Use &Op(); template <int> inline const Use &Op() const; public: inline unsigned getNumOperands() const; |
381 | |
382 | MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB, |
383 | unsigned Ver) |
384 | : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB), ID(Ver) {} |
385 | |
386 | // allocate space for exactly one operand |
387 | void *operator new(size_t s) { return User::operator new(s, 1); } |
388 | |
389 | static bool classof(const Value *MA) { |
390 | return MA->getValueID() == MemoryDefVal; |
391 | } |
392 | |
393 | void setOptimized(MemoryAccess *MA) { |
394 | Optimized = MA; |
395 | OptimizedID = getDefiningAccess()->getID(); |
396 | } |
397 | |
398 | MemoryAccess *getOptimized() const { |
399 | return cast_or_null<MemoryAccess>(Optimized); |
400 | } |
401 | |
402 | bool isOptimized() const { |
403 | return getOptimized() && getDefiningAccess() && |
404 | OptimizedID == getDefiningAccess()->getID(); |
405 | } |
406 | |
407 | void resetOptimized() { |
408 | OptimizedID = INVALID_MEMORYACCESS_ID; |
409 | } |
410 | |
411 | void print(raw_ostream &OS) const; |
412 | |
413 | unsigned getID() const { return ID; } |
414 | |
415 | private: |
416 | static void deleteMe(DerivedUser *Self); |
417 | |
418 | const unsigned ID; |
419 | unsigned OptimizedID = INVALID_MEMORYACCESS_ID; |
420 | WeakVH Optimized; |
421 | }; |
422 | |
// Operand-trait glue: a MemoryDef has exactly one operand (its defining
// access); the optimized access is tracked separately via a WeakVH.
template <>
struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
426 | |
/// Represents phi nodes for memory accesses.
///
/// These have the same semantic as regular phi nodes, with the exception that
/// only one phi will ever exist in a given basic block.
/// Guaranteeing one phi per block means guaranteeing there is only ever one
/// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
/// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
/// a MemoryPhi's operands.
/// That is, given
/// if (a) {
///   store %a
///   store %b
/// }
/// it *must* be transformed into
/// if (a) {
///   1 = MemoryDef(liveOnEntry)
///   store %a
///   2 = MemoryDef(1)
///   store %b
/// }
/// and *not*
/// if (a) {
///   1 = MemoryDef(liveOnEntry)
///   store %a
///   2 = MemoryDef(liveOnEntry)
///   store %b
/// }
/// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
/// end of the branch, and if there are not two phi nodes, one will be
/// disconnected completely from the SSA graph below that point.
/// Because MemoryUse's do not generate new definitions, they do not have this
/// issue.
class MemoryPhi final : public MemoryAccess {
  // allocate space for exactly zero operands
  void *operator new(size_t s) { return User::operator new(s); }

public:
  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)

  MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
      : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
        ReservedSpace(NumPreds) {
    allocHungoffUses(ReservedSpace);
  }

  // Block iterator interface. This provides access to the list of incoming
  // basic blocks, which parallels the list of incoming values.
  using block_iterator = BasicBlock **;
  using const_block_iterator = BasicBlock *const *;

  // The incoming-block array lives immediately after the hung-off Use array
  // and its UserRef slot (see User::allocHungoffUses with IsPhi == true),
  // hence the pointer arithmetic below: skip ReservedSpace Uses, then skip
  // one UserRef.
  block_iterator block_begin() {
    auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace);
    return reinterpret_cast<block_iterator>(Ref + 1);
  }

  const_block_iterator block_begin() const {
    const auto *Ref =
        reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace);
    return reinterpret_cast<const_block_iterator>(Ref + 1);
  }

  block_iterator block_end() { return block_begin() + getNumOperands(); }

  const_block_iterator block_end() const {
    return block_begin() + getNumOperands();
  }

  iterator_range<block_iterator> blocks() {
    return make_range(block_begin(), block_end());
  }

  iterator_range<const_block_iterator> blocks() const {
    return make_range(block_begin(), block_end());
  }

  op_range incoming_values() { return operands(); }

  const_op_range incoming_values() const { return operands(); }

  /// Return the number of incoming edges
  unsigned getNumIncomingValues() const { return getNumOperands(); }

  /// Return incoming value number x
  MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
  void setIncomingValue(unsigned I, MemoryAccess *V) {
    assert(V && "PHI node got a null value!");
    setOperand(I, V);
  }

  // Incoming value I is stored directly as operand I (unlike IR PHINodes,
  // there is no interleaving).
  static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
  static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }

  /// Return incoming basic block number @p i.
  BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }

  /// Return incoming basic block corresponding
  /// to an operand of the PHI.
  BasicBlock *getIncomingBlock(const Use &U) const {
    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
    return getIncomingBlock(unsigned(&U - op_begin()));
  }

  /// Return incoming basic block corresponding
  /// to value use iterator.
  BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
    return getIncomingBlock(I.getUse());
  }

  void setIncomingBlock(unsigned I, BasicBlock *BB) {
    assert(BB && "PHI node got a null basic block!");
    block_begin()[I] = BB;
  }

  /// Add an incoming value to the end of the PHI list
  void addIncoming(MemoryAccess *V, BasicBlock *BB) {
    if (getNumOperands() == ReservedSpace)
      growOperands(); // Get more space!
    // Initialize some new operands.
    setNumHungOffUseOperands(getNumOperands() + 1);
    setIncomingValue(getNumOperands() - 1, V);
    setIncomingBlock(getNumOperands() - 1, BB);
  }

  /// Return the first index of the specified basic
  /// block in the value list for this PHI.  Returns -1 if no instance.
  int getBasicBlockIndex(const BasicBlock *BB) const {
    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
      if (block_begin()[I] == BB)
        return I;
    return -1;
  }

  MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument!");
    return getIncomingValue(Idx);
  }

  // After deleting incoming position I, the order of incoming may be changed.
  // Implemented as swap-with-last then shrink, so it is O(1) but does not
  // preserve operand order.
  void unorderedDeleteIncoming(unsigned I) {
    unsigned E = getNumOperands();
    assert(I < E && "Cannot remove out of bounds Phi entry.");
    // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi
    // itself should be deleted.
    assert(E >= 2 && "Cannot only remove incoming values in MemoryPhis with "
                     "at least 2 values.");
    setIncomingValue(I, getIncomingValue(E - 1));
    setIncomingBlock(I, block_begin()[E - 1]);
    setOperand(E - 1, nullptr);
    block_begin()[E - 1] = nullptr;
    setNumHungOffUseOperands(getNumOperands() - 1);
  }

  // After deleting entries that satisfy Pred, remaining entries may have
  // changed order.  Note the re-read of getNumOperands() and the index
  // decrement: the swapped-in last entry must itself be re-tested.
  template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) {
    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
      if (Pred(getIncomingValue(I), getIncomingBlock(I))) {
        unorderedDeleteIncoming(I);
        E = getNumOperands();
        --I;
      }
    assert(getNumOperands() >= 1 &&
           "Cannot remove all incoming blocks in a MemoryPhi.");
  }

  // After deleting incoming block BB, the incoming blocks order may be changed.
  void unorderedDeleteIncomingBlock(const BasicBlock *BB) {
    unorderedDeleteIncomingIf(
        [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; });
  }

  // After deleting incoming memory access MA, the incoming accesses order may
  // be changed.
  void unorderedDeleteIncomingValue(const MemoryAccess *MA) {
    unorderedDeleteIncomingIf(
        [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; });
  }

  static bool classof(const Value *V) {
    return V->getValueID() == MemoryPhiVal;
  }

  void print(raw_ostream &OS) const;

  unsigned getID() const { return ID; }

protected:
  friend class MemorySSA;

  /// this is more complicated than the generic
  /// User::allocHungoffUses, because we have to allocate Uses for the incoming
  /// values and pointers to the incoming blocks, all in one allocation.
  void allocHungoffUses(unsigned N) {
    User::allocHungoffUses(N, /* IsPhi */ true);
  }

private:
  // For debugging only
  const unsigned ID;
  // Number of operand slots currently allocated (may exceed getNumOperands()).
  unsigned ReservedSpace;

  /// This grows the operand list in response to a push_back style of
  /// operation.  This grows the number of ops by 1.5 times.
  void growOperands() {
    unsigned E = getNumOperands();
    // 2 op PHI nodes are VERY common, so reserve at least enough for that.
    ReservedSpace = std::max(E + E / 2, 2u);
    growHungoffUses(ReservedSpace, /* IsPhi */ true);
  }

  static void deleteMe(DerivedUser *Self);
};
641 | |
642 | inline unsigned MemoryAccess::getID() const { |
643 | assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&(static_cast <bool> ((isa<MemoryDef>(this) || isa <MemoryPhi>(this)) && "only memory defs and phis have ids" ) ? void (0) : __assert_fail ("(isa<MemoryDef>(this) || isa<MemoryPhi>(this)) && \"only memory defs and phis have ids\"" , "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/Analysis/MemorySSA.h" , 644, __extension__ __PRETTY_FUNCTION__)) |
644 | "only memory defs and phis have ids")(static_cast <bool> ((isa<MemoryDef>(this) || isa <MemoryPhi>(this)) && "only memory defs and phis have ids" ) ? void (0) : __assert_fail ("(isa<MemoryDef>(this) || isa<MemoryPhi>(this)) && \"only memory defs and phis have ids\"" , "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/Analysis/MemorySSA.h" , 644, __extension__ __PRETTY_FUNCTION__)); |
645 | if (const auto *MD = dyn_cast<MemoryDef>(this)) |
646 | return MD->getID(); |
647 | return cast<MemoryPhi>(this)->getID(); |
648 | } |
649 | |
650 | inline bool MemoryUseOrDef::isOptimized() const { |
651 | if (const auto *MD = dyn_cast<MemoryDef>(this)) |
652 | return MD->isOptimized(); |
653 | return cast<MemoryUse>(this)->isOptimized(); |
654 | } |
655 | |
656 | inline MemoryAccess *MemoryUseOrDef::getOptimized() const { |
657 | if (const auto *MD = dyn_cast<MemoryDef>(this)) |
658 | return MD->getOptimized(); |
659 | return cast<MemoryUse>(this)->getOptimized(); |
660 | } |
661 | |
662 | inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) { |
663 | if (auto *MD = dyn_cast<MemoryDef>(this)) |
664 | MD->setOptimized(MA); |
665 | else |
666 | cast<MemoryUse>(this)->setOptimized(MA); |
667 | } |
668 | |
669 | inline void MemoryUseOrDef::resetOptimized() { |
670 | if (auto *MD = dyn_cast<MemoryDef>(this)) |
671 | MD->resetOptimized(); |
672 | else |
673 | cast<MemoryUse>(this)->resetOptimized(); |
674 | } |
675 | |
// MemoryPhi uses hung-off operand storage (variable operand count); the 2 is
// only the minimum hint.  The macro emits the operand accessor boilerplate.
template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
678 | |
/// Encapsulates MemorySSA, including all data associated with memory
/// accesses.
class MemorySSA {
public:
  MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
  ~MemorySSA();

  MemorySSAWalker *getWalker();

  /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
  /// access associated with it.  If passed a basic block gets the memory phi
  /// node that exists for that block, if there is one. Otherwise, this will get
  /// a MemoryUseOrDef.
  MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
  MemoryPhi *getMemoryAccess(const BasicBlock *BB) const;

  void dump() const;
  void print(raw_ostream &) const;

  /// Return true if \p MA represents the live on entry value
  ///
  /// Loads and stores from pointer arguments and other global values may be
  /// defined by memory operations that do not occur in the current function, so
  /// they may be live on entry to the function.  MemorySSA represents such
  /// memory state by the live on entry definition, which is guaranteed to occur
  /// before any other memory access in the function.
  inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
    return MA == LiveOnEntryDef.get();
  }

  inline MemoryAccess *getLiveOnEntryDef() const {
    return LiveOnEntryDef.get();
  }

  // Sadly, iplists, by default, owns and deletes pointers added to the
  // list.  It's not currently possible to have two iplists for the same type,
  // where one owns the pointers, and one does not.  This is because the traits
  // are per-type, not per-tag.  If this ever changes, we should make the
  // DefList an iplist.
  using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsList =
      simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  /// Return the list of MemoryAccess's for a given basic block.
  ///
  /// This list is not modifiable by the user.
  const AccessList *getBlockAccesses(const BasicBlock *BB) const {
    // Const wrapper over the writable lookup; may return null (no accesses).
    return getWritableBlockAccesses(BB);
  }

  /// Return the list of MemoryDef's and MemoryPhi's for a given basic
  /// block.
  ///
  /// This list is not modifiable by the user.
  const DefsList *getBlockDefs(const BasicBlock *BB) const {
    // Const wrapper over the writable lookup; may return null (no defs).
    return getWritableBlockDefs(BB);
  }

  /// Given two memory accesses in the same basic block, determine
  /// whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// Given two memory accesses in potentially different blocks,
  /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
  /// dominates Use \p B.
  bool dominates(const MemoryAccess *A, const Use &B) const;

  /// Verify that MemorySSA is self consistent (IE definitions dominate
  /// all uses, uses appear in the right places).  This is used by unit tests.
  void verifyMemorySSA() const;

  /// Used in various insertion functions to specify whether we are talking
  /// about the beginning or end of a block.
  enum InsertionPlace { Beginning, End };

protected:
  // Used by Memory SSA annotater, dumpers, and wrapper pass
  friend class MemorySSAAnnotatedWriter;
  friend class MemorySSAPrinterLegacyPass;
  friend class MemorySSAUpdater;

  void verifyDefUses(Function &F) const;
  void verifyDomination(Function &F) const;
  void verifyOrdering(Function &F) const;
  void verifyDominationNumbers(const Function &F) const;

  // This is used by the use optimizer and updater.
  AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
    auto It = PerBlockAccesses.find(BB);
    return It == PerBlockAccesses.end() ? nullptr : It->second.get();
  }

  // This is used by the use optimizer and updater.
  DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
    auto It = PerBlockDefs.find(BB);
    return It == PerBlockDefs.end() ? nullptr : It->second.get();
  }

  // These is used by the updater to perform various internal MemorySSA
  // machinsations.  They do not always leave the IR in a correct state, and
  // relies on the updater to fixup what it breaks, so it is not public.

  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
  void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point);

  // Rename the dominator tree branch rooted at BB.
  void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
                  SmallPtrSetImpl<BasicBlock *> &Visited) {
    // Delegates to the DomTreeNode overload with SkipVisited and
    // RenameAllUses both enabled.
    renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
  }

  void removeFromLookups(MemoryAccess *);
  void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
  void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
                               InsertionPlace);
  void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
                             AccessList::iterator);
  MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *);

private:
  class CachingWalker;
  class OptimizeUses;

  CachingWalker *getWalkerImpl();
  void buildMemorySSA();
  void optimizeUses();

  void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;

  using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
  using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;

  void
  determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks);
  void markUnreachableAsLiveOnEntry(BasicBlock *BB);
  bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
  MemoryPhi *createMemoryPhi(BasicBlock *BB);
  MemoryUseOrDef *createNewAccess(Instruction *);
  MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
  void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
  MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
  void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
  void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
                  SmallPtrSetImpl<BasicBlock *> &Visited,
                  bool SkipVisited = false, bool RenameAllUses = false);
  AccessList *getOrCreateAccessList(const BasicBlock *);
  DefsList *getOrCreateDefsList(const BasicBlock *);
  void renumberBlock(const BasicBlock *) const;
  AliasAnalysis *AA;
  DominatorTree *DT;
  Function &F;

  // Memory SSA mappings
  DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;

  // These two mappings contain the main block to access/def mappings for
  // MemorySSA.  The list contained in PerBlockAccesses really owns all the
  // MemoryAccesses.
  // Both maps maintain the invariant that if a block is found in them, the
  // corresponding list is not empty, and if a block is not found in them, the
  // corresponding list is empty.
  AccessMap PerBlockAccesses;
  DefsMap PerBlockDefs;
  std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef;

  // Domination mappings
  // Note that the numbering is local to a block, even though the map is
  // global.
  mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
  mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;

  // Memory SSA building info
  std::unique_ptr<CachingWalker> Walker;
  unsigned NextID;
};
857 | |
858 | // Internal MemorySSA utils, for use by MemorySSA classes and walkers |
class MemorySSAUtil {
protected:
  friend class GVNHoist;
  friend class MemorySSAWalker;

  // This function should not be used by new passes.
  // Returns true if \p MD clobbers \p MU, as judged by \p AA.
  static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                  AliasAnalysis &AA);
};
868 | |
869 | // This pass does eager building and then printing of MemorySSA. It is used by |
870 | // the tests to be able to build, dump, and verify Memory SSA. |
class MemorySSAPrinterLegacyPass : public FunctionPass {
public:
  MemorySSAPrinterLegacyPass();

  /// Builds MemorySSA for \p F and prints/verifies it (never mutates IR).
  bool runOnFunction(Function &) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  static char ID; // Pass identification, used by the legacy pass manager.
};
880 | |
881 | /// An analysis that produces \c MemorySSA for a function. |
882 | /// |
883 | class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> { |
884 | friend AnalysisInfoMixin<MemorySSAAnalysis>; |
885 | |
886 | static AnalysisKey Key; |
887 | |
888 | public: |
889 | // Wrap MemorySSA result to ensure address stability of internal MemorySSA |
890 | // pointers after construction. Use a wrapper class instead of plain |
891 | // unique_ptr<MemorySSA> to avoid build breakage on MSVC. |
892 | struct Result { |
893 | Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {} |
894 | |
895 | MemorySSA &getMSSA() { return *MSSA.get(); } |
896 | |
897 | std::unique_ptr<MemorySSA> MSSA; |
898 | }; |
899 | |
900 | Result run(Function &F, FunctionAnalysisManager &AM); |
901 | }; |
902 | |
903 | /// Printer pass for \c MemorySSA. |
class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
  raw_ostream &OS; // Destination stream for the MemorySSA dump.

public:
  explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}

  /// Print the function's MemorySSA to OS; preserves all analyses.
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
912 | |
913 | /// Verifier pass for \c MemorySSA. |
struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
  /// Run MemorySSA's internal consistency checks on \p F.
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
917 | |
918 | /// Legacy analysis pass which computes \c MemorySSA. |
class MemorySSAWrapperPass : public FunctionPass {
public:
  MemorySSAWrapperPass();

  static char ID; // Pass identification for the legacy pass manager.

  bool runOnFunction(Function &) override;
  void releaseMemory() override;
  // Valid only between runOnFunction and releaseMemory; MSSA is owned below.
  MemorySSA &getMSSA() { return *MSSA; }
  const MemorySSA &getMSSA() const { return *MSSA; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  void verifyAnalysis() const override;
  void print(raw_ostream &OS, const Module *M = nullptr) const override;

private:
  std::unique_ptr<MemorySSA> MSSA;
};
938 | |
939 | /// This is the generic walker interface for walkers of MemorySSA. |
940 | /// Walkers are used to be able to further disambiguate the def-use chains |
941 | /// MemorySSA gives you, or otherwise produce better info than MemorySSA gives |
942 | /// you. |
943 | /// In particular, while the def-use chains provide basic information, and are |
944 | /// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a |
945 | /// MemoryUse as AliasAnalysis considers it, a user mant want better or other |
946 | /// information. In particular, they may want to use SCEV info to further |
947 | /// disambiguate memory accesses, or they may want the nearest dominating |
948 | /// may-aliasing MemoryDef for a call or a store. This API enables a |
949 | /// standardized interface to getting and using that info. |
class MemorySSAWalker {
public:
  MemorySSAWalker(MemorySSA *);
  virtual ~MemorySSAWalker() = default;

  using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;

  /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
  /// will give you the nearest dominating MemoryAccess that Mod's the location
  /// the instruction accesses (by skipping any def which AA can prove does not
  /// alias the location(s) accessed by the instruction given).
  ///
  /// Note that this will return a single access, and it must dominate the
  /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
  /// this will return the MemoryPhi, not the operand. This means that
  /// given:
  /// if (a) {
  ///   1 = MemoryDef(liveOnEntry)
  ///   store %a
  /// } else {
  ///   2 = MemoryDef(liveOnEntry)
  ///   store %b
  /// }
  /// 3 = MemoryPhi(2, 1)
  /// MemoryUse(3)
  /// load %a
  ///
  /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
  /// in the if (a) branch.
  MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
    // NOTE(review): assumes MSSA is non-null (it is taken by the constructor
    // and updated by MemorySSA's move machinery) — a null MSSA would fault
    // here; confirm against the analyzer report on the walker call sites.
    MemoryAccess *MA = MSSA->getMemoryAccess(I);
    // In NDEBUG builds this assert compiles away and a null MA would be
    // forwarded to the virtual overload below.
    assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
    return getClobberingMemoryAccess(MA);
  }

  /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
  /// but takes a MemoryAccess instead of an Instruction.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;

  /// Given a potentially clobbering memory access and a new location,
  /// calling this will give you the nearest dominating clobbering MemoryAccess
  /// (by skipping non-aliasing def links).
  ///
  /// This version of the function is mainly used to disambiguate phi translated
  /// pointers, where the value of a pointer may have changed from the initial
  /// memory access. Note that this expects to be handed either a MemoryUse,
  /// or an already potentially clobbering access. Unlike the above API, if
  /// given a MemoryDef that clobbers the pointer as the starting access, it
  /// will return that MemoryDef, whereas the above would return the clobber
  /// starting from the use side of the memory def.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                                  const MemoryLocation &) = 0;

  /// Given a memory access, invalidate anything this walker knows about
  /// that access.
  /// This API is used by walkers that store information to perform basic cache
  /// invalidation. This will be called by MemorySSA at appropriate times for
  /// the walker it uses or returns.
  virtual void invalidateInfo(MemoryAccess *) {}

  /// Check this walker is attached to the given MemorySSA instance.
  virtual void verify(const MemorySSA *MSSA) { assert(MSSA == this->MSSA); }

protected:
  friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
                          // constructor.
  MemorySSA *MSSA;
};
1017 | |
1018 | /// A MemorySSAWalker that does no alias queries, or anything else. It |
1019 | /// simply returns the links as they were constructed by the builder. |
class DoNothingMemorySSAWalker final : public MemorySSAWalker {
public:
  // Keep the overrides below from hiding the Instruction overload of
  // getClobberingMemoryAccess.
  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          const MemoryLocation &) override;
};
1030 | |
// An access paired with the location being queried, as walked by the upward
// iterators below (the location may change across phi translation).
using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
1033 | |
1034 | /// Iterator base class used to implement const and non-const iterators |
1035 | /// over the defining accesses of a MemoryAccess. |
1036 | template <class T> |
1037 | class memoryaccess_def_iterator_base |
1038 | : public iterator_facade_base<memoryaccess_def_iterator_base<T>, |
1039 | std::forward_iterator_tag, T, ptrdiff_t, T *, |
1040 | T *> { |
1041 | using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base; |
1042 | |
1043 | public: |
1044 | memoryaccess_def_iterator_base(T *Start) : Access(Start) {} |
1045 | memoryaccess_def_iterator_base() = default; |
1046 | |
1047 | bool operator==(const memoryaccess_def_iterator_base &Other) const { |
1048 | return Access == Other.Access && (!Access || ArgNo == Other.ArgNo); |
1049 | } |
1050 | |
1051 | // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the |
1052 | // block from the operand in constant time (In a PHINode, the uselist has |
1053 | // both, so it's just subtraction). We provide it as part of the |
1054 | // iterator to avoid callers having to linear walk to get the block. |
1055 | // If the operation becomes constant time on MemoryPHI's, this bit of |
1056 | // abstraction breaking should be removed. |
1057 | BasicBlock *getPhiArgBlock() const { |
1058 | MemoryPhi *MP = dyn_cast<MemoryPhi>(Access); |
1059 | assert(MP && "Tried to get phi arg block when not iterating over a PHI")(static_cast <bool> (MP && "Tried to get phi arg block when not iterating over a PHI" ) ? void (0) : __assert_fail ("MP && \"Tried to get phi arg block when not iterating over a PHI\"" , "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/Analysis/MemorySSA.h" , 1059, __extension__ __PRETTY_FUNCTION__)); |
1060 | return MP->getIncomingBlock(ArgNo); |
1061 | } |
1062 | |
1063 | typename BaseT::iterator::pointer operator*() const { |
1064 | assert(Access && "Tried to access past the end of our iterator")(static_cast <bool> (Access && "Tried to access past the end of our iterator" ) ? void (0) : __assert_fail ("Access && \"Tried to access past the end of our iterator\"" , "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/Analysis/MemorySSA.h" , 1064, __extension__ __PRETTY_FUNCTION__)); |
1065 | // Go to the first argument for phis, and the defining access for everything |
1066 | // else. |
1067 | if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) |
1068 | return MP->getIncomingValue(ArgNo); |
1069 | return cast<MemoryUseOrDef>(Access)->getDefiningAccess(); |
1070 | } |
1071 | |
1072 | using BaseT::operator++; |
1073 | memoryaccess_def_iterator &operator++() { |
1074 | assert(Access && "Hit end of iterator")(static_cast <bool> (Access && "Hit end of iterator" ) ? void (0) : __assert_fail ("Access && \"Hit end of iterator\"" , "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/Analysis/MemorySSA.h" , 1074, __extension__ __PRETTY_FUNCTION__)); |
1075 | if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) { |
1076 | if (++ArgNo >= MP->getNumIncomingValues()) { |
1077 | ArgNo = 0; |
1078 | Access = nullptr; |
1079 | } |
1080 | } else { |
1081 | Access = nullptr; |
1082 | } |
1083 | return *this; |
1084 | } |
1085 | |
1086 | private: |
1087 | T *Access = nullptr; |
1088 | unsigned ArgNo = 0; |
1089 | }; |
1090 | |
// defs_begin/defs_end: iterate the defining accesses of this MemoryAccess.
// A default-constructed iterator is the end sentinel.
inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
  return memoryaccess_def_iterator(this);
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
  return const_memoryaccess_def_iterator(this);
}

inline memoryaccess_def_iterator MemoryAccess::defs_end() {
  return memoryaccess_def_iterator();
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
  return const_memoryaccess_def_iterator();
}
1106 | |
1107 | /// GraphTraits for a MemoryAccess, which walks defs in the normal case, |
1108 | /// and uses in the inverse case. |
template <> struct GraphTraits<MemoryAccess *> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = memoryaccess_def_iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  // Children are the defining accesses (upward walk through defs).
  static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
};
1117 | |
template <> struct GraphTraits<Inverse<MemoryAccess *>> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = MemoryAccess::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  // Inverse graph: children are the users of this access (downward walk).
  static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
};
1126 | |
1127 | /// Provide an iterator that walks defs, giving both the memory access, |
1128 | /// and the current pointer location, updating the pointer location as it |
1129 | /// changes due to phi node translation. |
1130 | /// |
1131 | /// This iterator, while somewhat specialized, is what most clients actually |
1132 | /// want when walking upwards through MemorySSA def chains. It takes a pair of |
1133 | /// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the |
1134 | /// memory location through phi nodes for the user. |
class upward_defs_iterator
    : public iterator_facade_base<upward_defs_iterator,
                                  std::forward_iterator_tag,
                                  const MemoryAccessPair> {
  using BaseT = upward_defs_iterator::iterator_facade_base;

public:
  /// Start walking upward from \p Info (access + location being queried).
  upward_defs_iterator(const MemoryAccessPair &Info)
      : DefIterator(Info.first), Location(Info.second),
        OriginalAccess(Info.first) {
    CurrentPair.first = nullptr;

    // Phi translation of the location only applies when starting at a phi.
    WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
    fillInCurrentPair();
  }

  /// End iterator.
  upward_defs_iterator() { CurrentPair.first = nullptr; }

  bool operator==(const upward_defs_iterator &Other) const {
    return DefIterator == Other.DefIterator;
  }

  BaseT::iterator::reference operator*() const {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of our iterator");
    return CurrentPair;
  }

  using BaseT::operator++;
  upward_defs_iterator &operator++() {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of the iterator");
    ++DefIterator;
    if (DefIterator != OriginalAccess->defs_end())
      fillInCurrentPair();
    return *this;
  }

  BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }

private:
  // Recompute CurrentPair from DefIterator, translating the memory location
  // through the phi edge when walking a MemoryPhi.
  void fillInCurrentPair() {
    CurrentPair.first = *DefIterator;
    if (WalkingPhi && Location.Ptr) {
      PHITransAddr Translator(
          const_cast<Value *>(Location.Ptr),
          OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
      // PHITranslateValue returns false on success; only swap in the
      // translated pointer if it actually changed.
      if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
                                        DefIterator.getPhiArgBlock(), nullptr,
                                        false))
        if (Translator.getAddr() != Location.Ptr) {
          CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
          return;
        }
    }
    CurrentPair.second = Location;
  }

  MemoryAccessPair CurrentPair;           // Value yielded by operator*.
  memoryaccess_def_iterator DefIterator;  // Position in the def walk.
  MemoryLocation Location;                // Location as originally queried.
  MemoryAccess *OriginalAccess = nullptr;
  bool WalkingPhi = false;
};
1199 | |
/// Begin an upward def walk from \p Pair (access + location).
inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
  return upward_defs_iterator(Pair);
}

/// End sentinel for the upward def walk.
inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }

/// Range adaptor over the upward defs of \p Pair, for range-for loops.
inline iterator_range<upward_defs_iterator>
upward_defs(const MemoryAccessPair &Pair) {
  return make_range(upward_defs_begin(Pair), upward_defs_end());
}
1210 | |
1211 | /// Walks the defining accesses of MemoryDefs. Stops after we hit something that |
1212 | /// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when |
1213 | /// comparing against a null def_chain_iterator, this will compare equal only |
1214 | /// after walking said Phi/liveOnEntry. |
1215 | /// |
1216 | /// The UseOptimizedChain flag specifies whether to walk the clobbering |
1217 | /// access chain, or all the accesses. |
1218 | /// |
1219 | /// Normally, MemoryDef are all just def/use linked together, so a def_chain on |
1220 | /// a MemoryDef will walk all MemoryDefs above it in the program until it hits |
1221 | /// a phi node. The optimized chain walks the clobbering access of a store. |
1222 | /// So if you are just trying to find, given a store, what the next |
1223 | /// thing that would clobber the same memory is, you want the optimized chain. |
1224 | template <class T, bool UseOptimizedChain = false> |
1225 | struct def_chain_iterator |
1226 | : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>, |
1227 | std::forward_iterator_tag, MemoryAccess *> { |
1228 | def_chain_iterator() : MA(nullptr) {} |
1229 | def_chain_iterator(T MA) : MA(MA) {} |
1230 | |
1231 | T operator*() const { return MA; } |
1232 | |
1233 | def_chain_iterator &operator++() { |
1234 | // N.B. liveOnEntry has a null defining access. |
1235 | if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) { |
1236 | if (UseOptimizedChain && MUD->isOptimized()) |
1237 | MA = MUD->getOptimized(); |
1238 | else |
1239 | MA = MUD->getDefiningAccess(); |
1240 | } else { |
1241 | MA = nullptr; |
1242 | } |
1243 | |
1244 | return *this; |
1245 | } |
1246 | |
1247 | bool operator==(const def_chain_iterator &O) const { return MA == O.MA; } |
1248 | |
1249 | private: |
1250 | T MA; |
1251 | }; |
1252 | |
/// Range over the def chain starting at \p MA, stopping at \p UpTo (exclusive)
/// or at the natural end of the chain when \p UpTo is null.
template <class T>
inline iterator_range<def_chain_iterator<T>>
def_chain(T MA, MemoryAccess *UpTo = nullptr) {
#ifdef EXPENSIVE_CHECKS
  // O(chain length) sanity check, so only under EXPENSIVE_CHECKS.
  assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
         "UpTo isn't in the def chain!");
#endif
  return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
}
1262 | |
/// Like def_chain, but follows the optimized (clobbering-access) links where
/// available instead of the plain defining-access links.
template <class T>
inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
  return make_range(def_chain_iterator<T, true>(MA),
                    def_chain_iterator<T, true>(nullptr));
}
1268 | |
1269 | } // end namespace llvm |
1270 | |
1271 | #endif // LLVM_ANALYSIS_MEMORYSSA_H |