Bug Summary

File: lib/Analysis/MemorySSA.cpp
Warning: line 782, column 37
Called C++ object pointer is null
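
The checker is reporting a member call through a pointer that is initialized to null and only assigned inside loops the analyzer assumes never execute. A minimal sketch of the flagged pattern (hypothetical code, not taken from this file):

    #include <vector>
    struct Node {
      int Id;
      int getId() const { return Id; }
    };
    int lastId(const std::vector<Node *> &Worklist) {
      Node *Last = nullptr;      // null-initialized (compare step 15 below)
      for (Node *N : Worklist)   // analyzer assumes zero iterations (step 16)
        Last = N;
      return Last->getId();      // "Called C++ object pointer is null" (step 19)
    }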

Annotated Source Code

1//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the MemorySSA class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Analysis/MemorySSA.h"
15#include "llvm/ADT/DenseMap.h"
16#include "llvm/ADT/DenseMapInfo.h"
17#include "llvm/ADT/DenseSet.h"
18#include "llvm/ADT/DepthFirstIterator.h"
19#include "llvm/ADT/Hashing.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/Optional.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SmallPtrSet.h"
24#include "llvm/ADT/SmallVector.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/Analysis/AliasAnalysis.h"
28#include "llvm/Analysis/IteratedDominanceFrontier.h"
29#include "llvm/Analysis/MemoryLocation.h"
30#include "llvm/IR/AssemblyAnnotationWriter.h"
31#include "llvm/IR/BasicBlock.h"
32#include "llvm/IR/CallSite.h"
33#include "llvm/IR/Dominators.h"
34#include "llvm/IR/Function.h"
35#include "llvm/IR/Instruction.h"
36#include "llvm/IR/Instructions.h"
37#include "llvm/IR/IntrinsicInst.h"
38#include "llvm/IR/Intrinsics.h"
39#include "llvm/IR/LLVMContext.h"
40#include "llvm/IR/PassManager.h"
41#include "llvm/IR/Use.h"
42#include "llvm/Pass.h"
43#include "llvm/Support/AtomicOrdering.h"
44#include "llvm/Support/Casting.h"
45#include "llvm/Support/CommandLine.h"
46#include "llvm/Support/Compiler.h"
47#include "llvm/Support/Debug.h"
48#include "llvm/Support/ErrorHandling.h"
49#include "llvm/Support/FormattedStream.h"
50#include "llvm/Support/raw_ostream.h"
51#include <algorithm>
52#include <cassert>
53#include <iterator>
54#include <memory>
55#include <utility>
56
57using namespace llvm;
58
59#define DEBUG_TYPE "memoryssa"
60
61INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
62 true)
63INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
64INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
65INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
66 true)
67
68INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
69 "Memory SSA Printer", false, false)
70INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
71INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
72 "Memory SSA Printer", false, false)
73
74static cl::opt<unsigned> MaxCheckLimit(
75 "memssa-check-limit", cl::Hidden, cl::init(100),
76 cl::desc("The maximum number of stores/phis MemorySSA"
77 "will consider trying to walk past (default = 100)"));
78
79static cl::opt<bool>
80 VerifyMemorySSA("verify-memoryssa", cl::init(false), cl::Hidden,
81 cl::desc("Verify MemorySSA in legacy printer pass."));
82
83namespace llvm {
84
85/// \brief An assembly annotator class to print Memory SSA information in
86/// comments.
87class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
88 friend class MemorySSA;
89
90 const MemorySSA *MSSA;
91
92public:
93 MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}
94
95 void emitBasicBlockStartAnnot(const BasicBlock *BB,
96 formatted_raw_ostream &OS) override {
97 if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
98 OS << "; " << *MA << "\n";
99 }
100
101 void emitInstructionAnnot(const Instruction *I,
102 formatted_raw_ostream &OS) override {
103 if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
104 OS << "; " << *MA << "\n";
105 }
106};
107
108} // end namespace llvm
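
A sketch of how an annotator like this is typically wired into printing; this assumes a built MemorySSA for F and relies on llvm::Function::print accepting an AssemblyAnnotationWriter*:

    MemorySSAAnnotatedWriter Writer(&MSSA);
    F.print(llvm::errs(), &Writer); // emits the "; MemoryAccess" comments per block/instruction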
109
110namespace {
111
112/// Our current alias analysis API differentiates heavily between calls and
113/// non-calls, and functions called on one usually assert on the other.
114/// This class encapsulates the distinction to simplify other code that wants
115/// "Memory affecting instructions and related data" to use as a key.
116/// For example, this class is used as a densemap key in the use optimizer.
117class MemoryLocOrCall {
118public:
119 bool IsCall = false;
120
121 MemoryLocOrCall() = default;
122 MemoryLocOrCall(MemoryUseOrDef *MUD)
123 : MemoryLocOrCall(MUD->getMemoryInst()) {}
124 MemoryLocOrCall(const MemoryUseOrDef *MUD)
125 : MemoryLocOrCall(MUD->getMemoryInst()) {}
126
127 MemoryLocOrCall(Instruction *Inst) {
128 if (ImmutableCallSite(Inst)) {
129 IsCall = true;
130 CS = ImmutableCallSite(Inst);
131 } else {
132 IsCall = false;
133 // There is no such thing as a memorylocation for a fence inst, and it is
134 // unique in that regard.
135 if (!isa<FenceInst>(Inst))
136 Loc = MemoryLocation::get(Inst);
137 }
138 }
139
140 explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}
141
142 ImmutableCallSite getCS() const {
143 assert(IsCall);
144 return CS;
145 }
146
147 MemoryLocation getLoc() const {
148 assert(!IsCall);
149 return Loc;
150 }
151
152 bool operator==(const MemoryLocOrCall &Other) const {
153 if (IsCall != Other.IsCall)
154 return false;
155
156 if (IsCall)
157 return CS.getCalledValue() == Other.CS.getCalledValue();
158 return Loc == Other.Loc;
159 }
160
161private:
162 union {
163 ImmutableCallSite CS;
164 MemoryLocation Loc;
165 };
166};
167
168} // end anonymous namespace
169
170namespace llvm {
171
172template <> struct DenseMapInfo<MemoryLocOrCall> {
173 static inline MemoryLocOrCall getEmptyKey() {
174 return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
175 }
176
177 static inline MemoryLocOrCall getTombstoneKey() {
178 return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
179 }
180
181 static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
182 if (MLOC.IsCall)
183 return hash_combine(MLOC.IsCall,
184 DenseMapInfo<const Value *>::getHashValue(
185 MLOC.getCS().getCalledValue()));
186 return hash_combine(
187 MLOC.IsCall, DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));
188 }
189
190 static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
191 return LHS == RHS;
192 }
193};
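
With this specialization, MemoryLocOrCall works directly as a DenseMap key. A hypothetical usage sketch (Uses is assumed, not from this file):

    llvm::DenseMap<MemoryLocOrCall, unsigned> UseCounts;
    for (const MemoryUseOrDef *MUD : Uses)
      ++UseCounts[MemoryLocOrCall(MUD)]; // hashes via the specialization above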
194
195enum class Reorderability { Always, IfNoAlias, Never };
196
197} // end namespace llvm
198
199/// This does one-way checks to see if Use could theoretically be hoisted above
200/// MayClobber. This will not check the other way around.
201///
202/// This assumes that, for the purposes of MemorySSA, Use comes directly after
203/// MayClobber, with no potentially clobbering operations in between them.
204/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
205static Reorderability getLoadReorderability(const LoadInst *Use,
206 const LoadInst *MayClobber) {
207 bool VolatileUse = Use->isVolatile();
208 bool VolatileClobber = MayClobber->isVolatile();
209 // Volatile operations may never be reordered with other volatile operations.
210 if (VolatileUse && VolatileClobber)
211 return Reorderability::Never;
212
213 // The lang ref allows reordering of volatile and non-volatile operations.
214 // Whether an aliasing nonvolatile load and volatile load can be reordered,
215 // though, is ambiguous. Because it may not be best to exploit this ambiguity,
216 // we only allow volatile/non-volatile reordering if the volatile and
217 // non-volatile operations don't alias.
218 Reorderability Result = VolatileUse || VolatileClobber
219 ? Reorderability::IfNoAlias
220 : Reorderability::Always;
221
222 // If a load is seq_cst, it cannot be moved above other loads. If its ordering
223 // is weaker, it can be moved above other loads. We just need to be sure that
224 // MayClobber isn't an acquire load, because loads can't be moved above
225 // acquire loads.
226 //
227 // Note that this explicitly *does* allow the free reordering of monotonic (or
228 // weaker) loads of the same address.
229 bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
230 bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
231 AtomicOrdering::Acquire);
232 if (SeqCstUse || MayClobberIsAcquire)
233 return Reorderability::Never;
234 return Result;
235}
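
The same decision logic, restated as a self-contained sketch over plain flags (illustrative only; Ord stands in for llvm::AtomicOrdering):

    enum class Ord { Monotonic, Acquire, SeqCst };
    static Reorderability classify(bool VolatileUse, bool VolatileClobber,
                                   Ord UseOrd, Ord ClobberOrd) {
      if (VolatileUse && VolatileClobber)
        return Reorderability::Never; // volatile never reorders with volatile
      Reorderability R = (VolatileUse || VolatileClobber) ? Reorderability::IfNoAlias
                                                          : Reorderability::Always;
      if (UseOrd == Ord::SeqCst || ClobberOrd >= Ord::Acquire)
        return Reorderability::Never; // seq_cst use, or acquire-or-stronger clobber
      return R;
    }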
236
237static bool instructionClobbersQuery(MemoryDef *MD,
238 const MemoryLocation &UseLoc,
239 const Instruction *UseInst,
240 AliasAnalysis &AA) {
241 Instruction *DefInst = MD->getMemoryInst();
242 assert(DefInst && "Defining instruction not actually an instruction");
243 ImmutableCallSite UseCS(UseInst);
244
245 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
246 // These intrinsics will show up as affecting memory, but they are just
247 // markers.
248 switch (II->getIntrinsicID()) {
249 case Intrinsic::lifetime_start:
250 if (UseCS)
251 return false;
252 return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), UseLoc);
253 case Intrinsic::lifetime_end:
254 case Intrinsic::invariant_start:
255 case Intrinsic::invariant_end:
256 case Intrinsic::assume:
257 return false;
258 default:
259 break;
260 }
261 }
262
263 if (UseCS) {
264 ModRefInfo I = AA.getModRefInfo(DefInst, UseCS);
265 return I != MRI_NoModRef;
266 }
267
268 if (auto *DefLoad = dyn_cast<LoadInst>(DefInst)) {
269 if (auto *UseLoad = dyn_cast<LoadInst>(UseInst)) {
270 switch (getLoadReorderability(UseLoad, DefLoad)) {
271 case Reorderability::Always:
272 return false;
273 case Reorderability::Never:
274 return true;
275 case Reorderability::IfNoAlias:
276 return !AA.isNoAlias(UseLoc, MemoryLocation::get(DefLoad));
277 }
278 }
279 }
280
281 return AA.getModRefInfo(DefInst, UseLoc) & MRI_Mod;
282}
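
The final test relies on ModRefInfo being a bitmask in this snapshot (MRI_NoModRef = 0, MRI_Ref = 1, MRI_Mod = 2, MRI_ModRef = MRI_Ref | MRI_Mod), so the masking asks whether the def may write UseLoc. Spelled out:

    ModRefInfo MRI = AA.getModRefInfo(DefInst, UseLoc);
    return (MRI & MRI_Mod) != 0; // a possible write is what "clobbers" means here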
283
284static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUseOrDef *MU,
285 const MemoryLocOrCall &UseMLOC,
286 AliasAnalysis &AA) {
287 // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
288 // to exist while MemoryLocOrCall is pushed through places.
289 if (UseMLOC.IsCall)
290 return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
291 AA);
292 return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
293 AA);
294}
295
296// Return true when MD may alias MU, return false otherwise.
297bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
298 AliasAnalysis &AA) {
299 return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA);
300}
301
302namespace {
303
304struct UpwardsMemoryQuery {
305 // True if our original query started off as a call
306 bool IsCall = false;
307 // The pointer location we started the query with. This will be empty if
308 // IsCall is true.
309 MemoryLocation StartingLoc;
310 // This is the instruction we were querying about.
311 const Instruction *Inst = nullptr;
312 // The MemoryAccess we actually got called with, used to test local domination
313 const MemoryAccess *OriginalAccess = nullptr;
314
315 UpwardsMemoryQuery() = default;
316
317 UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
318 : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) {
319 if (!IsCall)
320 StartingLoc = MemoryLocation::get(Inst);
321 }
322};
323
324} // end anonymous namespace
325
326static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
327 AliasAnalysis &AA) {
328 Instruction *Inst = MD->getMemoryInst();
329 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
330 switch (II->getIntrinsicID()) {
331 case Intrinsic::lifetime_end:
332 return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
333 default:
334 return false;
335 }
336 }
337 return false;
338}
339
340static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
341 const Instruction *I) {
342 // If the memory can't be changed, then loads of the memory can't be
343 // clobbered.
344 //
345 // FIXME: We should handle invariant groups, as well. It's a bit harder,
346 // because we need to pay close attention to invariant group barriers.
347 return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
348 AA.pointsToConstantMemory(cast<LoadInst>(I)->
349 getPointerOperand()));
350}
351
352/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
353/// in between `Start` and `ClobberAt` clobbers `Start`.
354///
355/// This is meant to be as simple and self-contained as possible. Because it
356/// uses no cache, etc., it can be relatively expensive.
357///
358/// \param Start The MemoryAccess that we want to walk from.
359/// \param ClobberAt A clobber for Start.
360/// \param StartLoc The MemoryLocation for Start.
361/// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
362/// \param Query The UpwardsMemoryQuery we used for our search.
363/// \param AA The AliasAnalysis we used for our search.
364static void LLVM_ATTRIBUTE_UNUSED
365checkClobberSanity(MemoryAccess *Start, MemoryAccess *ClobberAt,
366 const MemoryLocation &StartLoc, const MemorySSA &MSSA,
367 const UpwardsMemoryQuery &Query, AliasAnalysis &AA) {
368 assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
369
370 if (MSSA.isLiveOnEntryDef(Start)) {
371 assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
372 "liveOnEntry must clobber itself");
373 return;
374 }
375
376 bool FoundClobber = false;
377 DenseSet<MemoryAccessPair> VisitedPhis;
378 SmallVector<MemoryAccessPair, 8> Worklist;
379 Worklist.emplace_back(Start, StartLoc);
380 // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
381 // is found, complain.
382 while (!Worklist.empty()) {
383 MemoryAccessPair MAP = Worklist.pop_back_val();
384 // All we care about is that nothing from Start to ClobberAt clobbers Start.
385 // We learn nothing from revisiting nodes.
386 if (!VisitedPhis.insert(MAP).second)
387 continue;
388
389 for (MemoryAccess *MA : def_chain(MAP.first)) {
390 if (MA == ClobberAt) {
391 if (auto *MD = dyn_cast<MemoryDef>(MA)) {
392 // instructionClobbersQuery isn't essentially free, so don't use `|=`,
393 // since it won't let us short-circuit.
394 //
395 // Also, note that this can't be hoisted out of the `Worklist` loop,
396 // since MD may only act as a clobber for 1 of N MemoryLocations.
397 FoundClobber =
398 FoundClobber || MSSA.isLiveOnEntryDef(MD) ||
399 instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
400 }
401 break;
402 }
403
404 // We should never hit liveOnEntry, unless it's the clobber.
405 assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");
406
407 if (auto *MD = dyn_cast<MemoryDef>(MA)) {
408 (void)MD;
409 assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) &&
410 "Found clobber before reaching ClobberAt!");
411 continue;
412 }
413
414 assert(isa<MemoryPhi>(MA));
415 Worklist.append(upward_defs_begin({MA, MAP.second}), upward_defs_end());
416 }
417 }
418
419 // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
420 // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
421 assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
422 "ClobberAt never acted as a clobber");
423}
424
425namespace {
426
427/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
428/// in one class.
429class ClobberWalker {
430 /// Save a few bytes by using unsigned instead of size_t.
431 using ListIndex = unsigned;
432
433 /// Represents a span of contiguous MemoryDefs, potentially ending in a
434 /// MemoryPhi.
435 struct DefPath {
436 MemoryLocation Loc;
437 // Note that, because we always walk in reverse, Last will always dominate
438 // First. Also note that First and Last are inclusive.
439 MemoryAccess *First;
440 MemoryAccess *Last;
441 Optional<ListIndex> Previous;
442
443 DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
444 Optional<ListIndex> Previous)
445 : Loc(Loc), First(First), Last(Last), Previous(Previous) {}
446
447 DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
448 Optional<ListIndex> Previous)
449 : DefPath(Loc, Init, Init, Previous) {}
450 };
451
452 const MemorySSA &MSSA;
453 AliasAnalysis &AA;
454 DominatorTree &DT;
455 UpwardsMemoryQuery *Query;
456
457 // Phi optimization bookkeeping
458 SmallVector<DefPath, 32> Paths;
459 DenseSet<ConstMemoryAccessPair> VisitedPhis;
460
461 /// Find the nearest def or phi that `From` can legally be optimized to.
462 const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
463 assert(From->getNumOperands() && "Phi with no operands?");
464
465 BasicBlock *BB = From->getBlock();
466 MemoryAccess *Result = MSSA.getLiveOnEntryDef();
467 DomTreeNode *Node = DT.getNode(BB);
468 while ((Node = Node->getIDom())) {
469 auto *Defs = MSSA.getBlockDefs(Node->getBlock());
470 if (Defs)
471 return &*Defs->rbegin();
472 }
473 return Result;
474 }
475
476 /// Result of calling walkToPhiOrClobber.
477 struct UpwardsWalkResult {
478 /// The "Result" of the walk. Either a clobber, the last thing we walked, or
479 /// both.
480 MemoryAccess *Result;
481 bool IsKnownClobber;
482 };
483
484 /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
485 /// This will update Desc.Last as it walks. It will (optionally) also stop at
486 /// StopAt.
487 ///
488 /// This does not test for whether StopAt is a clobber
489 UpwardsWalkResult
490 walkToPhiOrClobber(DefPath &Desc,
491 const MemoryAccess *StopAt = nullptr) const {
492 assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
493
494 for (MemoryAccess *Current : def_chain(Desc.Last)) {
495 Desc.Last = Current;
496 if (Current == StopAt)
497 return {Current, false};
498
499 if (auto *MD = dyn_cast<MemoryDef>(Current))
500 if (MSSA.isLiveOnEntryDef(MD) ||
501 instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA))
502 return {MD, true};
503 }
504
505 assert(isa<MemoryPhi>(Desc.Last) &&
506 "Ended at a non-clobber that's not a phi?");
507 return {Desc.Last, false};
508 }
509
510 void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
511 ListIndex PriorNode) {
512 auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
513 upward_defs_end());
514 for (const MemoryAccessPair &P : UpwardDefs) {
515 PausedSearches.push_back(Paths.size());
516 Paths.emplace_back(P.second, P.first, PriorNode);
517 }
518 }
519
520 /// Represents a search that terminated after finding a clobber. This clobber
521 /// may or may not be present in the path of defs from LastNode..SearchStart,
522 /// since it may have been retrieved from cache.
523 struct TerminatedPath {
524 MemoryAccess *Clobber;
525 ListIndex LastNode;
526 };
527
528 /// Get an access that keeps us from optimizing to the given phi.
529 ///
530 /// PausedSearches is an array of indices into the Paths array. Its incoming
531 /// value is the indices of searches that stopped at the last phi optimization
532 /// target. It's left in an unspecified state.
533 ///
534 /// If this returns None, NewPaused is a vector of searches that terminated
535 /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
536 Optional<TerminatedPath>
537 getBlockingAccess(const MemoryAccess *StopWhere,
538 SmallVectorImpl<ListIndex> &PausedSearches,
539 SmallVectorImpl<ListIndex> &NewPaused,
540 SmallVectorImpl<TerminatedPath> &Terminated) {
541 assert(!PausedSearches.empty() && "No searches to continue?");
542
543 // BFS vs DFS really doesn't make a difference here, so just do a DFS with
544 // PausedSearches as our stack.
545 while (!PausedSearches.empty()) {
546 ListIndex PathIndex = PausedSearches.pop_back_val();
547 DefPath &Node = Paths[PathIndex];
548
549 // If we've already visited this path with this MemoryLocation, we don't
550 // need to do so again.
551 //
552 // NOTE: That we just drop these paths on the ground makes caching
553 // behavior sporadic. e.g. given a diamond:
554 //  A
555 // B C
556 //  D
557 //
558 // ...If we walk D, B, A, C, we'll only cache the result of phi
559 // optimization for A, B, and D; C will be skipped because it dies here.
560 // This arguably isn't the worst thing ever, since:
561 // - We generally query things in a top-down order, so if we got below D
562 // without needing cache entries for {C, MemLoc}, then chances are
563 // that those cache entries would end up ultimately unused.
564 // - We still cache things for A, so C only needs to walk up a bit.
565 // If this behavior becomes problematic, we can fix without a ton of extra
566 // work.
567 if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
568 continue;
569
570 UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere);
571 if (Res.IsKnownClobber) {
572 assert(Res.Result != StopWhere);
573 // If this wasn't a cache hit, we hit a clobber when walking. That's a
574 // failure.
575 TerminatedPath Term{Res.Result, PathIndex};
576 if (!MSSA.dominates(Res.Result, StopWhere))
577 return Term;
578
579 // Otherwise, it's a valid thing to potentially optimize to.
580 Terminated.push_back(Term);
581 continue;
582 }
583
584 if (Res.Result == StopWhere) {
585 // We've hit our target. Save this path off for if we want to continue
586 // walking.
587 NewPaused.push_back(PathIndex);
588 continue;
589 }
590
591 assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
592 addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
593 }
594
595 return None;
596 }
597
598 template <typename T, typename Walker>
599 struct generic_def_path_iterator
600 : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
601 std::forward_iterator_tag, T *> {
602 generic_def_path_iterator() = default;
603 generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}
604
605 T &operator*() const { return curNode(); }
606
607 generic_def_path_iterator &operator++() {
608 N = curNode().Previous;
609 return *this;
610 }
611
612 bool operator==(const generic_def_path_iterator &O) const {
613 if (N.hasValue() != O.N.hasValue())
614 return false;
615 return !N.hasValue() || *N == *O.N;
616 }
617
618 private:
619 T &curNode() const { return W->Paths[*N]; }
620
621 Walker *W = nullptr;
622 Optional<ListIndex> N = None;
623 };
624
625 using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
626 using const_def_path_iterator =
627 generic_def_path_iterator<const DefPath, const ClobberWalker>;
628
629 iterator_range<def_path_iterator> def_path(ListIndex From) {
630 return make_range(def_path_iterator(this, From), def_path_iterator());
631 }
632
633 iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
634 return make_range(const_def_path_iterator(this, From),
635 const_def_path_iterator());
636 }
637
638 struct OptznResult {
639 /// The path that contains our result.
640 TerminatedPath PrimaryClobber;
641 /// The paths that we can legally cache back from, but that aren't
642 /// necessarily the result of the Phi optimization.
643 SmallVector<TerminatedPath, 4> OtherClobbers;
644 };
645
646 ListIndex defPathIndex(const DefPath &N) const {
647 // The assert looks nicer if we don't need to do &N
648 const DefPath *NP = &N;
649 assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
650 "Out of bounds DefPath!");
651 return NP - &Paths.front();
652 }
653
654 /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
655 /// that act as legal clobbers. Note that this won't return *all* clobbers.
656 ///
657 /// Phi optimization algorithm tl;dr:
658 /// - Find the earliest def/phi, A, we can optimize to
659 /// - Find if all paths from the starting memory access ultimately reach A
660 /// - If not, optimization isn't possible.
661 /// - Otherwise, walk from A to another clobber or phi, A'.
662 /// - If A' is a def, we're done.
663 /// - If A' is a phi, try to optimize it.
664 ///
665 /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
666 /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
667 OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
668 const MemoryLocation &Loc) {
669 assert(Paths.empty() && VisitedPhis.empty() &&
670 "Reset the optimization state.");
671
672 Paths.emplace_back(Loc, Start, Phi, None);
673 // Stores how many "valid" optimization nodes we had prior to calling
674 // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
675 auto PriorPathsSize = Paths.size();
676
677 SmallVector<ListIndex, 16> PausedSearches;
678 SmallVector<ListIndex, 8> NewPaused;
679 SmallVector<TerminatedPath, 4> TerminatedPaths;
680
681 addSearches(Phi, PausedSearches, 0);
682
683 // Moves the TerminatedPath with the "most dominated" Clobber to the end of
684 // Paths.
685 auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
686 assert(!Paths.empty() && "Need a path to move");
687 auto Dom = Paths.begin();
688 for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
689 if (!MSSA.dominates(I->Clobber, Dom->Clobber))
690 Dom = I;
691 auto Last = Paths.end() - 1;
692 if (Last != Dom)
693 std::iter_swap(Last, Dom);
694 };
695
696 MemoryPhi *Current = Phi;
697 while (true) {
12: Loop condition is true. Entering loop body
698 assert(!MSSA.isLiveOnEntryDef(Current) &&
699 "liveOnEntry wasn't treated as a clobber?");
700
701 const auto *Target = getWalkTarget(Current);
702 // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
703 // optimization for the prior phi.
704 assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
705 return MSSA.dominates(P.Clobber, Target);
706 }));
707
708 // FIXME: This is broken, because the Blocker may be reported to be
709 // liveOnEntry, and we'll happily wait for that to disappear (read: never)
710 // For the moment, this is fine, since we do nothing with blocker info.
711 if (Optional<TerminatedPath> Blocker = getBlockingAccess(
13: Taking false branch
712 Target, PausedSearches, NewPaused, TerminatedPaths)) {
713
714 // Find the node we started at. We can't search based on N->Last, since
715 // we may have gone around a loop with a different MemoryLocation.
716 auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
717 return defPathIndex(N) < PriorPathsSize;
718 });
719 assert(Iter != def_path_iterator());
720
721 DefPath &CurNode = *Iter;
722 assert(CurNode.Last == Current);
723
724 // Two things:
725 // A. We can't reliably cache all of NewPaused back. Consider a case
726 // where we have two paths in NewPaused; one of which can't optimize
727 // above this phi, whereas the other can. If we cache the second path
728 // back, we'll end up with suboptimal cache entries. We can handle
729 // cases like this a bit better when we either try to find all
730 // clobbers that block phi optimization, or when our cache starts
731 // supporting unfinished searches.
732 // B. We can't reliably cache TerminatedPaths back here without doing
733 // extra checks; consider a case like:
734 //      T
735 //     / \
736 //    D   C
737 //     \ /
738 //      S
739 // Where T is our target, C is a node with a clobber on it, D is a
740 // diamond (with a clobber *only* on the left or right node, N), and
741 // S is our start. Say we walk to D, through the node opposite N
742 // (read: ignoring the clobber), and see a cache entry in the top
743 // node of D. That cache entry gets put into TerminatedPaths. We then
744 // walk up to C (N is later in our worklist), find the clobber, and
745 // quit. If we append TerminatedPaths to OtherClobbers, we'll cache
746 // the bottom part of D to the cached clobber, ignoring the clobber
747 // in N. Again, this problem goes away if we start tracking all
748 // blockers for a given phi optimization.
749 TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
750 return {Result, {}};
751 }
752
753 // If there's nothing left to search, then all paths led to valid clobbers
754 // that we got from our cache; pick the nearest to the start, and allow
755 // the rest to be cached back.
756 if (NewPaused.empty()) {
14: Taking false branch
757 MoveDominatedPathToEnd(TerminatedPaths);
758 TerminatedPath Result = TerminatedPaths.pop_back_val();
759 return {Result, std::move(TerminatedPaths)};
760 }
761
762 MemoryAccess *DefChainEnd = nullptr;
15: 'DefChainEnd' initialized to a null pointer value
763 SmallVector<TerminatedPath, 4> Clobbers;
764 for (ListIndex Paused : NewPaused) {
16: Assuming '__begin' is equal to '__end'
765 UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
766 if (WR.IsKnownClobber)
767 Clobbers.push_back({WR.Result, Paused});
768 else
769 // Micro-opt: If we hit the end of the chain, save it.
770 DefChainEnd = WR.Result;
771 }
772
773 if (!TerminatedPaths.empty()) {
17: Taking true branch
774 // If we couldn't find the dominating phi/liveOnEntry in the above loop,
775 // do it now.
776 if (!DefChainEnd)
18: Taking true branch
777 for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
778 DefChainEnd = MA;
779
780 // If any of the terminated paths don't dominate the phi we'll try to
781 // optimize, we need to figure out what they are and quit.
782 const BasicBlock *ChainBB = DefChainEnd->getBlock();
19: Called C++ object pointer is null
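Note on this path: for DefChainEnd to still be null here, the NewPaused loop at line 764 must contribute no iterations (step 16) and the def_chain(Target) loop at line 777 must also yield no elements. In an actual run, def_chain(Target) yields at least Target itself, so DefChainEnd would be non-null at line 782; the analyzer cannot see that invariant, which is why this report has the shape of a false positive.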
783 for (const TerminatedPath &TP : TerminatedPaths) {
784 // Because we know that DefChainEnd is as "high" as we can go, we
785 // don't need local dominance checks; BB dominance is sufficient.
786 if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
787 Clobbers.push_back(TP);
788 }
789 }
790
791 // If we have clobbers in the def chain, find the one closest to Current
792 // and quit.
793 if (!Clobbers.empty()) {
794 MoveDominatedPathToEnd(Clobbers);
795 TerminatedPath Result = Clobbers.pop_back_val();
796 return {Result, std::move(Clobbers)};
797 }
798
799 assert(all_of(NewPaused,
800 [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));
801
802 // Because liveOnEntry is a clobber, this must be a phi.
803 auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
804
805 PriorPathsSize = Paths.size();
806 PausedSearches.clear();
807 for (ListIndex I : NewPaused)
808 addSearches(DefChainPhi, PausedSearches, I);
809 NewPaused.clear();
810
811 Current = DefChainPhi;
812 }
813 }
814
815 void verifyOptResult(const OptznResult &R) const {
816 assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
817 return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
818 }));
819 }
820
821 void resetPhiOptznState() {
822 Paths.clear();
823 VisitedPhis.clear();
824 }
825
826public:
827 ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT)
828 : MSSA(MSSA), AA(AA), DT(DT) {}
829
830 void reset() {}
831
832 /// Finds the nearest clobber for the given query, optimizing phis if
833 /// possible.
834 MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) {
835 Query = &Q;
836
837 MemoryAccess *Current = Start;
838 // This walker pretends uses don't exist. If we're handed one, silently grab
839 // its def. (This has the nice side-effect of ensuring we never cache uses)
840 if (auto *MU = dyn_cast<MemoryUse>(Start))
9: Taking false branch
841 Current = MU->getDefiningAccess();
842
843 DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
844 // Fast path for the overly-common case (no crazy phi optimization
845 // necessary)
846 UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
847 MemoryAccess *Result;
848 if (WalkResult.IsKnownClobber) {
10: Taking false branch
849 Result = WalkResult.Result;
850 } else {
851 OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
11: Calling 'ClobberWalker::tryOptimizePhi'
852 Current, Q.StartingLoc);
853 verifyOptResult(OptRes);
854 resetPhiOptznState();
855 Result = OptRes.PrimaryClobber.Clobber;
856 }
857
858#ifdef EXPENSIVE_CHECKS
859 checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
860#endif
861 return Result;
862 }
863
864 void verify(const MemorySSA *MSSA) { assert(MSSA == &this->MSSA); }
865};
866
867struct RenamePassData {
868 DomTreeNode *DTN;
869 DomTreeNode::const_iterator ChildIt;
870 MemoryAccess *IncomingVal;
871
872 RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
873 MemoryAccess *M)
874 : DTN(D), ChildIt(It), IncomingVal(M) {}
875
876 void swap(RenamePassData &RHS) {
877 std::swap(DTN, RHS.DTN);
878 std::swap(ChildIt, RHS.ChildIt);
879 std::swap(IncomingVal, RHS.IncomingVal);
880 }
881};
882
883} // end anonymous namespace
884
885namespace llvm {
886
887/// \brief A MemorySSAWalker that does AA walks to disambiguate accesses. It no
888/// longer does caching on its own,
889/// but the name has been retained for the moment.
890class MemorySSA::CachingWalker final : public MemorySSAWalker {
891 ClobberWalker Walker;
892 bool AutoResetWalker = true;
893
894 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);
895 void verifyRemoved(MemoryAccess *);
896
897public:
898 CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
899 ~CachingWalker() override = default;
900
901 using MemorySSAWalker::getClobberingMemoryAccess;
902
903 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
904 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
905 const MemoryLocation &) override;
906 void invalidateInfo(MemoryAccess *) override;
907
908 /// Whether we call resetClobberWalker() after each time we *actually* walk to
909 /// answer a clobber query.
910 void setAutoResetWalker(bool AutoReset) { AutoResetWalker = AutoReset; }
911
912 /// Drop the walker's persistent data structures.
913 void resetClobberWalker() { Walker.reset(); }
914
915 void verify(const MemorySSA *MSSA) override {
916 MemorySSAWalker::verify(MSSA);
917 Walker.verify(MSSA);
918 }
919};
920
921} // end namespace llvm
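
A sketch of how clients reach this walker (assumes a built MemorySSA instance and some Instruction *LoadI that has a memory access):

    MemorySSAWalker *W = MSSA.getWalker();
    MemoryAccess *MA = MSSA.getMemoryAccess(LoadI);
    MemoryAccess *Clobber = W->getClobberingMemoryAccess(MA); // nearest dominating clobber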
922
923void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
924 bool RenameAllUses) {
925 // Pass through values to our successors
926 for (const BasicBlock *S : successors(BB)) {
927 auto It = PerBlockAccesses.find(S);
928 // Rename the phi nodes in our successor block
929 if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
930 continue;
931 AccessList *Accesses = It->second.get();
932 auto *Phi = cast<MemoryPhi>(&Accesses->front());
933 if (RenameAllUses) {
934 int PhiIndex = Phi->getBasicBlockIndex(BB);
935 assert(PhiIndex != -1 && "Incomplete phi during partial rename");
936 Phi->setIncomingValue(PhiIndex, IncomingVal);
937 } else
938 Phi->addIncoming(IncomingVal, BB);
939 }
940}
941
942/// \brief Rename a single basic block into MemorySSA form.
943/// Uses the standard SSA renaming algorithm.
944/// \returns The new incoming value.
945MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
946 bool RenameAllUses) {
947 auto It = PerBlockAccesses.find(BB);
948 // Skip most processing if the list is empty.
949 if (It != PerBlockAccesses.end()) {
950 AccessList *Accesses = It->second.get();
951 for (MemoryAccess &L : *Accesses) {
952 if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
953 if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
954 MUD->setDefiningAccess(IncomingVal);
955 if (isa<MemoryDef>(&L))
956 IncomingVal = &L;
957 } else {
958 IncomingVal = &L;
959 }
960 }
961 }
962 return IncomingVal;
963}
964
965/// \brief This is the standard SSA renaming algorithm.
966///
967/// We walk the dominator tree in preorder, renaming accesses, and then filling
968/// in phi nodes in our successors.
969void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
970 SmallPtrSetImpl<BasicBlock *> &Visited,
971 bool SkipVisited, bool RenameAllUses) {
972 SmallVector<RenamePassData, 32> WorkStack;
973 // Skip everything if we already renamed this block and we are skipping.
974 // Note: You can't sink this into the if, because we need it to occur
975 // regardless of whether we skip blocks or not.
976 bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
977 if (SkipVisited && AlreadyVisited)
978 return;
979
980 IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
981 renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
982 WorkStack.push_back({Root, Root->begin(), IncomingVal});
983
984 while (!WorkStack.empty()) {
985 DomTreeNode *Node = WorkStack.back().DTN;
986 DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
987 IncomingVal = WorkStack.back().IncomingVal;
988
989 if (ChildIt == Node->end()) {
990 WorkStack.pop_back();
991 } else {
992 DomTreeNode *Child = *ChildIt;
993 ++WorkStack.back().ChildIt;
994 BasicBlock *BB = Child->getBlock();
995 // Note: You can't sink this into the if, because we need it to occur
996 // regardless of whether we skip blocks or not.
997 AlreadyVisited = !Visited.insert(BB).second;
998 if (SkipVisited && AlreadyVisited) {
999 // We already visited this during our renaming, which can happen when
1000 // being asked to rename multiple blocks. Figure out the incoming val,
1001 // which is the last def.
1002 // Incoming value can only change if there is a block def, and in that
1003 // case, it's the last block def in the list.
1004 if (auto *BlockDefs = getWritableBlockDefs(BB))
1005 IncomingVal = &*BlockDefs->rbegin();
1006 } else
1007 IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
1008 renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
1009 WorkStack.push_back({Child, Child->begin(), IncomingVal});
1010 }
1011 }
1012}
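
The traversal above is a standard iterative preorder walk with an explicit stack of (node, next-child) pairs. A generic sketch of the same shape (illustrative; NodeT is any tree node type exposing begin()/end() over children):

    template <typename NodeT>
    void preorder(NodeT *Root, void (*Visit)(NodeT *)) {
      std::vector<std::pair<NodeT *, typename NodeT::iterator>> Stack;
      Visit(Root);
      Stack.push_back({Root, Root->begin()});
      while (!Stack.empty()) {
        auto &Top = Stack.back();
        if (Top.second == Top.first->end()) {
          Stack.pop_back();                       // all children done
        } else {
          NodeT *Child = *Top.second++;
          Visit(Child);                           // visit before descending: preorder
          Stack.push_back({Child, Child->begin()});
        }
      }
    }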
1013
1014/// \brief This handles unreachable block accesses by deleting phi nodes in
1015/// unreachable blocks, and marking all other unreachable MemoryAccess's as
1016/// being uses of the live on entry definition.
1017void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
1018 assert(!DT->isReachableFromEntry(BB) &&
1019 "Reachable block found while handling unreachable blocks");
1020
1021 // Make sure phi nodes in our reachable successors end up with a
1022 // LiveOnEntryDef for our incoming edge, even though our block is forward
1023 // unreachable. We could just disconnect these blocks from the CFG fully,
1024 // but we do not right now.
1025 for (const BasicBlock *S : successors(BB)) {
1026 if (!DT->isReachableFromEntry(S))
1027 continue;
1028 auto It = PerBlockAccesses.find(S);
1029 // Rename the phi nodes in our successor block
1030 if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1031 continue;
1032 AccessList *Accesses = It->second.get();
1033 auto *Phi = cast<MemoryPhi>(&Accesses->front());
1034 Phi->addIncoming(LiveOnEntryDef.get(), BB);
1035 }
1036
1037 auto It = PerBlockAccesses.find(BB);
1038 if (It == PerBlockAccesses.end())
1039 return;
1040
1041 auto &Accesses = It->second;
1042 for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
1043 auto Next = std::next(AI);
1044 // If we have a phi, just remove it. We are going to replace all
1045 // users with live on entry.
1046 if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
1047 UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
1048 else
1049 Accesses->erase(AI);
1050 AI = Next;
1051 }
1052}
1053
1054MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
1055 : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
1056 NextID(INVALID_MEMORYACCESS_ID) {
1057 buildMemorySSA();
1058}
1059
1060MemorySSA::~MemorySSA() {
1061 // Drop all our references
1062 for (const auto &Pair : PerBlockAccesses)
1063 for (MemoryAccess &MA : *Pair.second)
1064 MA.dropAllReferences();
1065}
1066
1067MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
1068 auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
1069
1070 if (Res.second)
1071 Res.first->second = llvm::make_unique<AccessList>();
1072 return Res.first->second.get();
1073}
1074
1075MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
1076 auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));
1077
1078 if (Res.second)
1079 Res.first->second = llvm::make_unique<DefsList>();
1080 return Res.first->second.get();
1081}
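
Both helpers above use the single-lookup get-or-create idiom: insert a null placeholder, then populate it only if the insertion actually took place. The same idiom with the standard library, as a sketch (the real code uses llvm::DenseMap):

    std::unordered_map<const BasicBlock *, std::unique_ptr<AccessList>> Map;
    auto Res = Map.insert({BB, nullptr}); // one hash lookup total
    if (Res.second)                       // true only when BB was newly inserted
      Res.first->second = std::make_unique<AccessList>();
    AccessList *List = Res.first->second.get();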
1082
1083namespace llvm {
1084
1085/// This class is a batch walker of all MemoryUse's in the program, and points
1086/// their defining access at the thing that actually clobbers them. Because it
1087/// is a batch walker that touches everything, it does not operate like the
1088/// other walkers. This walker is basically performing a top-down SSA renaming
1089/// pass, where the version stack is used as the cache. This enables it to be
1090/// significantly more time and memory efficient than using the regular walker,
1091/// which is walking bottom-up.
1092class MemorySSA::OptimizeUses {
1093public:
1094 OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA,
1095 DominatorTree *DT)
1096 : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) {
1097 Walker = MSSA->getWalker();
1098 }
1099
1100 void optimizeUses();
1101
1102private:
1103 /// This represents where a given memorylocation is in the stack.
1104 struct MemlocStackInfo {
1105 // This essentially is keeping track of versions of the stack. Whenever
1106 // the stack changes due to pushes or pops, these versions increase.
1107 unsigned long StackEpoch;
1108 unsigned long PopEpoch;
1109 // This is the lower bound of places on the stack to check. It is equal to
1110 // the place the last stack walk ended.
1111 // Note: Correctness depends on this being initialized to 0, which densemap
1112 // does
1113 unsigned long LowerBound;
1114 const BasicBlock *LowerBoundBlock;
1115 // This is where the last walk for this memory location ended.
1116 unsigned long LastKill;
1117 bool LastKillValid;
1118 };
1119
1120 void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
1121 SmallVectorImpl<MemoryAccess *> &,
1122 DenseMap<MemoryLocOrCall, MemlocStackInfo> &);
1123
1124 MemorySSA *MSSA;
1125 MemorySSAWalker *Walker;
1126 AliasAnalysis *AA;
1127 DominatorTree *DT;
1128};
1129
1130} // end namespace llvm
1131
1132/// Optimize the uses in a given block. This is basically the SSA renaming
1133/// algorithm, with one caveat: We are able to use a single stack for all
1134/// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
1135/// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
1136/// going to be some position in that stack of possible ones.
1137///
1138/// We track the stack positions that each MemoryLocation needs
1139/// to check, and where its last walk ended. This is because we only want to check the
1140/// things that changed since last time. The same MemoryLocation should
1141/// get clobbered by the same store (getModRefInfo does not use invariantness or
1142/// things like this, and if they start, we can modify MemoryLocOrCall to
1143/// include relevant data)
1144void MemorySSA::OptimizeUses::optimizeUsesInBlock(
1145 const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
1146 SmallVectorImpl<MemoryAccess *> &VersionStack,
1147 DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {
1148
1149 // If there are no accesses, there is nothing to do.
1150 MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
1151 if (Accesses == nullptr)
1152 return;
1153
1154 // Pop everything that doesn't dominate the current block off the stack,
1155 // increment the PopEpoch to account for this.
1156 while (true) {
1157 assert(
1158 !VersionStack.empty() &&
1159 "Version stack should have liveOnEntry sentinel dominating everything");
1160 BasicBlock *BackBlock = VersionStack.back()->getBlock();
1161 if (DT->dominates(BackBlock, BB))
1162 break;
1163 while (VersionStack.back()->getBlock() == BackBlock)
1164 VersionStack.pop_back();
1165 ++PopEpoch;
1166 }
1167
1168 for (MemoryAccess &MA : *Accesses) {
1169 auto *MU = dyn_cast<MemoryUse>(&MA);
1170 if (!MU) {
1171 VersionStack.push_back(&MA);
1172 ++StackEpoch;
1173 continue;
1174 }
1175
1176 if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
1177 MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true);
1178 continue;
1179 }
1180
1181 MemoryLocOrCall UseMLOC(MU);
1182 auto &LocInfo = LocStackInfo[UseMLOC];
1183 // If the pop epoch changed, it means we've removed things from the top of
1184 // the stack due to changing blocks. We may have to reset the lower bound
1185 // or last kill info.
1186 if (LocInfo.PopEpoch != PopEpoch) {
1187 LocInfo.PopEpoch = PopEpoch;
1188 LocInfo.StackEpoch = StackEpoch;
1189 // If the lower bound was in something that no longer dominates us, we
1190 // have to reset it.
1191 // We can't simply track stack size, because the stack may have had
1192 // pushes/pops in the meantime.
1193 // XXX: This is non-optimal, but is only slower in cases with heavily
1194 // branching dominator trees. Getting the optimal number of queries would
1195 // require making LowerBound and LastKill per-location stacks, popped until
1196 // the top of the stack dominates us. This does not seem worth it ATM.
1197 // A much cheaper optimization would be to always explore the deepest
1198 // branch of the dominator tree first. This would guarantee the reset
1199 // happens on the smallest possible set of blocks.
1200 if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
1201 !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
1202 // Reset the lower bound of things to check.
1203 // TODO: Some day we should be able to reset to last kill, rather than
1204 // 0.
1205 LocInfo.LowerBound = 0;
1206 LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
1207 LocInfo.LastKillValid = false;
1208 }
1209 } else if (LocInfo.StackEpoch != StackEpoch) {
1210 // If all that has changed is the StackEpoch, we only have to check the
1211 // new things on the stack, because we've checked everything before. In
1212 // this case, the lower bound of things to check remains the same.
1213 LocInfo.PopEpoch = PopEpoch;
1214 LocInfo.StackEpoch = StackEpoch;
1215 }
1216 if (!LocInfo.LastKillValid) {
1217 LocInfo.LastKill = VersionStack.size() - 1;
1218 LocInfo.LastKillValid = true;
1219 }
1220
1221 // At this point, we should have corrected LastKill and LowerBound to be
1222 // in bounds.
1223 assert(LocInfo.LowerBound < VersionStack.size() &&
1224 "Lower bound out of range");
1225 assert(LocInfo.LastKill < VersionStack.size() &&
1226 "Last kill info out of range");
1227 // In any case, the new upper bound is the top of the stack.
1228 unsigned long UpperBound = VersionStack.size() - 1;
1229
1230 if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
1231 DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
1232 << *(MU->getMemoryInst()) << ")"
1233 << " because there are " << UpperBound - LocInfo.LowerBound
1234 << " stores to disambiguate\n");
1235 // Because we did not walk, LastKill is no longer valid, as this may
1236 // have been a kill.
1237 LocInfo.LastKillValid = false;
1238 continue;
1239 }
1240 bool FoundClobberResult = false;
1241 while (UpperBound > LocInfo.LowerBound) {
1242 if (isa<MemoryPhi>(VersionStack[UpperBound])) {
1243 // For phis, use the walker, see where we ended up, go there
1244 Instruction *UseInst = MU->getMemoryInst();
1245 MemoryAccess *Result = Walker->getClobberingMemoryAccess(UseInst);
1246 // We are guaranteed to find it, or something is wrong.
1247 while (VersionStack[UpperBound] != Result) {
1248 assert(UpperBound != 0);
1249 --UpperBound;
1250 }
1251 FoundClobberResult = true;
1252 break;
1253 }
1254
1255 MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
1256 // If the lifetime of the pointer ends at this instruction, it's live on
1257 // entry.
1258 if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
1259 // Reset UpperBound to liveOnEntryDef's place in the stack
1260 UpperBound = 0;
1261 FoundClobberResult = true;
1262 break;
1263 }
1264 if (instructionClobbersQuery(MD, MU, UseMLOC, *AA)) {
1265 FoundClobberResult = true;
1266 break;
1267 }
1268 --UpperBound;
1269 }
1270 // At the end of this loop, UpperBound is either a clobber or the lower
1271 // bound. PHI walking may cause it to be < LowerBound, and in fact < LastKill.
1272 if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
1273 MU->setDefiningAccess(VersionStack[UpperBound], true);
1274 // Our last kill is now wherever we ended up.
1275 LocInfo.LastKill = UpperBound;
1276 } else {
1277 // Otherwise, we checked all the new ones, and now we know we can get to
1278 // LastKill.
1279 MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true);
1280 }
1281 LocInfo.LowerBound = VersionStack.size() - 1;
1282 LocInfo.LowerBoundBlock = BB;
1283 }
1284}
1285
1286/// Optimize uses to point to their actual clobbering definitions.
1287void MemorySSA::OptimizeUses::optimizeUses() {
1288 SmallVector<MemoryAccess *, 16> VersionStack;
1289 DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
1290 VersionStack.push_back(MSSA->getLiveOnEntryDef());
1291
1292 unsigned long StackEpoch = 1;
1293 unsigned long PopEpoch = 1;
1294 // We perform a non-recursive top-down dominator tree walk.
1295 for (const auto *DomNode : depth_first(DT->getRootNode()))
1296 optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
1297 LocStackInfo);
1298}
1299
1300void MemorySSA::placePHINodes(
1301 const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks,
1302 const DenseMap<const BasicBlock *, unsigned int> &BBNumbers) {
1303 // Determine where our MemoryPhis should go.
1304 ForwardIDFCalculator IDFs(*DT);
1305 IDFs.setDefiningBlocks(DefiningBlocks);
1306 SmallVector<BasicBlock *, 32> IDFBlocks;
1307 IDFs.calculate(IDFBlocks);
1308
1309 std::sort(IDFBlocks.begin(), IDFBlocks.end(),
1310 [&BBNumbers](const BasicBlock *A, const BasicBlock *B) {
1311 return BBNumbers.lookup(A) < BBNumbers.lookup(B);
1312 });
1313
1314 // Now place MemoryPhi nodes.
1315 for (auto &BB : IDFBlocks)
1316 createMemoryPhi(BB);
1317}
1318
1319void MemorySSA::buildMemorySSA() {
1320 // We create an access to represent "live on entry", for things like
1321 // arguments or users of globals, where the memory they use is defined before
1322 // the beginning of the function. We do not actually insert it into the IR.
1323 // We do not define a live on exit for the immediate uses, and thus our
1324 // semantics do *not* imply that something with no immediate uses can simply
1325 // be removed.
1326 BasicBlock &StartingPoint = F.getEntryBlock();
1327 LiveOnEntryDef =
1328 llvm::make_unique<MemoryDef>(F.getContext(), nullptr, nullptr,
1329 &StartingPoint, NextID++);
1330 DenseMap<const BasicBlock *, unsigned int> BBNumbers;
1331 unsigned NextBBNum = 0;
1332
1333 // We maintain lists of memory accesses per block, trading memory for time.
1334 // The alternative would be to look up the memory access for every possible
1335 // instruction in the stream.
1336 SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1337 // Go through each block, figure out where defs occur, and chain together all
1338 // the accesses.
1339 for (BasicBlock &B : F) {
1340 BBNumbers[&B] = NextBBNum++;
1341 bool InsertIntoDef = false;
1342 AccessList *Accesses = nullptr;
1343 DefsList *Defs = nullptr;
1344 for (Instruction &I : B) {
1345 MemoryUseOrDef *MUD = createNewAccess(&I);
1346 if (!MUD)
1347 continue;
1348
1349 if (!Accesses)
1350 Accesses = getOrCreateAccessList(&B);
1351 Accesses->push_back(MUD);
1352 if (isa<MemoryDef>(MUD)) {
1353 InsertIntoDef = true;
1354 if (!Defs)
1355 Defs = getOrCreateDefsList(&B);
1356 Defs->push_back(*MUD);
1357 }
1358 }
1359 if (InsertIntoDef)
1360 DefiningBlocks.insert(&B);
1361 }
1362 placePHINodes(DefiningBlocks, BBNumbers);
1363
1364 // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1365 // filled in with all blocks.
1366 SmallPtrSet<BasicBlock *, 16> Visited;
1367 renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1368
1369 CachingWalker *Walker = getWalkerImpl();
1370
1371 // We're doing a batch of updates; don't drop useful caches between them.
1372 Walker->setAutoResetWalker(false);
1373 OptimizeUses(this, Walker, AA, DT).optimizeUses();
1374 Walker->setAutoResetWalker(true);
1375 Walker->resetClobberWalker();
1376
1377 // Mark the uses in unreachable blocks as live on entry, so that they go
1378 // somewhere.
1379 for (auto &BB : F)
1380 if (!Visited.count(&BB))
1381 markUnreachableAsLiveOnEntry(&BB);
1382}
1383
1384MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
1385
1386MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
1387 if (Walker)
1388 return Walker.get();
1389
1390 Walker = llvm::make_unique<CachingWalker>(this, AA, DT);
1391 return Walker.get();
1392}
1393
1394// This is a helper function used by the creation routines. It places NewAccess
1395// into the access and defs lists for a given basic block, at the given
1396// insertion point.
1397void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1398 const BasicBlock *BB,
1399 InsertionPlace Point) {
1400 auto *Accesses = getOrCreateAccessList(BB);
1401 if (Point == Beginning) {
1402 // If it's a phi node, it goes first, otherwise, it goes after any phi
1403 // nodes.
1404 if (isa<MemoryPhi>(NewAccess)) {
1405 Accesses->push_front(NewAccess);
1406 auto *Defs = getOrCreateDefsList(BB);
1407 Defs->push_front(*NewAccess);
1408 } else {
1409 auto AI = find_if_not(
1410 *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1411 Accesses->insert(AI, NewAccess);
1412 if (!isa<MemoryUse>(NewAccess)) {
1413 auto *Defs = getOrCreateDefsList(BB);
1414 auto DI = find_if_not(
1415 *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1416 Defs->insert(DI, *NewAccess);
1417 }
1418 }
1419 } else {
1420 Accesses->push_back(NewAccess);
1421 if (!isa<MemoryUse>(NewAccess)) {
1422 auto *Defs = getOrCreateDefsList(BB);
1423 Defs->push_back(*NewAccess);
1424 }
1425 }
1426 BlockNumberingValid.erase(BB);
1427}
1428
1429void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
1430 AccessList::iterator InsertPt) {
1431 auto *Accesses = getWritableBlockAccesses(BB);
1432 bool WasEnd = InsertPt == Accesses->end();
1433 Accesses->insert(AccessList::iterator(InsertPt), What);
1434 if (!isa<MemoryUse>(What)) {
1435 auto *Defs = getOrCreateDefsList(BB);
1436 // If we got asked to insert at the end, we have an easy job: just shove it
1437 // at the end. If we got asked to insert before an existing def, we also get
1438 // an iterator. If we got asked to insert before a use, we have to hunt for
1439 // the next def.
1440 if (WasEnd) {
1441 Defs->push_back(*What);
1442 } else if (isa<MemoryDef>(InsertPt)) {
1443 Defs->insert(InsertPt->getDefsIterator(), *What);
1444 } else {
1445 while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
1446 ++InsertPt;
1447 // Either we found a def, or we are inserting at the end
1448 if (InsertPt == Accesses->end())
1449 Defs->push_back(*What);
1450 else
1451 Defs->insert(InsertPt->getDefsIterator(), *What);
1452 }
1453 }
1454 BlockNumberingValid.erase(BB);
1455}
1456
1457 // Move What before Where in the IR. The end result is that What will belong to
1458// the right lists and have the right Block set, but will not otherwise be
1459// correct. It will not have the right defining access, and if it is a def,
1460// things below it will not properly be updated.
1461void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1462 AccessList::iterator Where) {
1463 // Keep it in the lookup tables, remove from the lists
1464 removeFromLists(What, false);
1465 What->setBlock(BB);
1466 insertIntoListsBefore(What, BB, Where);
1467}
1468
1469void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1470 InsertionPlace Point) {
1471 removeFromLists(What, false);
1472 What->setBlock(BB);
1473 insertIntoListsForBlock(What, BB, Point);
1474}
1475
1476MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1477 assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1478 MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
1479 // Phis are always placed at the front of the block.
1480 insertIntoListsForBlock(Phi, BB, Beginning);
1481 ValueToMemoryAccess[BB] = Phi;
1482 return Phi;
1483}
1484
1485MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1486 MemoryAccess *Definition) {
1487 assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1488 MemoryUseOrDef *NewAccess = createNewAccess(I);
1489 assert(
1490 NewAccess != nullptr &&
1491 "Tried to create a memory access for a non-memory touching instruction");
1492 NewAccess->setDefiningAccess(Definition);
1493 return NewAccess;
1494}
1495
1496// Return true if the instruction has ordering constraints.
1497// Note specifically that this only considers stores and loads
1498// because others are still considered ModRef by getModRefInfo.
1499static inline bool isOrdered(const Instruction *I) {
1500 if (auto *SI = dyn_cast<StoreInst>(I)) {
1501 if (!SI->isUnordered())
1502 return true;
1503 } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1504 if (!LI->isUnordered())
1505 return true;
1506 }
1507 return false;
1508}
1509
1510/// \brief Helper function to create new memory accesses
1511MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {
1512 // The assume intrinsic has a control dependency which we model by claiming
1513 // that it writes arbitrarily. Ignore that fake memory dependency here.
1514 // FIXME: Replace this special casing with a more accurate modelling of
1515 // assume's control dependency.
1516 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1517 if (II->getIntrinsicID() == Intrinsic::assume)
1518 return nullptr;
1519
1520 // Find out what effect this instruction has on memory.
1521 ModRefInfo ModRef = AA->getModRefInfo(I, None);
1522 // The isOrdered check is used to ensure that volatiles end up as defs
1523 // (atomics end up as ModRef right now anyway). Until we separate the
1524 // ordering chain from the memory chain, this enables people to see at least
1525 // some relative ordering to volatiles. Note that getClobberingMemoryAccess
1526 // will still give an answer that bypasses other volatile loads. TODO:
1527 // Separate memory aliasing and ordering into two different chains so that we
1528 // can precisely represent both "what memory this will read/write or be
1529 // clobbered by" and "what instructions can I move this past".
1530 bool Def = bool(ModRef & MRI_Mod) || isOrdered(I);
1531 bool Use = bool(ModRef & MRI_Ref);
1532
1533 // It's possible for an instruction to not touch memory at all. During
1534 // construction, we ignore such instructions.
1535 if (!Def && !Use)
1536 return nullptr;
1537
1538 assert((Def || Use) &&
1539 "Trying to create a memory access with a non-memory instruction");
1540
1541 MemoryUseOrDef *MUD;
1542 if (Def)
1543 MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1544 else
1545 MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1546 ValueToMemoryAccess[I] = MUD;
1547 return MUD;
1548}
1549
1550 /// \brief Returns true if \p Replacer dominates \p Replacee.
1551bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
1552 const MemoryAccess *Replacee) const {
1553 if (isa<MemoryUseOrDef>(Replacee))
1554 return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
1555 const auto *MP = cast<MemoryPhi>(Replacee);
1556 // For a phi node, the use occurs in the predecessor block of the phi node.
1557 // Since Replacee may occur multiple times in the phi node, we have to check each
1558 // operand to ensure Replacer dominates each operand where Replacee occurs.
1559 for (const Use &Arg : MP->operands()) {
1560 if (Arg.get() != Replacee &&
1561 !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
1562 return false;
1563 }
1564 return true;
1565}
1566
1567/// \brief Properly remove \p MA from all of MemorySSA's lookup tables.
1568void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1569 assert(MA->use_empty() &&
1570 "Trying to remove memory access that still has uses");
1571 BlockNumbering.erase(MA);
1572 if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA))
1573 MUD->setDefiningAccess(nullptr);
1574 // Invalidate our walker's cache if necessary
1575 if (!isa<MemoryUse>(MA))
1576 Walker->invalidateInfo(MA);
1577 // The call below to erase will destroy MA, so we can't change the order we
1578 // are doing things here
1579 Value *MemoryInst;
1580 if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
1581 MemoryInst = MUD->getMemoryInst();
1582 } else {
1583 MemoryInst = MA->getBlock();
1584 }
1585 auto VMA = ValueToMemoryAccess.find(MemoryInst);
1586 if (VMA->second == MA)
1587 ValueToMemoryAccess.erase(VMA);
1588}
1589
1590/// \brief Properly remove \p MA from all of MemorySSA's lists.
1591///
1592/// Because of the way the intrusive list and use lists work, it is important to
1593/// do removal in the right order.
1594/// ShouldDelete defaults to true, and will cause the memory access to also be
1595/// deleted, not just removed.
1596void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1597 // The access list owns the reference, so we erase it from the non-owning list
1598 // first.
1599 if (!isa<MemoryUse>(MA)) {
1600 auto DefsIt = PerBlockDefs.find(MA->getBlock());
1601 std::unique_ptr<DefsList> &Defs = DefsIt->second;
1602 Defs->remove(*MA);
1603 if (Defs->empty())
1604 PerBlockDefs.erase(DefsIt);
1605 }
1606
1607 // The erase call here will delete it. If we don't want it deleted, we call
1608 // remove instead.
1609 auto AccessIt = PerBlockAccesses.find(MA->getBlock());
1610 std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1611 if (ShouldDelete)
1612 Accesses->erase(MA);
1613 else
1614 Accesses->remove(MA);
1615
1616 if (Accesses->empty())
1617 PerBlockAccesses.erase(AccessIt);
1618}
1619
1620void MemorySSA::print(raw_ostream &OS) const {
1621 MemorySSAAnnotatedWriter Writer(this);
1622 F.print(OS, &Writer);
1623}
1624
1625#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1626 LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1627#endif
1628
1629void MemorySSA::verifyMemorySSA() const {
1630 verifyDefUses(F);
1631 verifyDomination(F);
1632 verifyOrdering(F);
1633 Walker->verify(this);
1634}
1635
1636/// \brief Verify that the order and existence of MemoryAccesses matches the
1637/// order and existence of memory affecting instructions.
1638void MemorySSA::verifyOrdering(Function &F) const {
1639 // Walk all the blocks, comparing what the lookups think and what the access
1640 // lists think, as well as the order in the blocks vs the order in the access
1641 // lists.
1642 SmallVector<MemoryAccess *, 32> ActualAccesses;
1643 SmallVector<MemoryAccess *, 32> ActualDefs;
1644 for (BasicBlock &B : F) {
1645 const AccessList *AL = getBlockAccesses(&B);
1646 const auto *DL = getBlockDefs(&B);
1647 MemoryAccess *Phi = getMemoryAccess(&B);
1648 if (Phi) {
1649 ActualAccesses.push_back(Phi);
1650 ActualDefs.push_back(Phi);
1651 }
1652
1653 for (Instruction &I : B) {
1654 MemoryAccess *MA = getMemoryAccess(&I);
1655 assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
1656 "We have memory affecting instructions "
1657 "in this block but they are not in the "
1658 "access list or defs list");
1659 if (MA) {
1660 ActualAccesses.push_back(MA);
1661 if (isa<MemoryDef>(MA))
1662 ActualDefs.push_back(MA);
1663 }
1664 }
1665 // Either we hit the assert, really have no accesses, or we have both
1666 // accesses and an access list.
1667 // Same with defs.
1668 if (!AL && !DL)
1669 continue;
1670 assert(AL->size() == ActualAccesses.size() &&
1671 "We don't have the same number of accesses in the block as on the "
1672 "access list");
1673 assert((DL || ActualDefs.size() == 0) &&
1674 "Either we should have a defs list, or we should have no defs");
1675 assert((!DL || DL->size() == ActualDefs.size()) &&
1676 "We don't have the same number of defs in the block as on the "
1677 "def list");
1678 auto ALI = AL->begin();
1679 auto AAI = ActualAccesses.begin();
1680 while (ALI != AL->end() && AAI != ActualAccesses.end()) {
1681 assert(&*ALI == *AAI && "Not the same accesses in the same order");
1682 ++ALI;
1683 ++AAI;
1684 }
1685 ActualAccesses.clear();
1686 if (DL) {
1687 auto DLI = DL->begin();
1688 auto ADI = ActualDefs.begin();
1689 while (DLI != DL->end() && ADI != ActualDefs.end()) {
1690 assert(&*DLI == *ADI && "Not the same defs in the same order");
1691 ++DLI;
1692 ++ADI;
1693 }
1694 }
1695 ActualDefs.clear();
1696 }
1697}
1698
1699/// \brief Verify the domination properties of MemorySSA by checking that each
1700/// definition dominates all of its uses.
1701void MemorySSA::verifyDomination(Function &F) const {
1702#ifndef NDEBUG
1703 for (BasicBlock &B : F) {
1704 // Phi nodes are attached to basic blocks
1705 if (MemoryPhi *MP = getMemoryAccess(&B))
1706 for (const Use &U : MP->uses())
1707 assert(dominates(MP, U) && "Memory PHI does not dominate its uses");
1708
1709 for (Instruction &I : B) {
1710 MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
1711 if (!MD)
1712 continue;
1713
1714 for (const Use &U : MD->uses())
1715 assert(dominates(MD, U) && "Memory Def does not dominate its uses");
1716 }
1717 }
1718#endif
1719}
1720
1721/// \brief Verify the def-use lists in MemorySSA, by verifying that \p Use
1722/// appears in the use list of \p Def.
1723void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
1724#ifndef NDEBUG
1725 // The live on entry use may cause us to get a NULL def here
1726 if (!Def)
1727 assert(isLiveOnEntryDef(Use) &&
1728 "Null def but use does not point to the live on entry def");
1729 else
1730 assert(is_contained(Def->users(), Use) &&
1731 "Did not find use in def's use list");
1732#endif
1733}
1734
1735/// \brief Verify the immediate use information, by walking all the memory
1736/// accesses and verifying that, for each use, it appears in the
1737/// appropriate def's use list
1738void MemorySSA::verifyDefUses(Function &F) const {
1739 for (BasicBlock &B : F) {
1740 // Phi nodes are attached to basic blocks
1741 if (MemoryPhi *Phi = getMemoryAccess(&B)) {
1742 assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
1743 pred_begin(&B), pred_end(&B))) &&
1744 "Incomplete MemoryPhi Node");
1745 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
1746 verifyUseInDefs(Phi->getIncomingValue(I), Phi);
1747 }
1748
1749 for (Instruction &I : B) {
1750 if (MemoryUseOrDef *MA = getMemoryAccess(&I)) {
1751 verifyUseInDefs(MA->getDefiningAccess(), MA);
1752 }
1753 }
1754 }
1755}
1756
1757MemoryUseOrDef *MemorySSA::getMemoryAccess(const Instruction *I) const {
1758 return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
1759}
1760
1761MemoryPhi *MemorySSA::getMemoryAccess(const BasicBlock *BB) const {
1762 return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
1763}
1764
1765/// Perform a local numbering on blocks so that instruction ordering can be
1766/// determined in constant time.
1767/// TODO: We currently just number in order. If we numbered by N, we could
1768/// allow at least N-1 sequences of insertBefore or insertAfter (and at least
1769/// log2(N) sequences of mixed before and after) without needing to invalidate
1770/// the numbering.
1771void MemorySSA::renumberBlock(const BasicBlock *B) const {
1772 // The pre-increment ensures the numbers really start at 1.
1773 unsigned long CurrentNumber = 0;
1774 const AccessList *AL = getBlockAccesses(B);
1775 assert(AL != nullptr && "Asking to renumber an empty block");
1776 for (const auto &I : *AL)
1777 BlockNumbering[&I] = ++CurrentNumber;
1778 BlockNumberingValid.insert(B);
1779}
1780
1781/// \brief Determine, for two memory accesses in the same block,
1782/// whether \p Dominator dominates \p Dominatee.
1783/// \returns True if \p Dominator dominates \p Dominatee.
1784bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
1785 const MemoryAccess *Dominatee) const {
1786 const BasicBlock *DominatorBlock = Dominator->getBlock();
1787
1788 assert((DominatorBlock == Dominatee->getBlock()) &&
1789 "Asking for local domination when accesses are in different blocks!");
1790 // A node dominates itself.
1791 if (Dominatee == Dominator)
1792 return true;
1793
1794 // When Dominatee is defined on function entry, it is not dominated by another
1795 // memory access.
1796 if (isLiveOnEntryDef(Dominatee))
1797 return false;
1798
1799 // When Dominator is defined on function entry, it dominates the other memory
1800 // access.
1801 if (isLiveOnEntryDef(Dominator))
1802 return true;
1803
1804 if (!BlockNumberingValid.count(DominatorBlock))
1805 renumberBlock(DominatorBlock);
1806
1807 unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
1808 // All numbers start with 1
1809 assert(DominatorNum != 0 && "Block was not numbered properly");
1810 unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
1811 assert(DominateeNum != 0 && "Block was not numbered properly");
1812 return DominatorNum < DominateeNum;
1813}
1814
1815bool MemorySSA::dominates(const MemoryAccess *Dominator,
1816 const MemoryAccess *Dominatee) const {
1817 if (Dominator == Dominatee)
1818 return true;
1819
1820 if (isLiveOnEntryDef(Dominatee))
1821 return false;
1822
1823 if (Dominator->getBlock() != Dominatee->getBlock())
1824 return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
1825 return locallyDominates(Dominator, Dominatee);
1826}
1827
1828bool MemorySSA::dominates(const MemoryAccess *Dominator,
1829 const Use &Dominatee) const {
1830 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
1831 BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
1832 // The def must dominate the incoming block of the phi.
1833 if (UseBB != Dominator->getBlock())
1834 return DT->dominates(Dominator->getBlock(), UseBB);
1835 // If the UseBB and the DefBB are the same, compare locally.
1836 return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
1837 }
1838 // If it's not a PHI node use, the normal dominates can already handle it.
1839 return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
1840}
1841
1842const static char LiveOnEntryStr[] = "liveOnEntry";
1843
1844void MemoryAccess::print(raw_ostream &OS) const {
1845 switch (getValueID()) {
1846 case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
1847 case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
1848 case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
1849 }
1850 llvm_unreachable("invalid value id");
1851}
1852
1853void MemoryDef::print(raw_ostream &OS) const {
1854 MemoryAccess *UO = getDefiningAccess();
1855
1856 OS << getID() << " = MemoryDef(";
1857 if (UO && UO->getID())
1858 OS << UO->getID();
1859 else
1860 OS << LiveOnEntryStr;
1861 OS << ')';
1862}
1863
1864void MemoryPhi::print(raw_ostream &OS) const {
1865 bool First = true;
1866 OS << getID() << " = MemoryPhi(";
1867 for (const auto &Op : operands()) {
1868 BasicBlock *BB = getIncomingBlock(Op);
1869 MemoryAccess *MA = cast<MemoryAccess>(Op);
1870 if (!First)
1871 OS << ',';
1872 else
1873 First = false;
1874
1875 OS << '{';
1876 if (BB->hasName())
1877 OS << BB->getName();
1878 else
1879 BB->printAsOperand(OS, false);
1880 OS << ',';
1881 if (unsigned ID = MA->getID())
1882 OS << ID;
1883 else
1884 OS << LiveOnEntryStr;
1885 OS << '}';
1886 }
1887 OS << ')';
1888}
1889
1890void MemoryUse::print(raw_ostream &OS) const {
1891 MemoryAccess *UO = getDefiningAccess();
1892 OS << "MemoryUse(";
1893 if (UO && UO->getID())
1894 OS << UO->getID();
1895 else
1896 OS << LiveOnEntryStr;
1897 OS << ')';
1898}
1899
1900void MemoryAccess::dump() const {
1901// Cannot completely remove virtual function even in release mode.
1902#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1903 print(dbgs());
1904 dbgs() << "\n";
1905#endif
1906}
1907
1908char MemorySSAPrinterLegacyPass::ID = 0;
1909
1910MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
1911 initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
1912}
1913
1914void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
1915 AU.setPreservesAll();
1916 AU.addRequired<MemorySSAWrapperPass>();
1917}
1918
1919bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
1920 auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
1921 MSSA.print(dbgs());
1922 if (VerifyMemorySSA)
1923 MSSA.verifyMemorySSA();
1924 return false;
1925}
1926
1927AnalysisKey MemorySSAAnalysis::Key;
1928
1929MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
1930 FunctionAnalysisManager &AM) {
1931 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
1932 auto &AA = AM.getResult<AAManager>(F);
1933 return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT));
1934}
1935
1936PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
1937 FunctionAnalysisManager &AM) {
1938 OS << "MemorySSA for function: " << F.getName() << "\n";
1939 AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);
1940
1941 return PreservedAnalyses::all();
1942}
1943
1944PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
1945 FunctionAnalysisManager &AM) {
1946 AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
1947
1948 return PreservedAnalyses::all();
1949}
1950
1951char MemorySSAWrapperPass::ID = 0;
1952
1953MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
1954 initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
1955}
1956
1957void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
1958
1959void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1960 AU.setPreservesAll();
1961 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
1962 AU.addRequiredTransitive<AAResultsWrapperPass>();
1963}
1964
1965bool MemorySSAWrapperPass::runOnFunction(Function &F) {
1966 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1967 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
1968 MSSA.reset(new MemorySSA(F, &AA, &DT));
1969 return false;
1970}
1971
1972void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }
1973
1974void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
1975 MSSA->print(OS);
1976}
1977
1978MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
1979
1980MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,
1981 DominatorTree *D)
1982 : MemorySSAWalker(M), Walker(*M, *A, *D) {}
1983
1984void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {
1985 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1986 MUD->resetOptimized();
1987}
1988
1989/// \brief Walk the use-def chains starting at \p MA and find
1990/// the MemoryAccess that actually clobbers Loc.
1991///
1992/// \returns our clobbering memory access
1993MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
1994 MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) {
1995 MemoryAccess *New = Walker.findClobber(StartingAccess, Q);
8: Calling 'ClobberWalker::findClobber'
1996#ifdef EXPENSIVE_CHECKS
1997 MemoryAccess *NewNoCache = Walker.findClobber(StartingAccess, Q);
1998 assert(NewNoCache == New && "Cache made us hand back a different result?");
1999 (void)NewNoCache;
2000#endif
2001 if (AutoResetWalker)
2002 resetClobberWalker();
2003 return New;
2004}
2005
2006MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
2007 MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
2008 if (isa<MemoryPhi>(StartingAccess))
1: Assuming the condition is false
2: Taking false branch
2009 return StartingAccess;
2010
2011 auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
2012 if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
3: Taking false branch
2013 return StartingUseOrDef;
2014
2015 Instruction *I = StartingUseOrDef->getMemoryInst();
2016
2017 // Conservatively, fences are always clobbers, so don't perform the walk if we
2018 // hit a fence.
2019 if (!ImmutableCallSite(I) && I->isFenceLike())
4: Taking false branch
2020 return StartingUseOrDef;
2021
2022 UpwardsMemoryQuery Q;
2023 Q.OriginalAccess = StartingUseOrDef;
2024 Q.StartingLoc = Loc;
2025 Q.Inst = I;
2026 Q.IsCall = false;
2027
2028 // Unlike the other function, do not walk to the def of a def, because we are
2029 // handed something we already believe is the clobbering access.
2030 MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
5: Assuming the condition is false
6: '?' condition is false
2031 ? StartingUseOrDef->getDefiningAccess()
2032 : StartingUseOrDef;
2033
2034 MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
7: Calling 'CachingWalker::getClobberingMemoryAccess'
2035 DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2036 DEBUG(dbgs() << *StartingUseOrDef << "\n");
2037 DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
2038 DEBUG(dbgs() << *Clobber << "\n");
2039 return Clobber;
2040}
2041
2042MemoryAccess *
2043MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2044 auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2045 // If this is a MemoryPhi, we can't do anything.
2046 if (!StartingAccess)
2047 return MA;
2048
2049 // If this is an already optimized use or def, return the optimized result.
2050 // Note: Currently, we do not store the optimized def result because we'd need
2051 // a separate field, since we can't use it as the defining access.
2052 if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
2053 if (MUD->isOptimized())
2054 return MUD->getOptimized();
2055
2056 const Instruction *I = StartingAccess->getMemoryInst();
2057 UpwardsMemoryQuery Q(I, StartingAccess);
2058 // We can't sanely do anything with fences; they conservatively clobber
2059 // all memory and have no locations from which to get pointers to try to
2060 // disambiguate.
2061 if (!Q.IsCall && I->isFenceLike())
2062 return StartingAccess;
2063
2064 if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) {
2065 MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2066 if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
2067 MUD->setOptimized(LiveOnEntry);
2068 return LiveOnEntry;
2069 }
2070
2071 // Start with the thing we already think clobbers this location
2072 MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2073
2074 // At this point, DefiningAccess may be the live on entry def.
2075 // If it is, we will not get a better result.
2076 if (MSSA->isLiveOnEntryDef(DefiningAccess))
2077 return DefiningAccess;
2078
2079 MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
2080 DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2081 DEBUG(dbgs() << *DefiningAccess << "\n");
2082 DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
2083 DEBUG(dbgs() << *Result << "\n");
2084 if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
2085 MUD->setOptimized(Result);
2086
2087 return Result;
2088}
2089
2090MemoryAccess *
2091DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2092 if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2093 return Use->getDefiningAccess();
2094 return MA;
2095}
2096
2097MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2098 MemoryAccess *StartingAccess, const MemoryLocation &) {
2099 if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2100 return Use->getDefiningAccess();
2101 return StartingAccess;
2102}
2103
2104void MemoryPhi::deleteMe(DerivedUser *Self) {
2105 delete static_cast<MemoryPhi *>(Self);
2106}
2107
2108void MemoryDef::deleteMe(DerivedUser *Self) {
2109 delete static_cast<MemoryDef *>(Self);
2110}
2111
2112void MemoryUse::deleteMe(DerivedUser *Self) {
2113 delete static_cast<MemoryUse *>(Self);
2114}