Bug Summary

File: lib/Analysis/MemorySSA.cpp
Warning: line 1109, column 5
Value stored to 'Walker' is never read
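
A minimal sketch of the pattern behind this warning, reduced from the
OptimizeUses constructor at source lines 1106-1110 below (an illustration,
not the exact LLVM code): the constructor parameter 'Walker' shadows the
data member of the same name inside the constructor body, so the flagged
assignment writes to the parameter, which is never read afterwards.

struct OptimizeUses {
  MemorySSAWalker *Walker;
  OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker) : Walker(Walker) {
    // Inside the body, 'Walker' names the parameter, not the member,
    // so this store is dead -- exactly what the analyzer reports.
    Walker = MSSA->getWalker();
    // One plausible fix (an assumption, not the upstream patch):
    //   this->Walker = MSSA->getWalker();
  }
};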

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name MemorySSA.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/Analysis -I /build/llvm-toolchain-snapshot-7~svn329677/lib/Analysis -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn329677/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/Analysis -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-04-11-031539-24776-1 -x c++ /build/llvm-toolchain-snapshot-7~svn329677/lib/Analysis/MemorySSA.cpp
1//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the MemorySSA class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Analysis/MemorySSA.h"
15#include "llvm/ADT/DenseMap.h"
16#include "llvm/ADT/DenseMapInfo.h"
17#include "llvm/ADT/DenseSet.h"
18#include "llvm/ADT/DepthFirstIterator.h"
19#include "llvm/ADT/Hashing.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/Optional.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SmallPtrSet.h"
24#include "llvm/ADT/SmallVector.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/Analysis/AliasAnalysis.h"
28#include "llvm/Analysis/IteratedDominanceFrontier.h"
29#include "llvm/Analysis/MemoryLocation.h"
30#include "llvm/IR/AssemblyAnnotationWriter.h"
31#include "llvm/IR/BasicBlock.h"
32#include "llvm/IR/CallSite.h"
33#include "llvm/IR/Dominators.h"
34#include "llvm/IR/Function.h"
35#include "llvm/IR/Instruction.h"
36#include "llvm/IR/Instructions.h"
37#include "llvm/IR/IntrinsicInst.h"
38#include "llvm/IR/Intrinsics.h"
39#include "llvm/IR/LLVMContext.h"
40#include "llvm/IR/PassManager.h"
41#include "llvm/IR/Use.h"
42#include "llvm/Pass.h"
43#include "llvm/Support/AtomicOrdering.h"
44#include "llvm/Support/Casting.h"
45#include "llvm/Support/CommandLine.h"
46#include "llvm/Support/Compiler.h"
47#include "llvm/Support/Debug.h"
48#include "llvm/Support/ErrorHandling.h"
49#include "llvm/Support/FormattedStream.h"
50#include "llvm/Support/raw_ostream.h"
51#include <algorithm>
52#include <cassert>
53#include <iterator>
54#include <memory>
55#include <utility>
56
57using namespace llvm;
58
59#define DEBUG_TYPE "memoryssa"
60
61INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
62 true)
63INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
64INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
65INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
66 true)
67
68INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
69 "Memory SSA Printer", false, false)
70INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
71INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
72 "Memory SSA Printer", false, false)
73
74static cl::opt<unsigned> MaxCheckLimit(
75 "memssa-check-limit", cl::Hidden, cl::init(100),
76 cl::desc("The maximum number of stores/phis MemorySSA"
77 "will consider trying to walk past (default = 100)"));
78
79static cl::opt<bool>
80 VerifyMemorySSA("verify-memoryssa", cl::init(false), cl::Hidden,
81 cl::desc("Verify MemorySSA in legacy printer pass."));
82
83namespace llvm {
84
85/// \brief An assembly annotator class to print Memory SSA information in
86/// comments.
87class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
88 friend class MemorySSA;
89
90 const MemorySSA *MSSA;
91
92public:
93 MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}
94
95 void emitBasicBlockStartAnnot(const BasicBlock *BB,
96 formatted_raw_ostream &OS) override {
97 if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
98 OS << "; " << *MA << "\n";
99 }
100
101 void emitInstructionAnnot(const Instruction *I,
102 formatted_raw_ostream &OS) override {
103 if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
104 OS << "; " << *MA << "\n";
105 }
106};
107
108} // end namespace llvm
109
110namespace {
111
112/// Our current alias analysis API differentiates heavily between calls and
113/// non-calls, and functions called on one usually assert on the other.
114/// This class encapsulates the distinction to simplify other code that wants
115/// "Memory affecting instructions and related data" to use as a key.
116/// For example, this class is used as a densemap key in the use optimizer.
117class MemoryLocOrCall {
118public:
119 bool IsCall = false;
120
121 MemoryLocOrCall() = default;
122 MemoryLocOrCall(MemoryUseOrDef *MUD)
123 : MemoryLocOrCall(MUD->getMemoryInst()) {}
124 MemoryLocOrCall(const MemoryUseOrDef *MUD)
125 : MemoryLocOrCall(MUD->getMemoryInst()) {}
126
127 MemoryLocOrCall(Instruction *Inst) {
128 if (ImmutableCallSite(Inst)) {
129 IsCall = true;
130 CS = ImmutableCallSite(Inst);
131 } else {
132 IsCall = false;
133 // There is no such thing as a memorylocation for a fence inst, and it is
134 // unique in that regard.
135 if (!isa<FenceInst>(Inst))
136 Loc = MemoryLocation::get(Inst);
137 }
138 }
139
140 explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}
141
142 ImmutableCallSite getCS() const {
143 assert(IsCall);
144 return CS;
145 }
146
147 MemoryLocation getLoc() const {
148 assert(!IsCall);
149 return Loc;
150 }
151
152 bool operator==(const MemoryLocOrCall &Other) const {
153 if (IsCall != Other.IsCall)
154 return false;
155
156 if (!IsCall)
157 return Loc == Other.Loc;
158
159 if (CS.getCalledValue() != Other.CS.getCalledValue())
160 return false;
161
162 return CS.arg_size() == Other.CS.arg_size() &&
163 std::equal(CS.arg_begin(), CS.arg_end(), Other.CS.arg_begin());
164 }
165
166private:
167 union {
168 ImmutableCallSite CS;
169 MemoryLocation Loc;
170 };
171};
172
173} // end anonymous namespace
174
175namespace llvm {
176
177template <> struct DenseMapInfo<MemoryLocOrCall> {
178 static inline MemoryLocOrCall getEmptyKey() {
179 return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
180 }
181
182 static inline MemoryLocOrCall getTombstoneKey() {
183 return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
184 }
185
186 static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
187 if (!MLOC.IsCall)
188 return hash_combine(
189 MLOC.IsCall,
190 DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));
191
192 hash_code hash =
193 hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
194 MLOC.getCS().getCalledValue()));
195
196 for (const Value *Arg : MLOC.getCS().args())
197 hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
198 return hash;
199 }
200
201 static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
202 return LHS == RHS;
203 }
204};
205
206} // end namespace llvm
207
208/// This does one-way checks to see if Use could theoretically be hoisted above
209/// MayClobber. This will not check the other way around.
210///
211/// This assumes that, for the purposes of MemorySSA, Use comes directly after
212/// MayClobber, with no potentially clobbering operations in between them.
213/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
214static bool areLoadsReorderable(const LoadInst *Use,
215 const LoadInst *MayClobber) {
216 bool VolatileUse = Use->isVolatile();
217 bool VolatileClobber = MayClobber->isVolatile();
218 // Volatile operations may never be reordered with other volatile operations.
219 if (VolatileUse && VolatileClobber)
220 return false;
221 // Otherwise, volatile doesn't matter here. From the language reference:
222 // 'optimizers may change the order of volatile operations relative to
223// non-volatile operations.'
224
225 // If a load is seq_cst, it cannot be moved above other loads. If its ordering
226 // is weaker, it can be moved above other loads. We just need to be sure that
227 // MayClobber isn't an acquire load, because loads can't be moved above
228 // acquire loads.
229 //
230 // Note that this explicitly *does* allow the free reordering of monotonic (or
231 // weaker) loads of the same address.
232 bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
233 bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
234 AtomicOrdering::Acquire);
235 return !(SeqCstUse || MayClobberIsAcquire);
236}
237
238namespace {
239
240struct ClobberAlias {
241 bool IsClobber;
242 Optional<AliasResult> AR;
243};
244
245} // end anonymous namespace
246
247// Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
248// ignored if IsClobber = false.
249static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
250 const MemoryLocation &UseLoc,
251 const Instruction *UseInst,
252 AliasAnalysis &AA) {
253 Instruction *DefInst = MD->getMemoryInst();
254 assert(DefInst && "Defining instruction not actually an instruction");
255 ImmutableCallSite UseCS(UseInst);
256 Optional<AliasResult> AR;
257
258 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
259 // These intrinsics will show up as affecting memory, but they are just
260 // markers.
261 switch (II->getIntrinsicID()) {
262 case Intrinsic::lifetime_start:
263 if (UseCS)
264 return {false, NoAlias};
265 AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc);
266 return {AR == MustAlias, AR};
267 case Intrinsic::lifetime_end:
268 case Intrinsic::invariant_start:
269 case Intrinsic::invariant_end:
270 case Intrinsic::assume:
271 return {false, NoAlias};
272 default:
273 break;
274 }
275 }
276
277 if (UseCS) {
278 ModRefInfo I = AA.getModRefInfo(DefInst, UseCS);
279 AR = isMustSet(I) ? MustAlias : MayAlias;
280 return {isModOrRefSet(I), AR};
281 }
282
283 if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
284 if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
285 return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias};
286
287 ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
288 AR = isMustSet(I) ? MustAlias : MayAlias;
289 return {isModSet(I), AR};
290}
291
292static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
293 const MemoryUseOrDef *MU,
294 const MemoryLocOrCall &UseMLOC,
295 AliasAnalysis &AA) {
296 // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
297 // to exist while MemoryLocOrCall is pushed through places.
298 if (UseMLOC.IsCall)
299 return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
300 AA);
301 return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
302 AA);
303}
304
305// Return true when MD may alias MU, return false otherwise.
306bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
307 AliasAnalysis &AA) {
308 return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
309}
310
311namespace {
312
313struct UpwardsMemoryQuery {
314 // True if our original query started off as a call
315 bool IsCall = false;
316 // The pointer location we started the query with. This will be empty if
317 // IsCall is true.
318 MemoryLocation StartingLoc;
319 // This is the instruction we were querying about.
320 const Instruction *Inst = nullptr;
321 // The MemoryAccess we actually got called with, used to test local domination
322 const MemoryAccess *OriginalAccess = nullptr;
323 Optional<AliasResult> AR = MayAlias;
324
325 UpwardsMemoryQuery() = default;
326
327 UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
328 : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) {
329 if (!IsCall)
330 StartingLoc = MemoryLocation::get(Inst);
331 }
332};
333
334} // end anonymous namespace
335
336static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
337 AliasAnalysis &AA) {
338 Instruction *Inst = MD->getMemoryInst();
339 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
340 switch (II->getIntrinsicID()) {
341 case Intrinsic::lifetime_end:
342 return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
343 default:
344 return false;
345 }
346 }
347 return false;
348}
349
350static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
351 const Instruction *I) {
352 // If the memory can't be changed, then loads of the memory can't be
353 // clobbered.
354 //
355 // FIXME: We should handle invariant groups, as well. It's a bit harder,
356 // because we need to pay close attention to invariant group barriers.
357 return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
358 AA.pointsToConstantMemory(cast<LoadInst>(I)->
359 getPointerOperand()));
360}
361
362/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
363/// in between `Start` and `ClobberAt` clobbers `Start`.
364///
365/// This is meant to be as simple and self-contained as possible. Because it
366/// uses no cache, etc., it can be relatively expensive.
367///
368/// \param Start The MemoryAccess that we want to walk from.
369/// \param ClobberAt A clobber for Start.
370/// \param StartLoc The MemoryLocation for Start.
371/// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
372/// \param Query The UpwardsMemoryQuery we used for our search.
373/// \param AA The AliasAnalysis we used for our search.
374static void LLVM_ATTRIBUTE_UNUSED
375checkClobberSanity(MemoryAccess *Start, MemoryAccess *ClobberAt,
376 const MemoryLocation &StartLoc, const MemorySSA &MSSA,
377 const UpwardsMemoryQuery &Query, AliasAnalysis &AA) {
378 assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
379
380 if (MSSA.isLiveOnEntryDef(Start)) {
381 assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
382 "liveOnEntry must clobber itself");
383 return;
384 }
385
386 bool FoundClobber = false;
387 DenseSet<MemoryAccessPair> VisitedPhis;
388 SmallVector<MemoryAccessPair, 8> Worklist;
389 Worklist.emplace_back(Start, StartLoc);
390 // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
391 // is found, complain.
392 while (!Worklist.empty()) {
393 MemoryAccessPair MAP = Worklist.pop_back_val();
394 // All we care about is that nothing from Start to ClobberAt clobbers Start.
395 // We learn nothing from revisiting nodes.
396 if (!VisitedPhis.insert(MAP).second)
397 continue;
398
399 for (MemoryAccess *MA : def_chain(MAP.first)) {
400 if (MA == ClobberAt) {
401 if (auto *MD = dyn_cast<MemoryDef>(MA)) {
402 // instructionClobbersQuery isn't essentially free, so don't use `|=`,
403 // since it won't let us short-circuit.
404 //
405 // Also, note that this can't be hoisted out of the `Worklist` loop,
406 // since MD may only act as a clobber for 1 of N MemoryLocations.
407 FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
408 if (!FoundClobber) {
409 ClobberAlias CA =
410 instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
411 if (CA.IsClobber) {
412 FoundClobber = true;
413 // Not used: CA.AR;
414 }
415 }
416 }
417 break;
418 }
419
420 // We should never hit liveOnEntry, unless it's the clobber.
421 assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");
422
423 if (auto *MD = dyn_cast<MemoryDef>(MA)) {
424 (void)MD;
425 assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
426 .IsClobber &&
427 "Found clobber before reaching ClobberAt!");
428 continue;
429 }
430
431 assert(isa<MemoryPhi>(MA));
432 Worklist.append(upward_defs_begin({MA, MAP.second}), upward_defs_end());
433 }
434 }
435
436 // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
437 // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
438 assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
439 "ClobberAt never acted as a clobber");
440}
441
442namespace {
443
444/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
445/// in one class.
446class ClobberWalker {
447 /// Save a few bytes by using unsigned instead of size_t.
448 using ListIndex = unsigned;
449
450 /// Represents a span of contiguous MemoryDefs, potentially ending in a
451 /// MemoryPhi.
452 struct DefPath {
453 MemoryLocation Loc;
454 // Note that, because we always walk in reverse, Last will always dominate
455 // First. Also note that First and Last are inclusive.
456 MemoryAccess *First;
457 MemoryAccess *Last;
458 Optional<ListIndex> Previous;
459
460 DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
461 Optional<ListIndex> Previous)
462 : Loc(Loc), First(First), Last(Last), Previous(Previous) {}
463
464 DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
465 Optional<ListIndex> Previous)
466 : DefPath(Loc, Init, Init, Previous) {}
467 };
468
469 const MemorySSA &MSSA;
470 AliasAnalysis &AA;
471 DominatorTree &DT;
472 UpwardsMemoryQuery *Query;
473
474 // Phi optimization bookkeeping
475 SmallVector<DefPath, 32> Paths;
476 DenseSet<ConstMemoryAccessPair> VisitedPhis;
477
478 /// Find the nearest def or phi that `From` can legally be optimized to.
479 const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
480 assert(From->getNumOperands() && "Phi with no operands?");
481
482 BasicBlock *BB = From->getBlock();
483 MemoryAccess *Result = MSSA.getLiveOnEntryDef();
484 DomTreeNode *Node = DT.getNode(BB);
485 while ((Node = Node->getIDom())) {
486 auto *Defs = MSSA.getBlockDefs(Node->getBlock());
487 if (Defs)
488 return &*Defs->rbegin();
489 }
490 return Result;
491 }
492
493 /// Result of calling walkToPhiOrClobber.
494 struct UpwardsWalkResult {
495 /// The "Result" of the walk. Either a clobber, the last thing we walked, or
496 /// both. Include alias info when clobber found.
497 MemoryAccess *Result;
498 bool IsKnownClobber;
499 Optional<AliasResult> AR;
500 };
501
502 /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
503 /// This will update Desc.Last as it walks. It will (optionally) also stop at
504 /// StopAt.
505 ///
506 /// This does not test for whether StopAt is a clobber
507 UpwardsWalkResult
508 walkToPhiOrClobber(DefPath &Desc,
509 const MemoryAccess *StopAt = nullptr) const {
510 assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
511
512 for (MemoryAccess *Current : def_chain(Desc.Last)) {
513 Desc.Last = Current;
514 if (Current == StopAt)
515 return {Current, false, MayAlias};
516
517 if (auto *MD = dyn_cast<MemoryDef>(Current)) {
518 if (MSSA.isLiveOnEntryDef(MD))
519 return {MD, true, MustAlias};
520 ClobberAlias CA =
521 instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
522 if (CA.IsClobber)
523 return {MD, true, CA.AR};
524 }
525 }
526
527 assert(isa<MemoryPhi>(Desc.Last) &&
528 "Ended at a non-clobber that's not a phi?");
529 return {Desc.Last, false, MayAlias};
530 }
531
532 void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
533 ListIndex PriorNode) {
534 auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
535 upward_defs_end());
536 for (const MemoryAccessPair &P : UpwardDefs) {
537 PausedSearches.push_back(Paths.size());
538 Paths.emplace_back(P.second, P.first, PriorNode);
539 }
540 }
541
542 /// Represents a search that terminated after finding a clobber. This clobber
543 /// may or may not be present in the path of defs from LastNode..SearchStart,
544 /// since it may have been retrieved from cache.
545 struct TerminatedPath {
546 MemoryAccess *Clobber;
547 ListIndex LastNode;
548 };
549
550 /// Get an access that keeps us from optimizing to the given phi.
551 ///
552 /// PausedSearches is an array of indices into the Paths array. Its incoming
553 /// value is the indices of searches that stopped at the last phi optimization
554 /// target. It's left in an unspecified state.
555 ///
556 /// If this returns None, NewPaused is a vector of searches that terminated
557 /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
558 Optional<TerminatedPath>
559 getBlockingAccess(const MemoryAccess *StopWhere,
560 SmallVectorImpl<ListIndex> &PausedSearches,
561 SmallVectorImpl<ListIndex> &NewPaused,
562 SmallVectorImpl<TerminatedPath> &Terminated) {
563 assert(!PausedSearches.empty() && "No searches to continue?");
564
565 // BFS vs DFS really doesn't make a difference here, so just do a DFS with
566 // PausedSearches as our stack.
567 while (!PausedSearches.empty()) {
568 ListIndex PathIndex = PausedSearches.pop_back_val();
569 DefPath &Node = Paths[PathIndex];
570
571 // If we've already visited this path with this MemoryLocation, we don't
572 // need to do so again.
573 //
574 // NOTE: That we just drop these paths on the ground makes caching
575 // behavior sporadic. e.g. given a diamond:
576 //  A
577 // B C
578 //  D
579 //
580 // ...If we walk D, B, A, C, we'll only cache the result of phi
581 // optimization for A, B, and D; C will be skipped because it dies here.
582 // This arguably isn't the worst thing ever, since:
583 // - We generally query things in a top-down order, so if we got below D
584 // without needing cache entries for {C, MemLoc}, then chances are
585 // that those cache entries would end up ultimately unused.
586 // - We still cache things for A, so C only needs to walk up a bit.
587 // If this behavior becomes problematic, we can fix without a ton of extra
588 // work.
589 if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
590 continue;
591
592 UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere);
593 if (Res.IsKnownClobber) {
594 assert(Res.Result != StopWhere);
595 // If this wasn't a cache hit, we hit a clobber when walking. That's a
596 // failure.
597 TerminatedPath Term{Res.Result, PathIndex};
598 if (!MSSA.dominates(Res.Result, StopWhere))
599 return Term;
600
601 // Otherwise, it's a valid thing to potentially optimize to.
602 Terminated.push_back(Term);
603 continue;
604 }
605
606 if (Res.Result == StopWhere) {
607 // We've hit our target. Save this path off for if we want to continue
608 // walking.
609 NewPaused.push_back(PathIndex);
610 continue;
611 }
612
613 assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
614 addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
615 }
616
617 return None;
618 }
619
620 template <typename T, typename Walker>
621 struct generic_def_path_iterator
622 : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
623 std::forward_iterator_tag, T *> {
624 generic_def_path_iterator() = default;
625 generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}
626
627 T &operator*() const { return curNode(); }
628
629 generic_def_path_iterator &operator++() {
630 N = curNode().Previous;
631 return *this;
632 }
633
634 bool operator==(const generic_def_path_iterator &O) const {
635 if (N.hasValue() != O.N.hasValue())
636 return false;
637 return !N.hasValue() || *N == *O.N;
638 }
639
640 private:
641 T &curNode() const { return W->Paths[*N]; }
642
643 Walker *W = nullptr;
644 Optional<ListIndex> N = None;
645 };
646
647 using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
648 using const_def_path_iterator =
649 generic_def_path_iterator<const DefPath, const ClobberWalker>;
650
651 iterator_range<def_path_iterator> def_path(ListIndex From) {
652 return make_range(def_path_iterator(this, From), def_path_iterator());
653 }
654
655 iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
656 return make_range(const_def_path_iterator(this, From),
657 const_def_path_iterator());
658 }
659
660 struct OptznResult {
661 /// The path that contains our result.
662 TerminatedPath PrimaryClobber;
663 /// The paths that we can legally cache back from, but that aren't
664 /// necessarily the result of the Phi optimization.
665 SmallVector<TerminatedPath, 4> OtherClobbers;
666 };
667
668 ListIndex defPathIndex(const DefPath &N) const {
669 // The assert looks nicer if we don't need to do &N
670 const DefPath *NP = &N;
671 assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
672 "Out of bounds DefPath!");
673 return NP - &Paths.front();
674 }
675
676 /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
677 /// that act as legal clobbers. Note that this won't return *all* clobbers.
678 ///
679 /// Phi optimization algorithm tl;dr:
680 /// - Find the earliest def/phi, A, we can optimize to
681 /// - Find if all paths from the starting memory access ultimately reach A
682 /// - If not, optimization isn't possible.
683 /// - Otherwise, walk from A to another clobber or phi, A'.
684 /// - If A' is a def, we're done.
685 /// - If A' is a phi, try to optimize it.
686 ///
687 /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
688 /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
689 OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
690 const MemoryLocation &Loc) {
691 assert(Paths.empty() && VisitedPhis.empty() &&
692 "Reset the optimization state.");
693
694 Paths.emplace_back(Loc, Start, Phi, None);
695 // Stores how many "valid" optimization nodes we had prior to calling
696 // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
697 auto PriorPathsSize = Paths.size();
698
699 SmallVector<ListIndex, 16> PausedSearches;
700 SmallVector<ListIndex, 8> NewPaused;
701 SmallVector<TerminatedPath, 4> TerminatedPaths;
702
703 addSearches(Phi, PausedSearches, 0);
704
705 // Moves the TerminatedPath with the "most dominated" Clobber to the end of
706 // Paths.
707 auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
708 assert(!Paths.empty() && "Need a path to move")(static_cast <bool> (!Paths.empty() && "Need a path to move"
) ? void (0) : __assert_fail ("!Paths.empty() && \"Need a path to move\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Analysis/MemorySSA.cpp"
, 708, __extension__ __PRETTY_FUNCTION__))
;
709 auto Dom = Paths.begin();
710 for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
711 if (!MSSA.dominates(I->Clobber, Dom->Clobber))
712 Dom = I;
713 auto Last = Paths.end() - 1;
714 if (Last != Dom)
715 std::iter_swap(Last, Dom);
716 };
717
718 MemoryPhi *Current = Phi;
719 while (true) {
720 assert(!MSSA.isLiveOnEntryDef(Current) &&
721 "liveOnEntry wasn't treated as a clobber?");
722
723 const auto *Target = getWalkTarget(Current);
724 // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
725 // optimization for the prior phi.
726 assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
727 return MSSA.dominates(P.Clobber, Target);
728 }));
729
730 // FIXME: This is broken, because the Blocker may be reported to be
731 // liveOnEntry, and we'll happily wait for that to disappear (read: never)
732 // For the moment, this is fine, since we do nothing with blocker info.
733 if (Optional<TerminatedPath> Blocker = getBlockingAccess(
734 Target, PausedSearches, NewPaused, TerminatedPaths)) {
735
736 // Find the node we started at. We can't search based on N->Last, since
737 // we may have gone around a loop with a different MemoryLocation.
738 auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
739 return defPathIndex(N) < PriorPathsSize;
740 });
741 assert(Iter != def_path_iterator());
742
743 DefPath &CurNode = *Iter;
744 assert(CurNode.Last == Current);
745
746 // Two things:
747 // A. We can't reliably cache all of NewPaused back. Consider a case
748 // where we have two paths in NewPaused; one of which can't optimize
749 // above this phi, whereas the other can. If we cache the second path
750 // back, we'll end up with suboptimal cache entries. We can handle
751 // cases like this a bit better when we either try to find all
752 // clobbers that block phi optimization, or when our cache starts
753 // supporting unfinished searches.
754 // B. We can't reliably cache TerminatedPaths back here without doing
755 // extra checks; consider a case like:
756 //      T
757 //     / \
758 //    D   C
759 //     \ /
760 //      S
761 // Where T is our target, C is a node with a clobber on it, D is a
762 // diamond (with a clobber *only* on the left or right node, N), and
763 // S is our start. Say we walk to D, through the node opposite N
764 // (read: ignoring the clobber), and see a cache entry in the top
765 // node of D. That cache entry gets put into TerminatedPaths. We then
766 // walk up to C (N is later in our worklist), find the clobber, and
767 // quit. If we append TerminatedPaths to OtherClobbers, we'll cache
768 // the bottom part of D to the cached clobber, ignoring the clobber
769 // in N. Again, this problem goes away if we start tracking all
770 // blockers for a given phi optimization.
771 TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
772 return {Result, {}};
773 }
774
775 // If there's nothing left to search, then all paths led to valid clobbers
776 // that we got from our cache; pick the nearest to the start, and allow
777 // the rest to be cached back.
778 if (NewPaused.empty()) {
779 MoveDominatedPathToEnd(TerminatedPaths);
780 TerminatedPath Result = TerminatedPaths.pop_back_val();
781 return {Result, std::move(TerminatedPaths)};
782 }
783
784 MemoryAccess *DefChainEnd = nullptr;
785 SmallVector<TerminatedPath, 4> Clobbers;
786 for (ListIndex Paused : NewPaused) {
787 UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
788 if (WR.IsKnownClobber)
789 Clobbers.push_back({WR.Result, Paused});
790 else
791 // Micro-opt: If we hit the end of the chain, save it.
792 DefChainEnd = WR.Result;
793 }
794
795 if (!TerminatedPaths.empty()) {
796 // If we couldn't find the dominating phi/liveOnEntry in the above loop,
797 // do it now.
798 if (!DefChainEnd)
799 for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
800 DefChainEnd = MA;
801
802 // If any of the terminated paths don't dominate the phi we'll try to
803 // optimize, we need to figure out what they are and quit.
804 const BasicBlock *ChainBB = DefChainEnd->getBlock();
805 for (const TerminatedPath &TP : TerminatedPaths) {
806 // Because we know that DefChainEnd is as "high" as we can go, we
807 // don't need local dominance checks; BB dominance is sufficient.
808 if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
809 Clobbers.push_back(TP);
810 }
811 }
812
813 // If we have clobbers in the def chain, find the one closest to Current
814 // and quit.
815 if (!Clobbers.empty()) {
816 MoveDominatedPathToEnd(Clobbers);
817 TerminatedPath Result = Clobbers.pop_back_val();
818 return {Result, std::move(Clobbers)};
819 }
820
821 assert(all_of(NewPaused,
822 [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));
823
824 // Because liveOnEntry is a clobber, this must be a phi.
825 auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
826
827 PriorPathsSize = Paths.size();
828 PausedSearches.clear();
829 for (ListIndex I : NewPaused)
830 addSearches(DefChainPhi, PausedSearches, I);
831 NewPaused.clear();
832
833 Current = DefChainPhi;
834 }
835 }
836
837 void verifyOptResult(const OptznResult &R) const {
838 assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
839 return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
840 }));
841 }
842
843 void resetPhiOptznState() {
844 Paths.clear();
845 VisitedPhis.clear();
846 }
847
848public:
849 ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT)
850 : MSSA(MSSA), AA(AA), DT(DT) {}
851
852 /// Finds the nearest clobber for the given query, optimizing phis if
853 /// possible.
854 MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) {
855 Query = &Q;
856
857 MemoryAccess *Current = Start;
858 // This walker pretends uses don't exist. If we're handed one, silently grab
859 // its def. (This has the nice side-effect of ensuring we never cache uses)
860 if (auto *MU = dyn_cast<MemoryUse>(Start))
861 Current = MU->getDefiningAccess();
862
863 DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
864 // Fast path for the overly-common case (no crazy phi optimization
865 // necessary)
866 UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
867 MemoryAccess *Result;
868 if (WalkResult.IsKnownClobber) {
869 Result = WalkResult.Result;
870 Q.AR = WalkResult.AR;
871 } else {
872 OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
873 Current, Q.StartingLoc);
874 verifyOptResult(OptRes);
875 resetPhiOptznState();
876 Result = OptRes.PrimaryClobber.Clobber;
877 }
878
879#ifdef EXPENSIVE_CHECKS
880 checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
881#endif
882 return Result;
883 }
884
885 void verify(const MemorySSA *MSSA) { assert(MSSA == &this->MSSA); }
886};
887
888struct RenamePassData {
889 DomTreeNode *DTN;
890 DomTreeNode::const_iterator ChildIt;
891 MemoryAccess *IncomingVal;
892
893 RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
894 MemoryAccess *M)
895 : DTN(D), ChildIt(It), IncomingVal(M) {}
896
897 void swap(RenamePassData &RHS) {
898 std::swap(DTN, RHS.DTN);
899 std::swap(ChildIt, RHS.ChildIt);
900 std::swap(IncomingVal, RHS.IncomingVal);
901 }
902};
903
904} // end anonymous namespace
905
906namespace llvm {
907
908/// \brief A MemorySSAWalker that does AA walks to disambiguate accesses. It no
909/// longer does caching on its own,
910/// but the name has been retained for the moment.
911class MemorySSA::CachingWalker final : public MemorySSAWalker {
912 ClobberWalker Walker;
913
914 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);
915
916public:
917 CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
918 ~CachingWalker() override = default;
919
920 using MemorySSAWalker::getClobberingMemoryAccess;
921
922 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
923 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
924 const MemoryLocation &) override;
925 void invalidateInfo(MemoryAccess *) override;
926
927 void verify(const MemorySSA *MSSA) override {
928 MemorySSAWalker::verify(MSSA);
929 Walker.verify(MSSA);
930 }
931};
932
933} // end namespace llvm
934
935void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
936 bool RenameAllUses) {
937 // Pass through values to our successors
938 for (const BasicBlock *S : successors(BB)) {
939 auto It = PerBlockAccesses.find(S);
940 // Rename the phi nodes in our successor block
941 if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
942 continue;
943 AccessList *Accesses = It->second.get();
944 auto *Phi = cast<MemoryPhi>(&Accesses->front());
945 if (RenameAllUses) {
946 int PhiIndex = Phi->getBasicBlockIndex(BB);
947 assert(PhiIndex != -1 && "Incomplete phi during partial rename");
948 Phi->setIncomingValue(PhiIndex, IncomingVal);
949 } else
950 Phi->addIncoming(IncomingVal, BB);
951 }
952}
953
954/// \brief Rename a single basic block into MemorySSA form.
955/// Uses the standard SSA renaming algorithm.
956/// \returns The new incoming value.
957MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
958 bool RenameAllUses) {
959 auto It = PerBlockAccesses.find(BB);
960 // Skip most processing if the list is empty.
961 if (It != PerBlockAccesses.end()) {
962 AccessList *Accesses = It->second.get();
963 for (MemoryAccess &L : *Accesses) {
964 if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
965 if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
966 MUD->setDefiningAccess(IncomingVal);
967 if (isa<MemoryDef>(&L))
968 IncomingVal = &L;
969 } else {
970 IncomingVal = &L;
971 }
972 }
973 }
974 return IncomingVal;
975}
976
977/// \brief This is the standard SSA renaming algorithm.
978///
979/// We walk the dominator tree in preorder, renaming accesses, and then filling
980/// in phi nodes in our successors.
981void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
982 SmallPtrSetImpl<BasicBlock *> &Visited,
983 bool SkipVisited, bool RenameAllUses) {
984 SmallVector<RenamePassData, 32> WorkStack;
985 // Skip everything if we already renamed this block and we are skipping.
986 // Note: You can't sink this into the if, because we need it to occur
987 // regardless of whether we skip blocks or not.
988 bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
989 if (SkipVisited && AlreadyVisited)
990 return;
991
992 IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
993 renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
994 WorkStack.push_back({Root, Root->begin(), IncomingVal});
995
996 while (!WorkStack.empty()) {
997 DomTreeNode *Node = WorkStack.back().DTN;
998 DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
999 IncomingVal = WorkStack.back().IncomingVal;
1000
1001 if (ChildIt == Node->end()) {
1002 WorkStack.pop_back();
1003 } else {
1004 DomTreeNode *Child = *ChildIt;
1005 ++WorkStack.back().ChildIt;
1006 BasicBlock *BB = Child->getBlock();
1007 // Note: You can't sink this into the if, because we need it to occur
1008 // regardless of whether we skip blocks or not.
1009 AlreadyVisited = !Visited.insert(BB).second;
1010 if (SkipVisited && AlreadyVisited) {
1011 // We already visited this during our renaming, which can happen when
1012 // being asked to rename multiple blocks. Figure out the incoming val,
1013 // which is the last def.
1014 // Incoming value can only change if there is a block def, and in that
1015 // case, it's the last block def in the list.
1016 if (auto *BlockDefs = getWritableBlockDefs(BB))
1017 IncomingVal = &*BlockDefs->rbegin();
1018 } else
1019 IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
1020 renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
1021 WorkStack.push_back({Child, Child->begin(), IncomingVal});
1022 }
1023 }
1024}
1025
1026/// \brief This handles unreachable block accesses by deleting phi nodes in
1027/// unreachable blocks, and marking all other unreachable MemoryAccess's as
1028/// being uses of the live on entry definition.
1029void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
1030 assert(!DT->isReachableFromEntry(BB) &&
1031 "Reachable block found while handling unreachable blocks");
1032
1033 // Make sure phi nodes in our reachable successors end up with a
1034 // LiveOnEntryDef for our incoming edge, even though our block is forward
1035 // unreachable. We could just disconnect these blocks from the CFG fully,
1036 // but we do not right now.
1037 for (const BasicBlock *S : successors(BB)) {
1038 if (!DT->isReachableFromEntry(S))
1039 continue;
1040 auto It = PerBlockAccesses.find(S);
1041 // Rename the phi nodes in our successor block
1042 if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1043 continue;
1044 AccessList *Accesses = It->second.get();
1045 auto *Phi = cast<MemoryPhi>(&Accesses->front());
1046 Phi->addIncoming(LiveOnEntryDef.get(), BB);
1047 }
1048
1049 auto It = PerBlockAccesses.find(BB);
1050 if (It == PerBlockAccesses.end())
1051 return;
1052
1053 auto &Accesses = It->second;
1054 for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
1055 auto Next = std::next(AI);
1056 // If we have a phi, just remove it. We are going to replace all
1057 // users with live on entry.
1058 if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
1059 UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
1060 else
1061 Accesses->erase(AI);
1062 AI = Next;
1063 }
1064}
1065
1066MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
1067 : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
1068 NextID(0) {
1069 buildMemorySSA();
1070}
1071
1072MemorySSA::~MemorySSA() {
1073 // Drop all our references
1074 for (const auto &Pair : PerBlockAccesses)
1075 for (MemoryAccess &MA : *Pair.second)
1076 MA.dropAllReferences();
1077}
1078
1079MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
1080 auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
1081
1082 if (Res.second)
1083 Res.first->second = llvm::make_unique<AccessList>();
1084 return Res.first->second.get();
1085}
1086
1087MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
1088 auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));
1089
1090 if (Res.second)
1091 Res.first->second = llvm::make_unique<DefsList>();
1092 return Res.first->second.get();
1093}
1094
1095namespace llvm {
1096
1097/// This class is a batch walker of all MemoryUses in the program, and points
1098/// their defining access at the thing that actually clobbers them. Because it
1099/// is a batch walker that touches everything, it does not operate like the
1100/// other walkers. This walker is basically performing a top-down SSA renaming
1101/// pass, where the version stack is used as the cache. This enables it to be
1102/// significantly more time and memory efficient than using the regular walker,
1103/// which walks bottom-up.
1104class MemorySSA::OptimizeUses {
1105public:
1106 OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA,
1107 DominatorTree *DT)
1108 : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) {
1109 Walker = MSSA->getWalker();
Value stored to 'Walker' is never read
1110 }
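The dead store flagged above is a name-shadowing artifact: the constructor parameter Walker hides the member of the same name, so the assignment in the body writes to the parameter, which is discarded when the constructor returns, while the member keeps the value from the init list. Since the only caller (buildMemorySSA, below) already passes getWalkerImpl(), one minimal fix, sketched here rather than quoted from any later revision, is to drop the redundant assignment:

  OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA,
               DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) {}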
1111
1112 void optimizeUses();
1113
1114private:
1115 /// This represents where a given MemoryLocation is in the stack.
1116 struct MemlocStackInfo {
1117 // This essentially is keeping track of versions of the stack. Whenever
1118 // the stack changes due to pushes or pops, these versions increase.
1119 unsigned long StackEpoch;
1120 unsigned long PopEpoch;
1121 // This is the lower bound of places on the stack to check. It is equal to
1122 // the place the last stack walk ended.
1123 // Note: Correctness depends on this being initialized to 0, which DenseMap
1124 // does.
1125 unsigned long LowerBound;
1126 const BasicBlock *LowerBoundBlock;
1127 // This is where the last walk for this memory location ended.
1128 unsigned long LastKill;
1129 bool LastKillValid;
1130 Optional<AliasResult> AR;
1131 };
1132
1133 void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
1134 SmallVectorImpl<MemoryAccess *> &,
1135 DenseMap<MemoryLocOrCall, MemlocStackInfo> &);
1136
1137 MemorySSA *MSSA;
1138 MemorySSAWalker *Walker;
1139 AliasAnalysis *AA;
1140 DominatorTree *DT;
1141};
1142
1143} // end namespace llvm
1144
1145/// Optimize the uses in a given block. This is basically the SSA renaming
1146/// algorithm, with one caveat: We are able to use a single stack for all
1147/// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
1148/// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
1149/// going to be some position in that stack of possible ones.
1150///
1151/// We track the stack positions that each MemoryLocation needs
1152/// to check, and last ended at. This is because we only want to check the
1153/// things that changed since last time. The same MemoryLocation should
1154/// get clobbered by the same store (getModRefInfo does not currently use
1155/// invariantness or similar properties; if it starts to, we can extend
1156/// MemoryLocOrCall to include the relevant data).
1157void MemorySSA::OptimizeUses::optimizeUsesInBlock(
1158 const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
1159 SmallVectorImpl<MemoryAccess *> &VersionStack,
1160 DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {
1161
1162 // If there are no accesses, there is nothing to do.
1163 MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
1164 if (Accesses == nullptr)
1165 return;
1166
1167 // Pop everything that doesn't dominate the current block off the stack,
1168 // and increment the PopEpoch to account for this.
1169 while (true) {
1170 assert(
1171 !VersionStack.empty() &&
1172 "Version stack should have liveOnEntry sentinel dominating everything");
1173 BasicBlock *BackBlock = VersionStack.back()->getBlock();
1174 if (DT->dominates(BackBlock, BB))
1175 break;
1176 while (VersionStack.back()->getBlock() == BackBlock)
1177 VersionStack.pop_back();
1178 ++PopEpoch;
1179 }
1180
1181 for (MemoryAccess &MA : *Accesses) {
1182 auto *MU = dyn_cast<MemoryUse>(&MA);
1183 if (!MU) {
1184 VersionStack.push_back(&MA);
1185 ++StackEpoch;
1186 continue;
1187 }
1188
1189 if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
1190 MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
1191 continue;
1192 }
1193
1194 MemoryLocOrCall UseMLOC(MU);
1195 auto &LocInfo = LocStackInfo[UseMLOC];
1196 // If the pop epoch changed, it means we've removed stuff from the top of
1197 // the stack due to changing blocks. We may have to reset the lower bound or
1198 // last kill info.
1199 if (LocInfo.PopEpoch != PopEpoch) {
1200 LocInfo.PopEpoch = PopEpoch;
1201 LocInfo.StackEpoch = StackEpoch;
1202 // If the lower bound was in something that no longer dominates us, we
1203 // have to reset it.
1204 // We can't simply track stack size, because the stack may have had
1205 // pushes/pops in the meantime.
1206 // XXX: This is non-optimal, but is only slower in cases with heavily
1207 // branching dominator trees. Getting the optimal number of queries would
1208 // require making LowerBound and LastKill per-location stacks, and popping
1209 // them until the top of the stack dominates us. This does not seem worth it ATM.
1210 // A much cheaper optimization would be to always explore the deepest
1211 // branch of the dominator tree first. This would guarantee the reset happens
1212 // on the smallest possible set of blocks.
1213 if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
1214 !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
1215 // Reset the lower bound of things to check.
1216 // TODO: Some day we should be able to reset to last kill, rather than
1217 // 0.
1218 LocInfo.LowerBound = 0;
1219 LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
1220 LocInfo.LastKillValid = false;
1221 }
1222 } else if (LocInfo.StackEpoch != StackEpoch) {
1223 // If all that has changed is the StackEpoch, we only have to check the
1224 // new things on the stack, because we've checked everything before. In
1225 // this case, the lower bound of things to check remains the same.
1226 LocInfo.PopEpoch = PopEpoch;
1227 LocInfo.StackEpoch = StackEpoch;
1228 }
1229 if (!LocInfo.LastKillValid) {
1230 LocInfo.LastKill = VersionStack.size() - 1;
1231 LocInfo.LastKillValid = true;
1232 LocInfo.AR = MayAlias;
1233 }
1234
1235 // At this point, we should have corrected last kill and LowerBound to be
1236 // in bounds.
1237 assert(LocInfo.LowerBound < VersionStack.size() &&
1238 "Lower bound out of range");
1239 assert(LocInfo.LastKill < VersionStack.size() &&
1240 "Last kill info out of range");
1241 // In any case, the new upper bound is the top of the stack.
1242 unsigned long UpperBound = VersionStack.size() - 1;
1243
1244 if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
1245 DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
1246 << *(MU->getMemoryInst()) << ")"
1247 << " because there are " << UpperBound - LocInfo.LowerBound
1248 << " stores to disambiguate\n");
1249 // Because we did not walk, LastKill is no longer valid, as this may
1250 // have been a kill.
1251 LocInfo.LastKillValid = false;
1252 continue;
1253 }
1254 bool FoundClobberResult = false;
1255 while (UpperBound > LocInfo.LowerBound) {
1256 if (isa<MemoryPhi>(VersionStack[UpperBound])) {
1257 // For phis, use the walker, see where we ended up, go there
1258 Instruction *UseInst = MU->getMemoryInst();
1259 MemoryAccess *Result = Walker->getClobberingMemoryAccess(UseInst);
1260 // We are guaranteed to find it or something is wrong
1261 while (VersionStack[UpperBound] != Result) {
1262 assert(UpperBound != 0);
1263 --UpperBound;
1264 }
1265 FoundClobberResult = true;
1266 break;
1267 }
1268
1269 MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
1270 // If the lifetime of the pointer ends at this instruction, it's live on
1271 // entry.
1272 if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
1273 // Reset UpperBound to liveOnEntryDef's place in the stack
1274 UpperBound = 0;
1275 FoundClobberResult = true;
1276 LocInfo.AR = MustAlias;
1277 break;
1278 }
1279 ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
1280 if (CA.IsClobber) {
1281 FoundClobberResult = true;
1282 LocInfo.AR = CA.AR;
1283 break;
1284 }
1285 --UpperBound;
1286 }
1287
1288 // Note: Phis always have AliasResult AR set to MayAlias ATM.
1289
1290 // At the end of this loop, UpperBound is either a clobber or the lower
1291 // bound. PHI walking may cause it to be < LowerBound, and in fact < LastKill.
1292 if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
1293 // We were last killed by the access we walked to.
1294 if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
1295 LocInfo.AR = None;
1296 MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
1297 LocInfo.LastKill = UpperBound;
1298 } else {
1299 // Otherwise, we checked all the new ones, and now we know we can get to
1300 // LastKill.
1301 MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
1302 }
1303 LocInfo.LowerBound = VersionStack.size() - 1;
1304 LocInfo.LowerBoundBlock = BB;
1305 }
1306}
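A small walkthrough of the single version stack (an illustrative sketch, not output from the source): suppose a block contains store A; store B; then a load of A's location. After the two defs are pushed, the stack is [liveOnEntry, 1 = MemoryDef(A), 2 = MemoryDef(B)], and the load's walk starts at UpperBound = 2:

  UpperBound = 2 -> instructionClobbersQuery(B, load): no clobber
  UpperBound = 1 -> instructionClobbersQuery(A, load): clobber
  => load becomes MemoryUse(1); LastKill = 1, LowerBound = 2, LowerBoundBlock = BB

A second load of the same location in this block finds UpperBound == LowerBound, skips the walk entirely, and reuses the cached LastKill.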
1307
1308/// Optimize uses to point to their actual clobbering definitions.
1309void MemorySSA::OptimizeUses::optimizeUses() {
1310 SmallVector<MemoryAccess *, 16> VersionStack;
1311 DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
1312 VersionStack.push_back(MSSA->getLiveOnEntryDef());
1313
1314 unsigned long StackEpoch = 1;
1315 unsigned long PopEpoch = 1;
1316 // We perform a non-recursive top-down dominator tree walk.
1317 for (const auto *DomNode : depth_first(DT->getRootNode()))
1318 optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
1319 LocStackInfo);
1320}
1321
1322void MemorySSA::placePHINodes(
1323 const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks,
1324 const DenseMap<const BasicBlock *, unsigned int> &BBNumbers) {
1325 // Determine where our MemoryPhi's should go
1326 ForwardIDFCalculator IDFs(*DT);
1327 IDFs.setDefiningBlocks(DefiningBlocks);
1328 SmallVector<BasicBlock *, 32> IDFBlocks;
1329 IDFs.calculate(IDFBlocks);
1330
1331 llvm::sort(IDFBlocks.begin(), IDFBlocks.end(),
1332 [&BBNumbers](const BasicBlock *A, const BasicBlock *B) {
1333 return BBNumbers.lookup(A) < BBNumbers.lookup(B);
1334 });
1335
1336 // Now place MemoryPhi nodes.
1337 for (auto &BB : IDFBlocks)
1338 createMemoryPhi(BB);
1339}
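As an assumed example of where the IDF computation places MemoryPhis: in a diamond CFG where entry branches to left and right, which rejoin at merge, a store in left makes left a defining block; merge lies in left's iterated dominance frontier, so a MemoryPhi is created there and later prints along the lines of:

  2 = MemoryPhi({left,1},{right,liveOnEntry})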
1340
1341void MemorySSA::buildMemorySSA() {
1342 // We create an access to represent "live on entry", for things like
1343 // arguments or users of globals, where the memory they use is defined before
1344 // the beginning of the function. We do not actually insert it into the IR.
1345 // We do not define a live on exit for the immediate uses, and thus our
1346 // semantics do *not* imply that something with no immediate uses can simply
1347 // be removed.
1348 BasicBlock &StartingPoint = F.getEntryBlock();
1349 LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
1350 &StartingPoint, NextID++));
1351 DenseMap<const BasicBlock *, unsigned int> BBNumbers;
1352 unsigned NextBBNum = 0;
1353
1354 // We maintain lists of memory accesses per block, trading memory for time.
1355 // The alternative would be to look up the memory access for every possible
1356 // instruction in the stream.
1357 SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1358 // Go through each block, figure out where defs occur, and chain together all
1359 // the accesses.
1360 for (BasicBlock &B : F) {
1361 BBNumbers[&B] = NextBBNum++;
1362 bool InsertIntoDef = false;
1363 AccessList *Accesses = nullptr;
1364 DefsList *Defs = nullptr;
1365 for (Instruction &I : B) {
1366 MemoryUseOrDef *MUD = createNewAccess(&I);
1367 if (!MUD)
1368 continue;
1369
1370 if (!Accesses)
1371 Accesses = getOrCreateAccessList(&B);
1372 Accesses->push_back(MUD);
1373 if (isa<MemoryDef>(MUD)) {
1374 InsertIntoDef = true;
1375 if (!Defs)
1376 Defs = getOrCreateDefsList(&B);
1377 Defs->push_back(*MUD);
1378 }
1379 }
1380 if (InsertIntoDef)
1381 DefiningBlocks.insert(&B);
1382 }
1383 placePHINodes(DefiningBlocks, BBNumbers);
1384
1385 // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1386 // filled in with all blocks.
1387 SmallPtrSet<BasicBlock *, 16> Visited;
1388 renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1389
1390 CachingWalker *Walker = getWalkerImpl();
1391
1392 OptimizeUses(this, Walker, AA, DT).optimizeUses();
1393
1394 // Mark the uses in unreachable blocks as live on entry, so that they go
1395 // somewhere.
1396 for (auto &BB : F)
1397 if (!Visited.count(&BB))
1398 markUnreachableAsLiveOnEntry(&BB);
1399}
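Summarizing the steps just performed on a tiny assumed example: for a single block containing store i32 0, i32* %p followed by %v = load i32, i32* %p, the store's block is the only defining block, its IDF is empty (so no MemoryPhis are placed), and after renaming and use optimization the annotated function prints roughly as:

  ; 1 = MemoryDef(liveOnEntry)
  store i32 0, i32* %p
  ; MemoryUse(1)
  %v = load i32, i32* %p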
1400
1401MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
1402
1403MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
1404 if (Walker)
1405 return Walker.get();
1406
1407 Walker = llvm::make_unique<CachingWalker>(this, AA, DT);
1408 return Walker.get();
1409}
1410
1411// This is a helper function used by the creation routines. It places NewAccess
1412// into the access and defs lists for a given basic block, at the given
1413// insertion point.
1414void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1415 const BasicBlock *BB,
1416 InsertionPlace Point) {
1417 auto *Accesses = getOrCreateAccessList(BB);
1418 if (Point == Beginning) {
1419 // If it's a phi node, it goes first, otherwise, it goes after any phi
1420 // nodes.
1421 if (isa<MemoryPhi>(NewAccess)) {
1422 Accesses->push_front(NewAccess);
1423 auto *Defs = getOrCreateDefsList(BB);
1424 Defs->push_front(*NewAccess);
1425 } else {
1426 auto AI = find_if_not(
1427 *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1428 Accesses->insert(AI, NewAccess);
1429 if (!isa<MemoryUse>(NewAccess)) {
1430 auto *Defs = getOrCreateDefsList(BB);
1431 auto DI = find_if_not(
1432 *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1433 Defs->insert(DI, *NewAccess);
1434 }
1435 }
1436 } else {
1437 Accesses->push_back(NewAccess);
1438 if (!isa<MemoryUse>(NewAccess)) {
1439 auto *Defs = getOrCreateDefsList(BB);
1440 Defs->push_back(*NewAccess);
1441 }
1442 }
1443 BlockNumberingValid.erase(BB);
1444}
1445
1446void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
1447 AccessList::iterator InsertPt) {
1448 auto *Accesses = getWritableBlockAccesses(BB);
1449 bool WasEnd = InsertPt == Accesses->end();
1450 Accesses->insert(AccessList::iterator(InsertPt), What);
1451 if (!isa<MemoryUse>(What)) {
1452 auto *Defs = getOrCreateDefsList(BB);
1453 // If we got asked to insert at the end, we have an easy job, just shove it
1454 // at the end. If we got asked to insert before an existing def, we also get
1455 // an iterator. If we got asked to insert before a use, we have to hunt for
1456 // the next def.
1457 if (WasEnd) {
1458 Defs->push_back(*What);
1459 } else if (isa<MemoryDef>(InsertPt)) {
1460 Defs->insert(InsertPt->getDefsIterator(), *What);
1461 } else {
1462 while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
1463 ++InsertPt;
1464 // Either we found a def, or we are inserting at the end
1465 if (InsertPt == Accesses->end())
1466 Defs->push_back(*What);
1467 else
1468 Defs->insert(InsertPt->getDefsIterator(), *What);
1469 }
1470 }
1471 BlockNumberingValid.erase(BB);
1472}
1473
1474// Move What before Where in the IR. The end result is that What will belong to
1475// the right lists and have the right Block set, but will not otherwise be
1476// correct. It will not have the right defining access, and if it is a def,
1477// things below it will not properly be updated.
1478void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1479 AccessList::iterator Where) {
1480 // Keep it in the lookup tables, remove from the lists
1481 removeFromLists(What, false);
1482 What->setBlock(BB);
1483 insertIntoListsBefore(What, BB, Where);
1484}
1485
1486void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1487 InsertionPlace Point) {
1488 removeFromLists(What, false);
1489 What->setBlock(BB);
1490 insertIntoListsForBlock(What, BB, Point);
1491}
1492
1493MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1494 assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1495 MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
1496 // Phis are always placed at the front of the block.
1497 insertIntoListsForBlock(Phi, BB, Beginning);
1498 ValueToMemoryAccess[BB] = Phi;
1499 return Phi;
1500}
1501
1502MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1503 MemoryAccess *Definition) {
1504 assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1505 MemoryUseOrDef *NewAccess = createNewAccess(I);
1506 assert(
1507 NewAccess != nullptr &&
1508 "Tried to create a memory access for a non-memory touching instruction");
1509 NewAccess->setDefiningAccess(Definition);
1510 return NewAccess;
1511}
1512
1513// Return true if the instruction has ordering constraints.
1514// Note specifically that this only considers stores and loads
1515// because others are still considered ModRef by getModRefInfo.
1516static inline bool isOrdered(const Instruction *I) {
1517 if (auto *SI = dyn_cast<StoreInst>(I)) {
1518 if (!SI->isUnordered())
1519 return true;
1520 } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1521 if (!LI->isUnordered())
1522 return true;
1523 }
1524 return false;
1525}
1526
1527/// \brief Helper function to create new memory accesses
1528MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {
1529 // The assume intrinsic has a control dependency which we model by claiming
1530 // that it writes arbitrarily. Ignore that fake memory dependency here.
1531 // FIXME: Replace this special casing with a more accurate modelling of
1532 // assume's control dependency.
1533 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1534 if (II->getIntrinsicID() == Intrinsic::assume)
1535 return nullptr;
1536
1538 // Find out what effect this instruction has on memory.
1538 ModRefInfo ModRef = AA->getModRefInfo(I, None);
1539 // The isOrdered check is used to ensure that volatiles end up as defs
1540 // (atomics end up as ModRef right now anyway). Until we separate the
1541 // ordering chain from the memory chain, this enables people to see at least
1542 // some relative ordering to volatiles. Note that getClobberingMemoryAccess
1543 // will still give an answer that bypasses other volatile loads. TODO:
1544 // Separate memory aliasing and ordering into two different chains so that we
1545 // can precisely represent both "what memory will this read/write/is clobbered
1546 // by" and "what instructions can I move this past".
1547 bool Def = isModSet(ModRef) || isOrdered(I);
1548 bool Use = isRefSet(ModRef);
1549
1550 // It's possible for an instruction to not modify memory at all. During
1551 // construction, we ignore them.
1552 if (!Def && !Use)
1553 return nullptr;
1554
1555 MemoryUseOrDef *MUD;
1556 if (Def)
1557 MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1558 else
1559 MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1560 ValueToMemoryAccess[I] = MUD;
1561 return MUD;
1562}
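Illustrative classifications under the Def/Use logic above (assumed IR, not from the source):

  store i32 0, i32* %p             -> MemoryDef  (isModSet)
  %v = load i32, i32* %p           -> MemoryUse  (isRefSet only)
  %w = load volatile i32, i32* %p  -> MemoryDef  (forced by isOrdered)
  %x = add i32 %v, 1               -> nullptr    (neither Def nor Use)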
1563
1564/// \brief Returns true if \p Replacer dominates \p Replacee.
1565bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
1566 const MemoryAccess *Replacee) const {
1567 if (isa<MemoryUseOrDef>(Replacee))
1568 return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
1569 const auto *MP = cast<MemoryPhi>(Replacee);
1570 // For a phi node, the use occurs in the predecessor block of the phi node.
1571 // Since Replacee may occur multiple times in the phi node, we have to check
1572 // each operand to ensure Replacer dominates every operand where Replacee occurs.
1573 for (const Use &Arg : MP->operands()) {
1574 if (Arg.get() != Replacee &&
1575 !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
1576 return false;
1577 }
1578 return true;
1579}
1580
1581/// \brief Properly remove \p MA from all of MemorySSA's lookup tables.
1582void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1583 assert(MA->use_empty() &&
1584 "Trying to remove memory access that still has uses");
1585 BlockNumbering.erase(MA);
1586 if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA))
1587 MUD->setDefiningAccess(nullptr);
1588 // Invalidate our walker's cache if necessary
1589 if (!isa<MemoryUse>(MA))
1590 Walker->invalidateInfo(MA);
1591 // The call below to erase will destroy MA, so we can't change the order in
1592 // which we do things here.
1593 Value *MemoryInst;
1594 if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
1595 MemoryInst = MUD->getMemoryInst();
1596 } else {
1597 MemoryInst = MA->getBlock();
1598 }
1599 auto VMA = ValueToMemoryAccess.find(MemoryInst);
1600 if (VMA->second == MA)
1601 ValueToMemoryAccess.erase(VMA);
1602}
1603
1604/// \brief Properly remove \p MA from all of MemorySSA's lists.
1605///
1606/// Because of the way the intrusive list and use lists work, it is important to
1607/// do removal in the right order.
1608/// ShouldDelete defaults to true, and will cause the memory access to also be
1609/// deleted, not just removed.
1610void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1611 // The access list owns the reference, so we erase it from the non-owning list
1612 // first.
1613 if (!isa<MemoryUse>(MA)) {
1614 auto DefsIt = PerBlockDefs.find(MA->getBlock());
1615 std::unique_ptr<DefsList> &Defs = DefsIt->second;
1616 Defs->remove(*MA);
1617 if (Defs->empty())
1618 PerBlockDefs.erase(DefsIt);
1619 }
1620
1621 // The erase call here will delete it. If we don't want it deleted, we call
1622 // remove instead.
1623 auto AccessIt = PerBlockAccesses.find(MA->getBlock());
1624 std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1625 if (ShouldDelete)
1626 Accesses->erase(MA);
1627 else
1628 Accesses->remove(MA);
1629
1630 if (Accesses->empty())
1631 PerBlockAccesses.erase(AccessIt);
1632}
1633
1634void MemorySSA::print(raw_ostream &OS) const {
1635 MemorySSAAnnotatedWriter Writer(this);
1636 F.print(OS, &Writer);
1637}
1638
1639#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1640LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1641#endif
1642
1643void MemorySSA::verifyMemorySSA() const {
1644 verifyDefUses(F);
1645 verifyDomination(F);
1646 verifyOrdering(F);
1647 Walker->verify(this);
1648}
1649
1650/// \brief Verify that the order and existence of MemoryAccesses matches the
1651/// order and existence of memory affecting instructions.
1652void MemorySSA::verifyOrdering(Function &F) const {
1653 // Walk all the blocks, comparing what the lookups think and what the access
1654 // lists think, as well as the order in the blocks vs the order in the access
1655 // lists.
1656 SmallVector<MemoryAccess *, 32> ActualAccesses;
1657 SmallVector<MemoryAccess *, 32> ActualDefs;
1658 for (BasicBlock &B : F) {
1659 const AccessList *AL = getBlockAccesses(&B);
1660 const auto *DL = getBlockDefs(&B);
1661 MemoryAccess *Phi = getMemoryAccess(&B);
1662 if (Phi) {
1663 ActualAccesses.push_back(Phi);
1664 ActualDefs.push_back(Phi);
1665 }
1666
1667 for (Instruction &I : B) {
1668 MemoryAccess *MA = getMemoryAccess(&I);
1669 assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
1670 "We have memory affecting instructions "
1671 "in this block but they are not in the "
1672 "access list or defs list");
1673 if (MA) {
1674 ActualAccesses.push_back(MA);
1675 if (isa<MemoryDef>(MA))
1676 ActualDefs.push_back(MA);
1677 }
1678 }
1679 // Either we hit the assert, really have no accesses, or we have both
1680 // accesses and an access list.
1681 // Same with defs.
1682 if (!AL && !DL)
1683 continue;
1684 assert(AL->size() == ActualAccesses.size() &&
1685 "We don't have the same number of accesses in the block as on the "
1686 "access list");
1687 assert((DL || ActualDefs.size() == 0) &&
1688 "Either we should have a defs list, or we should have no defs");
1689 assert((!DL || DL->size() == ActualDefs.size()) &&
1690 "We don't have the same number of defs in the block as on the "
1691 "def list");
1692 auto ALI = AL->begin();
1693 auto AAI = ActualAccesses.begin();
1694 while (ALI != AL->end() && AAI != ActualAccesses.end()) {
1695 assert(&*ALI == *AAI && "Not the same accesses in the same order");
1696 ++ALI;
1697 ++AAI;
1698 }
1699 ActualAccesses.clear();
1700 if (DL) {
1701 auto DLI = DL->begin();
1702 auto ADI = ActualDefs.begin();
1703 while (DLI != DL->end() && ADI != ActualDefs.end()) {
1704 assert(&*DLI == *ADI && "Not the same defs in the same order");
1705 ++DLI;
1706 ++ADI;
1707 }
1708 }
1709 ActualDefs.clear();
1710 }
1711}
1712
1713/// \brief Verify the domination properties of MemorySSA by checking that each
1714/// definition dominates all of its uses.
1715void MemorySSA::verifyDomination(Function &F) const {
1716#ifndef NDEBUG
1717 for (BasicBlock &B : F) {
1718 // Phi nodes are attached to basic blocks
1719 if (MemoryPhi *MP = getMemoryAccess(&B))
1720 for (const Use &U : MP->uses())
1721 assert(dominates(MP, U) && "Memory PHI does not dominate its uses");
1722
1723 for (Instruction &I : B) {
1724 MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
1725 if (!MD)
1726 continue;
1727
1728 for (const Use &U : MD->uses())
1729 assert(dominates(MD, U) && "Memory Def does not dominate its uses");
1730 }
1731 }
1732#endif
1733}
1734
1735/// \brief Verify the def-use lists in MemorySSA, by verifying that \p Use
1736/// appears in the use list of \p Def.
1737void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
1738#ifndef NDEBUG
1739 // The live on entry use may cause us to get a NULL def here
1740 if (!Def)
1741 assert(isLiveOnEntryDef(Use) &&
1742 "Null def but use does not point to live on entry def");
1743 else
1744 assert(is_contained(Def->users(), Use) &&
1745 "Did not find use in def's use list");
1746#endif
1747}
1748
1749/// \brief Verify the immediate use information, by walking all the memory
1750/// accesses and verifying that, for each use, it appears in the
1751/// appropriate def's use list
1752void MemorySSA::verifyDefUses(Function &F) const {
1753 for (BasicBlock &B : F) {
1754 // Phi nodes are attached to basic blocks
1755 if (MemoryPhi *Phi = getMemoryAccess(&B)) {
1756 assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
1757 pred_begin(&B), pred_end(&B))) &&
1758 "Incomplete MemoryPhi Node");
1759 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
1760 verifyUseInDefs(Phi->getIncomingValue(I), Phi);
1761 }
1762
1763 for (Instruction &I : B) {
1764 if (MemoryUseOrDef *MA = getMemoryAccess(&I)) {
1765 verifyUseInDefs(MA->getDefiningAccess(), MA);
1766 }
1767 }
1768 }
1769}
1770
1771MemoryUseOrDef *MemorySSA::getMemoryAccess(const Instruction *I) const {
1772 return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
1773}
1774
1775MemoryPhi *MemorySSA::getMemoryAccess(const BasicBlock *BB) const {
1776 return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
1777}
1778
1779/// Perform a local numbering on blocks so that instruction ordering can be
1780/// determined in constant time.
1781/// TODO: We currently just number in order. If we numbered by N, we could
1782/// allow at least N-1 sequences of insertBefore or insertAfter (and at least
1783/// log2(N) sequences of mixed before and after) without needing to invalidate
1784/// the numbering.
1785void MemorySSA::renumberBlock(const BasicBlock *B) const {
1786 // The pre-increment ensures the numbers really start at 1.
1787 unsigned long CurrentNumber = 0;
1788 const AccessList *AL = getBlockAccesses(B);
1789 assert(AL != nullptr && "Asking to renumber an empty block");
1790 for (const auto &I : *AL)
1791 BlockNumbering[&I] = ++CurrentNumber;
1792 BlockNumberingValid.insert(B);
1793}
1794
1795/// \brief Determine, for two memory accesses in the same block,
1796/// whether \p Dominator dominates \p Dominatee.
1797/// \returns True if \p Dominator dominates \p Dominatee.
1798bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
1799 const MemoryAccess *Dominatee) const {
1800 const BasicBlock *DominatorBlock = Dominator->getBlock();
1801
1802 assert((DominatorBlock == Dominatee->getBlock()) &&
1803 "Asking for local domination when accesses are in different blocks!");
1804 // A node dominates itself.
1805 if (Dominatee == Dominator)
1806 return true;
1807
1808 // When Dominatee is defined on function entry, it is not dominated by another
1809 // memory access.
1810 if (isLiveOnEntryDef(Dominatee))
1811 return false;
1812
1813 // When Dominator is defined on function entry, it dominates the other memory
1814 // access.
1815 if (isLiveOnEntryDef(Dominator))
1816 return true;
1817
1818 if (!BlockNumberingValid.count(DominatorBlock))
1819 renumberBlock(DominatorBlock);
1820
1821 unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
1822 // All numbers start with 1
1823 assert(DominatorNum != 0 && "Block was not numbered properly");
1824 unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
1825 assert(DominateeNum != 0 && "Block was not numbered properly");
1826 return DominatorNum < DominateeNum;
1827}
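Concretely (illustrative numbers): if a block's access list is [MemoryPhi, MemoryDef, MemoryUse], renumberBlock assigns them 1, 2, and 3, and locallyDominates reduces to a single integer comparison: the phi (1) locally dominates the def (2), which in turn locally dominates the use (3).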
1828
1829bool MemorySSA::dominates(const MemoryAccess *Dominator,
1830 const MemoryAccess *Dominatee) const {
1831 if (Dominator == Dominatee)
1832 return true;
1833
1834 if (isLiveOnEntryDef(Dominatee))
1835 return false;
1836
1837 if (Dominator->getBlock() != Dominatee->getBlock())
1838 return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
1839 return locallyDominates(Dominator, Dominatee);
1840}
1841
1842bool MemorySSA::dominates(const MemoryAccess *Dominator,
1843 const Use &Dominatee) const {
1844 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
1845 BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
1846 // The def must dominate the incoming block of the phi.
1847 if (UseBB != Dominator->getBlock())
1848 return DT->dominates(Dominator->getBlock(), UseBB);
1849 // If the UseBB and the DefBB are the same, compare locally.
1850 return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
1851 }
1852 // If it's not a PHI node use, the normal dominates can already handle it.
1853 return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
1854}
1855
1856const static char LiveOnEntryStr[] = "liveOnEntry";
1857
1858void MemoryAccess::print(raw_ostream &OS) const {
1859 switch (getValueID()) {
1860 case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
1861 case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
1862 case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
1863 }
1864 llvm_unreachable("invalid value id");
1865}
1866
1867void MemoryDef::print(raw_ostream &OS) const {
1868 MemoryAccess *UO = getDefiningAccess();
1869
1870 OS << getID() << " = MemoryDef(";
1871 if (UO && UO->getID())
1872 OS << UO->getID();
1873 else
1874 OS << LiveOnEntryStr;
1875 OS << ')';
1876}
1877
1878void MemoryPhi::print(raw_ostream &OS) const {
1879 bool First = true;
1880 OS << getID() << " = MemoryPhi(";
1881 for (const auto &Op : operands()) {
1882 BasicBlock *BB = getIncomingBlock(Op);
1883 MemoryAccess *MA = cast<MemoryAccess>(Op);
1884 if (!First)
1885 OS << ',';
1886 else
1887 First = false;
1888
1889 OS << '{';
1890 if (BB->hasName())
1891 OS << BB->getName();
1892 else
1893 BB->printAsOperand(OS, false);
1894 OS << ',';
1895 if (unsigned ID = MA->getID())
1896 OS << ID;
1897 else
1898 OS << LiveOnEntryStr;
1899 OS << '}';
1900 }
1901 OS << ')';
1902}
1903
1904void MemoryUse::print(raw_ostream &OS) const {
1905 MemoryAccess *UO = getDefiningAccess();
1906 OS << "MemoryUse(";
1907 if (UO && UO->getID())
1908 OS << UO->getID();
1909 else
1910 OS << LiveOnEntryStr;
1911 OS << ')';
1912}
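Taken together, the three printers above produce annotations of the following shapes (example IDs and block names):

  1 = MemoryDef(liveOnEntry)
  2 = MemoryPhi({entry,1},{loop,3})
  MemoryUse(2)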
1913
1914void MemoryAccess::dump() const {
1915// Cannot completely remove virtual function even in release mode.
1916#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1917 print(dbgs());
1918 dbgs() << "\n";
1919#endif
1920}
1921
1922char MemorySSAPrinterLegacyPass::ID = 0;
1923
1924MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
1925 initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
1926}
1927
1928void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
1929 AU.setPreservesAll();
1930 AU.addRequired<MemorySSAWrapperPass>();
1931}
1932
1933bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
1934 auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
1935 MSSA.print(dbgs());
1936 if (VerifyMemorySSA)
1937 MSSA.verifyMemorySSA();
1938 return false;
1939}
1940
1941AnalysisKey MemorySSAAnalysis::Key;
1942
1943MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
1944 FunctionAnalysisManager &AM) {
1945 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
1946 auto &AA = AM.getResult<AAManager>(F);
1947 return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT));
1948}
1949
1950PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
1951 FunctionAnalysisManager &AM) {
1952 OS << "MemorySSA for function: " << F.getName() << "\n";
1953 AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);
1954
1955 return PreservedAnalyses::all();
1956}
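Assuming the usual pass registrations for these printers (they live elsewhere in the tree), the output can be requested from opt; for example, with the new pass manager:

  opt -disable-output -passes='print<memoryssa>' input.ll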
1957
1958PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
1959 FunctionAnalysisManager &AM) {
1960 AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
1961
1962 return PreservedAnalyses::all();
1963}
1964
1965char MemorySSAWrapperPass::ID = 0;
1966
1967MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
1968 initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
1969}
1970
1971void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
1972
1973void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1974 AU.setPreservesAll();
1975 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
1976 AU.addRequiredTransitive<AAResultsWrapperPass>();
1977}
1978
1979bool MemorySSAWrapperPass::runOnFunction(Function &F) {
1980 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1981 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
1982 MSSA.reset(new MemorySSA(F, &AA, &DT));
1983 return false;
1984}
1985
1986void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }
1987
1988void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
1989 MSSA->print(OS);
1990}
1991
1992MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
1993
1994MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,
1995 DominatorTree *D)
1996 : MemorySSAWalker(M), Walker(*M, *A, *D) {}
1997
1998void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {
1999 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
2000 MUD->resetOptimized();
2001}
2002
2003/// \brief Walk the use-def chains starting at \p MA and find
2004/// the MemoryAccess that actually clobbers Loc.
2005///
2006/// \returns our clobbering memory access
2007MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
2008 MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) {
2009 return Walker.findClobber(StartingAccess, Q);
2010}
2011
2012MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
2013 MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
2014 if (isa<MemoryPhi>(StartingAccess))
2015 return StartingAccess;
2016
2017 auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
2018 if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
2019 return StartingUseOrDef;
2020
2021 Instruction *I = StartingUseOrDef->getMemoryInst();
2022
2023 // Conservatively, fences are always clobbers, so don't perform the walk if we
2024 // hit a fence.
2025 if (!ImmutableCallSite(I) && I->isFenceLike())
2026 return StartingUseOrDef;
2027
2028 UpwardsMemoryQuery Q;
2029 Q.OriginalAccess = StartingUseOrDef;
2030 Q.StartingLoc = Loc;
2031 Q.Inst = I;
2032 Q.IsCall = false;
2033
2034 // Unlike the other function, do not walk to the def of a def, because we are
2035 // handed something we already believe is the clobbering access.
2036 MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
2037 ? StartingUseOrDef->getDefiningAccess()
2038 : StartingUseOrDef;
2039
2040 MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
2041 DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2042 DEBUG(dbgs() << *StartingUseOrDef << "\n");
2043 DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
2044 DEBUG(dbgs() << *Clobber << "\n");
2045 return Clobber;
2046}
2047
2048MemoryAccess *
2049MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2050 auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2051 // If this is a MemoryPhi, we can't do anything.
2052 if (!StartingAccess)
2053 return MA;
2054
2055 // If this is an already optimized use or def, return the optimized result.
2056 // Note: Currently, we store the optimized def result in a separate field,
2057 // since we can't use the defining access.
2058 if (StartingAccess->isOptimized())
2059 return StartingAccess->getOptimized();
2060
2061 const Instruction *I = StartingAccess->getMemoryInst();
2062 UpwardsMemoryQuery Q(I, StartingAccess);
2063 // We can't sanely do anything with a fence, since they conservatively clobber
2064 // all memory, and have no locations to get pointers from to try to
2065 // disambiguate.
2066 if (!Q.IsCall && I->isFenceLike())
2067 return StartingAccess;
2068
2069 if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) {
2070 MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2071 StartingAccess->setOptimized(LiveOnEntry);
2072 StartingAccess->setOptimizedAccessType(None);
2073 return LiveOnEntry;
2074 }
2075
2076 // Start with the thing we already think clobbers this location
2077 MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2078
2079 // At this point, DefiningAccess may be the live on entry def.
2080 // If it is, we will not get a better result.
2081 if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
2082 StartingAccess->setOptimized(DefiningAccess);
2083 StartingAccess->setOptimizedAccessType(None);
2084 return DefiningAccess;
2085 }
2086
2087 MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
2088 DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2089 DEBUG(dbgs() << *DefiningAccess << "\n");
2090 DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
2091 DEBUG(dbgs() << *Result << "\n");
2092
2093 StartingAccess->setOptimized(Result);
2094 if (MSSA->isLiveOnEntryDef(Result))
2095 StartingAccess->setOptimizedAccessType(None);
2096 else if (Q.AR == MustAlias)
2097 StartingAccess->setOptimizedAccessType(MustAlias);
2098
2099 return Result;
2100}
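A sketch of how a client drives this walker, using only APIs that appear in this file; the surrounding setup (F, AA, DT, and an instruction I) is assumed:

  MemorySSA MSSA(F, &AA, &DT);            // build MemorySSA for F
  MemorySSAWalker *W = MSSA.getWalker();  // lazily-created caching walker
  if (MemoryUseOrDef *MA = MSSA.getMemoryAccess(&I)) {
    MemoryAccess *Clobber = W->getClobberingMemoryAccess(MA);
    // The result is cached via setOptimized, so a repeated query returns
    // getOptimized() without re-walking.
  }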
2101
2102MemoryAccess *
2103DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2104 if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2105 return Use->getDefiningAccess();
2106 return MA;
2107}
2108
2109MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2110 MemoryAccess *StartingAccess, const MemoryLocation &) {
2111 if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2112 return Use->getDefiningAccess();
2113 return StartingAccess;
2114}
2115
2116void MemoryPhi::deleteMe(DerivedUser *Self) {
2117 delete static_cast<MemoryPhi *>(Self);
2118}
2119
2120void MemoryDef::deleteMe(DerivedUser *Self) {
2121 delete static_cast<MemoryDef *>(Self);
2122}
2123
2124void MemoryUse::deleteMe(DerivedUser *Self) {
2125 delete static_cast<MemoryUse *>(Self);
2126}