MemorySSA.cpp
1 //===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the MemorySSA class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Analysis/MemorySSA.h"
15 #include "llvm/ADT/DenseMap.h"
16 #include "llvm/ADT/DenseMapInfo.h"
17 #include "llvm/ADT/DenseSet.h"
18 #include "llvm/ADT/DepthFirstIterator.h"
19 #include "llvm/ADT/Hashing.h"
20 #include "llvm/ADT/None.h"
21 #include "llvm/ADT/Optional.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/ADT/SmallPtrSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/iterator.h"
26 #include "llvm/ADT/iterator_range.h"
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Analysis/IteratedDominanceFrontier.h"
29 #include "llvm/Analysis/MemoryLocation.h"
30 #include "llvm/IR/AssemblyAnnotationWriter.h"
31 #include "llvm/IR/BasicBlock.h"
32 #include "llvm/IR/CallSite.h"
33 #include "llvm/IR/Dominators.h"
34 #include "llvm/IR/Function.h"
35 #include "llvm/IR/Instruction.h"
36 #include "llvm/IR/Instructions.h"
37 #include "llvm/IR/IntrinsicInst.h"
38 #include "llvm/IR/Intrinsics.h"
39 #include "llvm/IR/LLVMContext.h"
40 #include "llvm/IR/PassManager.h"
41 #include "llvm/IR/Use.h"
42 #include "llvm/Pass.h"
43 #include "llvm/Support/AtomicOrdering.h"
44 #include "llvm/Support/Casting.h"
45 #include "llvm/Support/CommandLine.h"
46 #include "llvm/Support/Compiler.h"
47 #include "llvm/Support/Debug.h"
48 #include "llvm/Support/ErrorHandling.h"
49 #include "llvm/Support/FormattedStream.h"
50 #include "llvm/Support/raw_ostream.h"
51 #include <algorithm>
52 #include <cassert>
53 #include <iterator>
54 #include <memory>
55 #include <utility>
56 
57 using namespace llvm;
58 
59 #define DEBUG_TYPE "memoryssa"
60 
61 INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
62  true)
63 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
64 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
65 INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
66  true)
67 
68 INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
69  "Memory SSA Printer", false, false)
70 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
71 INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
72  "Memory SSA Printer", false, false)
73 
74 static cl::opt<unsigned> MaxCheckLimit(
75  "memssa-check-limit", cl::Hidden, cl::init(100),
76  cl::desc("The maximum number of stores/phis MemorySSA "
77  "will consider trying to walk past (default = 100)"));
78 
79 static cl::opt<bool>
80  VerifyMemorySSA("verify-memoryssa", cl::init(false), cl::Hidden,
81  cl::desc("Verify MemorySSA in legacy printer pass."));
82 
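// Illustrative note (not part of the original MemorySSA.cpp): these knobs are
// typically exercised from `opt`; only the flag and pass names come from this
// file, the invocation itself is an assumption, e.g.
//   opt -print-memoryssa -verify-memoryssa -disable-output test.ll
//   opt -print-memoryssa -memssa-check-limit=50 -disable-output test.ll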
83 namespace llvm {
84 
85 /// \brief An assembly annotator class to print Memory SSA information in
86 /// comments.
87 class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
88  friend class MemorySSA;
89 
90  const MemorySSA *MSSA;
91 
92 public:
93  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}
94 
95  void emitBasicBlockStartAnnot(const BasicBlock *BB,
96  formatted_raw_ostream &OS) override {
97  if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
98  OS << "; " << *MA << "\n";
99  }
100 
101  void emitInstructionAnnot(const Instruction *I,
102  formatted_raw_ostream &OS) override {
103  if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
104  OS << "; " << *MA << "\n";
105  }
106 };
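// Illustrative sketch (not part of the original file): the kind of comments
// this writer emits when MemorySSA::print runs over a simple function. The
// exact numbering depends on the order in which accesses are created.
//
//   entry:
//   ; 1 = MemoryDef(liveOnEntry)
//     store i32 0, i32* %p
//   ; MemoryUse(1)
//     %v = load i32, i32* %p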
107 
108 } // end namespace llvm
109 
110 namespace {
111 
112 /// Our current alias analysis API differentiates heavily between calls and
113 /// non-calls, and functions called on one usually assert on the other.
114 /// This class encapsulates the distinction to simplify other code that wants
115 /// "Memory affecting instructions and related data" to use as a key.
116 /// For example, this class is used as a densemap key in the use optimizer.
117 class MemoryLocOrCall {
118 public:
119  bool IsCall = false;
120 
121  MemoryLocOrCall() = default;
122  MemoryLocOrCall(MemoryUseOrDef *MUD)
123  : MemoryLocOrCall(MUD->getMemoryInst()) {}
124  MemoryLocOrCall(const MemoryUseOrDef *MUD)
125  : MemoryLocOrCall(MUD->getMemoryInst()) {}
126 
127  MemoryLocOrCall(Instruction *Inst) {
128  if (ImmutableCallSite(Inst)) {
129  IsCall = true;
130  CS = ImmutableCallSite(Inst);
131  } else {
132  IsCall = false;
133  // There is no such thing as a memorylocation for a fence inst, and it is
134  // unique in that regard.
135  if (!isa<FenceInst>(Inst))
136  Loc = MemoryLocation::get(Inst);
137  }
138  }
139 
140  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}
141 
142  ImmutableCallSite getCS() const {
143  assert(IsCall);
144  return CS;
145  }
146 
147  MemoryLocation getLoc() const {
148  assert(!IsCall);
149  return Loc;
150  }
151 
152  bool operator==(const MemoryLocOrCall &Other) const {
153  if (IsCall != Other.IsCall)
154  return false;
155 
156  if (IsCall)
157  return CS.getCalledValue() == Other.CS.getCalledValue();
158  return Loc == Other.Loc;
159  }
160 
161 private:
162  union {
163  ImmutableCallSite CS;
164  MemoryLocation Loc;
165  };
166 };
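// Illustrative sketch (not part of the original file; relies on the
// DenseMapInfo specialization that follows): how the use optimizer keys
// per-location state on MemoryLocOrCall. `StackInfo` is a hypothetical value
// type standing in for MemlocStackInfo defined later in this file.
//
//   DenseMap<MemoryLocOrCall, StackInfo> LocStackInfo;
//   MemoryLocOrCall Key(MU);             // MU is a MemoryUseOrDef *
//   StackInfo &Info = LocStackInfo[Key]; // calls and locations share one map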
167 
168 } // end anonymous namespace
169 
170 namespace llvm {
171 
172 template <> struct DenseMapInfo<MemoryLocOrCall> {
173  static inline MemoryLocOrCall getEmptyKey() {
174  return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
175  }
176 
177  static inline MemoryLocOrCall getTombstoneKey() {
178  return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
179  }
180 
181  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
182  if (MLOC.IsCall)
183  return hash_combine(MLOC.IsCall,
184  DenseMapInfo<const Value *>::getHashValue(
185  MLOC.getCS().getCalledValue()));
186  return hash_combine(
187  MLOC.IsCall, DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));
188  }
189 
190  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
191  return LHS == RHS;
192  }
193 };
194 
195 } // end namespace llvm
196 
197 /// This does one-way checks to see if Use could theoretically be hoisted above
198 /// MayClobber. This will not check the other way around.
199 ///
200 /// This assumes that, for the purposes of MemorySSA, Use comes directly after
201 /// MayClobber, with no potentially clobbering operations in between them.
202 /// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
203 static bool areLoadsReorderable(const LoadInst *Use,
204  const LoadInst *MayClobber) {
205  bool VolatileUse = Use->isVolatile();
206  bool VolatileClobber = MayClobber->isVolatile();
207  // Volatile operations may never be reordered with other volatile operations.
208  if (VolatileUse && VolatileClobber)
209  return false;
210  // Otherwise, volatile doesn't matter here. From the language reference:
211  // 'optimizers may change the order of volatile operations relative to
212  // non-volatile operations.'
213 
214  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
215  // is weaker, it can be moved above other loads. We just need to be sure that
216  // MayClobber isn't an acquire load, because loads can't be moved above
217  // acquire loads.
218  //
219  // Note that this explicitly *does* allow the free reordering of monotonic (or
220  // weaker) loads of the same address.
221  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
222  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
223  AtomicOrdering::Acquire);
224  return !(SeqCstUse || MayClobberIsAcquire);
225 }
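// Summary of the rule above (may Use be hoisted over MayClobber?), added for
// clarity and derived only from the code and comments in this function:
//   both volatile                     -> no
//   Use is seq_cst                    -> no
//   MayClobber is acquire or stronger -> no
//   otherwise (e.g. both monotonic)   -> yes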
226 
227 static bool instructionClobbersQuery(MemoryDef *MD,
228  const MemoryLocation &UseLoc,
229  const Instruction *UseInst,
230  AliasAnalysis &AA) {
231  Instruction *DefInst = MD->getMemoryInst();
232  assert(DefInst && "Defining instruction not actually an instruction");
233  ImmutableCallSite UseCS(UseInst);
234 
235  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
236  // These intrinsics will show up as affecting memory, but they are just
237  // markers.
238  switch (II->getIntrinsicID()) {
239  case Intrinsic::lifetime_start:
240  if (UseCS)
241  return false;
242  return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), UseLoc);
243  case Intrinsic::lifetime_end:
244  case Intrinsic::invariant_start:
245  case Intrinsic::invariant_end:
246  case Intrinsic::assume:
247  return false;
248  default:
249  break;
250  }
251  }
252 
253  if (UseCS) {
254  ModRefInfo I = AA.getModRefInfo(DefInst, UseCS);
255  return isModOrRefSet(I);
256  }
257 
258  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
259  if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
260  return !areLoadsReorderable(UseLoad, DefLoad);
261 
262  return isModSet(AA.getModRefInfo(DefInst, UseLoc));
263 }
264 
265 static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUseOrDef *MU,
266  const MemoryLocOrCall &UseMLOC,
267  AliasAnalysis &AA) {
268  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
269  // to exist while MemoryLocOrCall is pushed through places.
270  if (UseMLOC.IsCall)
271  return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
272  AA);
273  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
274  AA);
275 }
276 
277 // Return true when MD may alias MU, return false otherwise.
278 bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
279  AliasAnalysis &AA) {
280  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA);
281 }
282 
283 namespace {
284 
285 struct UpwardsMemoryQuery {
286  // True if our original query started off as a call
287  bool IsCall = false;
288  // The pointer location we started the query with. This will be empty if
289  // IsCall is true.
290  MemoryLocation StartingLoc;
291  // This is the instruction we were querying about.
292  const Instruction *Inst = nullptr;
293  // The MemoryAccess we actually got called with, used to test local domination
294  const MemoryAccess *OriginalAccess = nullptr;
295 
296  UpwardsMemoryQuery() = default;
297 
298  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
299  : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) {
300  if (!IsCall)
301  StartingLoc = MemoryLocation::get(Inst);
302  }
303 };
304 
305 } // end anonymous namespace
306 
307 static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
308  AliasAnalysis &AA) {
309  Instruction *Inst = MD->getMemoryInst();
310  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
311  switch (II->getIntrinsicID()) {
312  case Intrinsic::lifetime_end:
313  return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
314  default:
315  return false;
316  }
317  }
318  return false;
319 }
320 
321 static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
322  const Instruction *I) {
323  // If the memory can't be changed, then loads of the memory can't be
324  // clobbered.
325  //
326  // FIXME: We should handle invariant groups, as well. It's a bit harder,
327  // because we need to pay close attention to invariant group barriers.
328  return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
329  AA.pointsToConstantMemory(cast<LoadInst>(I)->
330  getPointerOperand()));
331 }
332 
333 /// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
334 /// in between `Start` and `ClobberAt` can clobber `Start`.
335 ///
336 /// This is meant to be as simple and self-contained as possible. Because it
337 /// uses no cache, etc., it can be relatively expensive.
338 ///
339 /// \param Start The MemoryAccess that we want to walk from.
340 /// \param ClobberAt A clobber for Start.
341 /// \param StartLoc The MemoryLocation for Start.
342 /// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
343 /// \param Query The UpwardsMemoryQuery we used for our search.
344 /// \param AA The AliasAnalysis we used for our search.
345 static void LLVM_ATTRIBUTE_UNUSED
346 checkClobberSanity(MemoryAccess *Start, MemoryAccess *ClobberAt,
347  const MemoryLocation &StartLoc, const MemorySSA &MSSA,
348  const UpwardsMemoryQuery &Query, AliasAnalysis &AA) {
349  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
350 
351  if (MSSA.isLiveOnEntryDef(Start)) {
352  assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
353  "liveOnEntry must clobber itself");
354  return;
355  }
356 
357  bool FoundClobber = false;
358  DenseSet<MemoryAccessPair> VisitedPhis;
359  SmallVector<MemoryAccessPair, 8> Worklist;
360  Worklist.emplace_back(Start, StartLoc);
361  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
362  // is found, complain.
363  while (!Worklist.empty()) {
364  MemoryAccessPair MAP = Worklist.pop_back_val();
365  // All we care about is that nothing from Start to ClobberAt clobbers Start.
366  // We learn nothing from revisiting nodes.
367  if (!VisitedPhis.insert(MAP).second)
368  continue;
369 
370  for (MemoryAccess *MA : def_chain(MAP.first)) {
371  if (MA == ClobberAt) {
372  if (auto *MD = dyn_cast<MemoryDef>(MA)) {
373  // instructionClobbersQuery isn't essentially free, so don't use `|=`,
374  // since it won't let us short-circuit.
375  //
376  // Also, note that this can't be hoisted out of the `Worklist` loop,
377  // since MD may only act as a clobber for 1 of N MemoryLocations.
378  FoundClobber =
379  FoundClobber || MSSA.isLiveOnEntryDef(MD) ||
380  instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
381  }
382  break;
383  }
384 
385  // We should never hit liveOnEntry, unless it's the clobber.
386  assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");
387 
388  if (auto *MD = dyn_cast<MemoryDef>(MA)) {
389  (void)MD;
390  assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) &&
391  "Found clobber before reaching ClobberAt!");
392  continue;
393  }
394 
395  assert(isa<MemoryPhi>(MA));
396  Worklist.append(upward_defs_begin({MA, MAP.second}), upward_defs_end());
397  }
398  }
399 
400  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
401  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
402  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
403  "ClobberAt never acted as a clobber");
404 }
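// Note (illustrative, not part of the original file): this sanity check is
// only wired up under EXPENSIVE_CHECKS (see ClobberWalker::findClobber
// below), e.g. in a tree configured with
//   cmake -DLLVM_ENABLE_EXPENSIVE_CHECKS=ON ...
// The cmake invocation is an assumption; the #ifdef use is in this file.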
405 
406 namespace {
407 
408 /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
409 /// in one class.
410 class ClobberWalker {
411  /// Save a few bytes by using unsigned instead of size_t.
412  using ListIndex = unsigned;
413 
414  /// Represents a span of contiguous MemoryDefs, potentially ending in a
415  /// MemoryPhi.
416  struct DefPath {
417  MemoryLocation Loc;
418  // Note that, because we always walk in reverse, Last will always dominate
419  // First. Also note that First and Last are inclusive.
420  MemoryAccess *First;
421  MemoryAccess *Last;
422  Optional<ListIndex> Previous;
423 
424  DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
425  Optional<ListIndex> Previous)
426  : Loc(Loc), First(First), Last(Last), Previous(Previous) {}
427 
428  DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
429  Optional<ListIndex> Previous)
430  : DefPath(Loc, Init, Init, Previous) {}
431  };
432 
433  const MemorySSA &MSSA;
434  AliasAnalysis &AA;
435  DominatorTree &DT;
436  UpwardsMemoryQuery *Query;
437 
438  // Phi optimization bookkeeping
439  SmallVector<DefPath, 32> Paths;
440  DenseSet<ConstMemoryAccessPair> VisitedPhis;
441 
442  /// Find the nearest def or phi that `From` can legally be optimized to.
443  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
444  assert(From->getNumOperands() && "Phi with no operands?");
445 
446  BasicBlock *BB = From->getBlock();
447  MemoryAccess *Result = MSSA.getLiveOnEntryDef();
448  DomTreeNode *Node = DT.getNode(BB);
449  while ((Node = Node->getIDom())) {
450  auto *Defs = MSSA.getBlockDefs(Node->getBlock());
451  if (Defs)
452  return &*Defs->rbegin();
453  }
454  return Result;
455  }
456 
457  /// Result of calling walkToPhiOrClobber.
458  struct UpwardsWalkResult {
459  /// The "Result" of the walk. Either a clobber, the last thing we walked, or
460  /// both.
461  MemoryAccess *Result;
462  bool IsKnownClobber;
463  };
464 
465  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
466  /// This will update Desc.Last as it walks. It will (optionally) also stop at
467  /// StopAt.
468  ///
469  /// This does not test for whether StopAt is a clobber.
470  UpwardsWalkResult
471  walkToPhiOrClobber(DefPath &Desc,
472  const MemoryAccess *StopAt = nullptr) const {
473  assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
474 
475  for (MemoryAccess *Current : def_chain(Desc.Last)) {
476  Desc.Last = Current;
477  if (Current == StopAt)
478  return {Current, false};
479 
480  if (auto *MD = dyn_cast<MemoryDef>(Current))
481  if (MSSA.isLiveOnEntryDef(MD) ||
482  instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA))
483  return {MD, true};
484  }
485 
486  assert(isa<MemoryPhi>(Desc.Last) &&
487  "Ended at a non-clobber that's not a phi?");
488  return {Desc.Last, false};
489  }
490 
491  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
492  ListIndex PriorNode) {
493  auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
494  upward_defs_end());
495  for (const MemoryAccessPair &P : UpwardDefs) {
496  PausedSearches.push_back(Paths.size());
497  Paths.emplace_back(P.second, P.first, PriorNode);
498  }
499  }
500 
501  /// Represents a search that terminated after finding a clobber. This clobber
502  /// may or may not be present in the path of defs from LastNode..SearchStart,
503  /// since it may have been retrieved from cache.
504  struct TerminatedPath {
505  MemoryAccess *Clobber;
506  ListIndex LastNode;
507  };
508 
509  /// Get an access that keeps us from optimizing to the given phi.
510  ///
511  /// PausedSearches is an array of indices into the Paths array. Its incoming
512  /// value is the indices of searches that stopped at the last phi optimization
513  /// target. It's left in an unspecified state.
514  ///
515  /// If this returns None, NewPaused is a vector of searches that terminated
516  /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
517  Optional<TerminatedPath>
518  getBlockingAccess(const MemoryAccess *StopWhere,
519  SmallVectorImpl<ListIndex> &PausedSearches,
520  SmallVectorImpl<ListIndex> &NewPaused,
521  SmallVectorImpl<TerminatedPath> &Terminated) {
522  assert(!PausedSearches.empty() && "No searches to continue?");
523 
524  // BFS vs DFS really doesn't make a difference here, so just do a DFS with
525  // PausedSearches as our stack.
526  while (!PausedSearches.empty()) {
527  ListIndex PathIndex = PausedSearches.pop_back_val();
528  DefPath &Node = Paths[PathIndex];
529 
530  // If we've already visited this path with this MemoryLocation, we don't
531  // need to do so again.
532  //
533  // NOTE: That we just drop these paths on the ground makes caching
534  // behavior sporadic. e.g. given a diamond:
535  // A
536  // B C
537  // D
538  //
539  // ...If we walk D, B, A, C, we'll only cache the result of phi
540  // optimization for A, B, and D; C will be skipped because it dies here.
541  // This arguably isn't the worst thing ever, since:
542  // - We generally query things in a top-down order, so if we got below D
543  // without needing cache entries for {C, MemLoc}, then chances are
544  // that those cache entries would end up ultimately unused.
545  // - We still cache things for A, so C only needs to walk up a bit.
546  // If this behavior becomes problematic, we can fix without a ton of extra
547  // work.
548  if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
549  continue;
550 
551  UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere);
552  if (Res.IsKnownClobber) {
553  assert(Res.Result != StopWhere);
554  // If this wasn't a cache hit, we hit a clobber when walking. That's a
555  // failure.
556  TerminatedPath Term{Res.Result, PathIndex};
557  if (!MSSA.dominates(Res.Result, StopWhere))
558  return Term;
559 
560  // Otherwise, it's a valid thing to potentially optimize to.
561  Terminated.push_back(Term);
562  continue;
563  }
564 
565  if (Res.Result == StopWhere) {
566  // We've hit our target. Save this path off for if we want to continue
567  // walking.
568  NewPaused.push_back(PathIndex);
569  continue;
570  }
571 
572  assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
573  addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
574  }
575 
576  return None;
577  }
578 
579  template <typename T, typename Walker>
580  struct generic_def_path_iterator
581  : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
582  std::forward_iterator_tag, T *> {
583  generic_def_path_iterator() = default;
584  generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}
585 
586  T &operator*() const { return curNode(); }
587 
588  generic_def_path_iterator &operator++() {
589  N = curNode().Previous;
590  return *this;
591  }
592 
593  bool operator==(const generic_def_path_iterator &O) const {
594  if (N.hasValue() != O.N.hasValue())
595  return false;
596  return !N.hasValue() || *N == *O.N;
597  }
598 
599  private:
600  T &curNode() const { return W->Paths[*N]; }
601 
602  Walker *W = nullptr;
603  Optional<ListIndex> N = None;
604  };
605 
606  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
607  using const_def_path_iterator =
608  generic_def_path_iterator<const DefPath, const ClobberWalker>;
609 
610  iterator_range<def_path_iterator> def_path(ListIndex From) {
611  return make_range(def_path_iterator(this, From), def_path_iterator());
612  }
613 
614  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
615  return make_range(const_def_path_iterator(this, From),
616  const_def_path_iterator());
617  }
618 
619  struct OptznResult {
620  /// The path that contains our result.
621  TerminatedPath PrimaryClobber;
622  /// The paths that we can legally cache back from, but that aren't
623  /// necessarily the result of the Phi optimization.
624  SmallVector<TerminatedPath, 4> OtherClobbers;
625  };
626 
627  ListIndex defPathIndex(const DefPath &N) const {
628  // The assert looks nicer if we don't need to do &N
629  const DefPath *NP = &N;
630  assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
631  "Out of bounds DefPath!");
632  return NP - &Paths.front();
633  }
634 
635  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
636  /// that act as legal clobbers. Note that this won't return *all* clobbers.
637  ///
638  /// Phi optimization algorithm tl;dr:
639  /// - Find the earliest def/phi, A, we can optimize to
640  /// - Find if all paths from the starting memory access ultimately reach A
641  /// - If not, optimization isn't possible.
642  /// - Otherwise, walk from A to another clobber or phi, A'.
643  /// - If A' is a def, we're done.
644  /// - If A' is a phi, try to optimize it.
645  ///
646  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
647  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
648  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
649  const MemoryLocation &Loc) {
650  assert(Paths.empty() && VisitedPhis.empty() &&
651  "Reset the optimization state.");
652 
653  Paths.emplace_back(Loc, Start, Phi, None);
654  // Stores how many "valid" optimization nodes we had prior to calling
655  // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
656  auto PriorPathsSize = Paths.size();
657 
658  SmallVector<ListIndex, 16> PausedSearches;
659  SmallVector<ListIndex, 8> NewPaused;
660  SmallVector<TerminatedPath, 4> TerminatedPaths;
661 
662  addSearches(Phi, PausedSearches, 0);
663 
664  // Moves the TerminatedPath with the "most dominated" Clobber to the end of
665  // Paths.
666  auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
667  assert(!Paths.empty() && "Need a path to move");
668  auto Dom = Paths.begin();
669  for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
670  if (!MSSA.dominates(I->Clobber, Dom->Clobber))
671  Dom = I;
672  auto Last = Paths.end() - 1;
673  if (Last != Dom)
674  std::iter_swap(Last, Dom);
675  };
676 
677  MemoryPhi *Current = Phi;
678  while (true) {
679  assert(!MSSA.isLiveOnEntryDef(Current) &&
680  "liveOnEntry wasn't treated as a clobber?");
681 
682  const auto *Target = getWalkTarget(Current);
683  // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
684  // optimization for the prior phi.
685  assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
686  return MSSA.dominates(P.Clobber, Target);
687  }));
688 
689  // FIXME: This is broken, because the Blocker may be reported to be
690  // liveOnEntry, and we'll happily wait for that to disappear (read: never)
691  // For the moment, this is fine, since we do nothing with blocker info.
692  if (Optional<TerminatedPath> Blocker = getBlockingAccess(
693  Target, PausedSearches, NewPaused, TerminatedPaths)) {
694 
695  // Find the node we started at. We can't search based on N->Last, since
696  // we may have gone around a loop with a different MemoryLocation.
697  auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
698  return defPathIndex(N) < PriorPathsSize;
699  });
700  assert(Iter != def_path_iterator());
701 
702  DefPath &CurNode = *Iter;
703  assert(CurNode.Last == Current);
704 
705  // Two things:
706  // A. We can't reliably cache all of NewPaused back. Consider a case
707  // where we have two paths in NewPaused; one of which can't optimize
708  // above this phi, whereas the other can. If we cache the second path
709  // back, we'll end up with suboptimal cache entries. We can handle
710  // cases like this a bit better when we either try to find all
711  // clobbers that block phi optimization, or when our cache starts
712  // supporting unfinished searches.
713  // B. We can't reliably cache TerminatedPaths back here without doing
714  // extra checks; consider a case like:
715  //      T
716  //     / \
717  //    D   C
718  //     \ /
719  //      S
720  // Where T is our target, C is a node with a clobber on it, D is a
721  // diamond (with a clobber *only* on the left or right node, N), and
722  // S is our start. Say we walk to D, through the node opposite N
723  // (read: ignoring the clobber), and see a cache entry in the top
724  // node of D. That cache entry gets put into TerminatedPaths. We then
725  // walk up to C (N is later in our worklist), find the clobber, and
726  // quit. If we append TerminatedPaths to OtherClobbers, we'll cache
727  // the bottom part of D to the cached clobber, ignoring the clobber
728  // in N. Again, this problem goes away if we start tracking all
729  // blockers for a given phi optimization.
730  TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
731  return {Result, {}};
732  }
733 
734  // If there's nothing left to search, then all paths led to valid clobbers
735  // that we got from our cache; pick the nearest to the start, and allow
736  // the rest to be cached back.
737  if (NewPaused.empty()) {
738  MoveDominatedPathToEnd(TerminatedPaths);
739  TerminatedPath Result = TerminatedPaths.pop_back_val();
740  return {Result, std::move(TerminatedPaths)};
741  }
742 
743  MemoryAccess *DefChainEnd = nullptr;
744  SmallVector<TerminatedPath, 4> Clobbers;
745  for (ListIndex Paused : NewPaused) {
746  UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
747  if (WR.IsKnownClobber)
748  Clobbers.push_back({WR.Result, Paused});
749  else
750  // Micro-opt: If we hit the end of the chain, save it.
751  DefChainEnd = WR.Result;
752  }
753 
754  if (!TerminatedPaths.empty()) {
755  // If we couldn't find the dominating phi/liveOnEntry in the above loop,
756  // do it now.
757  if (!DefChainEnd)
758  for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
759  DefChainEnd = MA;
760 
761  // If any of the terminated paths don't dominate the phi we'll try to
762  // optimize, we need to figure out what they are and quit.
763  const BasicBlock *ChainBB = DefChainEnd->getBlock();
764  for (const TerminatedPath &TP : TerminatedPaths) {
765  // Because we know that DefChainEnd is as "high" as we can go, we
766  // don't need local dominance checks; BB dominance is sufficient.
767  if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
768  Clobbers.push_back(TP);
769  }
770  }
771 
772  // If we have clobbers in the def chain, find the one closest to Current
773  // and quit.
774  if (!Clobbers.empty()) {
775  MoveDominatedPathToEnd(Clobbers);
776  TerminatedPath Result = Clobbers.pop_back_val();
777  return {Result, std::move(Clobbers)};
778  }
779 
780  assert(all_of(NewPaused,
781  [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));
782 
783  // Because liveOnEntry is a clobber, this must be a phi.
784  auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
785 
786  PriorPathsSize = Paths.size();
787  PausedSearches.clear();
788  for (ListIndex I : NewPaused)
789  addSearches(DefChainPhi, PausedSearches, I);
790  NewPaused.clear();
791 
792  Current = DefChainPhi;
793  }
794  }
795 
796  void verifyOptResult(const OptznResult &R) const {
797  assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
798  return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
799  }));
800  }
801 
802  void resetPhiOptznState() {
803  Paths.clear();
804  VisitedPhis.clear();
805  }
806 
807 public:
808  ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT)
809  : MSSA(MSSA), AA(AA), DT(DT) {}
810 
811  void reset() {}
812 
813  /// Finds the nearest clobber for the given query, optimizing phis if
814  /// possible.
815  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) {
816  Query = &Q;
817 
818  MemoryAccess *Current = Start;
819  // This walker pretends uses don't exist. If we're handed one, silently grab
820  // its def. (This has the nice side-effect of ensuring we never cache uses)
821  if (auto *MU = dyn_cast<MemoryUse>(Start))
822  Current = MU->getDefiningAccess();
823 
824  DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
825  // Fast path for the overly-common case (no crazy phi optimization
826  // necessary)
827  UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
828  MemoryAccess *Result;
829  if (WalkResult.IsKnownClobber) {
830  Result = WalkResult.Result;
831  } else {
832  OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
833  Current, Q.StartingLoc);
834  verifyOptResult(OptRes);
835  resetPhiOptznState();
836  Result = OptRes.PrimaryClobber.Clobber;
837  }
838 
839 #ifdef EXPENSIVE_CHECKS
840  checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
841 #endif
842  return Result;
843  }
844 
845  void verify(const MemorySSA *MSSA) { assert(MSSA == &this->MSSA); }
846 };
847 
848 struct RenamePassData {
849  DomTreeNode *DTN;
850  DomTreeNode::const_iterator ChildIt;
851  MemoryAccess *IncomingVal;
852 
853  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
854  MemoryAccess *M)
855  : DTN(D), ChildIt(It), IncomingVal(M) {}
856 
857  void swap(RenamePassData &RHS) {
858  std::swap(DTN, RHS.DTN);
859  std::swap(ChildIt, RHS.ChildIt);
860  std::swap(IncomingVal, RHS.IncomingVal);
861  }
862 };
863 
864 } // end anonymous namespace
865 
866 namespace llvm {
867 
868 /// \brief A MemorySSAWalker that does AA walks to disambiguate accesses. It no
869 /// longer does caching on its own,
870 /// but the name has been retained for the moment.
871 class MemorySSA::CachingWalker final : public MemorySSAWalker {
872  ClobberWalker Walker;
873  bool AutoResetWalker = true;
874 
875  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);
876 
877 public:
878  CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
879  ~CachingWalker() override = default;
880 
881  using MemorySSAWalker::getClobberingMemoryAccess;
882 
883  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
884  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
885  const MemoryLocation &) override;
886  void invalidateInfo(MemoryAccess *) override;
887 
888  /// Whether we call resetClobberWalker() after each time we *actually* walk to
889  /// answer a clobber query.
890  void setAutoResetWalker(bool AutoReset) { AutoResetWalker = AutoReset; }
891 
892  /// Drop the walker's persistent data structures.
893  void resetClobberWalker() { Walker.reset(); }
894 
895  void verify(const MemorySSA *MSSA) override {
896  MemorySSAWalker::verify(MSSA);
897  Walker.verify(MSSA);
898  }
899 };
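// Illustrative usage sketch (not part of the original file): clients reach
// this walker through MemorySSA::getWalker() and ask it for clobbers; `I` is
// a hypothetical Instruction the client cares about.
//
//   MemorySSAWalker *W = MSSA.getWalker();
//   if (MemoryUseOrDef *MA = MSSA.getMemoryAccess(&I)) {
//     MemoryAccess *Clobber = W->getClobberingMemoryAccess(MA);
//     if (MSSA.isLiveOnEntryDef(Clobber))
//       ; // nothing inside the function clobbers I's location
//   }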
900 
901 } // end namespace llvm
902 
903 void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
904  bool RenameAllUses) {
905  // Pass through values to our successors
906  for (const BasicBlock *S : successors(BB)) {
907  auto It = PerBlockAccesses.find(S);
908  // Rename the phi nodes in our successor block
909  if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
910  continue;
911  AccessList *Accesses = It->second.get();
912  auto *Phi = cast<MemoryPhi>(&Accesses->front());
913  if (RenameAllUses) {
914  int PhiIndex = Phi->getBasicBlockIndex(BB);
915  assert(PhiIndex != -1 && "Incomplete phi during partial rename");
916  Phi->setIncomingValue(PhiIndex, IncomingVal);
917  } else
918  Phi->addIncoming(IncomingVal, BB);
919  }
920 }
921 
922 /// \brief Rename a single basic block into MemorySSA form.
923 /// Uses the standard SSA renaming algorithm.
924 /// \returns The new incoming value.
925 MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
926  bool RenameAllUses) {
927  auto It = PerBlockAccesses.find(BB);
928  // Skip most processing if the list is empty.
929  if (It != PerBlockAccesses.end()) {
930  AccessList *Accesses = It->second.get();
931  for (MemoryAccess &L : *Accesses) {
932  if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
933  if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
934  MUD->setDefiningAccess(IncomingVal);
935  if (isa<MemoryDef>(&L))
936  IncomingVal = &L;
937  } else {
938  IncomingVal = &L;
939  }
940  }
941  }
942  return IncomingVal;
943 }
944 
945 /// \brief This is the standard SSA renaming algorithm.
946 ///
947 /// We walk the dominator tree in preorder, renaming accesses, and then filling
948 /// in phi nodes in our successors.
949 void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
950  SmallPtrSetImpl<BasicBlock *> &Visited,
951  bool SkipVisited, bool RenameAllUses) {
952  SmallVector<RenamePassData, 32> WorkStack;
953  // Skip everything if we already renamed this block and we are skipping.
954  // Note: You can't sink this into the if, because we need it to occur
955  // regardless of whether we skip blocks or not.
956  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
957  if (SkipVisited && AlreadyVisited)
958  return;
959 
960  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
961  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
962  WorkStack.push_back({Root, Root->begin(), IncomingVal});
963 
964  while (!WorkStack.empty()) {
965  DomTreeNode *Node = WorkStack.back().DTN;
966  DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
967  IncomingVal = WorkStack.back().IncomingVal;
968 
969  if (ChildIt == Node->end()) {
970  WorkStack.pop_back();
971  } else {
972  DomTreeNode *Child = *ChildIt;
973  ++WorkStack.back().ChildIt;
974  BasicBlock *BB = Child->getBlock();
975  // Note: You can't sink this into the if, because we need it to occur
976  // regardless of whether we skip blocks or not.
977  AlreadyVisited = !Visited.insert(BB).second;
978  if (SkipVisited && AlreadyVisited) {
979  // We already visited this during our renaming, which can happen when
980  // being asked to rename multiple blocks. Figure out the incoming val,
981  // which is the last def.
982  // Incoming value can only change if there is a block def, and in that
983  // case, it's the last block def in the list.
984  if (auto *BlockDefs = getWritableBlockDefs(BB))
985  IncomingVal = &*BlockDefs->rbegin();
986  } else
987  IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
988  renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
989  WorkStack.push_back({Child, Child->begin(), IncomingVal});
990  }
991  }
992 }
993 
994 /// \brief This handles unreachable block accesses by deleting phi nodes in
995 /// unreachable blocks, and marking all other unreachable MemoryAccess's as
996 /// being uses of the live on entry definition.
997 void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
998  assert(!DT->isReachableFromEntry(BB) &&
999  "Reachable block found while handling unreachable blocks");
1000 
1001  // Make sure phi nodes in our reachable successors end up with a
1002  // LiveOnEntryDef for our incoming edge, even though our block is forward
1003  // unreachable. We could just disconnect these blocks from the CFG fully,
1004  // but we do not right now.
1005  for (const BasicBlock *S : successors(BB)) {
1006  if (!DT->isReachableFromEntry(S))
1007  continue;
1008  auto It = PerBlockAccesses.find(S);
1009  // Rename the phi nodes in our successor block
1010  if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1011  continue;
1012  AccessList *Accesses = It->second.get();
1013  auto *Phi = cast<MemoryPhi>(&Accesses->front());
1014  Phi->addIncoming(LiveOnEntryDef.get(), BB);
1015  }
1016 
1017  auto It = PerBlockAccesses.find(BB);
1018  if (It == PerBlockAccesses.end())
1019  return;
1020 
1021  auto &Accesses = It->second;
1022  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
1023  auto Next = std::next(AI);
1024  // If we have a phi, just remove it. We are going to replace all
1025  // users with live on entry.
1026  if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
1027  UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
1028  else
1029  Accesses->erase(AI);
1030  AI = Next;
1031  }
1032 }
1033 
1034 MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
1035  : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
1036  NextID(0) {
1037  buildMemorySSA();
1038 }
1039 
1040 MemorySSA::~MemorySSA() {
1041  // Drop all our references
1042  for (const auto &Pair : PerBlockAccesses)
1043  for (MemoryAccess &MA : *Pair.second)
1044  MA.dropAllReferences();
1045 }
1046 
1047 MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
1048  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
1049 
1050  if (Res.second)
1051  Res.first->second = llvm::make_unique<AccessList>();
1052  return Res.first->second.get();
1053 }
1054 
1055 MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
1056  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));
1057 
1058  if (Res.second)
1059  Res.first->second = llvm::make_unique<DefsList>();
1060  return Res.first->second.get();
1061 }
1062 
1063 namespace llvm {
1064 
1065 /// This class is a batch walker of all MemoryUse's in the program, and points
1066 /// their defining access at the thing that actually clobbers them. Because it
1067 /// is a batch walker that touches everything, it does not operate like the
1068 /// other walkers. This walker is basically performing a top-down SSA renaming
1069 /// pass, where the version stack is used as the cache. This enables it to be
1070 /// significantly more time and memory efficient than using the regular walker,
1071 /// which is walking bottom-up.
1072 class MemorySSA::OptimizeUses {
1073 public:
1074  OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA,
1075  DominatorTree *DT)
1076  : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) {
1077  Walker = MSSA->getWalker();
1078  }
1079 
1080  void optimizeUses();
1081 
1082 private:
1083  /// This represents where a given memorylocation is in the stack.
1084  struct MemlocStackInfo {
1085  // This essentially is keeping track of versions of the stack. Whenever
1086  // the stack changes due to pushes or pops, these versions increase.
1087  unsigned long StackEpoch;
1088  unsigned long PopEpoch;
1089  // This is the lower bound of places on the stack to check. It is equal to
1090  // the place the last stack walk ended.
1091  // Note: Correctness depends on this being initialized to 0, which densemap
1092  // does
1093  unsigned long LowerBound;
1094  const BasicBlock *LowerBoundBlock;
1095  // This is where the last walk for this memory location ended.
1096  unsigned long LastKill;
1097  bool LastKillValid;
1098  };
1099 
1100  void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
1101  SmallVectorImpl<MemoryAccess *> &,
1102  DenseMap<MemoryLocOrCall, MemlocStackInfo> &);
1103 
1104  MemorySSA *MSSA;
1105  MemorySSAWalker *Walker;
1106  AliasAnalysis *AA;
1107  DominatorTree *DT;
1108 };
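// Note: buildMemorySSA (further down in this file) drives the class above as
// a one-shot batch pass over the whole function:
//
//   OptimizeUses(this, Walker, AA, DT).optimizeUses();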
1109 
1110 } // end namespace llvm
1111 
1112 /// Optimize the uses in a given block. This is basically the SSA renaming
1113 /// algorithm, with one caveat: We are able to use a single stack for all
1114 /// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
1115 /// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
1116 /// going to be some position in that stack of possible ones.
1117 ///
1118 /// We track the stack positions that each MemoryLocation needs
1119 /// to check, and last ended at. This is because we only want to check the
1120 /// things that changed since last time. The same MemoryLocation should
1121 /// get clobbered by the same store (getModRefInfo does not use invariantness or
1122 /// things like this, and if they start, we can modify MemoryLocOrCall to
1123 /// include relevant data)
1124 void MemorySSA::OptimizeUses::optimizeUsesInBlock(
1125  const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
1126  SmallVectorImpl<MemoryAccess *> &VersionStack,
1127  DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {
1128 
1129  /// If no accesses, nothing to do.
1130  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
1131  if (Accesses == nullptr)
1132  return;
1133 
1134  // Pop everything that doesn't dominate the current block off the stack,
1135  // increment the PopEpoch to account for this.
1136  while (true) {
1137  assert(
1138  !VersionStack.empty() &&
1139  "Version stack should have liveOnEntry sentinel dominating everything");
1140  BasicBlock *BackBlock = VersionStack.back()->getBlock();
1141  if (DT->dominates(BackBlock, BB))
1142  break;
1143  while (VersionStack.back()->getBlock() == BackBlock)
1144  VersionStack.pop_back();
1145  ++PopEpoch;
1146  }
1147 
1148  for (MemoryAccess &MA : *Accesses) {
1149  auto *MU = dyn_cast<MemoryUse>(&MA);
1150  if (!MU) {
1151  VersionStack.push_back(&MA);
1152  ++StackEpoch;
1153  continue;
1154  }
1155 
1156  if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
1157  MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true);
1158  continue;
1159  }
1160 
1161  MemoryLocOrCall UseMLOC(MU);
1162  auto &LocInfo = LocStackInfo[UseMLOC];
1163  // If the pop epoch changed, it means we've removed stuff from top of
1164  // stack due to changing blocks. We may have to reset the lower bound or
1165  // last kill info.
1166  if (LocInfo.PopEpoch != PopEpoch) {
1167  LocInfo.PopEpoch = PopEpoch;
1168  LocInfo.StackEpoch = StackEpoch;
1169  // If the lower bound was in something that no longer dominates us, we
1170  // have to reset it.
1171  // We can't simply track stack size, because the stack may have had
1172  // pushes/pops in the meantime.
1173  // XXX: This is non-optimal, but is only slower in cases with heavily
1174  // branching dominator trees. To get the optimal number of queries, we would
1175  // have to make lowerbound and lastkill a per-loc stack, and pop it until
1176  // the top of that stack dominates us. This does not seem worth it ATM.
1177  // A much cheaper optimization would be to always explore the deepest
1178  // branch of the dominator tree first. This will guarantee this resets on
1179  // the smallest set of blocks.
1180  if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
1181  !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
1182  // Reset the lower bound of things to check.
1183  // TODO: Some day we should be able to reset to last kill, rather than
1184  // 0.
1185  LocInfo.LowerBound = 0;
1186  LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
1187  LocInfo.LastKillValid = false;
1188  }
1189  } else if (LocInfo.StackEpoch != StackEpoch) {
1190  // If all that has changed is the StackEpoch, we only have to check the
1191  // new things on the stack, because we've checked everything before. In
1192  // this case, the lower bound of things to check remains the same.
1193  LocInfo.PopEpoch = PopEpoch;
1194  LocInfo.StackEpoch = StackEpoch;
1195  }
1196  if (!LocInfo.LastKillValid) {
1197  LocInfo.LastKill = VersionStack.size() - 1;
1198  LocInfo.LastKillValid = true;
1199  }
1200 
1201  // At this point, we should have corrected last kill and LowerBound to be
1202  // in bounds.
1203  assert(LocInfo.LowerBound < VersionStack.size() &&
1204  "Lower bound out of range");
1205  assert(LocInfo.LastKill < VersionStack.size() &&
1206  "Last kill info out of range");
1207  // In any case, the new upper bound is the top of the stack.
1208  unsigned long UpperBound = VersionStack.size() - 1;
1209 
1210  if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
1211  DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
1212  << *(MU->getMemoryInst()) << ")"
1213  << " because there are " << UpperBound - LocInfo.LowerBound
1214  << " stores to disambiguate\n");
1215  // Because we did not walk, LastKill is no longer valid, as this may
1216  // have been a kill.
1217  LocInfo.LastKillValid = false;
1218  continue;
1219  }
1220  bool FoundClobberResult = false;
1221  while (UpperBound > LocInfo.LowerBound) {
1222  if (isa<MemoryPhi>(VersionStack[UpperBound])) {
1223  // For phis, use the walker, see where we ended up, go there
1224  Instruction *UseInst = MU->getMemoryInst();
1225  MemoryAccess *Result = Walker->getClobberingMemoryAccess(UseInst);
1226  // We are guaranteed to find it or something is wrong
1227  while (VersionStack[UpperBound] != Result) {
1228  assert(UpperBound != 0);
1229  --UpperBound;
1230  }
1231  FoundClobberResult = true;
1232  break;
1233  }
1234 
1235  MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
1236  // If the lifetime of the pointer ends at this instruction, it's live on
1237  // entry.
1238  if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
1239  // Reset UpperBound to liveOnEntryDef's place in the stack
1240  UpperBound = 0;
1241  FoundClobberResult = true;
1242  break;
1243  }
1244  if (instructionClobbersQuery(MD, MU, UseMLOC, *AA)) {
1245  FoundClobberResult = true;
1246  break;
1247  }
1248  --UpperBound;
1249  }
1250  // At the end of this loop, UpperBound is either a clobber, or the lower bound.
1251  // PHI walking may cause it to be < LowerBound, and in fact, < LastKill.
1252  if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
1253  MU->setDefiningAccess(VersionStack[UpperBound], true);
1254  // We were last killed now by where we got to
1255  LocInfo.LastKill = UpperBound;
1256  } else {
1257  // Otherwise, we checked all the new ones, and now we know we can get to
1258  // LastKill.
1259  MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true);
1260  }
1261  LocInfo.LowerBound = VersionStack.size() - 1;
1262  LocInfo.LowerBoundBlock = BB;
1263  }
1264 }
1265 
1266 /// Optimize uses to point to their actual clobbering definitions.
1267 void MemorySSA::OptimizeUses::optimizeUses() {
1268  SmallVector<MemoryAccess *, 16> VersionStack;
1269  DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
1270  VersionStack.push_back(MSSA->getLiveOnEntryDef());
1271 
1272  unsigned long StackEpoch = 1;
1273  unsigned long PopEpoch = 1;
1274  // We perform a non-recursive top-down dominator tree walk.
1275  for (const auto *DomNode : depth_first(DT->getRootNode()))
1276  optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
1277  LocStackInfo);
1278 }
1279 
1280 void MemorySSA::placePHINodes(
1281  const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks,
1282  const DenseMap<const BasicBlock *, unsigned int> &BBNumbers) {
1283  // Determine where our MemoryPhi's should go
1284  ForwardIDFCalculator IDFs(*DT);
1285  IDFs.setDefiningBlocks(DefiningBlocks);
1286  SmallVector<BasicBlock *, 32> IDFBlocks;
1287  IDFs.calculate(IDFBlocks);
1288 
1289  std::sort(IDFBlocks.begin(), IDFBlocks.end(),
1290  [&BBNumbers](const BasicBlock *A, const BasicBlock *B) {
1291  return BBNumbers.lookup(A) < BBNumbers.lookup(B);
1292  });
1293 
1294  // Now place MemoryPhi nodes.
1295  for (auto &BB : IDFBlocks)
1296  createMemoryPhi(BB);
1297 }
1298 
1299 void MemorySSA::buildMemorySSA() {
1300  // We create an access to represent "live on entry", for things like
1301  // arguments or users of globals, where the memory they use is defined before
1302  // the beginning of the function. We do not actually insert it into the IR.
1303  // We do not define a live on exit for the immediate uses, and thus our
1304  // semantics do *not* imply that something with no immediate uses can simply
1305  // be removed.
1306  BasicBlock &StartingPoint = F.getEntryBlock();
1307  LiveOnEntryDef =
1308  llvm::make_unique<MemoryDef>(F.getContext(), nullptr, nullptr,
1309  &StartingPoint, NextID++);
1310  DenseMap<const BasicBlock *, unsigned int> BBNumbers;
1311  unsigned NextBBNum = 0;
1312 
1313  // We maintain lists of memory accesses per-block, trading memory for time. We
1314  // could just look up the memory access for every possible instruction in the
1315  // stream.
1316  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1317  // Go through each block, figure out where defs occur, and chain together all
1318  // the accesses.
1319  for (BasicBlock &B : F) {
1320  BBNumbers[&B] = NextBBNum++;
1321  bool InsertIntoDef = false;
1322  AccessList *Accesses = nullptr;
1323  DefsList *Defs = nullptr;
1324  for (Instruction &I : B) {
1325  MemoryUseOrDef *MUD = createNewAccess(&I);
1326  if (!MUD)
1327  continue;
1328 
1329  if (!Accesses)
1330  Accesses = getOrCreateAccessList(&B);
1331  Accesses->push_back(MUD);
1332  if (isa<MemoryDef>(MUD)) {
1333  InsertIntoDef = true;
1334  if (!Defs)
1335  Defs = getOrCreateDefsList(&B);
1336  Defs->push_back(*MUD);
1337  }
1338  }
1339  if (InsertIntoDef)
1340  DefiningBlocks.insert(&B);
1341  }
1342  placePHINodes(DefiningBlocks, BBNumbers);
1343 
1344  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1345  // filled in with all blocks.
1346  SmallPtrSet<BasicBlock *, 16> Visited;
1347  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1348 
1349  CachingWalker *Walker = getWalkerImpl();
1350 
1351  // We're doing a batch of updates; don't drop useful caches between them.
1352  Walker->setAutoResetWalker(false);
1353  OptimizeUses(this, Walker, AA, DT).optimizeUses();
1354  Walker->setAutoResetWalker(true);
1355  Walker->resetClobberWalker();
1356 
1357  // Mark the uses in unreachable blocks as live on entry, so that they go
1358  // somewhere.
1359  for (auto &BB : F)
1360  if (!Visited.count(&BB))
1361  markUnreachableAsLiveOnEntry(&BB);
1362 }
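// Illustrative usage sketch (not part of the original file): how a pass
// typically obtains the MemorySSA built above. Under the legacy pass manager:
//
//   MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
//
// and under the new pass manager:
//
//   MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();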
1363 
1364 MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
1365 
1366 MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
1367  if (Walker)
1368  return Walker.get();
1369 
1370  Walker = llvm::make_unique<CachingWalker>(this, AA, DT);
1371  return Walker.get();
1372 }
1373 
1374 // This is a helper function used by the creation routines. It places NewAccess
1375 // into the access and defs lists for a given basic block, at the given
1376 // insertion point.
1377 void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1378  const BasicBlock *BB,
1379  InsertionPlace Point) {
1380  auto *Accesses = getOrCreateAccessList(BB);
1381  if (Point == Beginning) {
1382  // If it's a phi node, it goes first, otherwise, it goes after any phi
1383  // nodes.
1384  if (isa<MemoryPhi>(NewAccess)) {
1385  Accesses->push_front(NewAccess);
1386  auto *Defs = getOrCreateDefsList(BB);
1387  Defs->push_front(*NewAccess);
1388  } else {
1389  auto AI = find_if_not(
1390  *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1391  Accesses->insert(AI, NewAccess);
1392  if (!isa<MemoryUse>(NewAccess)) {
1393  auto *Defs = getOrCreateDefsList(BB);
1394  auto DI = find_if_not(
1395  *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1396  Defs->insert(DI, *NewAccess);
1397  }
1398  }
1399  } else {
1400  Accesses->push_back(NewAccess);
1401  if (!isa<MemoryUse>(NewAccess)) {
1402  auto *Defs = getOrCreateDefsList(BB);
1403  Defs->push_back(*NewAccess);
1404  }
1405  }
1406  BlockNumberingValid.erase(BB);
1407 }
1408 
1409 void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
1410  AccessList::iterator InsertPt) {
1411  auto *Accesses = getWritableBlockAccesses(BB);
1412  bool WasEnd = InsertPt == Accesses->end();
1413  Accesses->insert(AccessList::iterator(InsertPt), What);
1414  if (!isa<MemoryUse>(What)) {
1415  auto *Defs = getOrCreateDefsList(BB);
1416  // If we got asked to insert at the end, we have an easy job, just shove it
1417  // at the end. If we got asked to insert before an existing def, we also get
1418  // an iterator. If we got asked to insert before a use, we have to hunt for
1419  // the next def.
1420  if (WasEnd) {
1421  Defs->push_back(*What);
1422  } else if (isa<MemoryDef>(InsertPt)) {
1423  Defs->insert(InsertPt->getDefsIterator(), *What);
1424  } else {
1425  while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
1426  ++InsertPt;
1427  // Either we found a def, or we are inserting at the end
1428  if (InsertPt == Accesses->end())
1429  Defs->push_back(*What);
1430  else
1431  Defs->insert(InsertPt->getDefsIterator(), *What);
1432  }
1433  }
1434  BlockNumberingValid.erase(BB);
1435 }
1436 
1437 // Move What before Where in the IR. The end result is that What will belong to
1438 // the right lists and have the right Block set, but will not otherwise be
1439 // correct. It will not have the right defining access, and if it is a def,
1440 // things below it will not properly be updated.
1441 void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1442  AccessList::iterator Where) {
1443  // Keep it in the lookup tables, remove from the lists
1444  removeFromLists(What, false);
1445  What->setBlock(BB);
1446  insertIntoListsBefore(What, BB, Where);
1447 }
1448 
1449 void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1450  InsertionPlace Point) {
1451  removeFromLists(What, false);
1452  What->setBlock(BB);
1453  insertIntoListsForBlock(What, BB, Point);
1454 }
1455 
1456 MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1457  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1458  MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
1459  // Phis are always placed at the front of the block.
1460  insertIntoListsForBlock(Phi, BB, Beginning);
1461  ValueToMemoryAccess[BB] = Phi;
1462  return Phi;
1463 }
1464 
1465 MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1466  MemoryAccess *Definition) {
1467  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1468  MemoryUseOrDef *NewAccess = createNewAccess(I);
1469  assert(
1470  NewAccess != nullptr &&
1471  "Tried to create a memory access for a non-memory touching instruction");
1472  NewAccess->setDefiningAccess(Definition);
1473  return NewAccess;
1474 }
1475 
1476 // Return true if the instruction has ordering constraints.
1477 // Note specifically that this only considers stores and loads
1478 // because others are still considered ModRef by getModRefInfo.
1479 static inline bool isOrdered(const Instruction *I) {
1480  if (auto *SI = dyn_cast<StoreInst>(I)) {
1481  if (!SI->isUnordered())
1482  return true;
1483  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1484  if (!LI->isUnordered())
1485  return true;
1486  }
1487  return false;
1488 }
1489 
1490 /// \brief Helper function to create new memory accesses
1491 MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {
1492  // The assume intrinsic has a control dependency which we model by claiming
1493  // that it writes arbitrarily. Ignore that fake memory dependency here.
1494  // FIXME: Replace this special casing with a more accurate modelling of
1495  // assume's control dependency.
1496  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1497  if (II->getIntrinsicID() == Intrinsic::assume)
1498  return nullptr;
1499 
1500  // Find out what effect this instruction has on memory.
1501  ModRefInfo ModRef = AA->getModRefInfo(I, None);
1502  // The isOrdered check is used to ensure that volatiles end up as defs
1503  // (atomics end up as ModRef right now anyway). Until we separate the
1504  // ordering chain from the memory chain, this enables people to see at least
1505  // some relative ordering to volatiles. Note that getClobberingMemoryAccess
1506  // will still give an answer that bypasses other volatile loads. TODO:
1507  // Separate memory aliasing and ordering into two different chains so that we
1508  // can precisely represent both "what memory will this read/write/is clobbered
1509  // by" and "what instructions can I move this past".
1510  bool Def = isModSet(ModRef) || isOrdered(I);
1511  bool Use = isRefSet(ModRef);
1512 
1513  // It's possible for an instruction to not modify memory at all. During
1514  // construction, we ignore them.
1515  if (!Def && !Use)
1516  return nullptr;
1517 
1518  assert((Def || Use) &&
1519  "Trying to create a memory access with a non-memory instruction");
1520 
1521  MemoryUseOrDef *MUD;
1522  if (Def)
1523  MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1524  else
1525  MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1526  ValueToMemoryAccess[I] = MUD;
1527  return MUD;
1528 }
1529 
1530 /// \brief Returns true if \p Replacer dominates \p Replacee .
1531 bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
1532  const MemoryAccess *Replacee) const {
1533  if (isa<MemoryUseOrDef>(Replacee))
1534  return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
1535  const auto *MP = cast<MemoryPhi>(Replacee);
1536  // For a phi node, the use occurs in the predecessor block of the phi node.
1537  // Since we may occur multiple times in the phi node, we have to check each
1538  // operand to ensure Replacer dominates each operand where Replacee occurs.
1539  for (const Use &Arg : MP->operands()) {
1540  if (Arg.get() != Replacee &&
1541  !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
1542  return false;
1543  }
1544  return true;
1545 }
1546 
1547 /// \brief Properly remove \p MA from all of MemorySSA's lookup tables.
1548 void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1549  assert(MA->use_empty() &&
1550  "Trying to remove memory access that still has uses");
1551  BlockNumbering.erase(MA);
1552  if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA))
1553  MUD->setDefiningAccess(nullptr);
1554  // Invalidate our walker's cache if necessary
1555  if (!isa<MemoryUse>(MA))
1556  Walker->invalidateInfo(MA);
1557  // The call below to erase will destroy MA, so we can't change the order we
1558  // are doing things here
1559  Value *MemoryInst;
1560  if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
1561  MemoryInst = MUD->getMemoryInst();
1562  } else {
1563  MemoryInst = MA->getBlock();
1564  }
1565  auto VMA = ValueToMemoryAccess.find(MemoryInst);
1566  if (VMA->second == MA)
1567  ValueToMemoryAccess.erase(VMA);
1568 }
1569 
1570 /// \brief Properly remove \p MA from all of MemorySSA's lists.
1571 ///
1572 /// Because of the way the intrusive list and use lists work, it is important to
1573 /// do removal in the right order.
1574 /// ShouldDelete defaults to true, and will cause the memory access to also be
1575 /// deleted, not just removed.
1576 void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1577  // The access list owns the reference, so we erase it from the non-owning list
1578  // first.
1579  if (!isa<MemoryUse>(MA)) {
1580  auto DefsIt = PerBlockDefs.find(MA->getBlock());
1581  std::unique_ptr<DefsList> &Defs = DefsIt->second;
1582  Defs->remove(*MA);
1583  if (Defs->empty())
1584  PerBlockDefs.erase(DefsIt);
1585  }
1586 
1587  // The erase call here will delete it. If we don't want it deleted, we call
1588  // remove instead.
1589  auto AccessIt = PerBlockAccesses.find(MA->getBlock());
1590  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1591  if (ShouldDelete)
1592  Accesses->erase(MA);
1593  else
1594  Accesses->remove(MA);
1595 
1596  if (Accesses->empty())
1597  PerBlockAccesses.erase(AccessIt);
1598 }
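// Sketch of the removal sequence a caller is expected to follow (assuming MA's
// uses have already been rerouted, e.g. by an updater):
//
//   MSSA.removeFromLookups(MA);   // drop lookup-table entries first
//   MSSA.removeFromLists(MA);     // then unlink and delete the access
//
// Passing ShouldDelete = false keeps the access alive so it can be re-inserted
// elsewhere, as moveTo does when an access changes blocks.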
1599 
1600 void MemorySSA::print(raw_ostream &OS) const {
1601  MemorySSAAnnotatedWriter Writer(this);
1602  F.print(OS, &Writer);
1603 }
1604 
1605 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1606 LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1607 #endif
1608 
1609 void MemorySSA::verifyMemorySSA() const {
1610  verifyDefUses(F);
1611  verifyDomination(F);
1612  verifyOrdering(F);
1613  Walker->verify(this);
1614 }
1615 
1616 /// \brief Verify that the order and existence of MemoryAccesses matches the
1617 /// order and existence of memory affecting instructions.
1618 void MemorySSA::verifyOrdering(Function &F) const {
1619  // Walk all the blocks, comparing what the lookups think and what the access
1620  // lists think, as well as the order in the blocks vs the order in the access
1621  // lists.
1622  SmallVector<MemoryAccess *, 32> ActualAccesses;
1623  SmallVector<MemoryAccess *, 32> ActualDefs;
1624  for (BasicBlock &B : F) {
1625  const AccessList *AL = getBlockAccesses(&B);
1626  const auto *DL = getBlockDefs(&B);
1627  MemoryAccess *Phi = getMemoryAccess(&B);
1628  if (Phi) {
1629  ActualAccesses.push_back(Phi);
1630  ActualDefs.push_back(Phi);
1631  }
1632 
1633  for (Instruction &I : B) {
1634  MemoryAccess *MA = getMemoryAccess(&I);
1635  assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
1636  "We have memory affecting instructions "
1637  "in this block but they are not in the "
1638  "access list or defs list");
1639  if (MA) {
1640  ActualAccesses.push_back(MA);
1641  if (isa<MemoryDef>(MA))
1642  ActualDefs.push_back(MA);
1643  }
1644  }
1645  // Either we hit the assert, really have no accesses, or we have both
1646  // accesses and an access list.
1647  // Same with defs.
1648  if (!AL && !DL)
1649  continue;
1650  assert(AL->size() == ActualAccesses.size() &&
1651  "We don't have the same number of accesses in the block as on the "
1652  "access list");
1653  assert((DL || ActualDefs.size() == 0) &&
1654  "Either we should have a defs list, or we should have no defs");
1655  assert((!DL || DL->size() == ActualDefs.size()) &&
1656  "We don't have the same number of defs in the block as on the "
1657  "def list");
1658  auto ALI = AL->begin();
1659  auto AAI = ActualAccesses.begin();
1660  while (ALI != AL->end() && AAI != ActualAccesses.end()) {
1661  assert(&*ALI == *AAI && "Not the same accesses in the same order");
1662  ++ALI;
1663  ++AAI;
1664  }
1665  ActualAccesses.clear();
1666  if (DL) {
1667  auto DLI = DL->begin();
1668  auto ADI = ActualDefs.begin();
1669  while (DLI != DL->end() && ADI != ActualDefs.end()) {
1670  assert(&*DLI == *ADI && "Not the same defs in the same order");
1671  ++DLI;
1672  ++ADI;
1673  }
1674  }
1675  ActualDefs.clear();
1676  }
1677 }
1678 
1679 /// \brief Verify the domination properties of MemorySSA by checking that each
1680 /// definition dominates all of its uses.
1681 void MemorySSA::verifyDomination(Function &F) const {
1682 #ifndef NDEBUG
1683  for (BasicBlock &B : F) {
1684  // Phi nodes are attached to basic blocks
1685  if (MemoryPhi *MP = getMemoryAccess(&B))
1686  for (const Use &U : MP->uses())
1687  assert(dominates(MP, U) && "Memory PHI does not dominate its uses");
1688 
1689  for (Instruction &I : B) {
1690  MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
1691  if (!MD)
1692  continue;
1693 
1694  for (const Use &U : MD->uses())
1695  assert(dominates(MD, U) && "Memory Def does not dominate its uses");
1696  }
1697  }
1698 #endif
1699 }
1700 
1701 /// \brief Verify the def-use lists in MemorySSA, by verifying that \p Use
1702 /// appears in the use list of \p Def.
1703 void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
1704 #ifndef NDEBUG
1705  // The live on entry use may cause us to get a NULL def here
1706  if (!Def)
1707  assert(isLiveOnEntryDef(Use) &&
1708  "Null def but use not point to live on entry def");
1709  else
1710  assert(is_contained(Def->users(), Use) &&
1711  "Did not find use in def's use list");
1712 #endif
1713 }
1714 
1715 /// \brief Verify the immediate use information, by walking all the memory
1716 /// accesses and verifying that, for each use, it appears in the
1717 /// appropriate def's use list
1718 void MemorySSA::verifyDefUses(Function &F) const {
1719  for (BasicBlock &B : F) {
1720  // Phi nodes are attached to basic blocks
1721  if (MemoryPhi *Phi = getMemoryAccess(&B)) {
1722  assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
1723  pred_begin(&B), pred_end(&B))) &&
1724  "Incomplete MemoryPhi Node");
1725  for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
1726  verifyUseInDefs(Phi->getIncomingValue(I), Phi);
1727  }
1728 
1729  for (Instruction &I : B) {
1730  if (MemoryUseOrDef *MA = getMemoryAccess(&I)) {
1731  verifyUseInDefs(MA->getDefiningAccess(), MA);
1732  }
1733  }
1734  }
1735 }
1736 
1737 MemoryUseOrDef *MemorySSA::getMemoryAccess(const Instruction *I) const {
1738  return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
1739 }
1740 
1741 MemoryPhi *MemorySSA::getMemoryAccess(const BasicBlock *BB) const {
1742  return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
1743 }
1744 
1745 /// Perform a local numbering on blocks so that instruction ordering can be
1746 /// determined in constant time.
1747 /// TODO: We currently just number in order. If we numbered by N, we could
1748 /// allow at least N-1 sequences of insertBefore or insertAfter (and at least
1749 /// log2(N) sequences of mixed before and after) without needing to invalidate
1750 /// the numbering.
1751 void MemorySSA::renumberBlock(const BasicBlock *B) const {
1752  // The pre-increment ensures the numbers really start at 1.
1753  unsigned long CurrentNumber = 0;
1754  const AccessList *AL = getBlockAccesses(B);
1755  assert(AL != nullptr && "Asking to renumber an empty block");
1756  for (const auto &I : *AL)
1757  BlockNumbering[&I] = ++CurrentNumber;
1758  BlockNumberingValid.insert(B);
1759 }
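// For illustration: if a block's access list is [MemoryPhi, MemoryDef, MemoryUse],
// renumbering assigns them 1, 2 and 3, so locallyDominates below can compare two
// accesses in the same block with two map lookups instead of a list walk.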
1760 
1761 /// \brief Determine, for two memory accesses in the same block,
1762 /// whether \p Dominator dominates \p Dominatee.
1763 /// \returns True if \p Dominator dominates \p Dominatee.
1764 bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
1765  const MemoryAccess *Dominatee) const {
1766  const BasicBlock *DominatorBlock = Dominator->getBlock();
1767 
1768  assert((DominatorBlock == Dominatee->getBlock()) &&
1769  "Asking for local domination when accesses are in different blocks!");
1770  // A node dominates itself.
1771  if (Dominatee == Dominator)
1772  return true;
1773 
1774  // When Dominatee is defined on function entry, it is not dominated by another
1775  // memory access.
1776  if (isLiveOnEntryDef(Dominatee))
1777  return false;
1778 
1779  // When Dominator is defined on function entry, it dominates the other memory
1780  // access.
1781  if (isLiveOnEntryDef(Dominator))
1782  return true;
1783 
1784  if (!BlockNumberingValid.count(DominatorBlock))
1785  renumberBlock(DominatorBlock);
1786 
1787  unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
1788  // All numbers start with 1
1789  assert(DominatorNum != 0 && "Block was not numbered properly");
1790  unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
1791  assert(DominateeNum != 0 && "Block was not numbered properly");
1792  return DominatorNum < DominateeNum;
1793 }
1794 
1795 bool MemorySSA::dominates(const MemoryAccess *Dominator,
1796  const MemoryAccess *Dominatee) const {
1797  if (Dominator == Dominatee)
1798  return true;
1799 
1800  if (isLiveOnEntryDef(Dominatee))
1801  return false;
1802 
1803  if (Dominator->getBlock() != Dominatee->getBlock())
1804  return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
1805  return locallyDominates(Dominator, Dominatee);
1806 }
1807 
1808 bool MemorySSA::dominates(const MemoryAccess *Dominator,
1809  const Use &Dominatee) const {
1810  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
1811  BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
1812  // The def must dominate the incoming block of the phi.
1813  if (UseBB != Dominator->getBlock())
1814  return DT->dominates(Dominator->getBlock(), UseBB);
1815  // If the UseBB and the DefBB are the same, compare locally.
1816  return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
1817  }
1818  // If it's not a PHI node use, the normal dominates can already handle it.
1819  return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
1820 }
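// Minimal usage sketch (client code assumed; StoreA and LoadB are instructions
// already tracked by this MemorySSA):
//
//   MemoryAccess *DefA = MSSA.getMemoryAccess(StoreA);
//   MemoryAccess *UseB = MSSA.getMemoryAccess(LoadB);
//   bool Dom = MSSA.dominates(DefA, UseB);   // across or within blocks
//
//   // For a MemoryPhi operand, pass the Use so the incoming block is checked:
//   for (const Use &U : Phi->operands())
//     (void)MSSA.dominates(DefA, U);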
1821 
1822 const static char LiveOnEntryStr[] = "liveOnEntry";
1823 
1824 void MemoryAccess::print(raw_ostream &OS) const {
1825  switch (getValueID()) {
1826  case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
1827  case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
1828  case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
1829  }
1830  llvm_unreachable("invalid value id");
1831 }
1832 
1833 void MemoryDef::print(raw_ostream &OS) const {
1834  MemoryAccess *UO = getDefiningAccess();
1835 
1836  OS << getID() << " = MemoryDef(";
1837  if (UO && UO->getID())
1838  OS << UO->getID();
1839  else
1840  OS << LiveOnEntryStr;
1841  OS << ')';
1842 }
1843 
1844 void MemoryPhi::print(raw_ostream &OS) const {
1845  bool First = true;
1846  OS << getID() << " = MemoryPhi(";
1847  for (const auto &Op : operands()) {
1848  BasicBlock *BB = getIncomingBlock(Op);
1849  MemoryAccess *MA = cast<MemoryAccess>(Op);
1850  if (!First)
1851  OS << ',';
1852  else
1853  First = false;
1854 
1855  OS << '{';
1856  if (BB->hasName())
1857  OS << BB->getName();
1858  else
1859  BB->printAsOperand(OS, false);
1860  OS << ',';
1861  if (unsigned ID = MA->getID())
1862  OS << ID;
1863  else
1864  OS << LiveOnEntryStr;
1865  OS << '}';
1866  }
1867  OS << ')';
1868 }
1869 
1870 void MemoryUse::print(raw_ostream &OS) const {
1871  MemoryAccess *UO = getDefiningAccess();
1872  OS << "MemoryUse(";
1873  if (UO && UO->getID())
1874  OS << UO->getID();
1875  else
1876  OS << LiveOnEntryStr;
1877  OS << ')';
1878 }
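// For reference, the printers above produce lines of the following shape, as
// emitted (with a leading "; ") by the MemorySSAAnnotatedWriter:
//
//   1 = MemoryDef(liveOnEntry)
//   MemoryUse(1)
//   2 = MemoryPhi({entry,liveOnEntry},{if.then,1})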
1879 
1880 void MemoryAccess::dump() const {
1881 // Cannot completely remove virtual function even in release mode.
1882 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1883  print(dbgs());
1884  dbgs() << "\n";
1885 #endif
1886 }
1887 
1888 char MemorySSAPrinterLegacyPass::ID = 0;
1889 
1890 MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
1891  initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
1892 }
1893 
1894 void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
1895  AU.setPreservesAll();
1896  AU.addRequired<MemorySSAWrapperPass>();
1897 }
1898 
1899 bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
1900  auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
1901  MSSA.print(dbgs());
1902  if (VerifyMemorySSA)
1903  MSSA.verifyMemorySSA();
1904  return false;
1905 }
1906 
1907 AnalysisKey MemorySSAAnalysis::Key;
1908 
1909 MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
1910  FunctionAnalysisManager &AM) {
1911  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
1912  auto &AA = AM.getResult<AAManager>(F);
1913  return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT));
1914 }
1915 
1918  OS << "MemorySSA for function: " << F.getName() << "\n";
1919  AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);
1920 
1921  return PreservedAnalyses::all();
1922 }
1923 
1924 PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
1925  FunctionAnalysisManager &AM) {
1926  AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
1927 
1928  return PreservedAnalyses::all();
1929 }
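// Usage sketch, assuming the usual pass-name registrations: the new pass
// manager printer and verifier defined above can be driven from opt with
//   opt -passes='print<memoryssa>' ...
//   opt -passes='verify<memoryssa>' ...
// while the legacy printer pass is reached via -print-memoryssa.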
1930 
1931 char MemorySSAWrapperPass::ID = 0;
1932 
1933 MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
1934  initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
1935 }
1936 
1937 void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
1938 
1939 void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1940  AU.setPreservesAll();
1941  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
1942  AU.addRequiredTransitive<AAResultsWrapperPass>();
1943 }
1944 
1945 bool MemorySSAWrapperPass::runOnFunction(Function &F) {
1946  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1947  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
1948  MSSA.reset(new MemorySSA(F, &AA, &DT));
1949  return false;
1950 }
1951 
1952 void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }
1953 
1954 void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
1955  MSSA->print(OS);
1956 }
1957 
1958 MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
1959 
1960 MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,
1961  DominatorTree *D)
1962  : MemorySSAWalker(M), Walker(*M, *A, *D) {}
1963 
1964 void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {
1965  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1966  MUD->resetOptimized();
1967 }
1968 
1969 /// \brief Walk the use-def chains starting at \p MA and find
1970 /// the MemoryAccess that actually clobbers Loc.
1971 ///
1972 /// \returns our clobbering memory access
1973 MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
1974  MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) {
1975  MemoryAccess *New = Walker.findClobber(StartingAccess, Q);
1976 #ifdef EXPENSIVE_CHECKS
1977  MemoryAccess *NewNoCache = Walker.findClobber(StartingAccess, Q);
1978  assert(NewNoCache == New && "Cache made us hand back a different result?");
1979  (void)NewNoCache;
1980 #endif
1981  if (AutoResetWalker)
1982  resetClobberWalker();
1983  return New;
1984 }
1985 
1986 MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
1987  MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
1988  if (isa<MemoryPhi>(StartingAccess))
1989  return StartingAccess;
1990 
1991  auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
1992  if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
1993  return StartingUseOrDef;
1994 
1995  Instruction *I = StartingUseOrDef->getMemoryInst();
1996 
1997  // Conservatively, fences are always clobbers, so don't perform the walk if we
1998  // hit a fence.
1999  if (!ImmutableCallSite(I) && I->isFenceLike())
2000  return StartingUseOrDef;
2001 
2002  UpwardsMemoryQuery Q;
2003  Q.OriginalAccess = StartingUseOrDef;
2004  Q.StartingLoc = Loc;
2005  Q.Inst = I;
2006  Q.IsCall = false;
2007 
2008  // Unlike the other function, do not walk to the def of a def, because we are
2009  // handed something we already believe is the clobbering access.
2010  MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
2011  ? StartingUseOrDef->getDefiningAccess()
2012  : StartingUseOrDef;
2013 
2014  MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
2015  DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2016  DEBUG(dbgs() << *StartingUseOrDef << "\n");
2017  DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
2018  DEBUG(dbgs() << *Clobber << "\n");
2019  return Clobber;
2020 }
2021 
2022 MemoryAccess *
2023 MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2024  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2025  // If this is a MemoryPhi, we can't do anything.
2026  if (!StartingAccess)
2027  return MA;
2028 
2029  // If this is an already optimized use or def, return the optimized result.
2030  // Note: Currently, we do not store the optimized def result because we'd need
2031  // a separate field, since we can't use it as the defining access.
2032  if (StartingAccess->isOptimized())
2033  return StartingAccess->getOptimized();
2034 
2035  const Instruction *I = StartingAccess->getMemoryInst();
2036  UpwardsMemoryQuery Q(I, StartingAccess);
2037  // We can't sanely do anything with fences; they conservatively
2038  // clobber all memory, and have no locations to get pointers from to
2039  // try to disambiguate.
2040  if (!Q.IsCall && I->isFenceLike())
2041  return StartingAccess;
2042 
2043  if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) {
2044  MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2045  if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
2046  MUD->setOptimized(LiveOnEntry);
2047  return LiveOnEntry;
2048  }
2049 
2050  // Start with the thing we already think clobbers this location
2051  MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2052 
2053  // At this point, DefiningAccess may be the live on entry def.
2054  // If it is, we will not get a better result.
2055  if (MSSA->isLiveOnEntryDef(DefiningAccess))
2056  return DefiningAccess;
2057 
2058  MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
2059  DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2060  DEBUG(dbgs() << *DefiningAccess << "\n");
2061  DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
2062  DEBUG(dbgs() << *Result << "\n");
2063  if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
2064  MUD->setOptimized(Result);
2065 
2066  return Result;
2067 }
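// Minimal client sketch (assumed caller; LoadI is a memory instruction known to
// this MemorySSA):
//
//   MemorySSAWalker *W = MSSA.getWalker();
//   MemoryAccess *MA = MSSA.getMemoryAccess(LoadI);
//   MemoryAccess *Clobber = W->getClobberingMemoryAccess(MA);
//   // Clobber is the nearest dominating MemoryDef or MemoryPhi that may
//   // clobber LoadI's location, or liveOnEntry if nothing does.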
2068 
2069 MemoryAccess *
2070 DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2071  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2072  return Use->getDefiningAccess();
2073  return MA;
2074 }
2075 
2076 MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2077  MemoryAccess *StartingAccess, const MemoryLocation &) {
2078  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2079  return Use->getDefiningAccess();
2080  return StartingAccess;
2081 }
2082 
2083 void MemoryPhi::deleteMe(DerivedUser *Self) {
2084  delete static_cast<MemoryPhi *>(Self);
2085 }
2086 
2087 void MemoryDef::deleteMe(DerivedUser *Self) {
2088  delete static_cast<MemoryDef *>(Self);
2089 }
2090 
2091 void MemoryUse::deleteMe(DerivedUser *Self) {
2092  delete static_cast<MemoryUse *>(Self);
2093 }