1 //===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the MemorySSA class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Analysis/MemorySSA.h"
15 #include "llvm/ADT/DenseMap.h"
16 #include "llvm/ADT/DenseMapInfo.h"
17 #include "llvm/ADT/DenseSet.h"
18 #include "llvm/ADT/DepthFirstIterator.h"
19 #include "llvm/ADT/Hashing.h"
20 #include "llvm/ADT/None.h"
21 #include "llvm/ADT/Optional.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/ADT/SmallPtrSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/iterator.h"
26 #include "llvm/ADT/iterator_range.h"
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Analysis/IteratedDominanceFrontier.h"
29 #include "llvm/Analysis/MemoryLocation.h"
30 #include "llvm/IR/AssemblyAnnotationWriter.h"
31 #include "llvm/IR/BasicBlock.h"
32 #include "llvm/IR/CallSite.h"
33 #include "llvm/IR/Dominators.h"
34 #include "llvm/IR/Function.h"
35 #include "llvm/IR/Instruction.h"
36 #include "llvm/IR/Instructions.h"
37 #include "llvm/IR/IntrinsicInst.h"
38 #include "llvm/IR/Intrinsics.h"
39 #include "llvm/IR/LLVMContext.h"
40 #include "llvm/IR/PassManager.h"
41 #include "llvm/IR/Use.h"
42 #include "llvm/Pass.h"
43 #include "llvm/Support/AtomicOrdering.h"
44 #include "llvm/Support/Casting.h"
45 #include "llvm/Support/CommandLine.h"
46 #include "llvm/Support/Compiler.h"
47 #include "llvm/Support/Debug.h"
48 #include "llvm/Support/ErrorHandling.h"
49 #include "llvm/Support/FormattedStream.h"
50 #include "llvm/Support/raw_ostream.h"
51 #include <algorithm>
52 #include <cassert>
53 #include <iterator>
54 #include <memory>
55 #include <utility>
56 
57 using namespace llvm;
58 
59 #define DEBUG_TYPE "memoryssa"
60 
61 INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
62  true)
63 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
64 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
65 INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
66  true)
67 
68 INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
69  "Memory SSA Printer", false, false)
70 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
71 INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
72  "Memory SSA Printer", false, false)
73 
74 static cl::opt<unsigned> MaxCheckLimit(
75  "memssa-check-limit", cl::Hidden, cl::init(100),
76  cl::desc("The maximum number of stores/phis MemorySSA "
77  "will consider trying to walk past (default = 100)"));
78 
79 static cl::opt<bool>
80  VerifyMemorySSA("verify-memoryssa", cl::init(false), cl::Hidden,
81  cl::desc("Verify MemorySSA in legacy printer pass."));
82 
83 namespace llvm {
84 
85 /// \brief An assembly annotator class to print Memory SSA information in
86 /// comments.
87 class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
88  friend class MemorySSA;
89 
90  const MemorySSA *MSSA;
91 
92 public:
93  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}
94 
95  void emitBasicBlockStartAnnot(const BasicBlock *BB,
96  formatted_raw_ostream &OS) override {
97  if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
98  OS << "; " << *MA << "\n";
99  }
100 
101  void emitInstructionAnnot(const Instruction *I,
102  formatted_raw_ostream &OS) override {
103  if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
104  OS << "; " << *MA << "\n";
105  }
106 };
107 
108 } // end namespace llvm
109 
110 namespace {
111 
112 /// Our current alias analysis API differentiates heavily between calls and
113 /// non-calls, and functions called on one usually assert on the other.
114 /// This class encapsulates the distinction to simplify other code that wants
115 /// "Memory affecting instructions and related data" to use as a key.
116 /// For example, this class is used as a DenseMap key in the use optimizer.
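/// A sketch of the two key shapes (illustrative, not from the original
/// source; the IR names are made up):
///   call void @f(i8* %p)     -> MemoryLocOrCall with IsCall == true; the
///                               call site is the key
///   %v = load i32, i32* %q   -> MemoryLocOrCall with IsCall == false; the
///                               MemoryLocation of %q is the key
/// Both shapes hash and compare via the DenseMapInfo specialization below.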
117 class MemoryLocOrCall {
118 public:
119  bool IsCall = false;
120 
121  MemoryLocOrCall() = default;
122  MemoryLocOrCall(MemoryUseOrDef *MUD)
123  : MemoryLocOrCall(MUD->getMemoryInst()) {}
124  MemoryLocOrCall(const MemoryUseOrDef *MUD)
125  : MemoryLocOrCall(MUD->getMemoryInst()) {}
126 
127  MemoryLocOrCall(Instruction *Inst) {
128  if (ImmutableCallSite(Inst)) {
129  IsCall = true;
130  CS = ImmutableCallSite(Inst);
131  } else {
132  IsCall = false;
133  // There is no such thing as a MemoryLocation for a fence inst, and it is
134  // unique in that regard.
135  if (!isa<FenceInst>(Inst))
136  Loc = MemoryLocation::get(Inst);
137  }
138  }
139 
140  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}
141 
142  ImmutableCallSite getCS() const {
143  assert(IsCall);
144  return CS;
145  }
146 
147  MemoryLocation getLoc() const {
148  assert(!IsCall);
149  return Loc;
150  }
151 
152  bool operator==(const MemoryLocOrCall &Other) const {
153  if (IsCall != Other.IsCall)
154  return false;
155 
156  if (IsCall)
157  return CS.getCalledValue() == Other.CS.getCalledValue();
158  return Loc == Other.Loc;
159  }
160 
161 private:
162  union {
163  ImmutableCallSite CS;
164  MemoryLocation Loc;
165  };
166 };
167 
168 } // end anonymous namespace
169 
170 namespace llvm {
171 
172 template <> struct DenseMapInfo<MemoryLocOrCall> {
173  static inline MemoryLocOrCall getEmptyKey() {
174  return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
175  }
176 
177  static inline MemoryLocOrCall getTombstoneKey() {
178  return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
179  }
180 
181  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
182  if (MLOC.IsCall)
183  return hash_combine(MLOC.IsCall,
184  DenseMapInfo<const Value *>::getHashValue(
185  MLOC.getCS().getCalledValue()));
186  return hash_combine(
187  MLOC.IsCall, DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));
188  }
189 
190  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
191  return LHS == RHS;
192  }
193 };
194 
196 
197 } // end namespace llvm
198 
199 /// This does one-way checks to see if Use could theoretically be hoisted above
200 /// MayClobber. This will not check the other way around.
201 ///
202 /// This assumes that, for the purposes of MemorySSA, Use comes directly after
203 /// MayClobber, with no potentially clobbering operations in between them.
204 /// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
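/// For example (an illustrative sketch; assumes %p and %q may alias):
///   %use = load i32, i32* %p              ; plain load
///   %clobber = load volatile i32, i32* %q ; volatile load
/// yields Reorderability::IfNoAlias: the plain load may be hoisted above
/// the volatile one only if the two locations do not alias. If both loads
/// were volatile, the result would be Reorderability::Never.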
205 static Reorderability getLoadReorderability(const LoadInst *Use,
206  const LoadInst *MayClobber) {
207  bool VolatileUse = Use->isVolatile();
208  bool VolatileClobber = MayClobber->isVolatile();
209  // Volatile operations may never be reordered with other volatile operations.
210  if (VolatileUse && VolatileClobber)
211  return Reorderability::Never;
212 
213  // The lang ref allows reordering of volatile and non-volatile operations.
214  // Whether an aliasing nonvolatile load and volatile load can be reordered,
215  // though, is ambiguous. Because it may not be best to exploit this ambiguity,
216  // we only allow volatile/non-volatile reordering if the volatile and
217  // non-volatile operations don't alias.
218  Reorderability Result = VolatileUse || VolatileClobber
219  ? Reorderability::IfNoAlias
220  : Reorderability::Always;
221 
222  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
223  // is weaker, it can be moved above other loads. We just need to be sure that
224  // MayClobber isn't an acquire load, because loads can't be moved above
225  // acquire loads.
226  //
227  // Note that this explicitly *does* allow the free reordering of monotonic (or
228  // weaker) loads of the same address.
229  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
230  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
231  AtomicOrdering::Acquire);
232  if (SeqCstUse || MayClobberIsAcquire)
233  return Reorderability::Never;
234  return Result;
235 }
236 
237 static bool instructionClobbersQuery(MemoryDef *MD,
238  const MemoryLocation &UseLoc,
239  const Instruction *UseInst,
240  AliasAnalysis &AA) {
241  Instruction *DefInst = MD->getMemoryInst();
242  assert(DefInst && "Defining instruction not actually an instruction");
243  ImmutableCallSite UseCS(UseInst);
244 
245  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
246  // These intrinsics will show up as affecting memory, but they are just
247  // markers.
248  switch (II->getIntrinsicID()) {
249  case Intrinsic::lifetime_start:
250  if (UseCS)
251  return false;
252  return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), UseLoc);
253  case Intrinsic::lifetime_end:
254  case Intrinsic::invariant_start:
255  case Intrinsic::invariant_end:
256  case Intrinsic::assume:
257  return false;
258  default:
259  break;
260  }
261  }
262 
263  if (UseCS) {
264  ModRefInfo I = AA.getModRefInfo(DefInst, UseCS);
265  return I != MRI_NoModRef;
266  }
267 
268  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst)) {
269  if (auto *UseLoad = dyn_cast<LoadInst>(UseInst)) {
270  switch (getLoadReorderability(UseLoad, DefLoad)) {
271  case Reorderability::Always:
272  return false;
273  case Reorderability::Never:
274  return true;
275  case Reorderability::IfNoAlias:
276  return !AA.isNoAlias(UseLoc, MemoryLocation::get(DefLoad));
277  }
278  }
279  }
280 
281  return AA.getModRefInfo(DefInst, UseLoc) & MRI_Mod;
282 }
283 
284 static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUseOrDef *MU,
285  const MemoryLocOrCall &UseMLOC,
286  AliasAnalysis &AA) {
287  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
288  // to exist while MemoryLocOrCall is pushed through places.
289  if (UseMLOC.IsCall)
290  return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
291  AA);
292  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
293  AA);
294 }
295 
296 // Return true when MD may alias MU, return false otherwise.
297 bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
298  AliasAnalysis &AA) {
299  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA);
300 }
301 
302 namespace {
303 
304 struct UpwardsMemoryQuery {
305  // True if our original query started off as a call
306  bool IsCall = false;
307  // The pointer location we started the query with. This will be empty if
308  // IsCall is true.
309  MemoryLocation StartingLoc;
310  // This is the instruction we were querying about.
311  const Instruction *Inst = nullptr;
312  // The MemoryAccess we actually got called with, used to test local domination
313  const MemoryAccess *OriginalAccess = nullptr;
314 
315  UpwardsMemoryQuery() = default;
316 
317  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
318  : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) {
319  if (!IsCall)
320  StartingLoc = MemoryLocation::get(Inst);
321  }
322 };
323 
324 } // end anonymous namespace
325 
326 static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
327  AliasAnalysis &AA) {
328  Instruction *Inst = MD->getMemoryInst();
329  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
330  switch (II->getIntrinsicID()) {
331  case Intrinsic::lifetime_end:
332  return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
333  default:
334  return false;
335  }
336  }
337  return false;
338 }
339 
340 static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
341  const Instruction *I) {
342  // If the memory can't be changed, then loads of the memory can't be
343  // clobbered.
344  //
345  // FIXME: We should handle invariant groups, as well. It's a bit harder,
346  // because we need to pay close attention to invariant group barriers.
347  return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
348  AA.pointsToConstantMemory(cast<LoadInst>(I)->
349  getPointerOperand()));
350 }
351 
352 /// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
353 /// in between `Start` and `ClobberAt` clobbers `Start`.
354 ///
355 /// This is meant to be as simple and self-contained as possible. Because it
356 /// uses no cache, etc., it can be relatively expensive.
357 ///
358 /// \param Start The MemoryAccess that we want to walk from.
359 /// \param ClobberAt A clobber for Start.
360 /// \param StartLoc The MemoryLocation for Start.
361 /// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
362 /// \param Query The UpwardsMemoryQuery we used for our search.
363 /// \param AA The AliasAnalysis we used for our search.
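/// A small illustrative instance (not from the original source): given
///   1 = MemoryDef(liveOnEntry)  ; store i8 0, i8* %a
///   2 = MemoryDef(1)            ; store i8 0, i8* %b  (noalias with %a)
///   MemoryUse(1)                ; load i8, i8* %a   <- Start
/// with ClobberAt == 1, the walk from Start may pass 2 (it does not clobber
/// %a) and must observe 1 acting as a clobber when it reaches it.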
364 static void LLVM_ATTRIBUTE_UNUSED
365 checkClobberSanity(MemoryAccess *Start, MemoryAccess *ClobberAt,
366  const MemoryLocation &StartLoc, const MemorySSA &MSSA,
367  const UpwardsMemoryQuery &Query, AliasAnalysis &AA) {
368  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
369 
370  if (MSSA.isLiveOnEntryDef(Start)) {
371  assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
372  "liveOnEntry must clobber itself");
373  return;
374  }
375 
376  bool FoundClobber = false;
377  DenseSet<MemoryAccessPair> VisitedPhis;
378  SmallVector<MemoryAccessPair, 8> Worklist;
379  Worklist.emplace_back(Start, StartLoc);
380  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
381  // is found, complain.
382  while (!Worklist.empty()) {
383  MemoryAccessPair MAP = Worklist.pop_back_val();
384  // All we care about is that nothing from Start to ClobberAt clobbers Start.
385  // We learn nothing from revisiting nodes.
386  if (!VisitedPhis.insert(MAP).second)
387  continue;
388 
389  for (MemoryAccess *MA : def_chain(MAP.first)) {
390  if (MA == ClobberAt) {
391  if (auto *MD = dyn_cast<MemoryDef>(MA)) {
392  // instructionClobbersQuery isn't essentially free, so don't use `|=`,
393  // since it won't let us short-circuit.
394  //
395  // Also, note that this can't be hoisted out of the `Worklist` loop,
396  // since MD may only act as a clobber for 1 of N MemoryLocations.
397  FoundClobber =
398  FoundClobber || MSSA.isLiveOnEntryDef(MD) ||
399  instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
400  }
401  break;
402  }
403 
404  // We should never hit liveOnEntry, unless it's the clobber.
405  assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");
406 
407  if (auto *MD = dyn_cast<MemoryDef>(MA)) {
408  (void)MD;
409  assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) &&
410  "Found clobber before reaching ClobberAt!");
411  continue;
412  }
413 
414  assert(isa<MemoryPhi>(MA));
415  Worklist.append(upward_defs_begin({MA, MAP.second}), upward_defs_end());
416  }
417  }
418 
419  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
420  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
421  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
422  "ClobberAt never acted as a clobber");
423 }
424 
425 namespace {
426 
427 /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
428 /// in one class.
429 class ClobberWalker {
430  /// Save a few bytes by using unsigned instead of size_t.
431  using ListIndex = unsigned;
432 
433  /// Represents a span of contiguous MemoryDefs, potentially ending in a
434  /// MemoryPhi.
435  struct DefPath {
436  MemoryLocation Loc;
437  // Note that, because we always walk in reverse, Last will always dominate
438  // First. Also note that First and Last are inclusive.
439  MemoryAccess *First;
440  MemoryAccess *Last;
441  Optional<ListIndex> Previous;
442 
443  DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
444  Optional<ListIndex> Previous)
445  : Loc(Loc), First(First), Last(Last), Previous(Previous) {}
446 
447  DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
448  Optional<ListIndex> Previous)
449  : DefPath(Loc, Init, Init, Previous) {}
450  };
451 
452  const MemorySSA &MSSA;
453  AliasAnalysis &AA;
454  DominatorTree &DT;
455  UpwardsMemoryQuery *Query;
456 
457  // Phi optimization bookkeeping
458  SmallVector<DefPath, 32> Paths;
459  DenseSet<ConstMemoryAccessPair> VisitedPhis;
460 
461  /// Find the nearest def or phi that `From` can legally be optimized to.
462  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
463  assert(From->getNumOperands() && "Phi with no operands?");
464 
465  BasicBlock *BB = From->getBlock();
466  MemoryAccess *Result = MSSA.getLiveOnEntryDef();
467  DomTreeNode *Node = DT.getNode(BB);
468  while ((Node = Node->getIDom())) {
469  auto *Defs = MSSA.getBlockDefs(Node->getBlock());
470  if (Defs)
471  return &*Defs->rbegin();
472  }
473  return Result;
474  }
475 
476  /// Result of calling walkToPhiOrClobber.
477  struct UpwardsWalkResult {
478  /// The "Result" of the walk. Either a clobber, the last thing we walked, or
479  /// both.
480  MemoryAccess *Result;
481  bool IsKnownClobber;
482  };
483 
484  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
485  /// This will update Desc.Last as it walks. It will (optionally) also stop at
486  /// StopAt.
487  ///
488  /// This does not test for whether StopAt is a clobber
489  UpwardsWalkResult
490  walkToPhiOrClobber(DefPath &Desc,
491  const MemoryAccess *StopAt = nullptr) const {
492  assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
493 
494  for (MemoryAccess *Current : def_chain(Desc.Last)) {
495  Desc.Last = Current;
496  if (Current == StopAt)
497  return {Current, false};
498 
499  if (auto *MD = dyn_cast<MemoryDef>(Current))
500  if (MSSA.isLiveOnEntryDef(MD) ||
501  instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA))
502  return {MD, true};
503  }
504 
505  assert(isa<MemoryPhi>(Desc.Last) &&
506  "Ended at a non-clobber that's not a phi?");
507  return {Desc.Last, false};
508  }
509 
510  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
511  ListIndex PriorNode) {
512  auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
513  upward_defs_end());
514  for (const MemoryAccessPair &P : UpwardDefs) {
515  PausedSearches.push_back(Paths.size());
516  Paths.emplace_back(P.second, P.first, PriorNode);
517  }
518  }
519 
520  /// Represents a search that terminated after finding a clobber. This clobber
521  /// may or may not be present in the path of defs from LastNode..SearchStart,
522  /// since it may have been retrieved from cache.
523  struct TerminatedPath {
524  MemoryAccess *Clobber;
525  ListIndex LastNode;
526  };
527 
528  /// Get an access that keeps us from optimizing to the given phi.
529  ///
530  /// PausedSearches is an array of indices into the Paths array. Its incoming
531  /// value is the indices of searches that stopped at the last phi optimization
532  /// target. It's left in an unspecified state.
533  ///
534  /// If this returns None, NewPaused is a vector of searches that terminated
535  /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
536  Optional<TerminatedPath>
537  getBlockingAccess(const MemoryAccess *StopWhere,
538  SmallVectorImpl<ListIndex> &PausedSearches,
539  SmallVectorImpl<ListIndex> &NewPaused,
540  SmallVectorImpl<TerminatedPath> &Terminated) {
541  assert(!PausedSearches.empty() && "No searches to continue?");
542 
543  // BFS vs DFS really doesn't make a difference here, so just do a DFS with
544  // PausedSearches as our stack.
545  while (!PausedSearches.empty()) {
546  ListIndex PathIndex = PausedSearches.pop_back_val();
547  DefPath &Node = Paths[PathIndex];
548 
549  // If we've already visited this path with this MemoryLocation, we don't
550  // need to do so again.
551  //
552  // NOTE: That we just drop these paths on the ground makes caching
553  // behavior sporadic. e.g. given a diamond:
554  // A
555  // B C
556  // D
557  //
558  // ...If we walk D, B, A, C, we'll only cache the result of phi
559  // optimization for A, B, and D; C will be skipped because it dies here.
560  // This arguably isn't the worst thing ever, since:
561  // - We generally query things in a top-down order, so if we got below D
562  // without needing cache entries for {C, MemLoc}, then chances are
563  // that those cache entries would end up ultimately unused.
564  // - We still cache things for A, so C only needs to walk up a bit.
565  // If this behavior becomes problematic, we can fix without a ton of extra
566  // work.
567  if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
568  continue;
569 
570  UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere);
571  if (Res.IsKnownClobber) {
572  assert(Res.Result != StopWhere);
573  // If this wasn't a cache hit, we hit a clobber when walking. That's a
574  // failure.
575  TerminatedPath Term{Res.Result, PathIndex};
576  if (!MSSA.dominates(Res.Result, StopWhere))
577  return Term;
578 
579  // Otherwise, it's a valid thing to potentially optimize to.
580  Terminated.push_back(Term);
581  continue;
582  }
583 
584  if (Res.Result == StopWhere) {
585  // We've hit our target. Save this path off for if we want to continue
586  // walking.
587  NewPaused.push_back(PathIndex);
588  continue;
589  }
590 
591  assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
592  addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
593  }
594 
595  return None;
596  }
597 
598  template <typename T, typename Walker>
599  struct generic_def_path_iterator
600  : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
601  std::forward_iterator_tag, T *> {
602  generic_def_path_iterator() = default;
603  generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}
604 
605  T &operator*() const { return curNode(); }
606 
607  generic_def_path_iterator &operator++() {
608  N = curNode().Previous;
609  return *this;
610  }
611 
612  bool operator==(const generic_def_path_iterator &O) const {
613  if (N.hasValue() != O.N.hasValue())
614  return false;
615  return !N.hasValue() || *N == *O.N;
616  }
617 
618  private:
619  T &curNode() const { return W->Paths[*N]; }
620 
621  Walker *W = nullptr;
622  Optional<ListIndex> N = None;
623  };
624 
625  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
626  using const_def_path_iterator =
627  generic_def_path_iterator<const DefPath, const ClobberWalker>;
628 
629  iterator_range<def_path_iterator> def_path(ListIndex From) {
630  return make_range(def_path_iterator(this, From), def_path_iterator());
631  }
632 
633  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
634  return make_range(const_def_path_iterator(this, From),
635  const_def_path_iterator());
636  }
637 
638  struct OptznResult {
639  /// The path that contains our result.
640  TerminatedPath PrimaryClobber;
641  /// The paths that we can legally cache back from, but that aren't
642  /// necessarily the result of the Phi optimization.
643  SmallVector<TerminatedPath, 4> OtherClobbers;
644  };
645 
646  ListIndex defPathIndex(const DefPath &N) const {
647  // The assert looks nicer if we don't need to do &N
648  const DefPath *NP = &N;
649  assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
650  "Out of bounds DefPath!");
651  return NP - &Paths.front();
652  }
653 
654  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
655  /// that act as legal clobbers. Note that this won't return *all* clobbers.
656  ///
657  /// Phi optimization algorithm tl;dr:
658  /// - Find the earliest def/phi, A, we can optimize to
659  /// - Find if all paths from the starting memory access ultimately reach A
660  /// - If not, optimization isn't possible.
661  /// - Otherwise, walk from A to another clobber or phi, A'.
662  /// - If A' is a def, we're done.
663  /// - If A' is a phi, try to optimize it.
664  ///
665  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
666  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
667  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
668  const MemoryLocation &Loc) {
669  assert(Paths.empty() && VisitedPhis.empty() &&
670  "Reset the optimization state.");
671 
672  Paths.emplace_back(Loc, Start, Phi, None);
673  // Stores how many "valid" optimization nodes we had prior to calling
674  // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
675  auto PriorPathsSize = Paths.size();
676 
677  SmallVector<ListIndex, 16> PausedSearches;
678  SmallVector<ListIndex, 8> NewPaused;
679  SmallVector<TerminatedPath, 4> TerminatedPaths;
680 
681  addSearches(Phi, PausedSearches, 0);
682 
683  // Moves the TerminatedPath with the "most dominated" Clobber to the end of
684  // Paths.
685  auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
686  assert(!Paths.empty() && "Need a path to move");
687  auto Dom = Paths.begin();
688  for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
689  if (!MSSA.dominates(I->Clobber, Dom->Clobber))
690  Dom = I;
691  auto Last = Paths.end() - 1;
692  if (Last != Dom)
693  std::iter_swap(Last, Dom);
694  };
695 
696  MemoryPhi *Current = Phi;
697  while (true) {
698  assert(!MSSA.isLiveOnEntryDef(Current) &&
699  "liveOnEntry wasn't treated as a clobber?");
700 
701  const auto *Target = getWalkTarget(Current);
702  // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
703  // optimization for the prior phi.
704  assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
705  return MSSA.dominates(P.Clobber, Target);
706  }));
707 
708  // FIXME: This is broken, because the Blocker may be reported to be
709  // liveOnEntry, and we'll happily wait for that to disappear (read: never)
710  // For the moment, this is fine, since we do nothing with blocker info.
711  if (Optional<TerminatedPath> Blocker = getBlockingAccess(
712  Target, PausedSearches, NewPaused, TerminatedPaths)) {
713 
714  // Find the node we started at. We can't search based on N->Last, since
715  // we may have gone around a loop with a different MemoryLocation.
716  auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
717  return defPathIndex(N) < PriorPathsSize;
718  });
719  assert(Iter != def_path_iterator());
720 
721  DefPath &CurNode = *Iter;
722  assert(CurNode.Last == Current);
723 
724  // Two things:
725  // A. We can't reliably cache all of NewPaused back. Consider a case
726  // where we have two paths in NewPaused; one of which can't optimize
727  // above this phi, whereas the other can. If we cache the second path
728  // back, we'll end up with suboptimal cache entries. We can handle
729  // cases like this a bit better when we either try to find all
730  // clobbers that block phi optimization, or when our cache starts
731  // supporting unfinished searches.
732  // B. We can't reliably cache TerminatedPaths back here without doing
733  // extra checks; consider a case like:
734  // T
735  // / \
736  // D C
737  // \ /
738  // S
739  // Where T is our target, C is a node with a clobber on it, D is a
740  // diamond (with a clobber *only* on the left or right node, N), and
741  // S is our start. Say we walk to D, through the node opposite N
742  // (read: ignoring the clobber), and see a cache entry in the top
743  // node of D. That cache entry gets put into TerminatedPaths. We then
744  // walk up to C (N is later in our worklist), find the clobber, and
745  // quit. If we append TerminatedPaths to OtherClobbers, we'll cache
746  // the bottom part of D to the cached clobber, ignoring the clobber
747  // in N. Again, this problem goes away if we start tracking all
748  // blockers for a given phi optimization.
749  TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
750  return {Result, {}};
751  }
752 
753  // If there's nothing left to search, then all paths led to valid clobbers
754  // that we got from our cache; pick the nearest to the start, and allow
755  // the rest to be cached back.
756  if (NewPaused.empty()) {
757  MoveDominatedPathToEnd(TerminatedPaths);
758  TerminatedPath Result = TerminatedPaths.pop_back_val();
759  return {Result, std::move(TerminatedPaths)};
760  }
761 
762  MemoryAccess *DefChainEnd = nullptr;
763  SmallVector<TerminatedPath, 4> Clobbers;
764  for (ListIndex Paused : NewPaused) {
765  UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
766  if (WR.IsKnownClobber)
767  Clobbers.push_back({WR.Result, Paused});
768  else
769  // Micro-opt: If we hit the end of the chain, save it.
770  DefChainEnd = WR.Result;
771  }
772 
773  if (!TerminatedPaths.empty()) {
774  // If we couldn't find the dominating phi/liveOnEntry in the above loop,
775  // do it now.
776  if (!DefChainEnd)
777  for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
778  DefChainEnd = MA;
779 
780  // If any of the terminated paths don't dominate the phi we'll try to
781  // optimize, we need to figure out what they are and quit.
782  const BasicBlock *ChainBB = DefChainEnd->getBlock();
783  for (const TerminatedPath &TP : TerminatedPaths) {
784  // Because we know that DefChainEnd is as "high" as we can go, we
785  // don't need local dominance checks; BB dominance is sufficient.
786  if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
787  Clobbers.push_back(TP);
788  }
789  }
790 
791  // If we have clobbers in the def chain, find the one closest to Current
792  // and quit.
793  if (!Clobbers.empty()) {
794  MoveDominatedPathToEnd(Clobbers);
795  TerminatedPath Result = Clobbers.pop_back_val();
796  return {Result, std::move(Clobbers)};
797  }
798 
799  assert(all_of(NewPaused,
800  [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));
801 
802  // Because liveOnEntry is a clobber, this must be a phi.
803  auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
804 
805  PriorPathsSize = Paths.size();
806  PausedSearches.clear();
807  for (ListIndex I : NewPaused)
808  addSearches(DefChainPhi, PausedSearches, I);
809  NewPaused.clear();
810 
811  Current = DefChainPhi;
812  }
813  }
814 
815  void verifyOptResult(const OptznResult &R) const {
816  assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
817  return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
818  }));
819  }
820 
821  void resetPhiOptznState() {
822  Paths.clear();
823  VisitedPhis.clear();
824  }
825 
826 public:
827  ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT)
828  : MSSA(MSSA), AA(AA), DT(DT) {}
829 
830  void reset() {}
831 
832  /// Finds the nearest clobber for the given query, optimizing phis if
833  /// possible.
834  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) {
835  Query = &Q;
836 
837  MemoryAccess *Current = Start;
838  // This walker pretends uses don't exist. If we're handed one, silently grab
839  // its def. (This has the nice side-effect of ensuring we never cache uses)
840  if (auto *MU = dyn_cast<MemoryUse>(Start))
841  Current = MU->getDefiningAccess();
842 
843  DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
844  // Fast path for the overly-common case (no crazy phi optimization
845  // necessary)
846  UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
847  MemoryAccess *Result;
848  if (WalkResult.IsKnownClobber) {
849  Result = WalkResult.Result;
850  } else {
851  OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
852  Current, Q.StartingLoc);
853  verifyOptResult(OptRes);
854  resetPhiOptznState();
855  Result = OptRes.PrimaryClobber.Clobber;
856  }
857 
858 #ifdef EXPENSIVE_CHECKS
859  checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
860 #endif
861  return Result;
862  }
863 
864  void verify(const MemorySSA *MSSA) { assert(MSSA == &this->MSSA); }
865 };
866 
867 struct RenamePassData {
868  DomTreeNode *DTN;
869  DomTreeNode::const_iterator ChildIt;
870  MemoryAccess *IncomingVal;
871 
872  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
873  MemoryAccess *M)
874  : DTN(D), ChildIt(It), IncomingVal(M) {}
875 
876  void swap(RenamePassData &RHS) {
877  std::swap(DTN, RHS.DTN);
878  std::swap(ChildIt, RHS.ChildIt);
879  std::swap(IncomingVal, RHS.IncomingVal);
880  }
881 };
882 
883 } // end anonymous namespace
884 
885 namespace llvm {
886 
887 /// \brief A MemorySSAWalker that does AA walks to disambiguate accesses. It no
888 /// longer does caching on its own,
889 /// but the name has been retained for the moment.
890 class MemorySSA::CachingWalker final : public MemorySSAWalker {
891  ClobberWalker Walker;
892  bool AutoResetWalker = true;
893 
894  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);
895  void verifyRemoved(MemoryAccess *);
896 
897 public:
898  CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
899  ~CachingWalker() override = default;
900 
901  using MemorySSAWalker::getClobberingMemoryAccess;
902 
903  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
904  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
905  const MemoryLocation &) override;
906  void invalidateInfo(MemoryAccess *) override;
907 
908  /// Whether we call resetClobberWalker() after each time we *actually* walk to
909  /// answer a clobber query.
910  void setAutoResetWalker(bool AutoReset) { AutoResetWalker = AutoReset; }
911 
912  /// Drop the walker's persistent data structures.
913  void resetClobberWalker() { Walker.reset(); }
914 
915  void verify(const MemorySSA *MSSA) override {
916  MemorySSAWalker::verify(MSSA);
917  Walker.verify(MSSA);
918  }
919 };
920 
921 } // end namespace llvm
922 
923 void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
924  bool RenameAllUses) {
925  // Pass through values to our successors
926  for (const BasicBlock *S : successors(BB)) {
927  auto It = PerBlockAccesses.find(S);
928  // Rename the phi nodes in our successor block
929  if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
930  continue;
931  AccessList *Accesses = It->second.get();
932  auto *Phi = cast<MemoryPhi>(&Accesses->front());
933  if (RenameAllUses) {
934  int PhiIndex = Phi->getBasicBlockIndex(BB);
935  assert(PhiIndex != -1 && "Incomplete phi during partial rename");
936  Phi->setIncomingValue(PhiIndex, IncomingVal);
937  } else
938  Phi->addIncoming(IncomingVal, BB);
939  }
940 }
941 
942 /// \brief Rename a single basic block into MemorySSA form.
943 /// Uses the standard SSA renaming algorithm.
944 /// \returns The new incoming value.
945 MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
946  bool RenameAllUses) {
947  auto It = PerBlockAccesses.find(BB);
948  // Skip most processing if the list is empty.
949  if (It != PerBlockAccesses.end()) {
950  AccessList *Accesses = It->second.get();
951  for (MemoryAccess &L : *Accesses) {
952  if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
953  if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
954  MUD->setDefiningAccess(IncomingVal);
955  if (isa<MemoryDef>(&L))
956  IncomingVal = &L;
957  } else {
958  IncomingVal = &L;
959  }
960  }
961  }
962  return IncomingVal;
963 }
964 
965 /// \brief This is the standard SSA renaming algorithm.
966 ///
967 /// We walk the dominator tree in preorder, renaming accesses, and then filling
968 /// in phi nodes in our successors.
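/// A short illustrative trace (not from the original source): for
///   entry:  1 = MemoryDef(liveOnEntry)  ; store
///           2 = MemoryDef(1)            ; store
///   next:   MemoryUse(2)                ; load, in entry's successor
/// the walk enters `entry` with IncomingVal == liveOnEntry, updates it to 1
/// and then 2 at each def, fills phis in successors with 2, and descends
/// into dominated blocks carrying 2 as the incoming value.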
969 void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
970  SmallPtrSetImpl<BasicBlock *> &Visited,
971  bool SkipVisited, bool RenameAllUses) {
972  SmallVector<RenamePassData, 32> WorkStack;
973  // Skip everything if we already renamed this block and we are skipping.
974  // Note: You can't sink this into the if, because we need it to occur
975  // regardless of whether we skip blocks or not.
976  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
977  if (SkipVisited && AlreadyVisited)
978  return;
979 
980  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
981  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
982  WorkStack.push_back({Root, Root->begin(), IncomingVal});
983 
984  while (!WorkStack.empty()) {
985  DomTreeNode *Node = WorkStack.back().DTN;
986  DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
987  IncomingVal = WorkStack.back().IncomingVal;
988 
989  if (ChildIt == Node->end()) {
990  WorkStack.pop_back();
991  } else {
992  DomTreeNode *Child = *ChildIt;
993  ++WorkStack.back().ChildIt;
994  BasicBlock *BB = Child->getBlock();
995  // Note: You can't sink this into the if, because we need it to occur
996  // regardless of whether we skip blocks or not.
997  AlreadyVisited = !Visited.insert(BB).second;
998  if (SkipVisited && AlreadyVisited) {
999  // We already visited this during our renaming, which can happen when
1000  // being asked to rename multiple blocks. Figure out the incoming val,
1001  // which is the last def.
1002  // Incoming value can only change if there is a block def, and in that
1003  // case, it's the last block def in the list.
1004  if (auto *BlockDefs = getWritableBlockDefs(BB))
1005  IncomingVal = &*BlockDefs->rbegin();
1006  } else
1007  IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
1008  renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
1009  WorkStack.push_back({Child, Child->begin(), IncomingVal});
1010  }
1011  }
1012 }
1013 
1014 /// \brief This handles unreachable block accesses by deleting phi nodes in
1015 /// unreachable blocks, and marking all other unreachable MemoryAccess's as
1016 /// being uses of the live on entry definition.
1017 void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
1018  assert(!DT->isReachableFromEntry(BB) &&
1019  "Reachable block found while handling unreachable blocks");
1020 
1021  // Make sure phi nodes in our reachable successors end up with a
1022  // LiveOnEntryDef for our incoming edge, even though our block is forward
1023  // unreachable. We could just disconnect these blocks from the CFG fully,
1024  // but we do not right now.
1025  for (const BasicBlock *S : successors(BB)) {
1026  if (!DT->isReachableFromEntry(S))
1027  continue;
1028  auto It = PerBlockAccesses.find(S);
1029  // Rename the phi nodes in our successor block
1030  if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1031  continue;
1032  AccessList *Accesses = It->second.get();
1033  auto *Phi = cast<MemoryPhi>(&Accesses->front());
1034  Phi->addIncoming(LiveOnEntryDef.get(), BB);
1035  }
1036 
1037  auto It = PerBlockAccesses.find(BB);
1038  if (It == PerBlockAccesses.end())
1039  return;
1040 
1041  auto &Accesses = It->second;
1042  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
1043  auto Next = std::next(AI);
1044  // If we have a phi, just remove it. We are going to replace all
1045  // users with live on entry.
1046  if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
1047  UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
1048  else
1049  Accesses->erase(AI);
1050  AI = Next;
1051  }
1052 }
1053 
1054 MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
1055  : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
1056  NextID(INVALID_MEMORYACCESS_ID) {
1057  buildMemorySSA();
1058 }
1059 
1060 MemorySSA::~MemorySSA() {
1061  // Drop all our references
1062  for (const auto &Pair : PerBlockAccesses)
1063  for (MemoryAccess &MA : *Pair.second)
1064  MA.dropAllReferences();
1065 }
1066 
1067 MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
1068  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
1069 
1070  if (Res.second)
1071  Res.first->second = llvm::make_unique<AccessList>();
1072  return Res.first->second.get();
1073 }
1074 
1075 MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
1076  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));
1077 
1078  if (Res.second)
1079  Res.first->second = llvm::make_unique<DefsList>();
1080  return Res.first->second.get();
1081 }
1082 
1083 namespace llvm {
1084 
1085 /// This class is a batch walker of all MemoryUse's in the program, and points
1086 /// their defining access at the thing that actually clobbers them. Because it
1087 /// is a batch walker that touches everything, it does not operate like the
1088 /// other walkers. This walker is basically performing a top-down SSA renaming
1089 /// pass, where the version stack is used as the cache. This enables it to be
1090 /// significantly more time and memory efficient than using the regular walker,
1091 /// which is walking bottom-up.
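/// For intuition, an illustrative trace of the version stack (not from the
/// original source):
///   1 = MemoryDef(liveOnEntry)  -> push: stack = [liveOnEntry, 1]
///   2 = MemoryDef(1)            -> push: stack = [liveOnEntry, 1, 2]
///   MemoryUse(...)              -> scan from the top of the stack for the
///                                  first def clobbering the use's location
/// Since blocks are visited in dominator-tree order, each use sees exactly
/// the stack of defs that can reach it.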
1092 class MemorySSA::OptimizeUses {
1093 public:
1094  OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA,
1095  DominatorTree *DT)
1096  : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) {
1097  Walker = MSSA->getWalker();
1098  }
1099 
1100  void optimizeUses();
1101 
1102 private:
1103  /// This represents where a given memorylocation is in the stack.
1104  struct MemlocStackInfo {
1105  // This essentially is keeping track of versions of the stack. Whenever
1106  // the stack changes due to pushes or pops, these versions increase.
1107  unsigned long StackEpoch;
1108  unsigned long PopEpoch;
1109  // This is the lower bound of places on the stack to check. It is equal to
1110  // the place the last stack walk ended.
1111  // Note: Correctness depends on this being initialized to 0, which densemap
1112  // does
1113  unsigned long LowerBound;
1114  const BasicBlock *LowerBoundBlock;
1115  // This is where the last walk for this memory location ended.
1116  unsigned long LastKill;
1117  bool LastKillValid;
1118  };
1119 
1120  void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
1121  SmallVectorImpl<MemoryAccess *> &,
1122  DenseMap<MemoryLocOrCall, MemlocStackInfo> &);
1123 
1124  MemorySSA *MSSA;
1125  MemorySSAWalker *Walker;
1126  AliasAnalysis *AA;
1127  DominatorTree *DT;
1128 };
1129 
1130 } // end namespace llvm
1131 
1132 /// Optimize the uses in a given block. This is basically the SSA renaming
1133 /// algorithm, with one caveat: We are able to use a single stack for all
1134 /// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
1135 /// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
1136 /// going to be some position in that stack of possible ones.
1137 ///
1138 /// We track the stack positions that each MemoryLocation needs
1139 /// to check, and last ended at. This is because we only want to check the
1140 /// things that changed since last time. The same MemoryLocation should
1141 /// get clobbered by the same store (getModRefInfo does not use invariantness or
1142 /// things like this, and if they start, we can modify MemoryLocOrCall to
1143 /// include relevant data)
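/// Concretely (an illustrative summary of the bookkeeping below): if neither
/// StackEpoch nor PopEpoch has changed since a location was last examined,
/// nothing relevant was pushed or popped, so the stack slice above LowerBound
/// is unchanged and the cached LastKill answer can be reused without issuing
/// new alias queries.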
1144 void MemorySSA::OptimizeUses::optimizeUsesInBlock(
1145  const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
1146  SmallVectorImpl<MemoryAccess *> &VersionStack,
1147  DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {
1148 
1149  // If no accesses, nothing to do.
1150  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
1151  if (Accesses == nullptr)
1152  return;
1153 
1154  // Pop everything that doesn't dominate the current block off the stack,
1155  // increment the PopEpoch to account for this.
1156  while (true) {
1157  assert(
1158  !VersionStack.empty() &&
1159  "Version stack should have liveOnEntry sentinel dominating everything");
1160  BasicBlock *BackBlock = VersionStack.back()->getBlock();
1161  if (DT->dominates(BackBlock, BB))
1162  break;
1163  while (VersionStack.back()->getBlock() == BackBlock)
1164  VersionStack.pop_back();
1165  ++PopEpoch;
1166  }
1167 
1168  for (MemoryAccess &MA : *Accesses) {
1169  auto *MU = dyn_cast<MemoryUse>(&MA);
1170  if (!MU) {
1171  VersionStack.push_back(&MA);
1172  ++StackEpoch;
1173  continue;
1174  }
1175 
1176  if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
1177  MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true);
1178  continue;
1179  }
1180 
1181  MemoryLocOrCall UseMLOC(MU);
1182  auto &LocInfo = LocStackInfo[UseMLOC];
1183  // If the pop epoch changed, it means we've removed stuff from top of
1184  // stack due to changing blocks. We may have to reset the lower bound or
1185  // last kill info.
1186  if (LocInfo.PopEpoch != PopEpoch) {
1187  LocInfo.PopEpoch = PopEpoch;
1188  LocInfo.StackEpoch = StackEpoch;
1189  // If the lower bound was in something that no longer dominates us, we
1190  // have to reset it.
1191  // We can't simply track stack size, because the stack may have had
1192  // pushes/pops in the meantime.
1193  // XXX: This is non-optimal, but is only slower in cases with heavily
1194  // branching dominator trees. To get the optimal number of queries, we
1195  // would make lowerbound and lastkill a per-loc stack, and pop it until
1196  // the top of that stack dominates us. This does not seem worth it ATM.
1197  // A much cheaper optimization would be to always explore the deepest
1198  // branch of the dominator tree first. This will guarantee this resets on
1199  // the smallest set of blocks.
1200  if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
1201  !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
1202  // Reset the lower bound of things to check.
1203  // TODO: Some day we should be able to reset to last kill, rather than
1204  // 0.
1205  LocInfo.LowerBound = 0;
1206  LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
1207  LocInfo.LastKillValid = false;
1208  }
1209  } else if (LocInfo.StackEpoch != StackEpoch) {
1210  // If all that has changed is the StackEpoch, we only have to check the
1211  // new things on the stack, because we've checked everything before. In
1212  // this case, the lower bound of things to check remains the same.
1213  LocInfo.PopEpoch = PopEpoch;
1214  LocInfo.StackEpoch = StackEpoch;
1215  }
1216  if (!LocInfo.LastKillValid) {
1217  LocInfo.LastKill = VersionStack.size() - 1;
1218  LocInfo.LastKillValid = true;
1219  }
1220 
1221  // At this point, we should have corrected last kill and LowerBound to be
1222  // in bounds.
1223  assert(LocInfo.LowerBound < VersionStack.size() &&
1224  "Lower bound out of range");
1225  assert(LocInfo.LastKill < VersionStack.size() &&
1226  "Last kill info out of range");
1227  // In any case, the new upper bound is the top of the stack.
1228  unsigned long UpperBound = VersionStack.size() - 1;
1229 
1230  if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
1231  DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
1232  << *(MU->getMemoryInst()) << ")"
1233  << " because there are " << UpperBound - LocInfo.LowerBound
1234  << " stores to disambiguate\n");
1235  // Because we did not walk, LastKill is no longer valid, as this may
1236  // have been a kill.
1237  LocInfo.LastKillValid = false;
1238  continue;
1239  }
1240  bool FoundClobberResult = false;
1241  while (UpperBound > LocInfo.LowerBound) {
1242  if (isa<MemoryPhi>(VersionStack[UpperBound])) {
1243  // For phis, use the walker, see where we ended up, go there
1244  Instruction *UseInst = MU->getMemoryInst();
1245  MemoryAccess *Result = Walker->getClobberingMemoryAccess(UseInst);
1246  // We are guaranteed to find it or something is wrong
1247  while (VersionStack[UpperBound] != Result) {
1248  assert(UpperBound != 0);
1249  --UpperBound;
1250  }
1251  FoundClobberResult = true;
1252  break;
1253  }
1254 
1255  MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
1256  // If the lifetime of the pointer ends at this instruction, it's live on
1257  // entry.
1258  if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
1259  // Reset UpperBound to liveOnEntryDef's place in the stack
1260  UpperBound = 0;
1261  FoundClobberResult = true;
1262  break;
1263  }
1264  if (instructionClobbersQuery(MD, MU, UseMLOC, *AA)) {
1265  FoundClobberResult = true;
1266  break;
1267  }
1268  --UpperBound;
1269  }
1270  // At the end of this loop, UpperBound is either a clobber or the lower
1271  // bound. PHI walking may cause it to be < LowerBound, and in fact, < LastKill.
1272  if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
1273  MU->setDefiningAccess(VersionStack[UpperBound], true);
1274  // Our last kill is now the access the walk stopped at.
1275  LocInfo.LastKill = UpperBound;
1276  } else {
1277  // Otherwise, we checked all the new ones, and now we know we can get to
1278  // LastKill.
1279  MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true);
1280  }
1281  LocInfo.LowerBound = VersionStack.size() - 1;
1282  LocInfo.LowerBoundBlock = BB;
1283  }
1284 }
1285 
1286 /// Optimize uses to point to their actual clobbering definitions.
1287 void MemorySSA::OptimizeUses::optimizeUses() {
1288  SmallVector<MemoryAccess *, 16> VersionStack;
1289  DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
1290  VersionStack.push_back(MSSA->getLiveOnEntryDef());
1291 
1292  unsigned long StackEpoch = 1;
1293  unsigned long PopEpoch = 1;
1294  // We perform a non-recursive top-down dominator tree walk.
1295  for (const auto *DomNode : depth_first(DT->getRootNode()))
1296  optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
1297  LocStackInfo);
1298 }
1299 
1300 void MemorySSA::placePHINodes(
1301  const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks,
1302  const DenseMap<const BasicBlock *, unsigned int> &BBNumbers) {
1303  // Determine where our MemoryPhi's should go
1304  ForwardIDFCalculator IDFs(*DT);
1305  IDFs.setDefiningBlocks(DefiningBlocks);
1306  SmallVector<BasicBlock *, 32> IDFBlocks;
1307  IDFs.calculate(IDFBlocks);
1308 
1309  std::sort(IDFBlocks.begin(), IDFBlocks.end(),
1310  [&BBNumbers](const BasicBlock *A, const BasicBlock *B) {
1311  return BBNumbers.lookup(A) < BBNumbers.lookup(B);
1312  });
1313 
1314  // Now place MemoryPhi nodes.
1315  for (auto &BB : IDFBlocks)
1316  createMemoryPhi(BB);
1317 }
1318 
1319 void MemorySSA::buildMemorySSA() {
1320  // We create an access to represent "live on entry", for things like
1321  // arguments or users of globals, where the memory they use is defined before
1322  // the beginning of the function. We do not actually insert it into the IR.
1323  // We do not define a live on exit for the immediate uses, and thus our
1324  // semantics do *not* imply that something with no immediate uses can simply
1325  // be removed.
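 // For example (illustrative): in
 //   define i32 @f(i32* %arg) { %v = load i32, i32* %arg ... }
 // the load reads memory written before the function begins, so its
 // defining access becomes this liveOnEntry def.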
1326  BasicBlock &StartingPoint = F.getEntryBlock();
1327  LiveOnEntryDef =
1328  llvm::make_unique<MemoryDef>(F.getContext(), nullptr, nullptr,
1329  &StartingPoint, NextID++);
1330  DenseMap<const BasicBlock *, unsigned int> BBNumbers;
1331  unsigned NextBBNum = 0;
1332 
1333  // We maintain lists of memory accesses per-block, trading memory for time. We
1334  // could just look up the memory access for every possible instruction in the
1335  // stream.
1336  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1337  // Go through each block, figure out where defs occur, and chain together all
1338  // the accesses.
1339  for (BasicBlock &B : F) {
1340  BBNumbers[&B] = NextBBNum++;
1341  bool InsertIntoDef = false;
1342  AccessList *Accesses = nullptr;
1343  DefsList *Defs = nullptr;
1344  for (Instruction &I : B) {
1345  MemoryUseOrDef *MUD = createNewAccess(&I);
1346  if (!MUD)
1347  continue;
1348 
1349  if (!Accesses)
1350  Accesses = getOrCreateAccessList(&B);
1351  Accesses->push_back(MUD);
1352  if (isa<MemoryDef>(MUD)) {
1353  InsertIntoDef = true;
1354  if (!Defs)
1355  Defs = getOrCreateDefsList(&B);
1356  Defs->push_back(*MUD);
1357  }
1358  }
1359  if (InsertIntoDef)
1360  DefiningBlocks.insert(&B);
1361  }
1362  placePHINodes(DefiningBlocks, BBNumbers);
1363 
1364  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1365  // filled in with all blocks.
1366  SmallPtrSet<BasicBlock *, 16> Visited;
1367  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1368 
1369  CachingWalker *Walker = getWalkerImpl();
1370 
1371  // We're doing a batch of updates; don't drop useful caches between them.
1372  Walker->setAutoResetWalker(false);
1373  OptimizeUses(this, Walker, AA, DT).optimizeUses();
1374  Walker->setAutoResetWalker(true);
1375  Walker->resetClobberWalker();
1376 
1377  // Mark the uses in unreachable blocks as live on entry, so that they go
1378  // somewhere.
1379  for (auto &BB : F)
1380  if (!Visited.count(&BB))
1381  markUnreachableAsLiveOnEntry(&BB);
1382 }
1383 
1384 MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
1385 
1386 MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
1387  if (Walker)
1388  return Walker.get();
1389 
1390  Walker = llvm::make_unique<CachingWalker>(this, AA, DT);
1391  return Walker.get();
1392 }
1393 
1394 // This is a helper function used by the creation routines. It places NewAccess
1395 // into the access and defs lists for a given basic block, at the given
1396 // insertion point.
1397 void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1398  const BasicBlock *BB,
1399  InsertionPlace Point) {
1400  auto *Accesses = getOrCreateAccessList(BB);
1401  if (Point == Beginning) {
1402  // If it's a phi node, it goes first, otherwise, it goes after any phi
1403  // nodes.
1404  if (isa<MemoryPhi>(NewAccess)) {
1405  Accesses->push_front(NewAccess);
1406  auto *Defs = getOrCreateDefsList(BB);
1407  Defs->push_front(*NewAccess);
1408  } else {
1409  auto AI = find_if_not(
1410  *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1411  Accesses->insert(AI, NewAccess);
1412  if (!isa<MemoryUse>(NewAccess)) {
1413  auto *Defs = getOrCreateDefsList(BB);
1414  auto DI = find_if_not(
1415  *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1416  Defs->insert(DI, *NewAccess);
1417  }
1418  }
1419  } else {
1420  Accesses->push_back(NewAccess);
1421  if (!isa<MemoryUse>(NewAccess)) {
1422  auto *Defs = getOrCreateDefsList(BB);
1423  Defs->push_back(*NewAccess);
1424  }
1425  }
1426  BlockNumberingValid.erase(BB);
1427 }
1428 
1429 void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
1430  AccessList::iterator InsertPt) {
1431  auto *Accesses = getWritableBlockAccesses(BB);
1432  bool WasEnd = InsertPt == Accesses->end();
1433  Accesses->insert(AccessList::iterator(InsertPt), What);
1434  if (!isa<MemoryUse>(What)) {
1435  auto *Defs = getOrCreateDefsList(BB);
1436  // If we got asked to insert at the end, we have an easy job, just shove it
1437  // at the end. If we got asked to insert before an existing def, we also get
1438  // an iterator. If we got asked to insert before a use, we have to hunt for
1439  // the next def.
1440  if (WasEnd) {
1441  Defs->push_back(*What);
1442  } else if (isa<MemoryDef>(InsertPt)) {
1443  Defs->insert(InsertPt->getDefsIterator(), *What);
1444  } else {
1445  while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
1446  ++InsertPt;
1447  // Either we found a def, or we are inserting at the end
1448  if (InsertPt == Accesses->end())
1449  Defs->push_back(*What);
1450  else
1451  Defs->insert(InsertPt->getDefsIterator(), *What);
1452  }
1453  }
1454  BlockNumberingValid.erase(BB);
1455 }
1456 
1457 // Move What before Where in the IR. The end result is that What will belong to
1458 // the right lists and have the right Block set, but will not otherwise be
1459 // correct. It will not have the right defining access, and if it is a def,
1460 // things below it will not properly be updated.
1461 void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1462  AccessList::iterator Where) {
1463  // Keep it in the lookup tables, remove from the lists
1464  removeFromLists(What, false);
1465  What->setBlock(BB);
1466  insertIntoListsBefore(What, BB, Where);
1467 }
1468 
1469 void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1470  InsertionPlace Point) {
1471  removeFromLists(What, false);
1472  What->setBlock(BB);
1473  insertIntoListsForBlock(What, BB, Point);
1474 }
1475 
1476 MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1477  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1478  MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
1479  // Phis are always placed at the front of the block.
1480  insertIntoListsForBlock(Phi, BB, Beginning);
1481  ValueToMemoryAccess[BB] = Phi;
1482  return Phi;
1483 }
1484 
1485 MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1486  MemoryAccess *Definition) {
1487  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1488  MemoryUseOrDef *NewAccess = createNewAccess(I);
1489  assert(
1490  NewAccess != nullptr &&
1491  "Tried to create a memory access for a non-memory touching instruction");
1492  NewAccess->setDefiningAccess(Definition);
1493  return NewAccess;
1494 }
1495 
1496 // Return true if the instruction has ordering constraints.
1497 // Note specifically that this only considers stores and loads
1498 // because others are still considered ModRef by getModRefInfo.
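// For example (illustrative): 'load atomic i32, i32* %p acquire' and
// 'load volatile i32, i32* %p' are both ordered (isUnordered() is false),
// while a plain 'load i32, i32* %p' is not.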
1499 static inline bool isOrdered(const Instruction *I) {
1500  if (auto *SI = dyn_cast<StoreInst>(I)) {
1501  if (!SI->isUnordered())
1502  return true;
1503  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1504  if (!LI->isUnordered())
1505  return true;
1506  }
1507  return false;
1508 }
1509 
1510 /// \brief Helper function to create new memory accesses
1511 MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {
1512  // The assume intrinsic has a control dependency which we model by claiming
1513  // that it writes arbitrarily. Ignore that fake memory dependency here.
1514  // FIXME: Replace this special casing with a more accurate modelling of
1515  // assume's control dependency.
1516  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1517  if (II->getIntrinsicID() == Intrinsic::assume)
1518  return nullptr;
1519 
1520  // Find out what effect this instruction has on memory.
1521  ModRefInfo ModRef = AA->getModRefInfo(I, None);
1522  // The isOrdered check is used to ensure that volatiles end up as defs
1523  // (atomics end up as ModRef right now anyway). Until we separate the
1524  // ordering chain from the memory chain, this enables people to see at least
1525  // some relative ordering to volatiles. Note that getClobberingMemoryAccess
1526  // will still give an answer that bypasses other volatile loads. TODO:
1527  // Separate memory aliasing and ordering into two different chains so that we
1528  // can precisely represent both "what memory will this read/write/is clobbered
1529  // by" and "what instructions can I move this past".
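 // (Illustrative consequence: a volatile load is MRI_Ref per getModRefInfo,
 // but isOrdered() turns it into a MemoryDef here, so later volatile
 // accesses are not freely walked past it.)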
1530  bool Def = bool(ModRef & MRI_Mod) || isOrdered(I);
1531  bool Use = bool(ModRef & MRI_Ref);
1532 
1533  // It's possible for an instruction to not modify memory at all. During
1534  // construction, we ignore them.
1535  if (!Def && !Use)
1536  return nullptr;
1537 
1538  assert((Def || Use) &&
1539  "Trying to create a memory access with a non-memory instruction");
1540 
1541  MemoryUseOrDef *MUD;
1542  if (Def)
1543  MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1544  else
1545  MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1546  ValueToMemoryAccess[I] = MUD;
1547  return MUD;
1548 }
1549 
1550 /// \brief Returns true if \p Replacer dominates \p Replacee .
1551 bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
1552  const MemoryAccess *Replacee) const {
1553  if (isa<MemoryUseOrDef>(Replacee))
1554  return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
1555  const auto *MP = cast<MemoryPhi>(Replacee);
1556  // For a phi node, the use occurs in the predecessor block of the phi node.
1557  // Since Replacee may occur multiple times in the phi node, we check each
1558  // operand to ensure Replacer dominates every operand where Replacee occurs.
1559  for (const Use &Arg : MP->operands()) {
1560  if (Arg.get() != Replacee &&
1561  !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
1562  return false;
1563  }
1564  return true;
1565 }
1566 
1567 /// \brief Properly remove \p MA from all of MemorySSA's lookup tables.
1568 void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1569  assert(MA->use_empty() &&
1570  "Trying to remove memory access that still has uses");
1571  BlockNumbering.erase(MA);
1572  if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA))
1573  MUD->setDefiningAccess(nullptr);
1574  // Invalidate our walker's cache if necessary
1575  if (!isa<MemoryUse>(MA))
1576  Walker->invalidateInfo(MA);
1577  // The call below to erase will destroy MA, so we can't change the order in
1578  // which we do things here.
1579  Value *MemoryInst;
1580  if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
1581  MemoryInst = MUD->getMemoryInst();
1582  } else {
1583  MemoryInst = MA->getBlock();
1584  }
1585  auto VMA = ValueToMemoryAccess.find(MemoryInst);
1586  if (VMA->second == MA)
1587  ValueToMemoryAccess.erase(VMA);
1588 }
1589 
1590 /// \brief Properly remove \p MA from all of MemorySSA's lists.
1591 ///
1592 /// Because of the way the intrusive list and use lists work, it is important to
1593 /// do removal in the right order.
1594 /// ShouldDelete defaults to true, and will cause the memory access to also be
1595 /// deleted, not just removed.
1596 void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1597  // The access list owns the reference, so we erase it from the non-owning list
1598  // first.
1599  if (!isa<MemoryUse>(MA)) {
1600  auto DefsIt = PerBlockDefs.find(MA->getBlock());
1601  std::unique_ptr<DefsList> &Defs = DefsIt->second;
1602  Defs->remove(*MA);
1603  if (Defs->empty())
1604  PerBlockDefs.erase(DefsIt);
1605  }
1606 
1607  // The erase call here will delete it. If we don't want it deleted, we call
1608  // remove instead.
1609  auto AccessIt = PerBlockAccesses.find(MA->getBlock());
1610  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1611  if (ShouldDelete)
1612  Accesses->erase(MA);
1613  else
1614  Accesses->remove(MA);
1615 
1616  if (Accesses->empty())
1617  PerBlockAccesses.erase(AccessIt);
1618 }
1619 
1620 void MemorySSA::print(raw_ostream &OS) const {
1621  MemorySSAAnnotatedWriter Writer(this);
1622  F.print(OS, &Writer);
1623 }
1624 
1625 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1626 LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1627 #endif
1628 
1629 void MemorySSA::verifyMemorySSA() const {
1630  verifyDefUses(F);
1631  verifyDomination(F);
1632  verifyOrdering(F);
1633  Walker->verify(this);
1634 }
1635 
1636 /// \brief Verify that the order and existence of MemoryAccesses matches the
1637 /// order and existence of memory affecting instructions.
1638 void MemorySSA::verifyOrdering(Function &F) const {
1639  // Walk all the blocks, comparing what the lookups think and what the access
1640  // lists think, as well as the order in the blocks vs the order in the access
1641  // lists.
1642  SmallVector<MemoryAccess *, 32> ActualAccesses;
1643  SmallVector<MemoryAccess *, 32> ActualDefs;
1644  for (BasicBlock &B : F) {
1645  const AccessList *AL = getBlockAccesses(&B);
1646  const auto *DL = getBlockDefs(&B);
1647  MemoryAccess *Phi = getMemoryAccess(&B);
1648  if (Phi) {
1649  ActualAccesses.push_back(Phi);
1650  ActualDefs.push_back(Phi);
1651  }
1652 
1653  for (Instruction &I : B) {
1654  MemoryAccess *MA = getMemoryAccess(&I);
1655  assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
1656  "We have memory affecting instructions "
1657  "in this block but they are not in the "
1658  "access list or defs list");
1659  if (MA) {
1660  ActualAccesses.push_back(MA);
1661  if (isa<MemoryDef>(MA))
1662  ActualDefs.push_back(MA);
1663  }
1664  }
1665  // Either we hit the assert, really have no accesses, or we have both
1666  // accesses and an access list.
1667  // Same with defs.
1668  if (!AL && !DL)
1669  continue;
1670  assert(AL->size() == ActualAccesses.size() &&
1671  "We don't have the same number of accesses in the block as on the "
1672  "access list");
1673  assert((DL || ActualDefs.size() == 0) &&
1674  "Either we should have a defs list, or we should have no defs");
1675  assert((!DL || DL->size() == ActualDefs.size()) &&
1676  "We don't have the same number of defs in the block as on the "
1677  "def list");
1678  auto ALI = AL->begin();
1679  auto AAI = ActualAccesses.begin();
1680  while (ALI != AL->end() && AAI != ActualAccesses.end()) {
1681  assert(&*ALI == *AAI && "Not the same accesses in the same order");
1682  ++ALI;
1683  ++AAI;
1684  }
1685  ActualAccesses.clear();
1686  if (DL) {
1687  auto DLI = DL->begin();
1688  auto ADI = ActualDefs.begin();
1689  while (DLI != DL->end() && ADI != ActualDefs.end()) {
1690  assert(&*DLI == *ADI && "Not the same defs in the same order");
1691  ++DLI;
1692  ++ADI;
1693  }
1694  }
1695  ActualDefs.clear();
1696  }
1697 }
1698 
1699 /// \brief Verify the domination properties of MemorySSA by checking that each
1700 /// definition dominates all of its uses.
1701 void MemorySSA::verifyDomination(Function &F) const {
1702 #ifndef NDEBUG
1703  for (BasicBlock &B : F) {
1704  // Phi nodes are attached to basic blocks
1705  if (MemoryPhi *MP = getMemoryAccess(&B))
1706  for (const Use &U : MP->uses())
1707  assert(dominates(MP, U) && "Memory PHI does not dominate its uses");
1708 
1709  for (Instruction &I : B) {
1710  MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
1711  if (!MD)
1712  continue;
1713 
1714  for (const Use &U : MD->uses())
1715  assert(dominates(MD, U) && "Memory Def does not dominate its uses");
1716  }
1717  }
1718 #endif
1719 }
1720 
1721 /// \brief Verify the def-use lists in MemorySSA, by verifying that \p Use
1722 /// appears in the use list of \p Def.
1723 void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
1724 #ifndef NDEBUG
1725  // The live on entry use may cause us to get a NULL def here
1726  if (!Def)
1727  assert(isLiveOnEntryDef(Use) &&
1728  "Null def but use does not point to live on entry def");
1729  else
1730  assert(is_contained(Def->users(), Use) &&
1731  "Did not find use in def's use list");
1732 #endif
1733 }
1734 
1735 /// \brief Verify the immediate use information, by walking all the memory
1736 /// accesses and verifying that, for each use, it appears in the
1737 /// appropriate def's use list
1738 void MemorySSA::verifyDefUses(Function &F) const {
1739  for (BasicBlock &B : F) {
1740  // Phi nodes are attached to basic blocks
1741  if (MemoryPhi *Phi = getMemoryAccess(&B)) {
1742  assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
1743  pred_begin(&B), pred_end(&B))) &&
1744  "Incomplete MemoryPhi Node");
1745  for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
1746  verifyUseInDefs(Phi->getIncomingValue(I), Phi);
1747  }
1748 
1749  for (Instruction &I : B) {
1750  if (MemoryUseOrDef *MA = getMemoryAccess(&I)) {
1751  verifyUseInDefs(MA->getDefiningAccess(), MA);
1752  }
1753  }
1754  }
1755 }
1756 
1757 MemoryUseOrDef *MemorySSA::getMemoryAccess(const Instruction *I) const {
1758  return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
1759 }
1760 
1761 MemoryPhi *MemorySSA::getMemoryAccess(const BasicBlock *BB) const {
1762  return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
1763 }
1764 
1765 /// Perform a local numbering on blocks so that instruction ordering can be
1766 /// determined in constant time.
1767 /// TODO: We currently just number in order. If we numbered by N, we could
1768 /// allow at least N-1 sequences of insertBefore or insertAfter (and at least
1769 /// log2(N) sequences of mixed before and after) without needing to invalidate
1770 /// the numbering.
1771 void MemorySSA::renumberBlock(const BasicBlock *B) const {
1772  // The pre-increment ensures the numbers really start at 1.
1773  unsigned long CurrentNumber = 0;
1774  const AccessList *AL = getBlockAccesses(B);
1775  assert(AL != nullptr && "Asking to renumber an empty block");
1776  for (const auto &I : *AL)
1777  BlockNumbering[&I] = ++CurrentNumber;
1778  BlockNumberingValid.insert(B);
1779 }
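// Worked example (hypothetical block contents, not from the source): for an
// access list [MemoryPhi, MemoryDef, MemoryUse], renumberBlock records
// {Phi: 1, Def: 2, Use: 3}, so the locallyDominates query below reduces to
// a constant-time integer comparison such as 2 < 3.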
1780 
1781 /// \brief Determine, for two memory accesses in the same block,
1782 /// whether \p Dominator dominates \p Dominatee.
1783 /// \returns True if \p Dominator dominates \p Dominatee.
1784 bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
1785  const MemoryAccess *Dominatee) const {
1786  const BasicBlock *DominatorBlock = Dominator->getBlock();
1787 
1788  assert((DominatorBlock == Dominatee->getBlock()) &&
1789  "Asking for local domination when accesses are in different blocks!");
1790  // A node dominates itself.
1791  if (Dominatee == Dominator)
1792  return true;
1793 
1794  // When Dominatee is defined on function entry, it is not dominated by another
1795  // memory access.
1796  if (isLiveOnEntryDef(Dominatee))
1797  return false;
1798 
1799  // When Dominator is defined on function entry, it dominates the other memory
1800  // access.
1801  if (isLiveOnEntryDef(Dominator))
1802  return true;
1803 
1804  if (!BlockNumberingValid.count(DominatorBlock))
1805  renumberBlock(DominatorBlock);
1806 
1807  unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
1808  // All numbers start with 1
1809  assert(DominatorNum != 0 && "Block was not numbered properly");
1810  unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
1811  assert(DominateeNum != 0 && "Block was not numbered properly");
1812  return DominatorNum < DominateeNum;
1813 }
1814 
1815 bool MemorySSA::dominates(const MemoryAccess *Dominator,
1816  const MemoryAccess *Dominatee) const {
1817  if (Dominator == Dominatee)
1818  return true;
1819 
1820  if (isLiveOnEntryDef(Dominatee))
1821  return false;
1822 
1823  if (Dominator->getBlock() != Dominatee->getBlock())
1824  return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
1825  return locallyDominates(Dominator, Dominatee);
1826 }
1827 
1828 bool MemorySSA::dominates(const MemoryAccess *Dominator,
1829  const Use &Dominatee) const {
1830  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
1831  BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
1832  // The def must dominate the incoming block of the phi.
1833  if (UseBB != Dominator->getBlock())
1834  return DT->dominates(Dominator->getBlock(), UseBB);
1835  // If the UseBB and the DefBB are the same, compare locally.
1836  return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
1837  }
1838  // If it's not a PHI node use, the normal dominates can already handle it.
1839  return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
1840 }
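// Worked example (hypothetical IR, using the printed form defined below):
//
//   if.then:                ; 1 = MemoryDef(liveOnEntry)
//     store i32 0, i32* %p
//     br label %merge
//   merge:                  ; 2 = MemoryPhi({if.then,1},{if.else,liveOnEntry})
//
// For the {if.then,1} operand of the phi, dominates(Def 1, that Use) asks
// whether Def 1's block dominates the *incoming* block if.then -- which it
// trivially does -- not whether it dominates merge itself.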
1841 
1842 const static char LiveOnEntryStr[] = "liveOnEntry";
1843 
1844 void MemoryAccess::print(raw_ostream &OS) const {
1845  switch (getValueID()) {
1846  case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
1847  case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
1848  case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
1849  }
1850  llvm_unreachable("invalid value id");
1851 }
1852 
1853 void MemoryDef::print(raw_ostream &OS) const {
1854  MemoryAccess *UO = getDefiningAccess();
1855 
1856  OS << getID() << " = MemoryDef(";
1857  if (UO && UO->getID())
1858  OS << UO->getID();
1859  else
1860  OS << LiveOnEntryStr;
1861  OS << ')';
1862 }
1863 
1864 void MemoryPhi::print(raw_ostream &OS) const {
1865  bool First = true;
1866  OS << getID() << " = MemoryPhi(";
1867  for (const auto &Op : operands()) {
1868  BasicBlock *BB = getIncomingBlock(Op);
1869  MemoryAccess *MA = cast<MemoryAccess>(Op);
1870  if (!First)
1871  OS << ',';
1872  else
1873  First = false;
1874 
1875  OS << '{';
1876  if (BB->hasName())
1877  OS << BB->getName();
1878  else
1879  BB->printAsOperand(OS, false);
1880  OS << ',';
1881  if (unsigned ID = MA->getID())
1882  OS << ID;
1883  else
1884  OS << LiveOnEntryStr;
1885  OS << '}';
1886  }
1887  OS << ')';
1888 }
1889 
1890 void MemoryUse::print(raw_ostream &OS) const {
1891  MemoryAccess *UO = getDefiningAccess();
1892  OS << "MemoryUse(";
1893  if (UO && UO->getID())
1894  OS << UO->getID();
1895  else
1896  OS << LiveOnEntryStr;
1897  OS << ')';
1898 }
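// Put together, the three printers above yield annotations like the
// following (hypothetical function, rendered by MemorySSAAnnotatedWriter):
//
//   entry:
//     ; 1 = MemoryDef(liveOnEntry)
//     store i32 42, i32* %p
//     ; MemoryUse(1)
//     %v = load i32, i32* %p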
1899 
1900 void MemoryAccess::dump() const {
1901 // Cannot completely remove virtual function even in release mode.
1902 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1903  print(dbgs());
1904  dbgs() << "\n";
1905 #endif
1906 }
1907 
1908 char MemorySSAPrinterLegacyPass::ID = 0;
1909 
1910 MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
1911  initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
1912 }
1913 
1914 void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
1915  AU.setPreservesAll();
1916  AU.addRequired<MemorySSAWrapperPass>();
1917 }
1918 
1919 bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
1920  auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
1921  MSSA.print(dbgs());
1922  if (VerifyMemorySSA)
1923  MSSA.verifyMemorySSA();
1924  return false;
1925 }
1926 
1927 AnalysisKey MemorySSAAnalysis::Key;
1928 
1929 MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
1930  FunctionAnalysisManager &AM) {
1931  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
1932  auto &AA = AM.getResult<AAManager>(F);
1933  return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT));
1934 }
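// A minimal new-pass-manager consumer, as a hedged sketch (the pass name is
// hypothetical; only the MemorySSAAnalysis query is taken from this file):
struct MemorySSAConsumerPass : PassInfoMixin<MemorySSAConsumerPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
    (void)MSSA; // Query walkers, dominance, etc. here.
    return PreservedAnalyses::all();
  }
};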
1935 
1936 PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
1937  FunctionAnalysisManager &AM) {
1938  OS << "MemorySSA for function: " << F.getName() << "\n";
1939  AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);
1940 
1941  return PreservedAnalyses::all();
1942 }
1943 
1944 PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
1945  FunctionAnalysisManager &AM) {
1946  AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
1947 
1948  return PreservedAnalyses::all();
1949 }
1950 
1951 char MemorySSAWrapperPass::ID = 0;
1952 
1953 MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
1954  initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
1955 }
1956 
1957 void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
1958 
1959 void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1960  AU.setPreservesAll();
1961  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
1962  AU.addRequiredTransitive<AAResultsWrapperPass>();
1963 }
1964 
1965 bool MemorySSAWrapperPass::runOnFunction(Function &F) {
1966  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1967  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
1968  MSSA.reset(new MemorySSA(F, &AA, &DT));
1969  return false;
1970 }
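// Legacy-pass-manager counterpart, sketched (MyLegacyPass is hypothetical):
//
//   void MyLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<MemorySSAWrapperPass>();
//   }
//   bool MyLegacyPass::runOnFunction(Function &F) {
//     MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
//     // ... use MSSA ...
//     return false;
//   }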
1971 
1972 void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }
1973 
1974 void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
1975  MSSA->print(OS);
1976 }
1977 
1978 MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
1979 
1980 MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,
1981  DominatorTree *D)
1982  : MemorySSAWalker(M), Walker(*M, *A, *D) {}
1983 
1984 void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {
1985  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1986  MUD->resetOptimized();
1987 }
1988 
1989 /// \brief Walk the use-def chains starting at \p MA and find
1990 /// the MemoryAccess that actually clobbers Loc.
1991 ///
1992 /// \returns our clobbering memory access
1993 MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
1994  MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) {
1995  MemoryAccess *New = Walker.findClobber(StartingAccess, Q);
1996 #ifdef EXPENSIVE_CHECKS
1997  MemoryAccess *NewNoCache = Walker.findClobber(StartingAccess, Q);
1998  assert(NewNoCache == New && "Cache made us hand back a different result?");
1999  (void)NewNoCache;
2000 #endif
2001  if (AutoResetWalker)
2002  resetClobberWalker();
2003  return New;
2004 }
2005 
2006 MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
2007  MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
2008  if (isa<MemoryPhi>(StartingAccess))
2009  return StartingAccess;
2010 
2011  auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
2012  if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
2013  return StartingUseOrDef;
2014 
2015  Instruction *I = StartingUseOrDef->getMemoryInst();
2016 
2017  // Conservatively, fences are always clobbers, so don't perform the walk if we
2018  // hit a fence.
2019  if (!ImmutableCallSite(I) && I->isFenceLike())
2020  return StartingUseOrDef;
2021 
2022  UpwardsMemoryQuery Q;
2023  Q.OriginalAccess = StartingUseOrDef;
2024  Q.StartingLoc = Loc;
2025  Q.Inst = I;
2026  Q.IsCall = false;
2027 
2028  // Unlike the other function, do not walk to the def of a def, because we are
2029  // handed something we already believe is the clobbering access.
2030  MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
2031  ? StartingUseOrDef->getDefiningAccess()
2032  : StartingUseOrDef;
2033 
2034  MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
2035  DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2036  DEBUG(dbgs() << *StartingUseOrDef << "\n");
2037  DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
2038  DEBUG(dbgs() << *Clobber << "\n");
2039  return Clobber;
2040 }
2041 
2042 MemoryAccess *
2043 MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2044  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2045  // If this is a MemoryPhi, we can't do anything.
2046  if (!StartingAccess)
2047  return MA;
2048 
2049  // If this is an already optimized use or def, return the optimized result.
2050  // Note: Currently, we do not store the optimized def result because we'd need
2051  // a separate field, since we can't use it as the defining access.
2052  if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
2053  if (MUD->isOptimized())
2054  return MUD->getOptimized();
2055 
2056  const Instruction *I = StartingAccess->getMemoryInst();
2057  UpwardsMemoryQuery Q(I, StartingAccess);
2058  // We can't sanely do anything with fences; they conservatively clobber
2059  // all memory, and have no locations to get pointers from to try to
2060  // disambiguate.
2061  if (!Q.IsCall && I->isFenceLike())
2062  return StartingAccess;
2063 
2065  MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2066  if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
2067  MUD->setOptimized(LiveOnEntry);
2068  return LiveOnEntry;
2069  }
2070 
2071  // Start with the thing we already think clobbers this location
2072  MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2073 
2074  // At this point, DefiningAccess may be the live on entry def.
2075  // If it is, we will not get a better result.
2076  if (MSSA->isLiveOnEntryDef(DefiningAccess))
2077  return DefiningAccess;
2078 
2079  MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
2080  DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2081  DEBUG(dbgs() << *DefiningAccess << "\n");
2082  DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
2083  DEBUG(dbgs() << *Result << "\n");
2084  if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
2085  MUD->setOptimized(Result);
2086 
2087  return Result;
2088 }
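// Hedged usage sketch (the helper name is hypothetical): resolving a load's
// clobber through the walker, and testing for the liveOnEntry sentinel.
static bool loadIsUnclobbered(MemorySSA &MSSA, LoadInst *LI) {
  MemoryAccess *MA = MSSA.getMemoryAccess(LI);
  MemoryAccess *Clobber = MSSA.getWalker()->getClobberingMemoryAccess(MA);
  // liveOnEntry means nothing inside this function clobbers the location.
  return MSSA.isLiveOnEntryDef(Clobber);
}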
2089 
2090 MemoryAccess *
2091 DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2092  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2093  return Use->getDefiningAccess();
2094  return MA;
2095 }
2096 
2097 MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2098  MemoryAccess *StartingAccess, const MemoryLocation &) {
2099  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2100  return Use->getDefiningAccess();
2101  return StartingAccess;
2102 }
2103 
2104 void MemoryPhi::deleteMe(DerivedUser *Self) {
2105  delete static_cast<MemoryPhi *>(Self);
2106 }
2107 
2108 void MemoryDef::deleteMe(DerivedUser *Self) {
2109  delete static_cast<MemoryDef *>(Self);
2110 }
2111 
2112 void MemoryUse::deleteMe(DerivedUser *Self) {
2113  delete static_cast<MemoryUse *>(Self);
2114 }