//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Use.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <memory>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memoryssa"

INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)

INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
                      "Memory SSA Printer", false, false)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
                    "Memory SSA Printer", false, false)

static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));

// Always verify MemorySSA if expensive checking is enabled.
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyMemorySSA = true;
#else
bool llvm::VerifyMemorySSA = false;
#endif
static cl::opt<bool, true>
    VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
                     cl::Hidden, cl::desc("Enable verification of MemorySSA."));

namespace llvm {

/// An assembly annotator class to print Memory SSA information in
/// comments.
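/// For example, a printed function is interleaved with annotations roughly
/// like the following (illustrative output; exact numbering depends on the
/// function):
///   ; 1 = MemoryDef(liveOnEntry)
///     store i32 0, i32* %p
///   ; MemoryUse(1)
///     %v = load i32, i32* %p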
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  friend class MemorySSA;

  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};

} // end namespace llvm

namespace {

/// Our current alias analysis API differentiates heavily between calls and
/// non-calls, and functions called on one usually assert on the other.
/// This class encapsulates the distinction to simplify other code that wants
/// "Memory affecting instructions and related data" to use as a key.
/// For example, this class is used as a densemap key in the use optimizer.
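/// A load or store is thus keyed by its MemoryLocation (pointer and size),
/// while a call is keyed by the call itself and compared by callee and
/// argument list (see operator== below).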
class MemoryLocOrCall {
public:
  bool IsCall = false;

  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      Call = C;
    } else {
      IsCall = false;
      // There is no such thing as a MemoryLocation for a fence inst, and it
      // is unique in that regard.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}

  const CallBase *getCall() const {
    assert(IsCall);
    return Call;
  }

  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (!IsCall)
      return Loc == Other.Loc;

    if (Call->getCalledValue() != Other.Call->getCalledValue())
      return false;

    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }

private:
  union {
    const CallBase *Call;
    MemoryLocation Loc;
  };
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<MemoryLocOrCall> {
  static inline MemoryLocOrCall getEmptyKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
  }

  static inline MemoryLocOrCall getTombstoneKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
  }

  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (!MLOC.IsCall)
      return hash_combine(
          MLOC.IsCall,
          DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCall()->getCalledValue()));

    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

/// This does one-way checks to see if Use could theoretically be hoisted above
/// MayClobber. This will not check the other way around.
///
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
/// MayClobber, with no potentially clobbering operations in between them.
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
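/// For example, two monotonic loads of the same address are freely
/// reorderable, but nothing may be hoisted above an acquire load, and a
/// seq_cst load may not be hoisted above any other load.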
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;
  // Otherwise, volatile doesn't matter here. From the language reference:
  // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'

  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
  // is weaker, it can be moved above other loads. We just need to be sure that
  // MayClobber isn't an acquire load, because loads can't be moved above
  // acquire loads.
  //
  // Note that this explicitly *does* allow the free reordering of monotonic (or
  // weaker) loads of the same address.
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}

namespace {

struct ClobberAlias {
  bool IsClobber;
  Optional<AliasResult> AR;
};

} // end anonymous namespace

// Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
// ignored if IsClobber = false.
static ClobberAlias instructionClobbersQuery(const MemoryDef *MD,
                                             const MemoryLocation &UseLoc,
                                             const Instruction *UseInst,
                                             AliasAnalysis &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");
  const auto *UseCall = dyn_cast<CallBase>(UseInst);
  Optional<AliasResult> AR;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers, mostly.
    //
    // FIXME: We probably don't actually want MemorySSA to model these at all
    // (including creating MemoryAccesses for them): we just end up inventing
    // clobbers where they don't really exist at all. Please see D43269 for
    // context.
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
      if (UseCall)
        return {false, NoAlias};
      AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc);
      return {AR != NoAlias, AR};
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
      return {false, NoAlias};
    default:
      break;
    }
  }

  if (UseCall) {
    ModRefInfo I = AA.getModRefInfo(DefInst, UseCall);
    AR = isMustSet(I) ? MustAlias : MayAlias;
    return {isModOrRefSet(I), AR};
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
      return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias};

  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  AR = isMustSet(I) ? MustAlias : MayAlias;
  return {isModSet(I), AR};
}

static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
                                             const MemoryUseOrDef *MU,
                                             const MemoryLocOrCall &UseMLOC,
                                             AliasAnalysis &AA) {
  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
  // to exist while MemoryLocOrCall is pushed through places.
  if (UseMLOC.IsCall)
    return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
                                    AA);
  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
                                  AA);
}

// Return true when MD may alias MU, return false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUse *MU,
                                        AliasAnalysis &AA) {
  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
}

namespace {

struct UpwardsMemoryQuery {
  // True if our original query started off as a call
  bool IsCall = false;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local domination
  const MemoryAccess *OriginalAccess = nullptr;
  Optional<AliasResult> AR = MayAlias;
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};

} // end anonymous namespace

static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
                           AliasAnalysis &AA) {
  Instruction *Inst = MD->getMemoryInst();
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_end:
      return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
    default:
      return false;
    }
  }
  return false;
}

static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
                                                   const Instruction *I) {
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
                              AA.pointsToConstantMemory(cast<LoadInst>(I)->
                                                          getPointerOperand()));
}

/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` can clobber `Start`.
///
/// This is meant to be as simple and self-contained as possible. Because it
/// uses no cache, etc., it can be relatively expensive.
///
/// \param Start     The MemoryAccess that we want to walk from.
/// \param ClobberAt A clobber for Start.
/// \param StartLoc  The MemoryLocation for Start.
/// \param MSSA      The MemorySSA instance that Start and ClobberAt belong to.
/// \param Query     The UpwardsMemoryQuery we used for our search.
/// \param AA        The AliasAnalysis we used for our search.
/// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
LLVM_ATTRIBUTE_UNUSED static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, AliasAnalysis &AA,
                   bool AllowImpreciseClobber = false) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  SmallVector<ConstMemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
  // is found, complain.
  while (!Worklist.empty()) {
    auto MAP = Worklist.pop_back_val();
    // All we care about is that nothing from Start to ClobberAt clobbers Start.
    // We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (const auto *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't essentially free, so don't use `|=`,
          // since it won't let us short-circuit.
          //
          // Also, note that this can't be hoisted out of the `Worklist` loop,
          // since MD may only act as a clobber for 1 of N MemoryLocations.
          FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
          if (!FoundClobber) {
            ClobberAlias CA =
                instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
            if (CA.IsClobber) {
              FoundClobber = true;
              // Not used: CA.AR;
            }
          }
        }
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
        // If Start is a Def, skip self.
        if (MD == Start)
          continue;

        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
                    .IsClobber &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
        (void)MU;
        assert(MU == Start &&
               "Can only find use in def chain if Start is a use");
        continue;
      }

      assert(isa<MemoryPhi>(MA));
      Worklist.append(
          upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second}),
          upward_defs_end());
    }
  }

  // If the verify is done following an optimization, it's possible that
  // ClobberAt was a conservative clobbering, that we can now infer is not a
  // true clobbering access. Don't fail the verify if that's the case.
  // We do have accesses that claim they're optimized, but could be optimized
  // further. Updating all these can be expensive, so allow it for now (FIXME).
  if (AllowImpreciseClobber)
    return;

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}

namespace {

/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
/// in one class.
class ClobberWalker {
  /// Save a few bytes by using unsigned instead of size_t.
  using ListIndex = unsigned;

  /// Represents a span of contiguous MemoryDefs, potentially ending in a
  /// MemoryPhi.
  struct DefPath {
    MemoryLocation Loc;
    // Note that, because we always walk in reverse, Last will always dominate
    // First. Also note that First and Last are inclusive.
    MemoryAccess *First;
    MemoryAccess *Last;
    Optional<ListIndex> Previous;

    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            Optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}

    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            Optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
  };

  const MemorySSA &MSSA;
  AliasAnalysis &AA;
  DominatorTree &DT;
  UpwardsMemoryQuery *Query;

  // Phi optimization bookkeeping
  SmallVector<DefPath, 32> Paths;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;

  /// Find the nearest def or phi that `From` can legally be optimized to.
  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
    assert(From->getNumOperands() && "Phi with no operands?");

    BasicBlock *BB = From->getBlock();
    MemoryAccess *Result = MSSA.getLiveOnEntryDef();
    DomTreeNode *Node = DT.getNode(BB);
    while ((Node = Node->getIDom())) {
      auto *Defs = MSSA.getBlockDefs(Node->getBlock());
      if (Defs)
        return &*Defs->rbegin();
    }
    return Result;
  }

  /// Result of calling walkToPhiOrClobber.
  struct UpwardsWalkResult {
    /// The "Result" of the walk. Either a clobber, the last thing we walked, or
    /// both. Include alias info when clobber found.
    MemoryAccess *Result;
    bool IsKnownClobber;
    Optional<AliasResult> AR;
  };

  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
  /// This will update Desc.Last as it walks. It will (optionally) also stop at
  /// StopAt.
  ///
  /// This does not test whether StopAt is itself a clobber.
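  /// For instance, given a def chain
  ///   liveOnEntry <- 1 = MemoryDef <- 2 = MemoryDef <- Desc.Last
  /// this walks 2, then 1, and returns the first def that clobbers Desc.Loc,
  /// or the end of the chain (liveOnEntry or a phi) if none does. (This is an
  /// illustrative sketch; the numbering is not from any particular function.)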
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
                     const MemoryAccess *SkipStopAt = nullptr) const {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      if (Current == StopAt || Current == SkipStopAt)
        return {Current, false, MayAlias};

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {
        if (MSSA.isLiveOnEntryDef(MD))
          return {MD, true, MustAlias};
        ClobberAlias CA =
            instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
        if (CA.IsClobber)
          return {MD, true, CA.AR};
      }
    }

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false, MayAlias};
  }

  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
    auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
                                 upward_defs_end());
    for (const MemoryAccessPair &P : UpwardDefs) {
      PausedSearches.push_back(Paths.size());
      Paths.emplace_back(P.second, P.first, PriorNode);
    }
  }

  /// Represents a search that terminated after finding a clobber. This clobber
  /// may or may not be present in the path of defs from LastNode..SearchStart,
  /// since it may have been retrieved from cache.
  struct TerminatedPath {
    MemoryAccess *Clobber;
    ListIndex LastNode;
  };

  /// Get an access that keeps us from optimizing to the given phi.
  ///
  /// PausedSearches is an array of indices into the Paths array. Its incoming
  /// value is the indices of searches that stopped at the last phi optimization
  /// target. It's left in an unspecified state.
  ///
  /// If this returns None, NewPaused is a vector of searches that terminated
  /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
  Optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    // BFS vs DFS really doesn't make a difference here, so just do a DFS with
    // PausedSearches as our stack.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      //
      // NOTE: That we just drop these paths on the ground makes caching
      // behavior sporadic. e.g. given a diamond:
      //  A
      // B C
      //  D
      //
      // ...If we walk D, B, A, C, we'll only cache the result of phi
      // optimization for A, B, and D; C will be skipped because it dies here.
      // This arguably isn't the worst thing ever, since:
      //   - We generally query things in a top-down order, so if we got below D
      //     without needing cache entries for {C, MemLoc}, then chances are
      //     that those cache entries would end up ultimately unused.
      //   - We still cache things for A, so C only needs to walk up a bit.
      // If this behavior becomes problematic, we can fix without a ton of extra
      // work.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
        continue;

      const MemoryAccess *SkipStopWhere = nullptr;
      if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
        assert(isa<MemoryDef>(Query->OriginalAccess));
        SkipStopWhere = Query->OriginalAccess;
      }

      UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere,
                                                 /*SkipStopAt=*/SkipStopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);
        // If this wasn't a cache hit, we hit a clobber when walking. That's a
        // failure.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
        // We've hit our target. Save this path off for if we want to continue
        // walking. If we are in the mode of skipping the OriginalAccess, and
        // we've reached back to the OriginalAccess, do not save path, we've
        // just looped back to self.
        if (Res.Result != SkipStopWhere)
          NewPaused.push_back(PathIndex);
        continue;
      }

      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return None;
  }

  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() = default;
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.hasValue() != O.N.hasValue())
        return false;
      return !N.hasValue() || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W = nullptr;
    Optional<ListIndex> N = None;
  };

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }

  struct OptznResult {
    /// The path that contains our result.
    TerminatedPath PrimaryClobber;
    /// The paths that we can legally cache back from, but that aren't
    /// necessarily the result of the Phi optimization.
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };

  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }

  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
  /// that act as legal clobbers. Note that this won't return *all* clobbers.
  ///
  /// Phi optimization algorithm tl;dr:
  ///   - Find the earliest def/phi, A, we can optimize to
  ///   - Find if all paths from the starting memory access ultimately reach A
  ///     - If not, optimization isn't possible.
  ///     - Otherwise, walk from A to another clobber or phi, A'.
  ///       - If A' is a def, we're done.
  ///       - If A' is a phi, try to optimize it.
  ///
  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
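  ///
  /// As an illustrative sketch (numbering invented for the example), consider
  /// a use below a diamond whose left arm holds the only def between the use
  /// and the def above the diamond:
  ///          1 = MemoryDef(liveOnEntry)
  ///           /            \
  ///     2 = MemoryDef(1)   (no defs)
  ///           \            /
  ///       3 = MemoryPhi({L,2},{R,1})
  ///       MemoryUse(3)
  /// If 2 cannot clobber the use's location, every path from the use walks up
  /// to 1 without hitting a clobber, so the use can be optimized past the phi
  /// to 1.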
  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
                             const MemoryLocation &Loc) {
    assert(Paths.empty() && VisitedPhis.empty() &&
           "Reset the optimization state.");

    Paths.emplace_back(Loc, Start, Phi, None);
    // Stores how many "valid" optimization nodes we had prior to calling
    // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
    auto PriorPathsSize = Paths.size();

    SmallVector<ListIndex, 16> PausedSearches;
    SmallVector<ListIndex, 8> NewPaused;
    SmallVector<TerminatedPath, 4> TerminatedPaths;

    addSearches(Phi, PausedSearches, 0);

    // Moves the TerminatedPath with the "most dominated" Clobber to the end of
    // Paths.
    auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
      assert(!Paths.empty() && "Need a path to move");
      auto Dom = Paths.begin();
      for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
        if (!MSSA.dominates(I->Clobber, Dom->Clobber))
          Dom = I;
      auto Last = Paths.end() - 1;
      if (Last != Dom)
        std::iter_swap(Last, Dom);
    };

    MemoryPhi *Current = Phi;
    while (true) {
      assert(!MSSA.isLiveOnEntryDef(Current) &&
             "liveOnEntry wasn't treated as a clobber?");

      const auto *Target = getWalkTarget(Current);
      // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
      // optimization for the prior phi.
      assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
        return MSSA.dominates(P.Clobber, Target);
      }));

      // FIXME: This is broken, because the Blocker may be reported to be
      // liveOnEntry, and we'll happily wait for that to disappear (read: never)
      // For the moment, this is fine, since we do nothing with blocker info.
      if (Optional<TerminatedPath> Blocker = getBlockingAccess(
              Target, PausedSearches, NewPaused, TerminatedPaths)) {

        // Find the node we started at. We can't search based on N->Last, since
        // we may have gone around a loop with a different MemoryLocation.
        auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
          return defPathIndex(N) < PriorPathsSize;
        });
        assert(Iter != def_path_iterator());

        DefPath &CurNode = *Iter;
        assert(CurNode.Last == Current);

        // Two things:
        // A. We can't reliably cache all of NewPaused back. Consider a case
        //    where we have two paths in NewPaused; one of which can't optimize
        //    above this phi, whereas the other can. If we cache the second path
        //    back, we'll end up with suboptimal cache entries. We can handle
        //    cases like this a bit better when we either try to find all
        //    clobbers that block phi optimization, or when our cache starts
        //    supporting unfinished searches.
        // B. We can't reliably cache TerminatedPaths back here without doing
        //    extra checks; consider a case like:
        //       T
        //      / \
        //     D   C
        //      \ /
        //       S
        //    Where T is our target, C is a node with a clobber on it, D is a
        //    diamond (with a clobber *only* on the left or right node, N), and
        //    S is our start. Say we walk to D, through the node opposite N
        //    (read: ignoring the clobber), and see a cache entry in the top
        //    node of D. That cache entry gets put into TerminatedPaths. We then
        //    walk up to C (N is later in our worklist), find the clobber, and
        //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
        //    the bottom part of D to the cached clobber, ignoring the clobber
        //    in N. Again, this problem goes away if we start tracking all
        //    blockers for a given phi optimization.
        TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
        return {Result, {}};
      }

      // If there's nothing left to search, then all paths led to valid clobbers
      // that we got from our cache; pick the nearest to the start, and allow
      // the rest to be cached back.
      if (NewPaused.empty()) {
        MoveDominatedPathToEnd(TerminatedPaths);
        TerminatedPath Result = TerminatedPaths.pop_back_val();
        return {Result, std::move(TerminatedPaths)};
      }

      MemoryAccess *DefChainEnd = nullptr;
      SmallVector<TerminatedPath, 4> Clobbers;
      for (ListIndex Paused : NewPaused) {
        UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
        if (WR.IsKnownClobber)
          Clobbers.push_back({WR.Result, Paused});
        else
          // Micro-opt: If we hit the end of the chain, save it.
          DefChainEnd = WR.Result;
      }

      if (!TerminatedPaths.empty()) {
        // If we couldn't find the dominating phi/liveOnEntry in the above loop,
        // do it now.
        if (!DefChainEnd)
          for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
            DefChainEnd = MA;

        // If any of the terminated paths don't dominate the phi we'll try to
        // optimize, we need to figure out what they are and quit.
        const BasicBlock *ChainBB = DefChainEnd->getBlock();
        for (const TerminatedPath &TP : TerminatedPaths) {
          // Because we know that DefChainEnd is as "high" as we can go, we
          // don't need local dominance checks; BB dominance is sufficient.
          if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
            Clobbers.push_back(TP);
        }
      }

      // If we have clobbers in the def chain, find the one closest to Current
      // and quit.
      if (!Clobbers.empty()) {
        MoveDominatedPathToEnd(Clobbers);
        TerminatedPath Result = Clobbers.pop_back_val();
        return {Result, std::move(Clobbers)};
      }

      assert(all_of(NewPaused,
                    [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

      // Because liveOnEntry is a clobber, this must be a phi.
      auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);

      PriorPathsSize = Paths.size();
      PausedSearches.clear();
      for (ListIndex I : NewPaused)
        addSearches(DefChainPhi, PausedSearches, I);
      NewPaused.clear();

      Current = DefChainPhi;
    }
  }

  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }

  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
  }

public:
  ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT)
      : MSSA(MSSA), AA(AA), DT(DT) {}

  /// Finds the nearest clobber for the given query, optimizing phis if
  /// possible.
  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) {
    Query = &Q;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist. If we're handed one, silently grab
    // its def. (This has the nice side-effect of ensuring we never cache uses)
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
    // Fast path for the overly-common case (no crazy phi optimization
    // necessary)
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
      Q.AR = WalkResult.AR;
    } else {
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    if (!Q.SkipSelfAccess)
      checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
#endif
    return Result;
  }

  void verify(const MemorySSA *MSSA) { assert(MSSA == &this->MSSA); }
};

struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};

} // end anonymous namespace

namespace llvm {

class MemorySSA::ClobberWalkerBase {
  ClobberWalker Walker;
  MemorySSA *MSSA;

public:
  ClobberWalkerBase(MemorySSA *M, AliasAnalysis *A, DominatorTree *D)
      : Walker(*M, *A, *D), MSSA(M) {}

  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
                                              const MemoryLocation &);
  // The second argument (bool) defines whether the clobber search should skip
  // the original queried access. If true, there will be a follow-up query
  // searching for a clobber access past "self". Note that the Optimized access
  // is not updated if a new clobber is found by this SkipSelf search. If this
  // additional query becomes heavily used we may decide to cache the result.
  // Walker instantiations will decide how to set the SkipSelf bool.
  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, bool);
  void verify(const MemorySSA *MSSA) { Walker.verify(MSSA); }
};

/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own, but the name has been retained for the
/// moment.
class MemorySSA::CachingWalker final : public MemorySSAWalker {
  ClobberWalkerBase *Walker;

public:
  CachingWalker(MemorySSA *M, ClobberWalkerBase *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~CachingWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override;

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }

  void verify(const MemorySSA *MSSA) override {
    MemorySSAWalker::verify(MSSA);
    Walker->verify(MSSA);
  }
};

class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
  ClobberWalkerBase *Walker;

public:
  SkipSelfWalker(MemorySSA *M, ClobberWalkerBase *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~SkipSelfWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override;

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }

  void verify(const MemorySSA *MSSA) override {
    MemorySSAWalker::verify(MSSA);
    Walker->verify(MSSA);
  }
};

} // end namespace llvm

void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
  // Pass through values to our successors
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    if (RenameAllUses) {
      int PhiIndex = Phi->getBasicBlockIndex(BB);
      assert(PhiIndex != -1 && "Incomplete phi during partial rename");
      Phi->setIncomingValue(PhiIndex, IncomingVal);
    } else
      Phi->addIncoming(IncomingVal, BB);
  }
}

/// Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
                                     bool RenameAllUses) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
        if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
          MUD->setDefiningAccess(IncomingVal);
        if (isa<MemoryDef>(&L))
          IncomingVal = &L;
      } else {
        IncomingVal = &L;
      }
    }
  }
  return IncomingVal;
}

/// This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  SmallVector<RenamePassData, 32> WorkStack;
  // Skip everything if we already renamed this block and we are skipping.
  // Note: You can't sink this into the if, because we need it to occur
  // regardless of whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});

  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      // Note: You can't sink this into the if, because we need it to occur
      // regardless of whether we skip blocks or not.
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // We already visited this during our renaming, which can happen when
        // being asked to rename multiple blocks. Figure out the incoming val,
        // which is the last def.
        // Incoming value can only change if there is a block def, and in that
        // case, it's the last block def in the list.
        if (auto *BlockDefs = getWritableBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}

/// This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccess's as
/// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable. We could just disconnect these blocks from the CFG fully,
  // but we do not right now.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // If we have a phi, just remove it. We are going to replace all
    // users with live on entry.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}

MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr), NextID(0) {
  buildMemorySSA();
}

MemorySSA::~MemorySSA() {
  // Drop all our references
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}

MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = llvm::make_unique<AccessList>();
  return Res.first->second.get();
}

MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = llvm::make_unique<DefsList>();
  return Res.first->second.get();
}

namespace llvm {

/// This class is a batch walker of all MemoryUse's in the program, and points
/// their defining access at the thing that actually clobbers them. Because it
/// is a batch walker that touches everything, it does not operate like the
/// other walkers. This walker is basically performing a top-down SSA renaming
/// pass, where the version stack is used as the cache. This enables it to be
/// significantly more time and memory efficient than using the regular walker,
/// which is walking bottom-up.
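/// Concretely, when the renaming walk reaches a block B, the version stack
/// holds exactly the MemoryDefs and MemoryPhis whose blocks dominate B (with
/// liveOnEntry at the bottom), so each MemoryUse in B only scans a suffix of
/// that stack instead of re-walking def chains from scratch.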
class MemorySSA::OptimizeUses {
public:
  OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA,
               DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) {
    Walker = MSSA->getWalker();
  }

  void optimizeUses();

private:
  /// This represents where a given MemoryLocation is in the stack.
  struct MemlocStackInfo {
    // This essentially is keeping track of versions of the stack. Whenever
    // the stack changes due to pushes or pops, these versions increase.
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    // This is the lower bound of places on the stack to check. It is equal to
    // the place the last stack walk ended.
    // Note: Correctness depends on this being initialized to 0, which densemap
    // does
    unsigned long LowerBound;
    const BasicBlock *LowerBoundBlock;
    // This is where the last walk for this memory location ended.
    unsigned long LastKill;
    bool LastKillValid;
    Optional<AliasResult> AR;
  };

  void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
                           SmallVectorImpl<MemoryAccess *> &,
                           DenseMap<MemoryLocOrCall, MemlocStackInfo> &);

  MemorySSA *MSSA;
  MemorySSAWalker *Walker;
  AliasAnalysis *AA;
  DominatorTree *DT;
};

} // end namespace llvm

/// Optimize the uses in a given block. This is basically the SSA renaming
/// algorithm, with one caveat: We are able to use a single stack for all
/// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
/// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
/// going to be some position in that stack of possible ones.
///
/// We track the stack positions that each MemoryLocation needs
/// to check, and last ended at. This is because we only want to check the
/// things that changed since last time. The same MemoryLocation should
/// get clobbered by the same store (getModRefInfo does not use invariantness or
/// things like this, and if they start, we can modify MemoryLocOrCall to
/// include relevant data).
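/// As a sketch: with a version stack of [liveOnEntry, 1 = MemoryDef,
/// 2 = MemoryDef] and a MemoryUse whose location is only clobbered by 1, the
/// scan starts at the top (2), finds no clobber there, reaches 1, sets the
/// use's defining access to 1, and records LastKill so a later use of the
/// same location resumes from that position. (Illustrative numbering only.)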
void MemorySSA::OptimizeUses::optimizeUsesInBlock(
    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
    SmallVectorImpl<MemoryAccess *> &VersionStack,
    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {

  // If no accesses, nothing to do.
  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
  if (Accesses == nullptr)
    return;

  // Pop everything that doesn't dominate the current block off the stack,
  // increment the PopEpoch to account for this.
  while (true) {
    assert(
        !VersionStack.empty() &&
        "Version stack should have liveOnEntry sentinel dominating everything");
    BasicBlock *BackBlock = VersionStack.back()->getBlock();
    if (DT->dominates(BackBlock, BB))
      break;
    while (VersionStack.back()->getBlock() == BackBlock)
      VersionStack.pop_back();
    ++PopEpoch;
  }

  for (MemoryAccess &MA : *Accesses) {
    auto *MU = dyn_cast<MemoryUse>(&MA);
    if (!MU) {
      VersionStack.push_back(&MA);
      ++StackEpoch;
      continue;
    }

    if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
      MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
      continue;
    }

    MemoryLocOrCall UseMLOC(MU);
    auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, it means we've removed stuff from top of
    // stack due to changing blocks. We may have to reset the lower bound or
    // last kill info.
    if (LocInfo.PopEpoch != PopEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
      // If the lower bound was in something that no longer dominates us, we
      // have to reset it.
      // We can't simply track stack size, because the stack may have had
      // pushes/pops in the meantime.
      // XXX: This is non-optimal, but is only slower in cases with heavily
      // branching dominator trees. To get the optimal number of queries would
      // be to make lowerbound and lastkill a per-loc stack, and pop it until
      // the top of that stack dominates us. This does not seem worth it ATM.
      // A much cheaper optimization would be to always explore the deepest
      // branch of the dominator tree first. This will guarantee this resets on
      // the smallest set of blocks.
      if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
          !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
        // Reset the lower bound of things to check.
        // TODO: Some day we should be able to reset to last kill, rather than
        // 0.
        LocInfo.LowerBound = 0;
        LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
        LocInfo.LastKillValid = false;
      }
    } else if (LocInfo.StackEpoch != StackEpoch) {
      // If all that has changed is the StackEpoch, we only have to check the
      // new things on the stack, because we've checked everything before. In
      // this case, the lower bound of things to check remains the same.
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
    }
    if (!LocInfo.LastKillValid) {
      LocInfo.LastKill = VersionStack.size() - 1;
      LocInfo.LastKillValid = true;
      LocInfo.AR = MayAlias;
    }

    // At this point, we should have corrected last kill and LowerBound to be
    // in bounds.
    assert(LocInfo.LowerBound < VersionStack.size() &&
           "Lower bound out of range");
    assert(LocInfo.LastKill < VersionStack.size() &&
           "Last kill info out of range");
    // In any case, the new upper bound is the top of the stack.
    unsigned long UpperBound = VersionStack.size() - 1;

    if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
      LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
                        << *(MU->getMemoryInst()) << ")"
                        << " because there are "
                        << UpperBound - LocInfo.LowerBound
                        << " stores to disambiguate\n");
      // Because we did not walk, LastKill is no longer valid, as this may
      // have been a kill.
      LocInfo.LastKillValid = false;
      continue;
    }
    bool FoundClobberResult = false;
    while (UpperBound > LocInfo.LowerBound) {
      if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker, see where we ended up, go there
        Instruction *UseInst = MU->getMemoryInst();
        MemoryAccess *Result = Walker->getClobberingMemoryAccess(UseInst);
        // We are guaranteed to find it or something is wrong
        while (VersionStack[UpperBound] != Result) {
          assert(UpperBound != 0);
          --UpperBound;
        }
        FoundClobberResult = true;
        break;
      }

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
      // If the lifetime of the pointer ends at this instruction, it's live on
      // entry.
      if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
        // Reset UpperBound to liveOnEntryDef's place in the stack
        UpperBound = 0;
        FoundClobberResult = true;
        LocInfo.AR = MustAlias;
        break;
      }
      ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
      if (CA.IsClobber) {
        FoundClobberResult = true;
        LocInfo.AR = CA.AR;
        break;
      }
      --UpperBound;
    }

    // Note: Phis always have AliasResult AR set to MayAlias ATM.

    // At the end of this loop, UpperBound is either a clobber, or the lower
    // bound. PHI walking may cause it to be < LowerBound, and in fact, <
    // LastKill.
    if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      // We were last killed now by where we got to
      if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
        LocInfo.AR = None;
      MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
      LocInfo.LastKill = UpperBound;
    } else {
      // Otherwise, we checked all the new ones, and now we know we can get to
      // LastKill.
      MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
    }
    LocInfo.LowerBound = VersionStack.size() - 1;
    LocInfo.LowerBoundBlock = BB;
  }
}

/// Optimize uses to point to their actual clobbering definitions.
void MemorySSA::OptimizeUses::optimizeUses() {
  SmallVector<MemoryAccess *, 16> VersionStack;
  DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
  VersionStack.push_back(MSSA->getLiveOnEntryDef());

  unsigned long StackEpoch = 1;
  unsigned long PopEpoch = 1;
  // We perform a non-recursive top-down dominator tree walk.
  for (const auto *DomNode : depth_first(DT->getRootNode()))
    optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
                        LocStackInfo);
}

void MemorySSA::placePHINodes(
    const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
  // Determine where our MemoryPhi's should go
  ForwardIDFCalculator IDFs(*DT);
  IDFs.setDefiningBlocks(DefiningBlocks);
  SmallVector<BasicBlock *, 32> IDFBlocks;
  IDFs.calculate(IDFBlocks);

  // Now place MemoryPhi nodes.
  for (auto &BB : IDFBlocks)
    createMemoryPhi(BB);
}

void MemorySSA::buildMemorySSA() {
  // We create an access to represent "live on entry", for things like
  // arguments or users of globals, where the memory they use is defined before
  // the beginning of the function. We do not actually insert it into the IR.
  // We do not define a live on exit for the immediate uses, and thus our
  // semantics do *not* imply that something with no immediate uses can simply
  // be removed.
  BasicBlock &StartingPoint = F.getEntryBlock();
  LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
                                     &StartingPoint, NextID++));

  // We maintain lists of memory accesses per block, trading memory for time. We
  // could just look up the memory access for every possible instruction in the
  // stream.
  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
  // Go through each block, figure out where defs occur, and chain together all
  // the accesses.
  for (BasicBlock &B : F) {
    bool InsertIntoDef = false;
    AccessList *Accesses = nullptr;
    DefsList *Defs = nullptr;
    for (Instruction &I : B) {
      MemoryUseOrDef *MUD = createNewAccess(&I);
      if (!MUD)
        continue;

      if (!Accesses)
        Accesses = getOrCreateAccessList(&B);
      Accesses->push_back(MUD);
      if (isa<MemoryDef>(MUD)) {
        InsertIntoDef = true;
        if (!Defs)
          Defs = getOrCreateDefsList(&B);
        Defs->push_back(*MUD);
      }
    }
    if (InsertIntoDef)
      DefiningBlocks.insert(&B);
  }
  placePHINodes(DefiningBlocks);

  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
  // filled in with all blocks.
  SmallPtrSet<BasicBlock *, 16> Visited;
  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);

  CachingWalker *Walker = getWalkerImpl();

  OptimizeUses(this, Walker, AA, DT).optimizeUses();

  // Mark the uses in unreachable blocks as live on entry, so that they go
  // somewhere.
  for (auto &BB : F)
    if (!Visited.count(&BB))
      markUnreachableAsLiveOnEntry(&BB);
}

MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }

MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
  if (Walker)
    return Walker.get();

  if (!WalkerBase)
    WalkerBase = llvm::make_unique<ClobberWalkerBase>(this, AA, DT);

  Walker = llvm::make_unique<CachingWalker>(this, WalkerBase.get());
  return Walker.get();
}

MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
  if (SkipWalker)
    return SkipWalker.get();

  if (!WalkerBase)
    WalkerBase = llvm::make_unique<ClobberWalkerBase>(this, AA, DT);

  SkipWalker = llvm::make_unique<SkipSelfWalker>(this, WalkerBase.get());
  return SkipWalker.get();
}

// This is a helper function used by the creation routines. It places NewAccess
// into the access and defs lists for a given basic block, at the given
// insertion point.
void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
                                        const BasicBlock *BB,
                                        InsertionPlace Point) {
  auto *Accesses = getOrCreateAccessList(BB);
  if (Point == Beginning) {
    // If it's a phi node, it goes first, otherwise, it goes after any phi
    // nodes.
    if (isa<MemoryPhi>(NewAccess)) {
      Accesses->push_front(NewAccess);
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_front(*NewAccess);
    } else {
      auto AI = find_if_not(
          *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
      Accesses->insert(AI, NewAccess);
      if (!isa<MemoryUse>(NewAccess)) {
        auto *Defs = getOrCreateDefsList(BB);
        auto DI = find_if_not(
            *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
        Defs->insert(DI, *NewAccess);
      }
    }
  } else {
    Accesses->push_back(NewAccess);
    if (!isa<MemoryUse>(NewAccess)) {
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_back(*NewAccess);
    }
  }
  BlockNumberingValid.erase(BB);
}

void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
                                      AccessList::iterator InsertPt) {
  auto *Accesses = getWritableBlockAccesses(BB);
  bool WasEnd = InsertPt == Accesses->end();
  Accesses->insert(AccessList::iterator(InsertPt), What);
  if (!isa<MemoryUse>(What)) {
    auto *Defs = getOrCreateDefsList(BB);
    // If we got asked to insert at the end, we have an easy job, just shove it
    // at the end. If we got asked to insert before an existing def, we also get
    // an iterator. If we got asked to insert before a use, we have to hunt for
    // the next def.
    if (WasEnd) {
      Defs->push_back(*What);
    } else if (isa<MemoryDef>(InsertPt)) {
      Defs->insert(InsertPt->getDefsIterator(), *What);
    } else {
      while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
        ++InsertPt;
      // Either we found a def, or we are inserting at the end
      if (InsertPt == Accesses->end())
        Defs->push_back(*What);
      else
        Defs->insert(InsertPt->getDefsIterator(), *What);
    }
  }
  BlockNumberingValid.erase(BB);
}

void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
  // Keep it in the lookup tables, remove from the lists
  removeFromLists(What, false);

  // Note that moving should implicitly invalidate the optimized state of a
  // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
  // MemoryDef.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    MD->resetOptimized();
  What->setBlock(BB);
}

1582 // Move What before Where in the IR. The end result is that What will belong to
1583 // the right lists and have the right Block set, but will not otherwise be
1584 // correct. It will not have the right defining access, and if it is a def,
1585 // things below it will not properly be updated.
1586 void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1587  AccessList::iterator Where) {
1588  prepareForMoveTo(What, BB);
1589  insertIntoListsBefore(What, BB, Where);
1590 }
1591 
1592 void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
1593  InsertionPlace Point) {
1594  if (isa<MemoryPhi>(What)) {
1595  assert(Point == Beginning &&
1596  "Can only move a Phi at the beginning of the block");
1597  // Update lookup table entry
1598  ValueToMemoryAccess.erase(What->getBlock());
1599  bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
1600  (void)Inserted;
1601  assert(Inserted && "Cannot move a Phi to a block that already has one");
1602  }
1603 
1604  prepareForMoveTo(What, BB);
1605  insertIntoListsForBlock(What, BB, Point);
1606 }
1607 
1608 MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1609  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1610  MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
1611  // Phis are always placed at the front of the block.
1612  insertIntoListsForBlock(Phi, BB, Beginning);
1613  ValueToMemoryAccess[BB] = Phi;
1614  return Phi;
1615 }
1616 
1617 MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1618  MemoryAccess *Definition,
1619  const MemoryUseOrDef *Template) {
1620  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1621  MemoryUseOrDef *NewAccess = createNewAccess(I, Template);
1622  assert(
1623  NewAccess != nullptr &&
1624  "Tried to create a memory access for a non-memory touching instruction");
1625  NewAccess->setDefiningAccess(Definition);
1626  return NewAccess;
1627 }
1628 
1629 // Return true if the instruction has ordering constraints.
1630 // Note specifically that this only considers stores and loads
1631 // because others are still considered ModRef by getModRefInfo.
1632 static inline bool isOrdered(const Instruction *I) {
1633  if (auto *SI = dyn_cast<StoreInst>(I)) {
1634  if (!SI->isUnordered())
1635  return true;
1636  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1637  if (!LI->isUnordered())
1638  return true;
1639  }
1640  return false;
1641 }
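// A sketch of what this classification means downstream: a volatile or atomic
// load is "ordered" and is therefore modelled as a MemoryDef, while a plain
// load stays a MemoryUse (assuming no other side effects):
//
//   %a = load volatile i32, i32* %p                  ; ordered -> MemoryDef
//   %b = load atomic i32, i32* %p seq_cst, align 4   ; ordered -> MemoryDef
//   %c = load i32, i32* %p                           ; unordered -> MemoryUse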
1642 
1643 /// Helper function to create new memory accesses
1644 MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
1645  const MemoryUseOrDef *Template) {
1646  // The assume intrinsic has a control dependency which we model by claiming
1647  // that it writes arbitrarily. Ignore that fake memory dependency here.
1648  // FIXME: Replace this special casing with a more accurate modelling of
1649  // assume's control dependency.
1650  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1651  if (II->getIntrinsicID() == Intrinsic::assume)
1652  return nullptr;
1653 
1654  bool Def, Use;
1655  if (Template) {
1656  Def = dyn_cast_or_null<MemoryDef>(Template) != nullptr;
1657  Use = dyn_cast_or_null<MemoryUse>(Template) != nullptr;
1658 #if !defined(NDEBUG)
1659  ModRefInfo ModRef = AA->getModRefInfo(I, None);
1660  bool DefCheck, UseCheck;
1661  DefCheck = isModSet(ModRef) || isOrdered(I);
1662  UseCheck = isRefSet(ModRef);
1663  assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
1664 #endif
1665  } else {
1666  // Find out what effect this instruction has on memory.
1667  ModRefInfo ModRef = AA->getModRefInfo(I, None);
1668  // The isOrdered check is used to ensure that volatiles end up as defs
1669  // (atomics end up as ModRef right now anyway). Until we separate the
1670  // ordering chain from the memory chain, this enables people to see at least
1671  // some relative ordering to volatiles. Note that getClobberingMemoryAccess
1672  // will still give an answer that bypasses other volatile loads. TODO:
1673  // Separate memory aliasing and ordering into two different chains so that
1674  // we can precisely represent both "what memory will this read/write/is
1675  // clobbered by" and "what instructions can I move this past".
1676  Def = isModSet(ModRef) || isOrdered(I);
1677  Use = isRefSet(ModRef);
1678  }
1679 
1680  // It's possible for an instruction to not touch memory at all. During
1681  // construction, we ignore such instructions.
1682  if (!Def && !Use)
1683  return nullptr;
1684 
1685  MemoryUseOrDef *MUD;
1686  if (Def)
1687  MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1688  else
1689  MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1690  ValueToMemoryAccess[I] = MUD;
1691  return MUD;
1692 }
1693 
1694 /// Returns true if \p Replacer dominates \p Replacee.
1695 bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
1696  const MemoryAccess *Replacee) const {
1697  if (isa<MemoryUseOrDef>(Replacee))
1698  return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
1699  const auto *MP = cast<MemoryPhi>(Replacee);
1700  // For a phi node, the use occurs in the predecessor block of the phi node.
1701  // Since Replacee may occur multiple times in the phi node, we have to check each
1702  // operand to ensure Replacer dominates each operand where Replacee occurs.
1703  for (const Use &Arg : MP->operands()) {
1704  if (Arg.get() != Replacee &&
1705  !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
1706  return false;
1707  }
1708  return true;
1709 }
1710 
1711 /// Properly remove \p MA from all of MemorySSA's lookup tables.
1712 void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1713  assert(MA->use_empty() &&
1714  "Trying to remove memory access that still has uses");
1715  BlockNumbering.erase(MA);
1716  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1717  MUD->setDefiningAccess(nullptr);
1718  // Invalidate our walker's cache if necessary
1719  if (!isa<MemoryUse>(MA))
1720  Walker->invalidateInfo(MA);
1721 
1722  Value *MemoryInst;
1723  if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1724  MemoryInst = MUD->getMemoryInst();
1725  else
1726  MemoryInst = MA->getBlock();
1727 
1728  auto VMA = ValueToMemoryAccess.find(MemoryInst);
1729  if (VMA->second == MA)
1730  ValueToMemoryAccess.erase(VMA);
1731 }
1732 
1733 /// Properly remove \p MA from all of MemorySSA's lists.
1734 ///
1735 /// Because of the way the intrusive list and use lists work, it is important to
1736 /// do removal in the right order.
1737 /// ShouldDelete defaults to true, and will cause the memory access to also be
1738 /// deleted, not just removed.
1739 void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1740  BasicBlock *BB = MA->getBlock();
1741  // The access list owns the reference, so we erase it from the non-owning list
1742  // first.
1743  if (!isa<MemoryUse>(MA)) {
1744  auto DefsIt = PerBlockDefs.find(BB);
1745  std::unique_ptr<DefsList> &Defs = DefsIt->second;
1746  Defs->remove(*MA);
1747  if (Defs->empty())
1748  PerBlockDefs.erase(DefsIt);
1749  }
1750 
1751  // The erase call here will delete it. If we don't want it deleted, we call
1752  // remove instead.
1753  auto AccessIt = PerBlockAccesses.find(BB);
1754  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1755  if (ShouldDelete)
1756  Accesses->erase(MA);
1757  else
1758  Accesses->remove(MA);
1759 
1760  if (Accesses->empty()) {
1761  PerBlockAccesses.erase(AccessIt);
1762  BlockNumberingValid.erase(BB);
1763  }
1764 }
1765 
1766 void MemorySSA::print(raw_ostream &OS) const {
1767  MemorySSAAnnotatedWriter Writer(this);
1768  F.print(OS, &Writer);
1769 }
1770 
1771 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1772 LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1773 #endif
1774 
1775 void MemorySSA::verifyMemorySSA() const {
1776  verifyDefUses(F);
1777  verifyDomination(F);
1778  verifyOrdering(F);
1779  verifyDominationNumbers(F);
1780  Walker->verify(this);
1781  // Previously, the verification used to also verify that the clobberingAccess
1782  // cached by MemorySSA is the same as the clobberingAccess found at a later
1783  // query to AA. This does not hold true in general due to the current fragility
1784  // of BasicAA which has arbitrary caps on the things it analyzes before giving
1785  // up. As a result, transformations that are correct will lead to BasicAA
1786  // returning different Alias answers before and after that transformation.
1787  // Invalidating MemorySSA is not an option, as the results in BasicAA can be so
1788  // random, in the worst case we'd need to rebuild MemorySSA from scratch after
1789  // every transformation, which defeats the purpose of using it. For such an
1790  // example, see test4 added in D51960.
1791 }
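// A usage sketch (the pass pipeline here is only illustrative): verification
// can be requested from the command line with the -verify-memoryssa flag, e.g.
//
//   opt -early-cse-memssa -verify-memoryssa -disable-output input.ll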
1792 
1793 /// Verify that all of the blocks we believe to have valid domination numbers
1794 /// actually have valid domination numbers.
1795 void MemorySSA::verifyDominationNumbers(const Function &F) const {
1796 #ifndef NDEBUG
1797  if (BlockNumberingValid.empty())
1798  return;
1799 
1800  SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
1801  for (const BasicBlock &BB : F) {
1802  if (!ValidBlocks.count(&BB))
1803  continue;
1804 
1805  ValidBlocks.erase(&BB);
1806 
1807  const AccessList *Accesses = getBlockAccesses(&BB);
1808  // It's correct to say an empty block has valid numbering.
1809  if (!Accesses)
1810  continue;
1811 
1812  // Block numbering starts at 1.
1813  unsigned long LastNumber = 0;
1814  for (const MemoryAccess &MA : *Accesses) {
1815  auto ThisNumberIter = BlockNumbering.find(&MA);
1816  assert(ThisNumberIter != BlockNumbering.end() &&
1817  "MemoryAccess has no domination number in a valid block!");
1818 
1819  unsigned long ThisNumber = ThisNumberIter->second;
1820  assert(ThisNumber > LastNumber &&
1821  "Domination numbers should be strictly increasing!");
1822  LastNumber = ThisNumber;
1823  }
1824  }
1825 
1826  assert(ValidBlocks.empty() &&
1827  "All valid BasicBlocks should exist in F -- dangling pointers?");
1828 #endif
1829 }
1830 
1831 /// Verify that the order and existence of MemoryAccesses matches the
1832 /// order and existence of memory affecting instructions.
1833 void MemorySSA::verifyOrdering(Function &F) const {
1834 #ifndef NDEBUG
1835  // Walk all the blocks, comparing what the lookups think and what the access
1836  // lists think, as well as the order in the blocks vs the order in the access
1837  // lists.
1838  SmallVector<MemoryAccess *, 32> ActualAccesses;
1839  SmallVector<MemoryAccess *, 32> ActualDefs;
1840  for (BasicBlock &B : F) {
1841  const AccessList *AL = getBlockAccesses(&B);
1842  const auto *DL = getBlockDefs(&B);
1843  MemoryAccess *Phi = getMemoryAccess(&B);
1844  if (Phi) {
1845  ActualAccesses.push_back(Phi);
1846  ActualDefs.push_back(Phi);
1847  }
1848 
1849  for (Instruction &I : B) {
1850  MemoryAccess *MA = getMemoryAccess(&I);
1851  assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
1852  "We have memory affecting instructions "
1853  "in this block but they are not in the "
1854  "access list or defs list");
1855  if (MA) {
1856  ActualAccesses.push_back(MA);
1857  if (isa<MemoryDef>(MA))
1858  ActualDefs.push_back(MA);
1859  }
1860  }
1861  // Either we hit the assert, really have no accesses, or we have both
1862  // accesses and an access list.
1863  // Same with defs.
1864  if (!AL && !DL)
1865  continue;
1866  assert(AL->size() == ActualAccesses.size() &&
1867  "We don't have the same number of accesses in the block as on the "
1868  "access list");
1869  assert((DL || ActualDefs.size() == 0) &&
1870  "Either we should have a defs list, or we should have no defs");
1871  assert((!DL || DL->size() == ActualDefs.size()) &&
1872  "We don't have the same number of defs in the block as on the "
1873  "def list");
1874  auto ALI = AL->begin();
1875  auto AAI = ActualAccesses.begin();
1876  while (ALI != AL->end() && AAI != ActualAccesses.end()) {
1877  assert(&*ALI == *AAI && "Not the same accesses in the same order");
1878  ++ALI;
1879  ++AAI;
1880  }
1881  ActualAccesses.clear();
1882  if (DL) {
1883  auto DLI = DL->begin();
1884  auto ADI = ActualDefs.begin();
1885  while (DLI != DL->end() && ADI != ActualDefs.end()) {
1886  assert(&*DLI == *ADI && "Not the same defs in the same order");
1887  ++DLI;
1888  ++ADI;
1889  }
1890  }
1891  ActualDefs.clear();
1892  }
1893 #endif
1894 }
1895 
1896 /// Verify the domination properties of MemorySSA by checking that each
1897 /// definition dominates all of its uses.
1898 void MemorySSA::verifyDomination(Function &F) const {
1899 #ifndef NDEBUG
1900  for (BasicBlock &B : F) {
1901  // Phi nodes are attached to basic blocks
1902  if (MemoryPhi *MP = getMemoryAccess(&B))
1903  for (const Use &U : MP->uses())
1904  assert(dominates(MP, U) && "Memory PHI does not dominate its uses");
1905 
1906  for (Instruction &I : B) {
1907  MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
1908  if (!MD)
1909  continue;
1910 
1911  for (const Use &U : MD->uses())
1912  assert(dominates(MD, U) && "Memory Def does not dominate its uses");
1913  }
1914  }
1915 #endif
1916 }
1917 
1918 /// Verify the def-use lists in MemorySSA, by verifying that \p Use
1919 /// appears in the use list of \p Def.
1920 void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
1921 #ifndef NDEBUG
1922  // The live on entry use may cause us to get a NULL def here
1923  if (!Def)
1924  assert(isLiveOnEntryDef(Use) &&
1925  "Null def but use does not point to live on entry def");
1926  else
1927  assert(is_contained(Def->users(), Use) &&
1928  "Did not find use in def's use list");
1929 #endif
1930 }
1931 
1932 /// Verify the immediate use information, by walking all the memory
1933 /// accesses and verifying that, for each use, it appears in the
1934 /// appropriate def's use list
1935 void MemorySSA::verifyDefUses(Function &F) const {
1936 #ifndef NDEBUG
1937  for (BasicBlock &B : F) {
1938  // Phi nodes are attached to basic blocks
1939  if (MemoryPhi *Phi = getMemoryAccess(&B)) {
1940  assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
1941  pred_begin(&B), pred_end(&B))) &&
1942  "Incomplete MemoryPhi Node");
1943  for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1944  verifyUseInDefs(Phi->getIncomingValue(I), Phi);
1945  assert(find(predecessors(&B), Phi->getIncomingBlock(I)) !=
1946  pred_end(&B) &&
1947  "Incoming phi block not a block predecessor");
1948  }
1949  }
1950 
1951  for (Instruction &I : B) {
1952  if (MemoryUseOrDef *MA = getMemoryAccess(&I)) {
1953  verifyUseInDefs(MA->getDefiningAccess(), MA);
1954  }
1955  }
1956  }
1957 #endif
1958 }
1959 
1960 /// Perform a local numbering on blocks so that instruction ordering can be
1961 /// determined in constant time.
1962 /// TODO: We currently just number in order. If we numbered by N, we could
1963 /// allow at least N-1 sequences of insertBefore or insertAfter (and at least
1964 /// log2(N) sequences of mixed before and after) without needing to invalidate
1965 /// the numbering.
1966 void MemorySSA::renumberBlock(const BasicBlock *B) const {
1967  // The pre-increment ensures the numbers really start at 1.
1968  unsigned long CurrentNumber = 0;
1969  const AccessList *AL = getBlockAccesses(B);
1970  assert(AL != nullptr && "Asking to renumber an empty block");
1971  for (const auto &I : *AL)
1972  BlockNumbering[&I] = ++CurrentNumber;
1973  BlockNumberingValid.insert(B);
1974 }
1975 
1976 /// Determine, for two memory accesses in the same block,
1977 /// whether \p Dominator dominates \p Dominatee.
1978 /// \returns True if \p Dominator dominates \p Dominatee.
1980  const MemoryAccess *Dominatee) const {
1981  const BasicBlock *DominatorBlock = Dominator->getBlock();
1982 
1983  assert((DominatorBlock == Dominatee->getBlock()) &&
1984  "Asking for local domination when accesses are in different blocks!");
1985  // A node dominates itself.
1986  if (Dominatee == Dominator)
1987  return true;
1988 
1989  // When Dominatee is defined on function entry, it is not dominated by another
1990  // memory access.
1991  if (isLiveOnEntryDef(Dominatee))
1992  return false;
1993 
1994  // When Dominator is defined on function entry, it dominates the other memory
1995  // access.
1996  if (isLiveOnEntryDef(Dominator))
1997  return true;
1998 
1999  if (!BlockNumberingValid.count(DominatorBlock))
2000  renumberBlock(DominatorBlock);
2001 
2002  unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
2003  // All numbers start with 1
2004  assert(DominatorNum != 0 && "Block was not numbered properly");
2005  unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
2006  assert(DominateeNum != 0 && "Block was not numbered properly");
2007  return DominatorNum < DominateeNum;
2008 }
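// For instance, if a block's access list is [MemoryPhi, MemoryDef, MemoryUse],
// renumberBlock assigns them 1, 2 and 3 in list order, so after the first
// query in a block, locallyDominates reduces to a single integer comparison.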
2009 
2010 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2011  const MemoryAccess *Dominatee) const {
2012  if (Dominator == Dominatee)
2013  return true;
2014 
2015  if (isLiveOnEntryDef(Dominatee))
2016  return false;
2017 
2018  if (Dominator->getBlock() != Dominatee->getBlock())
2019  return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
2020  return locallyDominates(Dominator, Dominatee);
2021 }
2022 
2023 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2024  const Use &Dominatee) const {
2025  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
2026  BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
2027  // The def must dominate the incoming block of the phi.
2028  if (UseBB != Dominator->getBlock())
2029  return DT->dominates(Dominator->getBlock(), UseBB);
2030  // If the UseBB and the DefBB are the same, compare locally.
2031  return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
2032  }
2033  // If it's not a PHI node use, the normal dominates can already handle it.
2034  return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
2035 }
2036 
2037 static const char LiveOnEntryStr[] = "liveOnEntry";
2038 
2039 void MemoryAccess::print(raw_ostream &OS) const {
2040  switch (getValueID()) {
2041  case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
2042  case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
2043  case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
2044  }
2045  llvm_unreachable("invalid value id");
2046 }
2047 
2048 void MemoryDef::print(raw_ostream &OS) const {
2049  MemoryAccess *UO = getDefiningAccess();
2050 
2051  auto printID = [&OS](MemoryAccess *A) {
2052  if (A && A->getID())
2053  OS << A->getID();
2054  else
2055  OS << LiveOnEntryStr;
2056  };
2057 
2058  OS << getID() << " = MemoryDef(";
2059  printID(UO);
2060  OS << ")";
2061 
2062  if (isOptimized()) {
2063  OS << "->";
2064  printID(getOptimized());
2065 
2066  if (Optional<AliasResult> AR = getOptimizedAccessType())
2067  OS << " " << *AR;
2068  }
2069 }
2070 
2071 void MemoryPhi::print(raw_ostream &OS) const {
2072  bool First = true;
2073  OS << getID() << " = MemoryPhi(";
2074  for (const auto &Op : operands()) {
2075  BasicBlock *BB = getIncomingBlock(Op);
2076  MemoryAccess *MA = cast<MemoryAccess>(Op);
2077  if (!First)
2078  OS << ',';
2079  else
2080  First = false;
2081 
2082  OS << '{';
2083  if (BB->hasName())
2084  OS << BB->getName();
2085  else
2086  BB->printAsOperand(OS, false);
2087  OS << ',';
2088  if (unsigned ID = MA->getID())
2089  OS << ID;
2090  else
2091  OS << LiveOnEntryStr;
2092  OS << '}';
2093  }
2094  OS << ')';
2095 }
2096 
2097 void MemoryUse::print(raw_ostream &OS) const {
2098  MemoryAccess *UO = getDefiningAccess();
2099  OS << "MemoryUse(";
2100  if (UO && UO->getID())
2101  OS << UO->getID();
2102  else
2103  OS << LiveOnEntryStr;
2104  OS << ')';
2105 
2106  if (Optional<AliasResult> AR = getOptimizedAccessType())
2107  OS << " " << *AR;
2108 }
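// Taken together, these printers produce annotations of the following shape
// (an illustrative example, not output from a real module):
//
//   ; 1 = MemoryDef(liveOnEntry)
//   store i32 0, i32* %p
//   ; MemoryUse(1) MustAlias
//   %v = load i32, i32* %p
//   ; 3 = MemoryPhi({entry,1},{if.then,2})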
2109 
2110 void MemoryAccess::dump() const {
2111 // Cannot completely remove virtual function even in release mode.
2112 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2113  print(dbgs());
2114  dbgs() << "\n";
2115 #endif
2116 }
2117 
2118 char MemorySSAPrinterLegacyPass::ID = 0;
2119 
2120 MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
2121  initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
2122 }
2123 
2124 void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
2125  AU.setPreservesAll();
2126  AU.addRequired<MemorySSAWrapperPass>();
2127 }
2128 
2129 bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
2130  auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2131  MSSA.print(dbgs());
2132  if (VerifyMemorySSA)
2133  MSSA.verifyMemorySSA();
2134  return false;
2135 }
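// A usage sketch: under the legacy pass manager this printer is reachable as
//
//   opt -print-memoryssa -disable-output input.ll
//
// which emits the per-instruction annotations produced by the printers above.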
2136 
2137 AnalysisKey MemorySSAAnalysis::Key;
2138 
2139 MemorySSAAnalysis::Result
2140 MemorySSAAnalysis::run(Function &F, FunctionAnalysisManager &AM) {
2141  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
2142  auto &AA = AM.getResult<AAManager>(F);
2143  return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT));
2144 }
2145 
2146 PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
2147  FunctionAnalysisManager &AM) {
2148  OS << "MemorySSA for function: " << F.getName() << "\n";
2149  AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);
2150 
2151  return PreservedAnalyses::all();
2152 }
2153 
2154 PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
2155  FunctionAnalysisManager &AM) {
2156  AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
2157 
2158  return PreservedAnalyses::all();
2159 }
2160 
2161 char MemorySSAWrapperPass::ID = 0;
2162 
2163 MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
2164  initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
2165 }
2166 
2167 void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
2168 
2169 void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2170  AU.setPreservesAll();
2171  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2172  AU.addRequiredTransitive<AAResultsWrapperPass>();
2173 }
2174 
2175 bool MemorySSAWrapperPass::runOnFunction(Function &F) {
2176  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2177  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2178  MSSA.reset(new MemorySSA(F, &AA, &DT));
2179  return false;
2180 }
2181 
2182 void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }
2183 
2184 void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
2185  MSSA->print(OS);
2186 }
2187 
2188 MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
2189 
2190 /// Walk the use-def chains starting at \p StartingAccess and find
2191 /// the MemoryAccess that actually clobbers Loc.
2192 ///
2193 /// \returns our clobbering memory access
2194 MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
2195  MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
2196  if (isa<MemoryPhi>(StartingAccess))
2197  return StartingAccess;
2198 
2199  auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
2200  if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
2201  return StartingUseOrDef;
2202 
2203  Instruction *I = StartingUseOrDef->getMemoryInst();
2204 
2205  // Conservatively, fences are always clobbers, so don't perform the walk if we
2206  // hit a fence.
2207  if (!isa<CallBase>(I) && I->isFenceLike())
2208  return StartingUseOrDef;
2209 
2210  UpwardsMemoryQuery Q;
2211  Q.OriginalAccess = StartingUseOrDef;
2212  Q.StartingLoc = Loc;
2213  Q.Inst = I;
2214  Q.IsCall = false;
2215 
2216  // Unlike the other function, do not walk to the def of a def, because we are
2217  // handed something we already believe is the clobbering access.
2218  // We never set SkipSelf to true in Q in this method.
2219  MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
2220  ? StartingUseOrDef->getDefiningAccess()
2221  : StartingUseOrDef;
2222 
2223  MemoryAccess *Clobber = Walker.findClobber(DefiningAccess, Q);
2224  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2225  LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
2226  LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
2227  LLVM_DEBUG(dbgs() << *Clobber << "\n");
2228  return Clobber;
2229 }
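// Example (a sketch; assumes `W` is a MemorySSAWalker and `SI` is a StoreInst
// whose MemoryAccess is `MA`): passing an explicit location asks what clobbers
// Loc starting from MA, instead of re-deriving the location from MA's own
// instruction:
//
//   MemoryLocation Loc = MemoryLocation::get(SI);
//   MemoryAccess *C = W->getClobberingMemoryAccess(MA, Loc);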
2230 
2231 MemoryAccess *
2232 MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(MemoryAccess *MA,
2233  bool SkipSelf) {
2234  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2235  // If this is a MemoryPhi, we can't do anything.
2236  if (!StartingAccess)
2237  return MA;
2238 
2239  bool IsOptimized = false;
2240 
2241  // If this is an already optimized use or def, return the optimized result.
2242  // Note: Currently, we store the optimized def result in a separate field,
2243  // since we can't use the defining access.
2244  if (StartingAccess->isOptimized()) {
2245  if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
2246  return StartingAccess->getOptimized();
2247  IsOptimized = true;
2248  }
2249 
2250  const Instruction *I = StartingAccess->getMemoryInst();
2251  // We can't sanely do anything with fences, since they conservatively clobber
2252  // all memory and have no locations to get pointers from to try to
2253  // disambiguate.
2254  if (!isa<CallBase>(I) && I->isFenceLike())
2255  return StartingAccess;
2256 
2257  UpwardsMemoryQuery Q(I, StartingAccess);
2258 
2259  if (isUseTriviallyOptimizableToLiveOnEntry(*AA, I)) {
2260  MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2261  StartingAccess->setOptimized(LiveOnEntry);
2262  StartingAccess->setOptimizedAccessType(None);
2263  return LiveOnEntry;
2264  }
2265 
2266  MemoryAccess *OptimizedAccess;
2267  if (!IsOptimized) {
2268  // Start with the thing we already think clobbers this location
2269  MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2270 
2271  // At this point, DefiningAccess may be the live on entry def.
2272  // If it is, we will not get a better result.
2273  if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
2274  StartingAccess->setOptimized(DefiningAccess);
2275  StartingAccess->setOptimizedAccessType(None);
2276  return DefiningAccess;
2277  }
2278 
2279  OptimizedAccess = Walker.findClobber(DefiningAccess, Q);
2280  StartingAccess->setOptimized(OptimizedAccess);
2281  if (MSSA->isLiveOnEntryDef(OptimizedAccess))
2282  StartingAccess->setOptimizedAccessType(None);
2283  else if (Q.AR == MustAlias)
2284  StartingAccess->setOptimizedAccessType(MustAlias);
2285  } else
2286  OptimizedAccess = StartingAccess->getOptimized();
2287 
2288  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2289  LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
2290  LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
2291  LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");
2292 
2293  MemoryAccess *Result;
2294  if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
2295  isa<MemoryDef>(StartingAccess)) {
2296  assert(isa<MemoryDef>(Q.OriginalAccess));
2297  Q.SkipSelfAccess = true;
2298  Result = Walker.findClobber(OptimizedAccess, Q);
2299  } else
2300  Result = OptimizedAccess;
2301 
2302  LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
2303  LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");
2304 
2305  return Result;
2306 }
2307 
2308 MemoryAccess *
2309 MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2310  return Walker->getClobberingMemoryAccessBase(MA, false);
2311 }
2312 
2313 MemoryAccess *
2314 MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA,
2315  const MemoryLocation &Loc) {
2316  return Walker->getClobberingMemoryAccessBase(MA, Loc);
2317 }
2318 
2319 MemoryAccess *
2320 MemorySSA::SkipSelfWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2321  return Walker->getClobberingMemoryAccessBase(MA, true);
2322 }
2323 
2324 MemoryAccess *
2325 MemorySSA::SkipSelfWalker::getClobberingMemoryAccess(MemoryAccess *MA,
2326  const MemoryLocation &Loc) {
2327  return Walker->getClobberingMemoryAccessBase(MA, Loc);
2328 }
2329 
2330 MemoryAccess *
2331 DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2332  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2333  return Use->getDefiningAccess();
2334  return MA;
2335 }
2336 
2337 MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2338  MemoryAccess *StartingAccess, const MemoryLocation &) {
2339  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2340  return Use->getDefiningAccess();
2341  return StartingAccess;
2342 }
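// Design note: DoNothingMemorySSAWalker skips all alias-analysis walking and
// simply returns the pre-computed defining access, trading precision for O(1)
// queries.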
2343 
2344 void MemoryPhi::deleteMe(DerivedUser *Self) {
2345  delete static_cast<MemoryPhi *>(Self);
2346 }
2347 
2348 void MemoryDef::deleteMe(DerivedUser *Self) {
2349  delete static_cast<MemoryDef *>(Self);
2350 }
2351 
2352 void MemoryUse::deleteMe(DerivedUser *Self) {
2353  delete static_cast<MemoryUse *>(Self);
2354 }