LLVM  10.0.0svn
MemorySSA.cpp
Go to the documentation of this file.
1 //===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the MemorySSA class.
10 //
11 //===----------------------------------------------------------------------===//
12 
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/DenseMapInfo.h"
16 #include "llvm/ADT/DenseSet.h"
18 #include "llvm/ADT/Hashing.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/Optional.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/iterator.h"
29 #include "llvm/Config/llvm-config.h"
31 #include "llvm/IR/BasicBlock.h"
32 #include "llvm/IR/Dominators.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/Instruction.h"
35 #include "llvm/IR/Instructions.h"
36 #include "llvm/IR/IntrinsicInst.h"
37 #include "llvm/IR/Intrinsics.h"
38 #include "llvm/IR/LLVMContext.h"
39 #include "llvm/IR/PassManager.h"
40 #include "llvm/IR/Use.h"
41 #include "llvm/Pass.h"
43 #include "llvm/Support/Casting.h"
45 #include "llvm/Support/Compiler.h"
46 #include "llvm/Support/Debug.h"
50 #include <algorithm>
51 #include <cassert>
52 #include <cstdlib>
53 #include <iterator>
54 #include <memory>
55 #include <utility>
56 
57 using namespace llvm;
58 
59 #define DEBUG_TYPE "memoryssa"
60 
61 INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
62  true)
66  true)
67 
69  "Memory SSA Printer", false, false)
70 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
71 INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
72  "Memory SSA Printer", false, false)
73 
74 static cl::opt<unsigned> MaxCheckLimit(
75  "memssa-check-limit", cl::Hidden, cl::init(100),
76  cl::desc("The maximum number of stores/phis MemorySSA"
77  "will consider trying to walk past (default = 100)"));
78 
79 // Always verify MemorySSA if expensive checking is enabled.
80 #ifdef EXPENSIVE_CHECKS
81 bool llvm::VerifyMemorySSA = true;
82 #else
83 bool llvm::VerifyMemorySSA = false;
84 #endif
85 /// Enables memory ssa as a dependency for loop passes in legacy pass manager.
87  "enable-mssa-loop-dependency", cl::Hidden, cl::init(true),
88  cl::desc("Enable MemorySSA dependency for loop pass manager"));
89 
91  VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
92  cl::Hidden, cl::desc("Enable verification of MemorySSA."));
93 
94 namespace llvm {
95 
96 /// An assembly annotator class to print Memory SSA information in
97 /// comments.
99  friend class MemorySSA;
100 
101  const MemorySSA *MSSA;
102 
103 public:
104  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}
105 
107  formatted_raw_ostream &OS) override {
108  if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
109  OS << "; " << *MA << "\n";
110  }
111 
113  formatted_raw_ostream &OS) override {
114  if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
115  OS << "; " << *MA << "\n";
116  }
117 };
118 
119 } // end namespace llvm
120 
121 namespace {
122 
123 /// Our current alias analysis API differentiates heavily between calls and
124 /// non-calls, and functions called on one usually assert on the other.
125 /// This class encapsulates the distinction to simplify other code that wants
126 /// "Memory affecting instructions and related data" to use as a key.
127 /// For example, this class is used as a densemap key in the use optimizer.
128 class MemoryLocOrCall {
129 public:
130  bool IsCall = false;
131 
132  MemoryLocOrCall(MemoryUseOrDef *MUD)
133  : MemoryLocOrCall(MUD->getMemoryInst()) {}
134  MemoryLocOrCall(const MemoryUseOrDef *MUD)
135  : MemoryLocOrCall(MUD->getMemoryInst()) {}
136 
137  MemoryLocOrCall(Instruction *Inst) {
138  if (auto *C = dyn_cast<CallBase>(Inst)) {
139  IsCall = true;
140  Call = C;
141  } else {
142  IsCall = false;
143  // There is no such thing as a memorylocation for a fence inst, and it is
144  // unique in that regard.
145  if (!isa<FenceInst>(Inst))
146  Loc = MemoryLocation::get(Inst);
147  }
148  }
149 
150  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}
151 
152  const CallBase *getCall() const {
153  assert(IsCall);
154  return Call;
155  }
156 
157  MemoryLocation getLoc() const {
158  assert(!IsCall);
159  return Loc;
160  }
161 
162  bool operator==(const MemoryLocOrCall &Other) const {
163  if (IsCall != Other.IsCall)
164  return false;
165 
166  if (!IsCall)
167  return Loc == Other.Loc;
168 
169  if (Call->getCalledValue() != Other.Call->getCalledValue())
170  return false;
171 
172  return Call->arg_size() == Other.Call->arg_size() &&
173  std::equal(Call->arg_begin(), Call->arg_end(),
174  Other.Call->arg_begin());
175  }
176 
177 private:
178  union {
179  const CallBase *Call;
180  MemoryLocation Loc;
181  };
182 };
183 
184 } // end anonymous namespace
185 
186 namespace llvm {
187 
188 template <> struct DenseMapInfo<MemoryLocOrCall> {
189  static inline MemoryLocOrCall getEmptyKey() {
190  return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
191  }
192 
193  static inline MemoryLocOrCall getTombstoneKey() {
194  return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
195  }
196 
197  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
198  if (!MLOC.IsCall)
199  return hash_combine(
200  MLOC.IsCall,
202 
203  hash_code hash =
205  MLOC.getCall()->getCalledValue()));
206 
207  for (const Value *Arg : MLOC.getCall()->args())
209  return hash;
210  }
211 
212  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
213  return LHS == RHS;
214  }
215 };
216 
217 } // end namespace llvm
218 
219 /// This does one-way checks to see if Use could theoretically be hoisted above
220 /// MayClobber. This will not check the other way around.
221 ///
222 /// This assumes that, for the purposes of MemorySSA, Use comes directly after
223 /// MayClobber, with no potentially clobbering operations in between them.
224 /// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
225 static bool areLoadsReorderable(const LoadInst *Use,
226  const LoadInst *MayClobber) {
227  bool VolatileUse = Use->isVolatile();
228  bool VolatileClobber = MayClobber->isVolatile();
229  // Volatile operations may never be reordered with other volatile operations.
230  if (VolatileUse && VolatileClobber)
231  return false;
232  // Otherwise, volatile doesn't matter here. From the language reference:
233  // 'optimizers may change the order of volatile operations relative to
234  // non-volatile operations.'"
235 
236  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
237  // is weaker, it can be moved above other loads. We just need to be sure that
238  // MayClobber isn't an acquire load, because loads can't be moved above
239  // acquire loads.
240  //
241  // Note that this explicitly *does* allow the free reordering of monotonic (or
242  // weaker) loads of the same address.
243  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
244  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
246  return !(SeqCstUse || MayClobberIsAcquire);
247 }
248 
249 namespace {
250 
251 struct ClobberAlias {
252  bool IsClobber;
254 };
255 
256 } // end anonymous namespace
257 
258 // Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
259 // ignored if IsClobber = false.
260 template <typename AliasAnalysisType>
261 static ClobberAlias
263  const Instruction *UseInst, AliasAnalysisType &AA) {
264  Instruction *DefInst = MD->getMemoryInst();
265  assert(DefInst && "Defining instruction not actually an instruction");
266  const auto *UseCall = dyn_cast<CallBase>(UseInst);
268 
269  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
270  // These intrinsics will show up as affecting memory, but they are just
271  // markers, mostly.
272  //
273  // FIXME: We probably don't actually want MemorySSA to model these at all
274  // (including creating MemoryAccesses for them): we just end up inventing
275  // clobbers where they don't really exist at all. Please see D43269 for
276  // context.
277  switch (II->getIntrinsicID()) {
278  case Intrinsic::lifetime_start:
279  if (UseCall)
280  return {false, NoAlias};
281  AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc);
282  return {AR != NoAlias, AR};
283  case Intrinsic::lifetime_end:
284  case Intrinsic::invariant_start:
285  case Intrinsic::invariant_end:
286  case Intrinsic::assume:
287  return {false, NoAlias};
288  case Intrinsic::dbg_addr:
289  case Intrinsic::dbg_declare:
290  case Intrinsic::dbg_label:
291  case Intrinsic::dbg_value:
292  llvm_unreachable("debuginfo shouldn't have associated defs!");
293  default:
294  break;
295  }
296  }
297 
298  if (UseCall) {
299  ModRefInfo I = AA.getModRefInfo(DefInst, UseCall);
300  AR = isMustSet(I) ? MustAlias : MayAlias;
301  return {isModOrRefSet(I), AR};
302  }
303 
304  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
305  if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
306  return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias};
307 
308  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
309  AR = isMustSet(I) ? MustAlias : MayAlias;
310  return {isModSet(I), AR};
311 }
312 
313 template <typename AliasAnalysisType>
314 static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
315  const MemoryUseOrDef *MU,
316  const MemoryLocOrCall &UseMLOC,
317  AliasAnalysisType &AA) {
318  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
319  // to exist while MemoryLocOrCall is pushed through places.
320  if (UseMLOC.IsCall)
322  AA);
323  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
324  AA);
325 }
326 
327 // Return true when MD may alias MU, return false otherwise.
329  AliasAnalysis &AA) {
330  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
331 }
332 
333 namespace {
334 
335 struct UpwardsMemoryQuery {
336  // True if our original query started off as a call
337  bool IsCall = false;
338  // The pointer location we started the query with. This will be empty if
339  // IsCall is true.
340  MemoryLocation StartingLoc;
341  // This is the instruction we were querying about.
342  const Instruction *Inst = nullptr;
343  // The MemoryAccess we actually got called with, used to test local domination
344  const MemoryAccess *OriginalAccess = nullptr;
346  bool SkipSelfAccess = false;
347 
348  UpwardsMemoryQuery() = default;
349 
350  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
351  : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
352  if (!IsCall)
353  StartingLoc = MemoryLocation::get(Inst);
354  }
355 };
356 
357 } // end anonymous namespace
358 
359 static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
360  BatchAAResults &AA) {
361  Instruction *Inst = MD->getMemoryInst();
362  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
363  switch (II->getIntrinsicID()) {
364  case Intrinsic::lifetime_end:
365  return AA.alias(MemoryLocation(II->getArgOperand(1)), Loc) == MustAlias;
366  default:
367  return false;
368  }
369  }
370  return false;
371 }
372 
373 template <typename AliasAnalysisType>
374 static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
375  const Instruction *I) {
376  // If the memory can't be changed, then loads of the memory can't be
377  // clobbered.
378  return isa<LoadInst>(I) && (I->hasMetadata(LLVMContext::MD_invariant_load) ||
379  AA.pointsToConstantMemory(MemoryLocation(
380  cast<LoadInst>(I)->getPointerOperand())));
381 }
382 
383 /// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
384 /// inbetween `Start` and `ClobberAt` can clobbers `Start`.
385 ///
386 /// This is meant to be as simple and self-contained as possible. Because it
387 /// uses no cache, etc., it can be relatively expensive.
388 ///
389 /// \param Start The MemoryAccess that we want to walk from.
390 /// \param ClobberAt A clobber for Start.
391 /// \param StartLoc The MemoryLocation for Start.
392 /// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
393 /// \param Query The UpwardsMemoryQuery we used for our search.
394 /// \param AA The AliasAnalysis we used for our search.
395 /// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
396 
397 template <typename AliasAnalysisType>
398 LLVM_ATTRIBUTE_UNUSED static void
400  const MemoryLocation &StartLoc, const MemorySSA &MSSA,
401  const UpwardsMemoryQuery &Query, AliasAnalysisType &AA,
402  bool AllowImpreciseClobber = false) {
403  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
404 
405  if (MSSA.isLiveOnEntryDef(Start)) {
406  assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
407  "liveOnEntry must clobber itself");
408  return;
409  }
410 
411  bool FoundClobber = false;
414  Worklist.emplace_back(Start, StartLoc);
415  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
416  // is found, complain.
417  while (!Worklist.empty()) {
418  auto MAP = Worklist.pop_back_val();
419  // All we care about is that nothing from Start to ClobberAt clobbers Start.
420  // We learn nothing from revisiting nodes.
421  if (!VisitedPhis.insert(MAP).second)
422  continue;
423 
424  for (const auto *MA : def_chain(MAP.first)) {
425  if (MA == ClobberAt) {
426  if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
427  // instructionClobbersQuery isn't essentially free, so don't use `|=`,
428  // since it won't let us short-circuit.
429  //
430  // Also, note that this can't be hoisted out of the `Worklist` loop,
431  // since MD may only act as a clobber for 1 of N MemoryLocations.
432  FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
433  if (!FoundClobber) {
434  ClobberAlias CA =
435  instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
436  if (CA.IsClobber) {
437  FoundClobber = true;
438  // Not used: CA.AR;
439  }
440  }
441  }
442  break;
443  }
444 
445  // We should never hit liveOnEntry, unless it's the clobber.
446  assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");
447 
448  if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
449  // If Start is a Def, skip self.
450  if (MD == Start)
451  continue;
452 
453  assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
454  .IsClobber &&
455  "Found clobber before reaching ClobberAt!");
456  continue;
457  }
458 
459  if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
460  (void)MU;
461  assert (MU == Start &&
462  "Can only find use in def chain if Start is a use");
463  continue;
464  }
465 
466  assert(isa<MemoryPhi>(MA));
467  Worklist.append(
468  upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second}),
469  upward_defs_end());
470  }
471  }
472 
473  // If the verify is done following an optimization, it's possible that
474  // ClobberAt was a conservative clobbering, that we can now infer is not a
475  // true clobbering access. Don't fail the verify if that's the case.
476  // We do have accesses that claim they're optimized, but could be optimized
477  // further. Updating all these can be expensive, so allow it for now (FIXME).
478  if (AllowImpreciseClobber)
479  return;
480 
481  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
482  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
483  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
484  "ClobberAt never acted as a clobber");
485 }
486 
487 namespace {
488 
489 /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
490 /// in one class.
491 template <class AliasAnalysisType> class ClobberWalker {
492  /// Save a few bytes by using unsigned instead of size_t.
493  using ListIndex = unsigned;
494 
495  /// Represents a span of contiguous MemoryDefs, potentially ending in a
496  /// MemoryPhi.
497  struct DefPath {
498  MemoryLocation Loc;
499  // Note that, because we always walk in reverse, Last will always dominate
500  // First. Also note that First and Last are inclusive.
501  MemoryAccess *First;
502  MemoryAccess *Last;
503  Optional<ListIndex> Previous;
504 
505  DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
506  Optional<ListIndex> Previous)
507  : Loc(Loc), First(First), Last(Last), Previous(Previous) {}
508 
509  DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
510  Optional<ListIndex> Previous)
511  : DefPath(Loc, Init, Init, Previous) {}
512  };
513 
514  const MemorySSA &MSSA;
515  AliasAnalysisType &AA;
516  DominatorTree &DT;
517  UpwardsMemoryQuery *Query;
518  unsigned *UpwardWalkLimit;
519 
520  // Phi optimization bookkeeping
523 
524  /// Find the nearest def or phi that `From` can legally be optimized to.
525  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
526  assert(From->getNumOperands() && "Phi with no operands?");
527 
528  BasicBlock *BB = From->getBlock();
529  MemoryAccess *Result = MSSA.getLiveOnEntryDef();
530  DomTreeNode *Node = DT.getNode(BB);
531  while ((Node = Node->getIDom())) {
532  auto *Defs = MSSA.getBlockDefs(Node->getBlock());
533  if (Defs)
534  return &*Defs->rbegin();
535  }
536  return Result;
537  }
538 
539  /// Result of calling walkToPhiOrClobber.
540  struct UpwardsWalkResult {
541  /// The "Result" of the walk. Either a clobber, the last thing we walked, or
542  /// both. Include alias info when clobber found.
543  MemoryAccess *Result;
544  bool IsKnownClobber;
546  };
547 
548  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
549  /// This will update Desc.Last as it walks. It will (optionally) also stop at
550  /// StopAt.
551  ///
552  /// This does not test for whether StopAt is a clobber
553  UpwardsWalkResult
554  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
555  const MemoryAccess *SkipStopAt = nullptr) const {
556  assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
557  assert(UpwardWalkLimit && "Need a valid walk limit");
558  bool LimitAlreadyReached = false;
559  // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
560  // it to 1. This will not do any alias() calls. It either returns in the
561  // first iteration in the loop below, or is set back to 0 if all def chains
562  // are free of MemoryDefs.
563  if (!*UpwardWalkLimit) {
564  *UpwardWalkLimit = 1;
565  LimitAlreadyReached = true;
566  }
567 
568  for (MemoryAccess *Current : def_chain(Desc.Last)) {
569  Desc.Last = Current;
570  if (Current == StopAt || Current == SkipStopAt)
571  return {Current, false, MayAlias};
572 
573  if (auto *MD = dyn_cast<MemoryDef>(Current)) {
574  if (MSSA.isLiveOnEntryDef(MD))
575  return {MD, true, MustAlias};
576 
577  if (!--*UpwardWalkLimit)
578  return {Current, true, MayAlias};
579 
580  ClobberAlias CA =
581  instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
582  if (CA.IsClobber)
583  return {MD, true, CA.AR};
584  }
585  }
586 
587  if (LimitAlreadyReached)
588  *UpwardWalkLimit = 0;
589 
590  assert(isa<MemoryPhi>(Desc.Last) &&
591  "Ended at a non-clobber that's not a phi?");
592  return {Desc.Last, false, MayAlias};
593  }
594 
595  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
596  ListIndex PriorNode) {
597  auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
598  upward_defs_end());
599  for (const MemoryAccessPair &P : UpwardDefs) {
600  PausedSearches.push_back(Paths.size());
601  Paths.emplace_back(P.second, P.first, PriorNode);
602  }
603  }
604 
605  /// Represents a search that terminated after finding a clobber. This clobber
606  /// may or may not be present in the path of defs from LastNode..SearchStart,
607  /// since it may have been retrieved from cache.
608  struct TerminatedPath {
609  MemoryAccess *Clobber;
610  ListIndex LastNode;
611  };
612 
613  /// Get an access that keeps us from optimizing to the given phi.
614  ///
615  /// PausedSearches is an array of indices into the Paths array. Its incoming
616  /// value is the indices of searches that stopped at the last phi optimization
617  /// target. It's left in an unspecified state.
618  ///
619  /// If this returns None, NewPaused is a vector of searches that terminated
620  /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
622  getBlockingAccess(const MemoryAccess *StopWhere,
623  SmallVectorImpl<ListIndex> &PausedSearches,
624  SmallVectorImpl<ListIndex> &NewPaused,
625  SmallVectorImpl<TerminatedPath> &Terminated) {
626  assert(!PausedSearches.empty() && "No searches to continue?");
627 
628  // BFS vs DFS really doesn't make a difference here, so just do a DFS with
629  // PausedSearches as our stack.
630  while (!PausedSearches.empty()) {
631  ListIndex PathIndex = PausedSearches.pop_back_val();
632  DefPath &Node = Paths[PathIndex];
633 
634  // If we've already visited this path with this MemoryLocation, we don't
635  // need to do so again.
636  //
637  // NOTE: That we just drop these paths on the ground makes caching
638  // behavior sporadic. e.g. given a diamond:
639  // A
640  // B C
641  // D
642  //
643  // ...If we walk D, B, A, C, we'll only cache the result of phi
644  // optimization for A, B, and D; C will be skipped because it dies here.
645  // This arguably isn't the worst thing ever, since:
646  // - We generally query things in a top-down order, so if we got below D
647  // without needing cache entries for {C, MemLoc}, then chances are
648  // that those cache entries would end up ultimately unused.
649  // - We still cache things for A, so C only needs to walk up a bit.
650  // If this behavior becomes problematic, we can fix without a ton of extra
651  // work.
652  if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
653  continue;
654 
655  const MemoryAccess *SkipStopWhere = nullptr;
656  if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
657  assert(isa<MemoryDef>(Query->OriginalAccess));
658  SkipStopWhere = Query->OriginalAccess;
659  }
660 
661  UpwardsWalkResult Res = walkToPhiOrClobber(Node,
662  /*StopAt=*/StopWhere,
663  /*SkipStopAt=*/SkipStopWhere);
664  if (Res.IsKnownClobber) {
665  assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);
666 
667  // If this wasn't a cache hit, we hit a clobber when walking. That's a
668  // failure.
669  TerminatedPath Term{Res.Result, PathIndex};
670  if (!MSSA.dominates(Res.Result, StopWhere))
671  return Term;
672 
673  // Otherwise, it's a valid thing to potentially optimize to.
674  Terminated.push_back(Term);
675  continue;
676  }
677 
678  if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
679  // We've hit our target. Save this path off for if we want to continue
680  // walking. If we are in the mode of skipping the OriginalAccess, and
681  // we've reached back to the OriginalAccess, do not save path, we've
682  // just looped back to self.
683  if (Res.Result != SkipStopWhere)
684  NewPaused.push_back(PathIndex);
685  continue;
686  }
687 
688  assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
689  addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
690  }
691 
692  return None;
693  }
694 
695  template <typename T, typename Walker>
696  struct generic_def_path_iterator
697  : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
698  std::forward_iterator_tag, T *> {
699  generic_def_path_iterator() {}
700  generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}
701 
702  T &operator*() const { return curNode(); }
703 
704  generic_def_path_iterator &operator++() {
705  N = curNode().Previous;
706  return *this;
707  }
708 
709  bool operator==(const generic_def_path_iterator &O) const {
710  if (N.hasValue() != O.N.hasValue())
711  return false;
712  return !N.hasValue() || *N == *O.N;
713  }
714 
715  private:
716  T &curNode() const { return W->Paths[*N]; }
717 
718  Walker *W = nullptr;
720  };
721 
722  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
723  using const_def_path_iterator =
724  generic_def_path_iterator<const DefPath, const ClobberWalker>;
725 
726  iterator_range<def_path_iterator> def_path(ListIndex From) {
727  return make_range(def_path_iterator(this, From), def_path_iterator());
728  }
729 
730  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
731  return make_range(const_def_path_iterator(this, From),
732  const_def_path_iterator());
733  }
734 
735  struct OptznResult {
736  /// The path that contains our result.
737  TerminatedPath PrimaryClobber;
738  /// The paths that we can legally cache back from, but that aren't
739  /// necessarily the result of the Phi optimization.
740  SmallVector<TerminatedPath, 4> OtherClobbers;
741  };
742 
743  ListIndex defPathIndex(const DefPath &N) const {
744  // The assert looks nicer if we don't need to do &N
745  const DefPath *NP = &N;
746  assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
747  "Out of bounds DefPath!");
748  return NP - &Paths.front();
749  }
750 
751  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
752  /// that act as legal clobbers. Note that this won't return *all* clobbers.
753  ///
754  /// Phi optimization algorithm tl;dr:
755  /// - Find the earliest def/phi, A, we can optimize to
756  /// - Find if all paths from the starting memory access ultimately reach A
757  /// - If not, optimization isn't possible.
758  /// - Otherwise, walk from A to another clobber or phi, A'.
759  /// - If A' is a def, we're done.
760  /// - If A' is a phi, try to optimize it.
761  ///
762  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
763  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
764  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
765  const MemoryLocation &Loc) {
766  assert(Paths.empty() && VisitedPhis.empty() &&
767  "Reset the optimization state.");
768 
769  Paths.emplace_back(Loc, Start, Phi, None);
770  // Stores how many "valid" optimization nodes we had prior to calling
771  // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
772  auto PriorPathsSize = Paths.size();
773 
774  SmallVector<ListIndex, 16> PausedSearches;
775  SmallVector<ListIndex, 8> NewPaused;
776  SmallVector<TerminatedPath, 4> TerminatedPaths;
777 
778  addSearches(Phi, PausedSearches, 0);
779 
780  // Moves the TerminatedPath with the "most dominated" Clobber to the end of
781  // Paths.
782  auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
783  assert(!Paths.empty() && "Need a path to move");
784  auto Dom = Paths.begin();
785  for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
786  if (!MSSA.dominates(I->Clobber, Dom->Clobber))
787  Dom = I;
788  auto Last = Paths.end() - 1;
789  if (Last != Dom)
790  std::iter_swap(Last, Dom);
791  };
792 
793  MemoryPhi *Current = Phi;
794  while (true) {
795  assert(!MSSA.isLiveOnEntryDef(Current) &&
796  "liveOnEntry wasn't treated as a clobber?");
797 
798  const auto *Target = getWalkTarget(Current);
799  // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
800  // optimization for the prior phi.
801  assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
802  return MSSA.dominates(P.Clobber, Target);
803  }));
804 
805  // FIXME: This is broken, because the Blocker may be reported to be
806  // liveOnEntry, and we'll happily wait for that to disappear (read: never)
807  // For the moment, this is fine, since we do nothing with blocker info.
808  if (Optional<TerminatedPath> Blocker = getBlockingAccess(
809  Target, PausedSearches, NewPaused, TerminatedPaths)) {
810 
811  // Find the node we started at. We can't search based on N->Last, since
812  // we may have gone around a loop with a different MemoryLocation.
813  auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
814  return defPathIndex(N) < PriorPathsSize;
815  });
816  assert(Iter != def_path_iterator());
817 
818  DefPath &CurNode = *Iter;
819  assert(CurNode.Last == Current);
820 
821  // Two things:
822  // A. We can't reliably cache all of NewPaused back. Consider a case
823  // where we have two paths in NewPaused; one of which can't optimize
824  // above this phi, whereas the other can. If we cache the second path
825  // back, we'll end up with suboptimal cache entries. We can handle
826  // cases like this a bit better when we either try to find all
827  // clobbers that block phi optimization, or when our cache starts
828  // supporting unfinished searches.
829  // B. We can't reliably cache TerminatedPaths back here without doing
830  // extra checks; consider a case like:
831  // T
832  // / \
833  // D C
834  // \ /
835  // S
836  // Where T is our target, C is a node with a clobber on it, D is a
837  // diamond (with a clobber *only* on the left or right node, N), and
838  // S is our start. Say we walk to D, through the node opposite N
839  // (read: ignoring the clobber), and see a cache entry in the top
840  // node of D. That cache entry gets put into TerminatedPaths. We then
841  // walk up to C (N is later in our worklist), find the clobber, and
842  // quit. If we append TerminatedPaths to OtherClobbers, we'll cache
843  // the bottom part of D to the cached clobber, ignoring the clobber
844  // in N. Again, this problem goes away if we start tracking all
845  // blockers for a given phi optimization.
846  TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
847  return {Result, {}};
848  }
849 
850  // If there's nothing left to search, then all paths led to valid clobbers
851  // that we got from our cache; pick the nearest to the start, and allow
852  // the rest to be cached back.
853  if (NewPaused.empty()) {
854  MoveDominatedPathToEnd(TerminatedPaths);
855  TerminatedPath Result = TerminatedPaths.pop_back_val();
856  return {Result, std::move(TerminatedPaths)};
857  }
858 
859  MemoryAccess *DefChainEnd = nullptr;
861  for (ListIndex Paused : NewPaused) {
862  UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
863  if (WR.IsKnownClobber)
864  Clobbers.push_back({WR.Result, Paused});
865  else
866  // Micro-opt: If we hit the end of the chain, save it.
867  DefChainEnd = WR.Result;
868  }
869 
870  if (!TerminatedPaths.empty()) {
871  // If we couldn't find the dominating phi/liveOnEntry in the above loop,
872  // do it now.
873  if (!DefChainEnd)
874  for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
875  DefChainEnd = MA;
876  assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");
877 
878  // If any of the terminated paths don't dominate the phi we'll try to
879  // optimize, we need to figure out what they are and quit.
880  const BasicBlock *ChainBB = DefChainEnd->getBlock();
881  for (const TerminatedPath &TP : TerminatedPaths) {
882  // Because we know that DefChainEnd is as "high" as we can go, we
883  // don't need local dominance checks; BB dominance is sufficient.
884  if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
885  Clobbers.push_back(TP);
886  }
887  }
888 
889  // If we have clobbers in the def chain, find the one closest to Current
890  // and quit.
891  if (!Clobbers.empty()) {
892  MoveDominatedPathToEnd(Clobbers);
893  TerminatedPath Result = Clobbers.pop_back_val();
894  return {Result, std::move(Clobbers)};
895  }
896 
897  assert(all_of(NewPaused,
898  [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));
899 
900  // Because liveOnEntry is a clobber, this must be a phi.
901  auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
902 
903  PriorPathsSize = Paths.size();
904  PausedSearches.clear();
905  for (ListIndex I : NewPaused)
906  addSearches(DefChainPhi, PausedSearches, I);
907  NewPaused.clear();
908 
909  Current = DefChainPhi;
910  }
911  }
912 
913  void verifyOptResult(const OptznResult &R) const {
914  assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
915  return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
916  }));
917  }
918 
919  void resetPhiOptznState() {
920  Paths.clear();
921  VisitedPhis.clear();
922  }
923 
924 public:
925  ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT)
926  : MSSA(MSSA), AA(AA), DT(DT) {}
927 
928  AliasAnalysisType *getAA() { return &AA; }
929  /// Finds the nearest clobber for the given query, optimizing phis if
930  /// possible.
931  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q,
932  unsigned &UpWalkLimit) {
933  Query = &Q;
934  UpwardWalkLimit = &UpWalkLimit;
935  // Starting limit must be > 0.
936  if (!UpWalkLimit)
937  UpWalkLimit++;
938 
939  MemoryAccess *Current = Start;
940  // This walker pretends uses don't exist. If we're handed one, silently grab
941  // its def. (This has the nice side-effect of ensuring we never cache uses)
942  if (auto *MU = dyn_cast<MemoryUse>(Start))
943  Current = MU->getDefiningAccess();
944 
945  DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
946  // Fast path for the overly-common case (no crazy phi optimization
947  // necessary)
948  UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
949  MemoryAccess *Result;
950  if (WalkResult.IsKnownClobber) {
951  Result = WalkResult.Result;
952  Q.AR = WalkResult.AR;
953  } else {
954  OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
955  Current, Q.StartingLoc);
956  verifyOptResult(OptRes);
957  resetPhiOptznState();
958  Result = OptRes.PrimaryClobber.Clobber;
959  }
960 
961 #ifdef EXPENSIVE_CHECKS
962  if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
963  checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
964 #endif
965  return Result;
966  }
967 };
968 
969 struct RenamePassData {
970  DomTreeNode *DTN;
972  MemoryAccess *IncomingVal;
973 
974  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
975  MemoryAccess *M)
976  : DTN(D), ChildIt(It), IncomingVal(M) {}
977 
978  void swap(RenamePassData &RHS) {
979  std::swap(DTN, RHS.DTN);
980  std::swap(ChildIt, RHS.ChildIt);
981  std::swap(IncomingVal, RHS.IncomingVal);
982  }
983 };
984 
985 } // end anonymous namespace
986 
987 namespace llvm {
988 
989 template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase {
990  ClobberWalker<AliasAnalysisType> Walker;
991  MemorySSA *MSSA;
992 
993 public:
994  ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D)
995  : Walker(*M, *A, *D), MSSA(M) {}
996 
997  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
998  const MemoryLocation &,
999  unsigned &);
1000  // Third argument (bool), defines whether the clobber search should skip the
1001  // original queried access. If true, there will be a follow-up query searching
1002  // for a clobber access past "self". Note that the Optimized access is not
1003  // updated if a new clobber is found by this SkipSelf search. If this
1004  // additional query becomes heavily used we may decide to cache the result.
1005  // Walker instantiations will decide how to set the SkipSelf bool.
1006  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool);
1007 };
1008 
1009 /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
1010 /// longer does caching on its own, but the name has been retained for the
1011 /// moment.
1012 template <class AliasAnalysisType>
1013 class MemorySSA::CachingWalker final : public MemorySSAWalker {
1014  ClobberWalkerBase<AliasAnalysisType> *Walker;
1015 
1016 public:
1017  CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
1018  : MemorySSAWalker(M), Walker(W) {}
1019  ~CachingWalker() override = default;
1020 
1022 
1023  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
1024  return Walker->getClobberingMemoryAccessBase(MA, UWL, false);
1025  }
1026  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1027  const MemoryLocation &Loc,
1028  unsigned &UWL) {
1029  return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
1030  }
1031 
1032  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
1033  unsigned UpwardWalkLimit = MaxCheckLimit;
1034  return getClobberingMemoryAccess(MA, UpwardWalkLimit);
1035  }
1036  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1037  const MemoryLocation &Loc) override {
1038  unsigned UpwardWalkLimit = MaxCheckLimit;
1039  return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
1040  }
1041 
1042  void invalidateInfo(MemoryAccess *MA) override {
1043  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1044  MUD->resetOptimized();
1045  }
1046 };
1047 
1048 template <class AliasAnalysisType>
1049 class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
1050  ClobberWalkerBase<AliasAnalysisType> *Walker;
1051 
1052 public:
1053  SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
1054  : MemorySSAWalker(M), Walker(W) {}
1055  ~SkipSelfWalker() override = default;
1056 
1058 
1059  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
1060  return Walker->getClobberingMemoryAccessBase(MA, UWL, true);
1061  }
1062  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1063  const MemoryLocation &Loc,
1064  unsigned &UWL) {
1065  return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
1066  }
1067 
1068  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
1069  unsigned UpwardWalkLimit = MaxCheckLimit;
1070  return getClobberingMemoryAccess(MA, UpwardWalkLimit);
1071  }
1072  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1073  const MemoryLocation &Loc) override {
1074  unsigned UpwardWalkLimit = MaxCheckLimit;
1075  return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
1076  }
1077 
1078  void invalidateInfo(MemoryAccess *MA) override {
1079  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1080  MUD->resetOptimized();
1081  }
1082 };
1083 
1084 } // end namespace llvm
1085 
1086 void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
1087  bool RenameAllUses) {
1088  // Pass through values to our successors
1089  for (const BasicBlock *S : successors(BB)) {
1090  auto It = PerBlockAccesses.find(S);
1091  // Rename the phi nodes in our successor block
1092  if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1093  continue;
1094  AccessList *Accesses = It->second.get();
1095  auto *Phi = cast<MemoryPhi>(&Accesses->front());
1096  if (RenameAllUses) {
1097  bool ReplacementDone = false;
1098  for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
1099  if (Phi->getIncomingBlock(I) == BB) {
1100  Phi->setIncomingValue(I, IncomingVal);
1101  ReplacementDone = true;
1102  }
1103  (void) ReplacementDone;
1104  assert(ReplacementDone && "Incomplete phi during partial rename");
1105  } else
1106  Phi->addIncoming(IncomingVal, BB);
1107  }
1108 }
1109 
1110 /// Rename a single basic block into MemorySSA form.
1111 /// Uses the standard SSA renaming algorithm.
1112 /// \returns The new incoming value.
1113 MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
1114  bool RenameAllUses) {
1115  auto It = PerBlockAccesses.find(BB);
1116  // Skip most processing if the list is empty.
1117  if (It != PerBlockAccesses.end()) {
1118  AccessList *Accesses = It->second.get();
1119  for (MemoryAccess &L : *Accesses) {
1120  if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
1121  if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
1122  MUD->setDefiningAccess(IncomingVal);
1123  if (isa<MemoryDef>(&L))
1124  IncomingVal = &L;
1125  } else {
1126  IncomingVal = &L;
1127  }
1128  }
1129  }
1130  return IncomingVal;
1131 }
1132 
1133 /// This is the standard SSA renaming algorithm.
1134 ///
1135 /// We walk the dominator tree in preorder, renaming accesses, and then filling
1136 /// in phi nodes in our successors.
1137 void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
1139  bool SkipVisited, bool RenameAllUses) {
1140  assert(Root && "Trying to rename accesses in an unreachable block");
1141 
1143  // Skip everything if we already renamed this block and we are skipping.
1144  // Note: You can't sink this into the if, because we need it to occur
1145  // regardless of whether we skip blocks or not.
1146  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
1147  if (SkipVisited && AlreadyVisited)
1148  return;
1149 
1150  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
1151  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
1152  WorkStack.push_back({Root, Root->begin(), IncomingVal});
1153 
1154  while (!WorkStack.empty()) {
1155  DomTreeNode *Node = WorkStack.back().DTN;
1156  DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
1157  IncomingVal = WorkStack.back().IncomingVal;
1158 
1159  if (ChildIt == Node->end()) {
1160  WorkStack.pop_back();
1161  } else {
1162  DomTreeNode *Child = *ChildIt;
1163  ++WorkStack.back().ChildIt;
1164  BasicBlock *BB = Child->getBlock();
1165  // Note: You can't sink this into the if, because we need it to occur
1166  // regardless of whether we skip blocks or not.
1167  AlreadyVisited = !Visited.insert(BB).second;
1168  if (SkipVisited && AlreadyVisited) {
1169  // We already visited this during our renaming, which can happen when
1170  // being asked to rename multiple blocks. Figure out the incoming val,
1171  // which is the last def.
1172  // Incoming value can only change if there is a block def, and in that
1173  // case, it's the last block def in the list.
1174  if (auto *BlockDefs = getWritableBlockDefs(BB))
1175  IncomingVal = &*BlockDefs->rbegin();
1176  } else
1177  IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
1178  renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
1179  WorkStack.push_back({Child, Child->begin(), IncomingVal});
1180  }
1181  }
1182 }
1183 
1184 /// This handles unreachable block accesses by deleting phi nodes in
1185 /// unreachable blocks, and marking all other unreachable MemoryAccess's as
1186 /// being uses of the live on entry definition.
1187 void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
1188  assert(!DT->isReachableFromEntry(BB) &&
1189  "Reachable block found while handling unreachable blocks");
1190 
1191  // Make sure phi nodes in our reachable successors end up with a
1192  // LiveOnEntryDef for our incoming edge, even though our block is forward
1193  // unreachable. We could just disconnect these blocks from the CFG fully,
1194  // but we do not right now.
1195  for (const BasicBlock *S : successors(BB)) {
1196  if (!DT->isReachableFromEntry(S))
1197  continue;
1198  auto It = PerBlockAccesses.find(S);
1199  // Rename the phi nodes in our successor block
1200  if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1201  continue;
1202  AccessList *Accesses = It->second.get();
1203  auto *Phi = cast<MemoryPhi>(&Accesses->front());
1204  Phi->addIncoming(LiveOnEntryDef.get(), BB);
1205  }
1206 
1207  auto It = PerBlockAccesses.find(BB);
1208  if (It == PerBlockAccesses.end())
1209  return;
1210 
1211  auto &Accesses = It->second;
1212  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
1213  auto Next = std::next(AI);
1214  // If we have a phi, just remove it. We are going to replace all
1215  // users with live on entry.
1216  if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
1217  UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
1218  else
1219  Accesses->erase(AI);
1220  AI = Next;
1221  }
1222 }
1223 
1225  : AA(nullptr), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
1226  SkipWalker(nullptr), NextID(0) {
1227  // Build MemorySSA using a batch alias analysis. This reuses the internal
1228  // state that AA collects during an alias()/getModRefInfo() call. This is
1229  // safe because there are no CFG changes while building MemorySSA and can
1230  // significantly reduce the time spent by the compiler in AA, because we will
1231  // make queries about all the instructions in the Function.
1232  BatchAAResults BatchAA(*AA);
1233  buildMemorySSA(BatchAA);
1234  // Intentionally leave AA to nullptr while building so we don't accidently
1235  // use non-batch AliasAnalysis.
1236  this->AA = AA;
1237  // Also create the walker here.
1238  getWalker();
1239 }
1240 
1242  // Drop all our references
1243  for (const auto &Pair : PerBlockAccesses)
1244  for (MemoryAccess &MA : *Pair.second)
1245  MA.dropAllReferences();
1246 }
1247 
1248 MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
1249  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
1250 
1251  if (Res.second)
1252  Res.first->second = std::make_unique<AccessList>();
1253  return Res.first->second.get();
1254 }
1255 
1256 MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
1257  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));
1258 
1259  if (Res.second)
1260  Res.first->second = std::make_unique<DefsList>();
1261  return Res.first->second.get();
1262 }
1263 
1264 namespace llvm {
1265 
1266 /// This class is a batch walker of all MemoryUse's in the program, and points
1267 /// their defining access at the thing that actually clobbers them. Because it
1268 /// is a batch walker that touches everything, it does not operate like the
1269 /// other walkers. This walker is basically performing a top-down SSA renaming
1270 /// pass, where the version stack is used as the cache. This enables it to be
1271 /// significantly more time and memory efficient than using the regular walker,
1272 /// which is walking bottom-up.
1274 public:
1275  OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker,
1276  BatchAAResults *BAA, DominatorTree *DT)
1277  : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}
1278 
1279  void optimizeUses();
1280 
1281 private:
1282  /// This represents where a given memorylocation is in the stack.
1283  struct MemlocStackInfo {
1284  // This essentially is keeping track of versions of the stack. Whenever
1285  // the stack changes due to pushes or pops, these versions increase.
1286  unsigned long StackEpoch;
1287  unsigned long PopEpoch;
1288  // This is the lower bound of places on the stack to check. It is equal to
1289  // the place the last stack walk ended.
1290  // Note: Correctness depends on this being initialized to 0, which densemap
1291  // does
1292  unsigned long LowerBound;
1293  const BasicBlock *LowerBoundBlock;
1294  // This is where the last walk for this memory location ended.
1295  unsigned long LastKill;
1296  bool LastKillValid;
1298  };
1299 
1300  void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
1303 
1304  MemorySSA *MSSA;
1305  CachingWalker<BatchAAResults> *Walker;
1306  BatchAAResults *AA;
1307  DominatorTree *DT;
1308 };
1309 
1310 } // end namespace llvm
1311 
1312 /// Optimize the uses in a given block This is basically the SSA renaming
1313 /// algorithm, with one caveat: We are able to use a single stack for all
1314 /// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
1315 /// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
1316 /// going to be some position in that stack of possible ones.
1317 ///
1318 /// We track the stack positions that each MemoryLocation needs
1319 /// to check, and last ended at. This is because we only want to check the
1320 /// things that changed since last time. The same MemoryLocation should
1321 /// get clobbered by the same store (getModRefInfo does not use invariantness or
1322 /// things like this, and if they start, we can modify MemoryLocOrCall to
1323 /// include relevant data)
1324 void MemorySSA::OptimizeUses::optimizeUsesInBlock(
1325  const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
1326  SmallVectorImpl<MemoryAccess *> &VersionStack,
1328 
1329  /// If no accesses, nothing to do.
1330  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
1331  if (Accesses == nullptr)
1332  return;
1333 
1334  // Pop everything that doesn't dominate the current block off the stack,
1335  // increment the PopEpoch to account for this.
1336  while (true) {
1337  assert(
1338  !VersionStack.empty() &&
1339  "Version stack should have liveOnEntry sentinel dominating everything");
1340  BasicBlock *BackBlock = VersionStack.back()->getBlock();
1341  if (DT->dominates(BackBlock, BB))
1342  break;
1343  while (VersionStack.back()->getBlock() == BackBlock)
1344  VersionStack.pop_back();
1345  ++PopEpoch;
1346  }
1347 
1348  for (MemoryAccess &MA : *Accesses) {
1349  auto *MU = dyn_cast<MemoryUse>(&MA);
1350  if (!MU) {
1351  VersionStack.push_back(&MA);
1352  ++StackEpoch;
1353  continue;
1354  }
1355 
1356  if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
1357  MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
1358  continue;
1359  }
1360 
1361  MemoryLocOrCall UseMLOC(MU);
1362  auto &LocInfo = LocStackInfo[UseMLOC];
1363  // If the pop epoch changed, it means we've removed stuff from top of
1364  // stack due to changing blocks. We may have to reset the lower bound or
1365  // last kill info.
1366  if (LocInfo.PopEpoch != PopEpoch) {
1367  LocInfo.PopEpoch = PopEpoch;
1368  LocInfo.StackEpoch = StackEpoch;
1369  // If the lower bound was in something that no longer dominates us, we
1370  // have to reset it.
1371  // We can't simply track stack size, because the stack may have had
1372  // pushes/pops in the meantime.
1373  // XXX: This is non-optimal, but only is slower cases with heavily
1374  // branching dominator trees. To get the optimal number of queries would
1375  // be to make lowerbound and lastkill a per-loc stack, and pop it until
1376  // the top of that stack dominates us. This does not seem worth it ATM.
1377  // A much cheaper optimization would be to always explore the deepest
1378  // branch of the dominator tree first. This will guarantee this resets on
1379  // the smallest set of blocks.
1380  if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
1381  !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
1382  // Reset the lower bound of things to check.
1383  // TODO: Some day we should be able to reset to last kill, rather than
1384  // 0.
1385  LocInfo.LowerBound = 0;
1386  LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
1387  LocInfo.LastKillValid = false;
1388  }
1389  } else if (LocInfo.StackEpoch != StackEpoch) {
1390  // If all that has changed is the StackEpoch, we only have to check the
1391  // new things on the stack, because we've checked everything before. In
1392  // this case, the lower bound of things to check remains the same.
1393  LocInfo.PopEpoch = PopEpoch;
1394  LocInfo.StackEpoch = StackEpoch;
1395  }
1396  if (!LocInfo.LastKillValid) {
1397  LocInfo.LastKill = VersionStack.size() - 1;
1398  LocInfo.LastKillValid = true;
1399  LocInfo.AR = MayAlias;
1400  }
1401 
1402  // At this point, we should have corrected last kill and LowerBound to be
1403  // in bounds.
1404  assert(LocInfo.LowerBound < VersionStack.size() &&
1405  "Lower bound out of range");
1406  assert(LocInfo.LastKill < VersionStack.size() &&
1407  "Last kill info out of range");
1408  // In any case, the new upper bound is the top of the stack.
1409  unsigned long UpperBound = VersionStack.size() - 1;
1410 
1411  if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
1412  LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
1413  << *(MU->getMemoryInst()) << ")"
1414  << " because there are "
1415  << UpperBound - LocInfo.LowerBound
1416  << " stores to disambiguate\n");
1417  // Because we did not walk, LastKill is no longer valid, as this may
1418  // have been a kill.
1419  LocInfo.LastKillValid = false;
1420  continue;
1421  }
1422  bool FoundClobberResult = false;
1423  unsigned UpwardWalkLimit = MaxCheckLimit;
1424  while (UpperBound > LocInfo.LowerBound) {
1425  if (isa<MemoryPhi>(VersionStack[UpperBound])) {
1426  // For phis, use the walker, see where we ended up, go there
1427  MemoryAccess *Result =
1428  Walker->getClobberingMemoryAccess(MU, UpwardWalkLimit);
1429  // We are guaranteed to find it or something is wrong
1430  while (VersionStack[UpperBound] != Result) {
1431  assert(UpperBound != 0);
1432  --UpperBound;
1433  }
1434  FoundClobberResult = true;
1435  break;
1436  }
1437 
1438  MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
1439  // If the lifetime of the pointer ends at this instruction, it's live on
1440  // entry.
1441  if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
1442  // Reset UpperBound to liveOnEntryDef's place in the stack
1443  UpperBound = 0;
1444  FoundClobberResult = true;
1445  LocInfo.AR = MustAlias;
1446  break;
1447  }
1448  ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
1449  if (CA.IsClobber) {
1450  FoundClobberResult = true;
1451  LocInfo.AR = CA.AR;
1452  break;
1453  }
1454  --UpperBound;
1455  }
1456 
1457  // Note: Phis always have AliasResult AR set to MayAlias ATM.
1458 
1459  // At the end of this loop, UpperBound is either a clobber, or lower bound
1460  // PHI walking may cause it to be < LowerBound, and in fact, < LastKill.
1461  if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
1462  // We were last killed now by where we got to
1463  if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
1464  LocInfo.AR = None;
1465  MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
1466  LocInfo.LastKill = UpperBound;
1467  } else {
1468  // Otherwise, we checked all the new ones, and now we know we can get to
1469  // LastKill.
1470  MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
1471  }
1472  LocInfo.LowerBound = VersionStack.size() - 1;
1473  LocInfo.LowerBoundBlock = BB;
1474  }
1475 }
1476 
1477 /// Optimize uses to point to their actual clobbering definitions.
1479  SmallVector<MemoryAccess *, 16> VersionStack;
1481  VersionStack.push_back(MSSA->getLiveOnEntryDef());
1482 
1483  unsigned long StackEpoch = 1;
1484  unsigned long PopEpoch = 1;
1485  // We perform a non-recursive top-down dominator tree walk.
1486  for (const auto *DomNode : depth_first(DT->getRootNode()))
1487  optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
1488  LocStackInfo);
1489 }
1490 
1491 void MemorySSA::placePHINodes(
1492  const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
1493  // Determine where our MemoryPhi's should go
1494  ForwardIDFCalculator IDFs(*DT);
1495  IDFs.setDefiningBlocks(DefiningBlocks);
1497  IDFs.calculate(IDFBlocks);
1498 
1499  // Now place MemoryPhi nodes.
1500  for (auto &BB : IDFBlocks)
1501  createMemoryPhi(BB);
1502 }
1503 
1504 void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
1505  // We create an access to represent "live on entry", for things like
1506  // arguments or users of globals, where the memory they use is defined before
1507  // the beginning of the function. We do not actually insert it into the IR.
1508  // We do not define a live on exit for the immediate uses, and thus our
1509  // semantics do *not* imply that something with no immediate uses can simply
1510  // be removed.
1511  BasicBlock &StartingPoint = F.getEntryBlock();
1512  LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
1513  &StartingPoint, NextID++));
1514 
1515  // We maintain lists of memory accesses per-block, trading memory for time. We
1516  // could just look up the memory access for every possible instruction in the
1517  // stream.
1518  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1519  // Go through each block, figure out where defs occur, and chain together all
1520  // the accesses.
1521  for (BasicBlock &B : F) {
1522  bool InsertIntoDef = false;
1523  AccessList *Accesses = nullptr;
1524  DefsList *Defs = nullptr;
1525  for (Instruction &I : B) {
1526  MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
1527  if (!MUD)
1528  continue;
1529 
1530  if (!Accesses)
1531  Accesses = getOrCreateAccessList(&B);
1532  Accesses->push_back(MUD);
1533  if (isa<MemoryDef>(MUD)) {
1534  InsertIntoDef = true;
1535  if (!Defs)
1536  Defs = getOrCreateDefsList(&B);
1537  Defs->push_back(*MUD);
1538  }
1539  }
1540  if (InsertIntoDef)
1541  DefiningBlocks.insert(&B);
1542  }
1543  placePHINodes(DefiningBlocks);
1544 
1545  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1546  // filled in with all blocks.
1547  SmallPtrSet<BasicBlock *, 16> Visited;
1548  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1549 
1550  ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BAA, DT);
1551  CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
1552  OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses();
1553 
1554  // Mark the uses in unreachable blocks as live on entry, so that they go
1555  // somewhere.
1556  for (auto &BB : F)
1557  if (!Visited.count(&BB))
1558  markUnreachableAsLiveOnEntry(&BB);
1559 }
1560 
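// Illustrative sketch (not upstream code): how a client typically consumes
// what buildMemorySSA() above just produced. `I` is some memory-touching
// instruction of interest; the helper name is an assumption of this sketch.
LLVM_ATTRIBUTE_UNUSED static void exampleClientQuery(MemorySSA &MSSA,
                                                     Instruction *I) {
  if (MemoryUseOrDef *MA = MSSA.getMemoryAccess(I)) {
    MemoryAccess *Clobber = MSSA.getWalker()->getClobberingMemoryAccess(MA);
    if (MSSA.isLiveOnEntryDef(Clobber)) {
      // Nothing in this function clobbers I's memory; it observes
      // function-entry state (e.g. a global or an argument's pointee).
    }
  }
}
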
1561 MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
1562 
1563 MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() {
1564  if (Walker)
1565  return Walker.get();
1566 
1567  if (!WalkerBase)
1568  WalkerBase =
1569  std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1570 
1571  Walker =
1572  std::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get());
1573  return Walker.get();
1574 }
1575 
1576 MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
1577  if (SkipWalker)
1578  return SkipWalker.get();
1579 
1580  if (!WalkerBase)
1581  WalkerBase =
1582  std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1583 
1584  SkipWalker =
1585  std::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get());
1586  return SkipWalker.get();
1587 }
1588 
1589 
1590 // This is a helper function used by the creation routines. It places NewAccess
1591 // into the access and defs lists for a given basic block, at the given
1592 // insertion point.
1593 void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1594  const BasicBlock *BB,
1595  InsertionPlace Point) {
1596  auto *Accesses = getOrCreateAccessList(BB);
1597  if (Point == Beginning) {
1598  // If it's a phi node, it goes first; otherwise, it goes after any phi
1599  // nodes.
1600  if (isa<MemoryPhi>(NewAccess)) {
1601  Accesses->push_front(NewAccess);
1602  auto *Defs = getOrCreateDefsList(BB);
1603  Defs->push_front(*NewAccess);
1604  } else {
1605  auto AI = find_if_not(
1606  *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1607  Accesses->insert(AI, NewAccess);
1608  if (!isa<MemoryUse>(NewAccess)) {
1609  auto *Defs = getOrCreateDefsList(BB);
1610  auto DI = find_if_not(
1611  *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1612  Defs->insert(DI, *NewAccess);
1613  }
1614  }
1615  } else {
1616  Accesses->push_back(NewAccess);
1617  if (!isa<MemoryUse>(NewAccess)) {
1618  auto *Defs = getOrCreateDefsList(BB);
1619  Defs->push_back(*NewAccess);
1620  }
1621  }
1622  BlockNumberingValid.erase(BB);
1623 }
1624 
1625 void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
1626  AccessList::iterator InsertPt) {
1627  auto *Accesses = getWritableBlockAccesses(BB);
1628  bool WasEnd = InsertPt == Accesses->end();
1629  Accesses->insert(AccessList::iterator(InsertPt), What);
1630  if (!isa<MemoryUse>(What)) {
1631  auto *Defs = getOrCreateDefsList(BB);
1632  // If we got asked to insert at the end, we have an easy job: just shove it
1633  // at the end. If we got asked to insert before an existing def, we also get
1634  // an iterator. If we got asked to insert before a use, we have to hunt for
1635  // the next def.
1636  if (WasEnd) {
1637  Defs->push_back(*What);
1638  } else if (isa<MemoryDef>(InsertPt)) {
1639  Defs->insert(InsertPt->getDefsIterator(), *What);
1640  } else {
1641  while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
1642  ++InsertPt;
1643  // Either we found a def, or we are inserting at the end
1644  if (InsertPt == Accesses->end())
1645  Defs->push_back(*What);
1646  else
1647  Defs->insert(InsertPt->getDefsIterator(), *What);
1648  }
1649  }
1650  BlockNumberingValid.erase(BB);
1651 }
1652 
1653 void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
1654  // Keep it in the lookup tables, but remove it from the lists.
1655  removeFromLists(What, false);
1656 
1657  // Note that moving should implicitly invalidate the optimized state of a
1658  // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
1659  // MemoryDef.
1660  if (auto *MD = dyn_cast<MemoryDef>(What))
1661  MD->resetOptimized();
1662  What->setBlock(BB);
1663 }
1664 
1665 // Move What before Where in the IR. The end result is that What will belong to
1666 // the right lists and have the right Block set, but will not otherwise be
1667 // correct. It will not have the right defining access, and if it is a def,
1668 // things below it will not properly be updated.
1669 void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1670  AccessList::iterator Where) {
1671  prepareForMoveTo(What, BB);
1672  insertIntoListsBefore(What, BB, Where);
1673 }
1674 
1675 void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
1676  InsertionPlace Point) {
1677  if (isa<MemoryPhi>(What)) {
1678  assert(Point == Beginning &&
1679  "Can only move a Phi at the beginning of the block");
1680  // Update lookup table entry
1681  ValueToMemoryAccess.erase(What->getBlock());
1682  bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
1683  (void)Inserted;
1684  assert(Inserted && "Cannot move a Phi to a block that already has one");
1685  }
1686 
1687  prepareForMoveTo(What, BB);
1688  insertIntoListsForBlock(What, BB, Point);
1689 }
1690 
1691 MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1692  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1693  MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
1694  // Phis are always placed at the front of the block.
1695  insertIntoListsForBlock(Phi, BB, Beginning);
1696  ValueToMemoryAccess[BB] = Phi;
1697  return Phi;
1698 }
1699 
1700 MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1701  MemoryAccess *Definition,
1702  const MemoryUseOrDef *Template,
1703  bool CreationMustSucceed) {
1704  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1705  MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
1706  if (CreationMustSucceed)
1707  assert(NewAccess != nullptr && "Tried to create a memory access for a "
1708  "non-memory touching instruction");
1709  if (NewAccess)
1710  NewAccess->setDefiningAccess(Definition);
1711  return NewAccess;
1712 }
1713 
1714 // Return true if the instruction has ordering constraints.
1715 // Note specifically that this only considers stores and loads
1716 // because others are still considered ModRef by getModRefInfo.
1717 static inline bool isOrdered(const Instruction *I) {
1718  if (auto *SI = dyn_cast<StoreInst>(I)) {
1719  if (!SI->isUnordered())
1720  return true;
1721  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1722  if (!LI->isUnordered())
1723  return true;
1724  }
1725  return false;
1726 }
1727 
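// Illustrative sketch (not upstream code): what isOrdered() above
// distinguishes. A simple load is unordered and becomes a MemoryUse; giving it
// atomic ordering (even monotonic) makes it "ordered", so createNewAccess()
// below models it as a MemoryDef even though it only reads. `LI` is assumed to
// be a simple, non-atomic load.
LLVM_ATTRIBUTE_UNUSED static bool exampleMakeLoadOrdered(LoadInst *LI) {
  assert(!isOrdered(LI) && "expected a plain, unordered load");
  LI->setAtomic(AtomicOrdering::Monotonic);
  return isOrdered(LI); // Now true: the load carries ordering constraints.
}
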
1728 /// Helper function to create new memory accesses
1729 template <typename AliasAnalysisType>
1730 MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
1731  AliasAnalysisType *AAP,
1732  const MemoryUseOrDef *Template) {
1733  // The assume intrinsic has a control dependency which we model by claiming
1734  // that it writes arbitrarily. Debuginfo intrinsics may be considered
1735  // clobbers when we have a nonstandard AA pipeline. Ignore these fake memory
1736  // dependencies here.
1737  // FIXME: Replace this special casing with a more accurate modelling of
1738  // assume's control dependency.
1739  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1740  if (II->getIntrinsicID() == Intrinsic::assume)
1741  return nullptr;
1742 
1743  // Using a nonstandard AA pipeline might leave us with unexpected modref
1744  // results for I, so add a check to not model instructions that may not read
1745  // from or write to memory. This is necessary for correctness.
1746  if (!I->mayReadFromMemory() && !I->mayWriteToMemory())
1747  return nullptr;
1748 
1749  bool Def, Use;
1750  if (Template) {
1751  Def = dyn_cast_or_null<MemoryDef>(Template) != nullptr;
1752  Use = dyn_cast_or_null<MemoryUse>(Template) != nullptr;
1753 #if !defined(NDEBUG)
1754  ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1755  bool DefCheck, UseCheck;
1756  DefCheck = isModSet(ModRef) || isOrdered(I);
1757  UseCheck = isRefSet(ModRef);
1758  assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
1759 #endif
1760  } else {
1761  // Find out what effect this instruction has on memory.
1762  ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1763  // The isOrdered check is used to ensure that volatiles end up as defs
1764  // (atomics end up as ModRef right now anyway). Until we separate the
1765  // ordering chain from the memory chain, this enables people to see at least
1766  // some relative ordering to volatiles. Note that getClobberingMemoryAccess
1767  // will still give an answer that bypasses other volatile loads. TODO:
1768  // Separate memory aliasing and ordering into two different chains so that
1769  // we can precisely represent both "what memory will this read/write/is
1770  // clobbered by" and "what instructions can I move this past".
1771  Def = isModSet(ModRef) || isOrdered(I);
1772  Use = isRefSet(ModRef);
1773  }
1774 
1775  // It's possible for an instruction to not touch memory at all; during
1776  // construction, we ignore such instructions.
1777  if (!Def && !Use)
1778  return nullptr;
1779 
1780  MemoryUseOrDef *MUD;
1781  if (Def)
1782  MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1783  else
1784  MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1785  ValueToMemoryAccess[I] = MUD;
1786  return MUD;
1787 }
1788 
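// Illustrative sketch (not upstream code): transformations normally reach
// createNewAccess() through MemorySSAUpdater (declared in
// llvm/Analysis/MemorySSAUpdater.h, not included in this file), which bottoms
// out in createDefinedAccess() above. Shown as a comment since the updater is
// unavailable in this translation unit; `NewStore` and `BB` are assumptions.
//
//   MemorySSAUpdater Updater(&MSSA);
//   MemoryUseOrDef *NewMA = Updater.createMemoryAccessInBB(
//       NewStore, /*Definition=*/nullptr, BB, MemorySSA::End);
//   Updater.insertDef(cast<MemoryDef>(NewMA), /*RenameUses=*/true);
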
1789 /// Returns true if \p Replacer dominates \p Replacee.
1790 bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
1791  const MemoryAccess *Replacee) const {
1792  if (isa<MemoryUseOrDef>(Replacee))
1793  return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
1794  const auto *MP = cast<MemoryPhi>(Replacee);
1795  // For a phi node, the use occurs in the predecessor block of the phi node.
1796  // Since Replacee may occur multiple times in the phi node, we have to
1797  // check that Replacer dominates every operand where Replacee occurs.
1798  for (const Use &Arg : MP->operands()) {
1799  if (Arg.get() != Replacee &&
1800  !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
1801  return false;
1802  }
1803  return true;
1804 }
1805 
1806 /// Properly remove \p MA from all of MemorySSA's lookup tables.
1807 void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1808  assert(MA->use_empty() &&
1809  "Trying to remove memory access that still has uses");
1810  BlockNumbering.erase(MA);
1811  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1812  MUD->setDefiningAccess(nullptr);
1813  // Invalidate our walker's cache if necessary
1814  if (!isa<MemoryUse>(MA))
1815  getWalker()->invalidateInfo(MA);
1816 
1817  Value *MemoryInst;
1818  if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1819  MemoryInst = MUD->getMemoryInst();
1820  else
1821  MemoryInst = MA->getBlock();
1822 
1823  auto VMA = ValueToMemoryAccess.find(MemoryInst);
1824  if (VMA->second == MA)
1825  ValueToMemoryAccess.erase(VMA);
1826 }
1827 
1828 /// Properly remove \p MA from all of MemorySSA's lists.
1829 ///
1830 /// Because of the way the intrusive list and use lists work, it is important to
1831 /// do removal in the right order.
1832 /// ShouldDelete defaults to true, and will cause the memory access to also be
1833 /// deleted, not just removed.
1834 void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1835  BasicBlock *BB = MA->getBlock();
1836  // The access list owns the reference, so we erase it from the non-owning list
1837  // first.
1838  if (!isa<MemoryUse>(MA)) {
1839  auto DefsIt = PerBlockDefs.find(BB);
1840  std::unique_ptr<DefsList> &Defs = DefsIt->second;
1841  Defs->remove(*MA);
1842  if (Defs->empty())
1843  PerBlockDefs.erase(DefsIt);
1844  }
1845 
1846  // The erase call here will delete it. If we don't want it deleted, we call
1847  // remove instead.
1848  auto AccessIt = PerBlockAccesses.find(BB);
1849  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1850  if (ShouldDelete)
1851  Accesses->erase(MA);
1852  else
1853  Accesses->remove(MA);
1854 
1855  if (Accesses->empty()) {
1856  PerBlockAccesses.erase(AccessIt);
1857  BlockNumberingValid.erase(BB);
1858  }
1859 }
1860 
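// Illustrative sketch (not upstream code): the removal sequence as seen from a
// transformation. Users of the dying access must be rerouted before
// removeFromLookups() and removeFromLists() above run; MemorySSAUpdater
// (assumed available to the pass) enforces that ordering. `DeadStore` is an
// assumption of this sketch.
//
//   MemorySSAUpdater Updater(&MSSA);
//   if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadStore))
//     Updater.removeMemoryAccess(MA); // Reroutes users, then removes MA.
//   DeadStore->eraseFromParent();     // Only now delete the IR instruction.
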
1861 void MemorySSA::print(raw_ostream &OS) const {
1862  MemorySSAAnnotatedWriter Writer(this);
1863  F.print(OS, &Writer);
1864 }
1865 
1866 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1867 LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1868 #endif
1869 
1870 void MemorySSA::verifyMemorySSA() const {
1871  verifyDefUses(F);
1872  verifyDomination(F);
1873  verifyOrdering(F);
1874  verifyDominationNumbers(F);
1875  verifyPrevDefInPhis(F);
1876  // Previously, the verification used to also verify that the clobberingAccess
1877  // cached by MemorySSA is the same as the clobberingAccess found at a later
1878  // query to AA. This does not hold true in general due to the current
1879  // fragility of BasicAA, which has arbitrary caps on the things it analyzes
1880  // before giving up. As a result, transformations that are correct will lead
1881  // to BasicAA returning different Alias answers before and after the
1882  // transformation. Invalidating MemorySSA is not an option: the results in
1883  // BasicAA can be so random that, in the worst case, we'd need to rebuild
1884  // MemorySSA from scratch after every transformation, which defeats the
1885  // purpose of using it. For such an example, see test4 added in D51960.
1886 }
1887 
1888 void MemorySSA::verifyPrevDefInPhis(Function &F) const {
1889 #if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
1890  for (const BasicBlock &BB : F) {
1891  if (MemoryPhi *Phi = getMemoryAccess(&BB)) {
1892  for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1893  auto *Pred = Phi->getIncomingBlock(I);
1894  auto *IncAcc = Phi->getIncomingValue(I);
1895  // If Pred has no unreachable predecessors, get last def looking at
1896  // IDoms. If, while walking IDoms, any of these has an unreachable
1897  // predecessor, then the incoming def can be any access.
1898  if (auto *DTNode = DT->getNode(Pred)) {
1899  while (DTNode) {
1900  if (auto *DefList = getBlockDefs(DTNode->getBlock())) {
1901  auto *LastAcc = &*(--DefList->end());
1902  assert(LastAcc == IncAcc &&
1903  "Incorrect incoming access into phi.");
1904  break;
1905  }
1906  DTNode = DTNode->getIDom();
1907  }
1908  } else {
1909  // If Pred has unreachable predecessors, but has at least a Def, the
1910  // incoming access can be the last Def in Pred, or it could have been
1911  // optimized to LoE. After an update, though, the LoE may have been
1912  // replaced by another access, so IncAcc may be any access.
1913  // If Pred has unreachable predecessors and no Defs, incoming access
1914  // should be LoE; However, after an update, it may be any access.
1915  }
1916  }
1917  }
1918  }
1919 #endif
1920 }
1921 
1922 /// Verify that all of the blocks we believe to have valid domination numbers
1923 /// actually have valid domination numbers.
1924 void MemorySSA::verifyDominationNumbers(const Function &F) const {
1925 #ifndef NDEBUG
1926  if (BlockNumberingValid.empty())
1927  return;
1928 
1929  SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
1930  for (const BasicBlock &BB : F) {
1931  if (!ValidBlocks.count(&BB))
1932  continue;
1933 
1934  ValidBlocks.erase(&BB);
1935 
1936  const AccessList *Accesses = getBlockAccesses(&BB);
1937  // It's correct to say an empty block has valid numbering.
1938  if (!Accesses)
1939  continue;
1940 
1941  // Block numbering starts at 1.
1942  unsigned long LastNumber = 0;
1943  for (const MemoryAccess &MA : *Accesses) {
1944  auto ThisNumberIter = BlockNumbering.find(&MA);
1945  assert(ThisNumberIter != BlockNumbering.end() &&
1946  "MemoryAccess has no domination number in a valid block!");
1947 
1948  unsigned long ThisNumber = ThisNumberIter->second;
1949  assert(ThisNumber > LastNumber &&
1950  "Domination numbers should be strictly increasing!");
1951  LastNumber = ThisNumber;
1952  }
1953  }
1954 
1955  assert(ValidBlocks.empty() &&
1956  "All valid BasicBlocks should exist in F -- dangling pointers?");
1957 #endif
1958 }
1959 
1960 /// Verify that the order and existence of MemoryAccesses matches the
1961 /// order and existence of memory affecting instructions.
1962 void MemorySSA::verifyOrdering(Function &F) const {
1963 #ifndef NDEBUG
1964  // Walk all the blocks, comparing what the lookups think and what the access
1965  // lists think, as well as the order in the blocks vs the order in the access
1966  // lists.
1967  SmallVector<MemoryAccess *, 32> ActualAccesses;
1968  SmallVector<MemoryAccess *, 32> ActualDefs;
1969  for (BasicBlock &B : F) {
1970  const AccessList *AL = getBlockAccesses(&B);
1971  const auto *DL = getBlockDefs(&B);
1972  MemoryAccess *Phi = getMemoryAccess(&B);
1973  if (Phi) {
1974  ActualAccesses.push_back(Phi);
1975  ActualDefs.push_back(Phi);
1976  }
1977 
1978  for (Instruction &I : B) {
1979  MemoryAccess *MA = getMemoryAccess(&I);
1980  assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
1981  "We have memory affecting instructions "
1982  "in this block but they are not in the "
1983  "access list or defs list");
1984  if (MA) {
1985  ActualAccesses.push_back(MA);
1986  if (isa<MemoryDef>(MA))
1987  ActualDefs.push_back(MA);
1988  }
1989  }
1990  // Either we hit the assert above, we really have no accesses, or we have
1991  // both accesses and an access list.
1992  // Same with defs.
1993  if (!AL && !DL)
1994  continue;
1995  assert(AL->size() == ActualAccesses.size() &&
1996  "We don't have the same number of accesses in the block as on the "
1997  "access list");
1998  assert((DL || ActualDefs.size() == 0) &&
1999  "Either we should have a defs list, or we should have no defs");
2000  assert((!DL || DL->size() == ActualDefs.size()) &&
2001  "We don't have the same number of defs in the block as on the "
2002  "def list");
2003  auto ALI = AL->begin();
2004  auto AAI = ActualAccesses.begin();
2005  while (ALI != AL->end() && AAI != ActualAccesses.end()) {
2006  assert(&*ALI == *AAI && "Not the same accesses in the same order");
2007  ++ALI;
2008  ++AAI;
2009  }
2010  ActualAccesses.clear();
2011  if (DL) {
2012  auto DLI = DL->begin();
2013  auto ADI = ActualDefs.begin();
2014  while (DLI != DL->end() && ADI != ActualDefs.end()) {
2015  assert(&*DLI == *ADI && "Not the same defs in the same order");
2016  ++DLI;
2017  ++ADI;
2018  }
2019  }
2020  ActualDefs.clear();
2021  }
2022 #endif
2023 }
2024 
2025 /// Verify the domination properties of MemorySSA by checking that each
2026 /// definition dominates all of its uses.
2027 void MemorySSA::verifyDomination(Function &F) const {
2028 #ifndef NDEBUG
2029  for (BasicBlock &B : F) {
2030  // Phi nodes are attached to basic blocks
2031  if (MemoryPhi *MP = getMemoryAccess(&B))
2032  for (const Use &U : MP->uses())
2033  assert(dominates(MP, U) && "Memory PHI does not dominate its uses");
2034 
2035  for (Instruction &I : B) {
2036  MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
2037  if (!MD)
2038  continue;
2039 
2040  for (const Use &U : MD->uses())
2041  assert(dominates(MD, U) && "Memory Def does not dominate its uses");
2042  }
2043  }
2044 #endif
2045 }
2046 
2047 /// Verify the def-use lists in MemorySSA, by verifying that \p Use
2048 /// appears in the use list of \p Def.
2049 void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
2050 #ifndef NDEBUG
2051  // The live on entry use may cause us to get a NULL def here
2052  if (!Def)
2053  assert(isLiveOnEntryDef(Use) &&
2054  "Null def but use not point to live on entry def");
2055  else
2056  assert(is_contained(Def->users(), Use) &&
2057  "Did not find use in def's use list");
2058 #endif
2059 }
2060 
2061 /// Verify the immediate use information, by walking all the memory
2062 /// accesses and verifying that, for each use, it appears in the
2063 /// appropriate def's use list
2064 void MemorySSA::verifyDefUses(Function &F) const {
2065 #if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
2066  for (BasicBlock &B : F) {
2067  // Phi nodes are attached to basic blocks
2068  if (MemoryPhi *Phi = getMemoryAccess(&B)) {
2069  assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
2070  pred_begin(&B), pred_end(&B))) &&
2071  "Incomplete MemoryPhi Node");
2072  for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
2073  verifyUseInDefs(Phi->getIncomingValue(I), Phi);
2074  assert(find(predecessors(&B), Phi->getIncomingBlock(I)) !=
2075  pred_end(&B) &&
2076  "Incoming phi block not a block predecessor");
2077  }
2078  }
2079 
2080  for (Instruction &I : B) {
2081  if (MemoryUseOrDef *MA = getMemoryAccess(&I)) {
2082  verifyUseInDefs(MA->getDefiningAccess(), MA);
2083  }
2084  }
2085  }
2086 #endif
2087 }
2088 
2089 /// Perform a local numbering on blocks so that instruction ordering can be
2090 /// determined in constant time.
2091 /// TODO: We currently just number in order. If we numbered by N, we could
2092 /// allow at least N-1 sequences of insertBefore or insertAfter (and at least
2093 /// log2(N) sequences of mixed before and after) without needing to invalidate
2094 /// the numbering.
2095 void MemorySSA::renumberBlock(const BasicBlock *B) const {
2096  // The pre-increment ensures the numbers really start at 1.
2097  unsigned long CurrentNumber = 0;
2098  const AccessList *AL = getBlockAccesses(B);
2099  assert(AL != nullptr && "Asking to renumber an empty block");
2100  for (const auto &I : *AL)
2101  BlockNumbering[&I] = ++CurrentNumber;
2102  BlockNumberingValid.insert(B);
2103 }
2104 
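// Illustrative sketch (not upstream code) of the TODO above: numbering with a
// stride instead of by 1 would let an insertion claim a midpoint number rather
// than invalidating the whole block's numbering. Hypothetical helper:
LLVM_ATTRIBUTE_UNUSED static unsigned long
exampleMidpointNumber(unsigned long Prev, unsigned long Next) {
  assert(Next - Prev > 1 && "no unused number left between the neighbors");
  return Prev + (Next - Prev) / 2;
}
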
2105 /// Determine, for two memory accesses in the same block,
2106 /// whether \p Dominator dominates \p Dominatee.
2107 /// \returns True if \p Dominator dominates \p Dominatee.
2108 bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
2109  const MemoryAccess *Dominatee) const {
2110  const BasicBlock *DominatorBlock = Dominator->getBlock();
2111 
2112  assert((DominatorBlock == Dominatee->getBlock()) &&
2113  "Asking for local domination when accesses are in different blocks!");
2114  // A node dominates itself.
2115  if (Dominatee == Dominator)
2116  return true;
2117 
2118  // When Dominatee is defined on function entry, it is not dominated by another
2119  // memory access.
2120  if (isLiveOnEntryDef(Dominatee))
2121  return false;
2122 
2123  // When Dominator is defined on function entry, it dominates the other memory
2124  // access.
2125  if (isLiveOnEntryDef(Dominator))
2126  return true;
2127 
2128  if (!BlockNumberingValid.count(DominatorBlock))
2129  renumberBlock(DominatorBlock);
2130 
2131  unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
2132  // All numbers start at 1.
2133  assert(DominatorNum != 0 && "Block was not numbered properly");
2134  unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
2135  assert(DominateeNum != 0 && "Block was not numbered properly");
2136  return DominatorNum < DominateeNum;
2137 }
2138 
2139 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2140  const MemoryAccess *Dominatee) const {
2141  if (Dominator == Dominatee)
2142  return true;
2143 
2144  if (isLiveOnEntryDef(Dominatee))
2145  return false;
2146 
2147  if (Dominator->getBlock() != Dominatee->getBlock())
2148  return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
2149  return locallyDominates(Dominator, Dominatee);
2150 }
2151 
2152 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2153  const Use &Dominatee) const {
2154  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
2155  BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
2156  // The def must dominate the incoming block of the phi.
2157  if (UseBB != Dominator->getBlock())
2158  return DT->dominates(Dominator->getBlock(), UseBB);
2159  // If the UseBB and the DefBB are the same, compare locally.
2160  return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
2161  }
2162  // If it's not a PHI node use, the normal dominates can already handle it.
2163  return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
2164 }
2165 
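// Illustrative sketch (not upstream code): a typical client-side use of the
// dominance queries above, e.g. checking that a candidate defining access
// reaches a use that is about to be rewired.
LLVM_ATTRIBUTE_UNUSED static bool
exampleDefReachesUse(const MemorySSA &MSSA, const MemoryDef *Def,
                     const MemoryUse *Use) {
  // Same-block ordering is resolved via the local numbering above; the
  // cross-block case falls back to the DominatorTree.
  return MSSA.dominates(Def, Use);
}
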
2166 const static char LiveOnEntryStr[] = "liveOnEntry";
2167 
2168 void MemoryAccess::print(raw_ostream &OS) const {
2169  switch (getValueID()) {
2170  case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
2171  case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
2172  case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
2173  }
2174  llvm_unreachable("invalid value id");
2175 }
2176 
2177 void MemoryDef::print(raw_ostream &OS) const {
2178  MemoryAccess *UO = getDefiningAccess();
2179 
2180  auto printID = [&OS](MemoryAccess *A) {
2181  if (A && A->getID())
2182  OS << A->getID();
2183  else
2184  OS << LiveOnEntryStr;
2185  };
2186 
2187  OS << getID() << " = MemoryDef(";
2188  printID(UO);
2189  OS << ")";
2190 
2191  if (isOptimized()) {
2192  OS << "->";
2193  printID(getOptimized());
2194 
2195  if (Optional<AliasResult> AR = getOptimizedAccessType())
2196  OS << " " << *AR;
2197  }
2198 }
2199 
2200 void MemoryPhi::print(raw_ostream &OS) const {
2201  bool First = true;
2202  OS << getID() << " = MemoryPhi(";
2203  for (const auto &Op : operands()) {
2204  BasicBlock *BB = getIncomingBlock(Op);
2205  MemoryAccess *MA = cast<MemoryAccess>(Op);
2206  if (!First)
2207  OS << ',';
2208  else
2209  First = false;
2210 
2211  OS << '{';
2212  if (BB->hasName())
2213  OS << BB->getName();
2214  else
2215  BB->printAsOperand(OS, false);
2216  OS << ',';
2217  if (unsigned ID = MA->getID())
2218  OS << ID;
2219  else
2220  OS << LiveOnEntryStr;
2221  OS << '}';
2222  }
2223  OS << ')';
2224 }
2225 
2226 void MemoryUse::print(raw_ostream &OS) const {
2227  MemoryAccess *UO = getDefiningAccess();
2228  OS << "MemoryUse(";
2229  if (UO && UO->getID())
2230  OS << UO->getID();
2231  else
2232  OS << LiveOnEntryStr;
2233  OS << ')';
2234 
2235  if (Optional<AliasResult> AR = getOptimizedAccessType())
2236  OS << " " << *AR;
2237 }
2238 
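// Taken together, the print() methods above produce the annotations emitted by
// MemorySSAAnnotatedWriter. An illustrative (not verbatim) excerpt of
// annotated IR:
//
//   ; 1 = MemoryDef(liveOnEntry)
//     store i32 0, i32* %p
//   ; 2 = MemoryPhi({entry,1},{loop,3})
//   ; MemoryUse(2) MayAlias
//     %v = load i32, i32* %q
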
2239 void MemoryAccess::dump() const {
2240 // Cannot completely remove virtual function even in release mode.
2241 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2242  print(dbgs());
2243  dbgs() << "\n";
2244 #endif
2245 }
2246 
2247 char MemorySSAPrinterLegacyPass::ID = 0;
2248 
2249 MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
2250  initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
2251 }
2252 
2253 void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
2254  AU.setPreservesAll();
2255  AU.addRequired<MemorySSAWrapperPass>();
2256 }
2257 
2258 bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
2259  auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2260  MSSA.print(dbgs());
2261  if (VerifyMemorySSA)
2262  MSSA.verifyMemorySSA();
2263  return false;
2264 }
2265 
2266 AnalysisKey MemorySSAAnalysis::Key;
2267 
2268 MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
2269  FunctionAnalysisManager &AM) {
2270  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
2271  auto &AA = AM.getResult<AAManager>(F);
2272  return MemorySSAAnalysis::Result(std::make_unique<MemorySSA>(F, &AA, &DT));
2273 }
2274 
2275 bool MemorySSAAnalysis::Result::invalidate(
2276  Function &F, const PreservedAnalyses &PA,
2277  FunctionAnalysisManager::Invalidator &Inv) {
2278  auto PAC = PA.getChecker<MemorySSAAnalysis>();
2279  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
2280  Inv.invalidate<AAManager>(F, PA) ||
2281  Inv.invalidate<DominatorTreeAnalysis>(F, PA);
2282 }
2283 
2284 PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
2285  FunctionAnalysisManager &AM) {
2286  OS << "MemorySSA for function: " << F.getName() << "\n";
2287  AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);
2288 
2289  return PreservedAnalyses::all();
2290 }
2291 
2292 PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
2293  FunctionAnalysisManager &AM) {
2294  AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
2295 
2296  return PreservedAnalyses::all();
2297 }
2298 
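// These are the passes behind the user-facing printing and verification
// flags; for example (illustrative invocations), with the new pass manager:
//   opt -passes='print<memoryssa>' -disable-output test.ll
// and with the legacy pass manager:
//   opt -print-memoryssa -disable-output test.ll
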
2299 char MemorySSAWrapperPass::ID = 0;
2300 
2301 MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
2302  initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
2303 }
2304 
2305 void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
2306 
2307 void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2308  AU.setPreservesAll();
2309  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2310  AU.addRequiredTransitive<AAResultsWrapperPass>();
2311 }
2312 
2313 bool MemorySSAWrapperPass::runOnFunction(Function &F) {
2314  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2315  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2316  MSSA.reset(new MemorySSA(F, &AA, &DT));
2317  return false;
2318 }
2319 
2320 void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }
2321 
2322 void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
2323  MSSA->print(OS);
2324 }
2325 
2326 MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
2327 
2328 /// Walk the use-def chains starting at \p StartingAccess and find
2329 /// the MemoryAccess that actually clobbers Loc.
2330 ///
2331 /// \returns our clobbering memory access
2332 template <typename AliasAnalysisType>
2333 MemoryAccess *
2334 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2335  MemoryAccess *StartingAccess, const MemoryLocation &Loc,
2336  unsigned &UpwardWalkLimit) {
2337  if (isa<MemoryPhi>(StartingAccess))
2338  return StartingAccess;
2339 
2340  auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
2341  if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
2342  return StartingUseOrDef;
2343 
2344  Instruction *I = StartingUseOrDef->getMemoryInst();
2345 
2346  // Conservatively, fences are always clobbers, so don't perform the walk if we
2347  // hit a fence.
2348  if (!isa<CallBase>(I) && I->isFenceLike())
2349  return StartingUseOrDef;
2350 
2351  UpwardsMemoryQuery Q;
2352  Q.OriginalAccess = StartingUseOrDef;
2353  Q.StartingLoc = Loc;
2354  Q.Inst = I;
2355  Q.IsCall = false;
2356 
2357  // Unlike the other function, do not walk to the def of a def, because we are
2358  // handed something we already believe is the clobbering access.
2359  // We never set SkipSelf to true in Q in this method.
2360  MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
2361  ? StartingUseOrDef->getDefiningAccess()
2362  : StartingUseOrDef;
2363 
2364  MemoryAccess *Clobber =
2365  Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
2366  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2367  LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
2368  LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
2369  LLVM_DEBUG(dbgs() << *Clobber << "\n");
2370  return Clobber;
2371 }
2372 
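// Illustrative sketch (not upstream code): the location-based overload above
// serves queries such as "what clobbers the memory this memcpy reads", phrased
// against the memcpy's own access. `MT` and the helper name are assumptions.
LLVM_ATTRIBUTE_UNUSED static MemoryAccess *
exampleClobberOfSource(MemorySSA &MSSA, MemTransferInst *MT) {
  MemoryLocation SrcLoc = MemoryLocation::getForSource(MT);
  return MSSA.getWalker()->getClobberingMemoryAccess(MSSA.getMemoryAccess(MT),
                                                     SrcLoc);
}
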
2373 template <typename AliasAnalysisType>
2374 MemoryAccess *
2375 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2376  MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf) {
2377  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2378  // If this is a MemoryPhi, we can't do anything.
2379  if (!StartingAccess)
2380  return MA;
2381 
2382  bool IsOptimized = false;
2383 
2384  // If this is an already optimized use or def, return the optimized result.
2385  // Note: Currently, we store the optimized def result in a separate field,
2386  // since we can't use the defining access.
2387  if (StartingAccess->isOptimized()) {
2388  if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
2389  return StartingAccess->getOptimized();
2390  IsOptimized = true;
2391  }
2392 
2393  const Instruction *I = StartingAccess->getMemoryInst();
2394  // We can't sanely do anything with fences, since they conservatively clobber
2395  // all memory and have no locations to get pointers from to try to
2396  // disambiguate.
2397  if (!isa<CallBase>(I) && I->isFenceLike())
2398  return StartingAccess;
2399 
2400  UpwardsMemoryQuery Q(I, StartingAccess);
2401 
2402  if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) {
2403  MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2404  StartingAccess->setOptimized(LiveOnEntry);
2405  StartingAccess->setOptimizedAccessType(None);
2406  return LiveOnEntry;
2407  }
2408 
2409  MemoryAccess *OptimizedAccess;
2410  if (!IsOptimized) {
2411  // Start with the thing we already think clobbers this location
2412  MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2413 
2414  // At this point, DefiningAccess may be the live on entry def.
2415  // If it is, we will not get a better result.
2416  if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
2417  StartingAccess->setOptimized(DefiningAccess);
2418  StartingAccess->setOptimizedAccessType(None);
2419  return DefiningAccess;
2420  }
2421 
2422  OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
2423  StartingAccess->setOptimized(OptimizedAccess);
2424  if (MSSA->isLiveOnEntryDef(OptimizedAccess))
2425  StartingAccess->setOptimizedAccessType(None);
2426  else if (Q.AR == MustAlias)
2427  StartingAccess->setOptimizedAccessType(MustAlias);
2428  } else
2429  OptimizedAccess = StartingAccess->getOptimized();
2430 
2431  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2432  LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
2433  LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
2434  LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");
2435 
2436  MemoryAccess *Result;
2437  if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
2438  isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
2439  assert(isa<MemoryDef>(Q.OriginalAccess));
2440  Q.SkipSelfAccess = true;
2441  Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit);
2442  } else
2443  Result = OptimizedAccess;
2444 
2445  LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
2446  LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");
2447 
2448  return Result;
2449 }
2450 
2451 MemoryAccess *
2452 DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2453  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2454  return Use->getDefiningAccess();
2455  return MA;
2456 }
2457 
2458 MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2459  MemoryAccess *StartingAccess, const MemoryLocation &) {
2460  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2461  return Use->getDefiningAccess();
2462  return StartingAccess;
2463 }
2464 
2465 void MemoryPhi::deleteMe(DerivedUser *Self) {
2466  delete static_cast<MemoryPhi *>(Self);
2467 }
2468 
2469 void MemoryDef::deleteMe(DerivedUser *Self) {
2470  delete static_cast<MemoryDef *>(Self);
2471 }
2472 
2473 void MemoryUse::deleteMe(DerivedUser *Self) {
2474  delete static_cast<MemoryUse *>(Self);
2475 }