LLVM  10.0.0svn
MemorySSA.h
1 //===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file exposes an interface to building/using memory SSA to
11 /// walk memory instructions using a use/def graph.
12 ///
13 /// The MemorySSA class builds an SSA form that links together memory access
14 /// instructions such as loads, stores, atomics, and calls. Additionally, it
15 /// does a trivial form of "heap versioning": every time the memory state
16 /// changes in the program, we generate a new heap version. It generates
17 /// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
18 ///
19 /// As a trivial example,
20 /// define i32 @main() #0 {
21 /// entry:
22 /// %call = call noalias i8* @_Znwm(i64 4) #2
23 /// %0 = bitcast i8* %call to i32*
24 /// %call1 = call noalias i8* @_Znwm(i64 4) #2
25 /// %1 = bitcast i8* %call1 to i32*
26 /// store i32 5, i32* %0, align 4
27 /// store i32 7, i32* %1, align 4
28 /// %2 = load i32* %0, align 4
29 /// %3 = load i32* %1, align 4
30 /// %add = add nsw i32 %2, %3
31 /// ret i32 %add
32 /// }
33 ///
34 /// Will become
35 /// define i32 @main() #0 {
36 /// entry:
37 /// ; 1 = MemoryDef(0)
38 /// %call = call noalias i8* @_Znwm(i64 4) #3
39 /// %2 = bitcast i8* %call to i32*
40 /// ; 2 = MemoryDef(1)
41 /// %call1 = call noalias i8* @_Znwm(i64 4) #3
42 /// %4 = bitcast i8* %call1 to i32*
43 /// ; 3 = MemoryDef(2)
44 /// store i32 5, i32* %2, align 4
45 /// ; 4 = MemoryDef(3)
46 /// store i32 7, i32* %4, align 4
47 /// ; MemoryUse(3)
48 /// %7 = load i32* %2, align 4
49 /// ; MemoryUse(4)
50 /// %8 = load i32* %4, align 4
51 /// %add = add nsw i32 %7, %8
52 /// ret i32 %add
53 /// }
54 ///
55 /// Given this form, all the stores that could ever affect the load at %8 can be
56 /// found by taking the MemoryUse associated with it and walking from use to
57 /// def until you hit the top of the function.
58 ///
59 /// Each def also has a list of users associated with it, so you can walk from
60 /// both def to users, and users to defs. Note that we disambiguate MemoryUses,
61 /// but not the RHS of MemoryDefs. You can see this above at %7, which would
62 /// otherwise be a MemoryUse(4). Being disambiguated means that for a given
63 /// store, all the MemoryUses on its use lists are may-aliases of that store
64 /// (but the MemoryDefs on its use list may not be).
65 ///
66 /// MemoryDefs are not disambiguated because it would require multiple reaching
67 /// definitions, which would require multiple phis, and multiple memoryaccesses
68 /// per instruction.
69 //
70 //===----------------------------------------------------------------------===//
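// A minimal sketch of how a client might walk from a load's MemoryUse up the
// def chain using the API declared below; the names `walkDefsOfLoad`, `MSSA`,
// and `LoadInstr` are illustrative assumptions:
//
//   void walkDefsOfLoad(llvm::MemorySSA &MSSA, llvm::Instruction &LoadInstr) {
//     // For a load, this is the MemoryUse overlaid on the instruction.
//     llvm::MemoryAccess *MA = MSSA.getMemoryAccess(&LoadInstr);
//     while (MA && !MSSA.isLiveOnEntryDef(MA)) {
//       if (auto *UOD = llvm::dyn_cast<llvm::MemoryUseOrDef>(MA))
//         MA = UOD->getDefiningAccess(); // hop from the use/def to its def
//       else
//         break; // reached a MemoryPhi; a client would pick an incoming value
//     }
//   }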
71 
72 #ifndef LLVM_ANALYSIS_MEMORYSSA_H
73 #define LLVM_ANALYSIS_MEMORYSSA_H
74 
75 #include "llvm/ADT/DenseMap.h"
76 #include "llvm/ADT/GraphTraits.h"
77 #include "llvm/ADT/SmallPtrSet.h"
78 #include "llvm/ADT/SmallVector.h"
79 #include "llvm/ADT/ilist.h"
80 #include "llvm/ADT/ilist_node.h"
81 #include "llvm/ADT/iterator.h"
82 #include "llvm/ADT/iterator_range.h"
83 #include "llvm/ADT/simple_ilist.h"
84 #include "llvm/Analysis/AliasAnalysis.h"
85 #include "llvm/Analysis/MemoryLocation.h"
86 #include "llvm/Analysis/PHITransAddr.h"
87 #include "llvm/IR/BasicBlock.h"
88 #include "llvm/IR/DerivedUser.h"
89 #include "llvm/IR/Dominators.h"
90 #include "llvm/IR/Module.h"
91 #include "llvm/IR/Type.h"
92 #include "llvm/IR/Use.h"
93 #include "llvm/IR/User.h"
94 #include "llvm/IR/Value.h"
95 #include "llvm/IR/ValueHandle.h"
96 #include "llvm/Pass.h"
97 #include "llvm/Support/Casting.h"
98 #include <algorithm>
99 #include <cassert>
100 #include <cstddef>
101 #include <iterator>
102 #include <memory>
103 #include <utility>
104 
105 namespace llvm {
106 
107 /// Enables MemorySSA as a dependency for loop passes.
108 extern cl::opt<bool> EnableMSSALoopDependency;
109 
110 class Function;
111 class Instruction;
112 class MemoryAccess;
113 class MemorySSAWalker;
114 class LLVMContext;
115 class raw_ostream;
116 
117 namespace MSSAHelpers {
118 
119 struct AllAccessTag {};
120 struct DefsOnlyTag {};
121 
122 } // end namespace MSSAHelpers
123 
124 enum : unsigned {
125  // Used to signify what the default invalid ID is for MemoryAccess's
126  // getID()
127  INVALID_MEMORYACCESS_ID = -1U
128 };
129 
130 template <class T> class memoryaccess_def_iterator_base;
131 using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
132 using const_memoryaccess_def_iterator =
133  memoryaccess_def_iterator_base<const MemoryAccess>;
134 
135 // The base for all memory accesses. All memory accesses in a block are
136 // linked together using an intrusive list.
137 class MemoryAccess
138  : public DerivedUser,
139  public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
140  public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
141 public:
142  using AllAccessType =
143  ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
144  using DefsOnlyType =
145  ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
146 
147  MemoryAccess(const MemoryAccess &) = delete;
148  MemoryAccess &operator=(const MemoryAccess &) = delete;
149 
150  void *operator new(size_t) = delete;
151 
152  // Methods for support type inquiry through isa, cast, and
153  // dyn_cast
154  static bool classof(const Value *V) {
155  unsigned ID = V->getValueID();
156  return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
157  }
158 
159  BasicBlock *getBlock() const { return Block; }
160 
161  void print(raw_ostream &OS) const;
162  void dump() const;
163 
164  /// The user iterators for a memory access
165  using iterator = user_iterator;
166  using const_iterator = const_user_iterator;
167 
168  /// This iterator walks over all of the defs in a given
169  /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
170  /// MemoryUse/MemoryDef, this walks the defining access.
171  memoryaccess_def_iterator defs_begin();
172  const_memoryaccess_def_iterator defs_begin() const;
173  memoryaccess_def_iterator defs_end();
174  const_memoryaccess_def_iterator defs_end() const;
175 
176  /// Get the iterators for the all access list and the defs only list
177  /// We default to the all access list.
178  AllAccessType::self_iterator getIterator() {
179  return this->AllAccessType::getIterator();
180  }
181  AllAccessType::const_self_iterator getIterator() const {
182  return this->AllAccessType::getIterator();
183  }
184  AllAccessType::reverse_self_iterator getReverseIterator() {
185  return this->AllAccessType::getReverseIterator();
186  }
187  AllAccessType::const_reverse_self_iterator getReverseIterator() const {
188  return this->AllAccessType::getReverseIterator();
189  }
190  DefsOnlyType::self_iterator getDefsIterator() {
191  return this->DefsOnlyType::getIterator();
192  }
193  DefsOnlyType::const_self_iterator getDefsIterator() const {
194  return this->DefsOnlyType::getIterator();
195  }
196  DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
197  return this->DefsOnlyType::getReverseIterator();
198  }
199  DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
200  return this->DefsOnlyType::getReverseIterator();
201  }
202 
203 protected:
204  friend class MemoryDef;
205  friend class MemoryPhi;
206  friend class MemorySSA;
207  friend class MemoryUse;
208  friend class MemoryUseOrDef;
209 
210  /// Used by MemorySSA to change the block of a MemoryAccess when it is
211  /// moved.
212  void setBlock(BasicBlock *BB) { Block = BB; }
213 
214  /// Used for debugging and tracking things about MemoryAccesses.
215  /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
216  inline unsigned getID() const;
217 
218  MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
219  BasicBlock *BB, unsigned NumOperands)
220  : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
221  Block(BB) {}
222 
223  // Use deleteValue() to delete a generic MemoryAccess.
224  ~MemoryAccess() = default;
225 
226 private:
227  BasicBlock *Block;
228 };
229 
230 template <>
231 struct ilist_alloc_traits<MemoryAccess> {
232  static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); }
233 };
234 
235 inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
236  MA.print(OS);
237  return OS;
238 }
239 
240 /// Class that has the common methods + fields of memory uses/defs. It's
241 /// a little awkward to have, but there are many cases where we want either a
242 /// use or def, and there are many cases where uses are needed (defs aren't
243 /// acceptable), and vice-versa.
244 ///
245 /// This class should never be instantiated directly; make a MemoryUse or
246 /// MemoryDef instead.
247 class MemoryUseOrDef : public MemoryAccess {
248 public:
249  void *operator new(size_t) = delete;
250 
251  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
252 
253  /// Get the instruction that this MemoryUse represents.
254  Instruction *getMemoryInst() const { return MemoryInstruction; }
255 
256  /// Get the access that produces the memory state used by this Use.
257  MemoryAccess *getDefiningAccess() const { return getOperand(0); }
258 
259  static bool classof(const Value *MA) {
260  return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
261  }
262 
263  // Sadly, these have to be public because they are needed in some of the
264  // iterators.
265  inline bool isOptimized() const;
266  inline MemoryAccess *getOptimized() const;
267  inline void setOptimized(MemoryAccess *);
268 
269  // Retrieve AliasResult type of the optimized access. Ideally this would be
270  // returned by the caching walker and may go away in the future.
271  Optional<AliasResult> getOptimizedAccessType() const {
272  return OptimizedAccessAlias;
273  }
274 
275  /// Reset the ID of what this MemoryUse was optimized to, causing it to
276  /// be rewalked by the walker if necessary.
277  /// This really should only be called by tests.
278  inline void resetOptimized();
279 
280 protected:
281  friend class MemorySSA;
282  friend class MemorySSAUpdater;
283 
284  MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
285  DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB,
286  unsigned NumOperands)
287  : MemoryAccess(C, Vty, DeleteValue, BB, NumOperands),
288  MemoryInstruction(MI), OptimizedAccessAlias(MayAlias) {
289  setDefiningAccess(DMA);
290  }
291 
292  // Use deleteValue() to delete a generic MemoryUseOrDef.
293  ~MemoryUseOrDef() = default;
294 
295  void setOptimizedAccessType(Optional<AliasResult> AR) {
296  OptimizedAccessAlias = AR;
297  }
298 
299  void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false,
300  Optional<AliasResult> AR = MayAlias) {
301  if (!Optimized) {
302  setOperand(0, DMA);
303  return;
304  }
305  setOptimized(DMA);
306  setOptimizedAccessType(AR);
307  }
308 
309 private:
310  Instruction *MemoryInstruction;
311  Optional<AliasResult> OptimizedAccessAlias;
312 };
313 
314 /// Represents read-only accesses to memory
315 ///
316 /// In particular, the set of Instructions that will be represented by
317 /// MemoryUse's is exactly the set of Instructions for which
318 /// AliasAnalysis::getModRefInfo returns "Ref".
319 class MemoryUse final : public MemoryUseOrDef {
320 public:
321  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
322 
323  MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
324  : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB,
325  /*NumOperands=*/1) {}
326 
327  // allocate space for exactly one operand
328  void *operator new(size_t s) { return User::operator new(s, 1); }
329 
330  static bool classof(const Value *MA) {
331  return MA->getValueID() == MemoryUseVal;
332  }
333 
334  void print(raw_ostream &OS) const;
335 
336  void setOptimized(MemoryAccess *DMA) {
337  OptimizedID = DMA->getID();
338  setOperand(0, DMA);
339  }
340 
341  bool isOptimized() const {
342  return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
343  }
344 
345  MemoryAccess *getOptimized() const {
346  return getDefiningAccess();
347  }
348 
349  void resetOptimized() {
350  OptimizedID = INVALID_MEMORYACCESS_ID;
351  }
352 
353 protected:
354  friend class MemorySSA;
355 
356 private:
357  static void deleteMe(DerivedUser *Self);
358 
359  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
360 };
361 
362 template <>
363 struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
364 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
365 
366 /// Represents a read-write access to memory, whether it is a must-alias,
367 /// or a may-alias.
368 ///
369 /// In particular, the set of Instructions that will be represented by
370 /// MemoryDef's is exactly the set of Instructions for which
371 /// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
372 /// Note that, in order to provide def-def chains, all defs also have a use
373 /// associated with them. This use points to the nearest reaching
374 /// MemoryDef/MemoryPhi.
375 class MemoryDef final : public MemoryUseOrDef {
376 public:
377  friend class MemorySSA;
378 
379  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
380 
381  MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
382  unsigned Ver)
383  : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB,
384  /*NumOperands=*/2),
385  ID(Ver) {}
386 
387  // allocate space for exactly two operands
388  void *operator new(size_t s) { return User::operator new(s, 2); }
389 
390  static bool classof(const Value *MA) {
391  return MA->getValueID() == MemoryDefVal;
392  }
393 
394  void setOptimized(MemoryAccess *MA) {
395  setOperand(1, MA);
396  OptimizedID = MA->getID();
397  }
398 
399  MemoryAccess *getOptimized() const {
400  return cast_or_null<MemoryAccess>(getOperand(1));
401  }
402 
403  bool isOptimized() const {
404  return getOptimized() && OptimizedID == getOptimized()->getID();
405  }
406 
407  void resetOptimized() {
408  OptimizedID = INVALID_MEMORYACCESS_ID;
409  setOperand(1, nullptr);
410  }
411 
412  void print(raw_ostream &OS) const;
413 
414  unsigned getID() const { return ID; }
415 
416 private:
417  static void deleteMe(DerivedUser *Self);
418 
419  const unsigned ID;
420  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
421 };
422 
423 template <>
424 struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {};
425 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
426 
427 template <>
428 struct OperandTraits<MemoryUseOrDef> {
429  static Use *op_begin(MemoryUseOrDef *MUD) {
430  if (auto *MU = dyn_cast<MemoryUse>(MUD))
431  return OperandTraits<MemoryUse>::op_begin(MU);
432  return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD));
433  }
434 
435  static Use *op_end(MemoryUseOrDef *MUD) {
436  if (auto *MU = dyn_cast<MemoryUse>(MUD))
437  return OperandTraits<MemoryUse>::op_end(MU);
438  return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD));
439  }
440 
441  static unsigned operands(const MemoryUseOrDef *MUD) {
442  if (const auto *MU = dyn_cast<MemoryUse>(MUD))
443  return OperandTraits<MemoryUse>::operands(MU);
444  return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD));
445  }
446 };
447 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
448 
449 /// Represents phi nodes for memory accesses.
450 ///
451 /// These have the same semantic as regular phi nodes, with the exception that
452 /// only one phi will ever exist in a given basic block.
453 /// Guaranteeing one phi per block means guaranteeing there is only ever one
454 /// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
455 /// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
456 /// a MemoryPhi's operands.
457 /// That is, given
458 /// if (a) {
459 /// store %a
460 /// store %b
461 /// }
462 /// it *must* be transformed into
463 /// if (a) {
464 /// 1 = MemoryDef(liveOnEntry)
465 /// store %a
466 /// 2 = MemoryDef(1)
467 /// store %b
468 /// }
469 /// and *not*
470 /// if (a) {
471 /// 1 = MemoryDef(liveOnEntry)
472 /// store %a
473 /// 2 = MemoryDef(liveOnEntry)
474 /// store %b
475 /// }
476 /// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
477 /// end of the branch, and if there are not two phi nodes, one will be
478 /// disconnected completely from the SSA graph below that point.
479 /// Because MemoryUse's do not generate new definitions, they do not have this
480 /// issue.
481 class MemoryPhi final : public MemoryAccess {
482  // allocate space for exactly zero operands
483  void *operator new(size_t s) { return User::operator new(s); }
484 
485 public:
486  /// Provide fast operand accessors
487  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
488 
489  MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
490  : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
491  ReservedSpace(NumPreds) {
492  allocHungoffUses(ReservedSpace);
493  }
494 
495  // Block iterator interface. This provides access to the list of incoming
496  // basic blocks, which parallels the list of incoming values.
497  using block_iterator = BasicBlock **;
498  using const_block_iterator = BasicBlock *const *;
499 
500  block_iterator block_begin() {
501  auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace);
502  return reinterpret_cast<block_iterator>(Ref + 1);
503  }
504 
505  const_block_iterator block_begin() const {
506  const auto *Ref =
507  reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace);
508  return reinterpret_cast<const_block_iterator>(Ref + 1);
509  }
510 
511  block_iterator block_end() { return block_begin() + getNumOperands(); }
512 
513  const_block_iterator block_end() const {
514  return block_begin() + getNumOperands();
515  }
516 
517  iterator_range<block_iterator> blocks() {
518  return make_range(block_begin(), block_end());
519  }
520 
521  iterator_range<const_block_iterator> blocks() const {
522  return make_range(block_begin(), block_end());
523  }
524 
525  op_range incoming_values() { return operands(); }
526 
527  const_op_range incoming_values() const { return operands(); }
528 
529  /// Return the number of incoming edges
530  unsigned getNumIncomingValues() const { return getNumOperands(); }
531 
532  /// Return incoming value number \p I.
533  MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
534  void setIncomingValue(unsigned I, MemoryAccess *V) {
535  assert(V && "PHI node got a null value!");
536  setOperand(I, V);
537  }
538 
539  static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
540  static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }
541 
542  /// Return incoming basic block number \p I.
543  BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }
544 
545  /// Return incoming basic block corresponding
546  /// to an operand of the PHI.
547  BasicBlock *getIncomingBlock(const Use &U) const {
548  assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
549  return getIncomingBlock(unsigned(&U - op_begin()));
550  }
551 
552  /// Return incoming basic block corresponding
553  /// to value use iterator.
554  BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
555  return getIncomingBlock(I.getUse());
556  }
557 
558  void setIncomingBlock(unsigned I, BasicBlock *BB) {
559  assert(BB && "PHI node got a null basic block!");
560  block_begin()[I] = BB;
561  }
562 
563  /// Add an incoming value to the end of the PHI list
564  void addIncoming(MemoryAccess *V, BasicBlock *BB) {
565  if (getNumOperands() == ReservedSpace)
566  growOperands(); // Get more space!
567  // Initialize some new operands.
568  setNumHungOffUseOperands(getNumOperands() + 1);
569  setIncomingValue(getNumOperands() - 1, V);
570  setIncomingBlock(getNumOperands() - 1, BB);
571  }
572 
573  /// Return the first index of the specified basic
574  /// block in the value list for this PHI. Returns -1 if no instance.
575  int getBasicBlockIndex(const BasicBlock *BB) const {
576  for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
577  if (block_begin()[I] == BB)
578  return I;
579  return -1;
580  }
581 
582  MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const {
583  int Idx = getBasicBlockIndex(BB);
584  assert(Idx >= 0 && "Invalid basic block argument!");
585  return getIncomingValue(Idx);
586  }
587 
588  // After deleting incoming position I, the order of incoming may be changed.
589  void unorderedDeleteIncoming(unsigned I) {
590  unsigned E = getNumOperands();
591  assert(I < E && "Cannot remove out of bounds Phi entry.");
592  // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi
593  // itself should be deleted.
594  assert(E >= 2 && "Can only remove incoming values from MemoryPhis with "
595  "at least 2 values.");
596  setIncomingValue(I, getIncomingValue(E - 1));
597  setIncomingBlock(I, block_begin()[E - 1]);
598  setOperand(E - 1, nullptr);
599  block_begin()[E - 1] = nullptr;
600  setNumHungOffUseOperands(getNumOperands() - 1);
601  }
602 
603  // After deleting entries that satisfy Pred, remaining entries may have
604  // changed order.
605  template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) {
606  for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
607  if (Pred(getIncomingValue(I), getIncomingBlock(I))) {
608  unorderedDeleteIncoming(I);
609  E = getNumOperands();
610  --I;
611  }
612  assert(getNumOperands() >= 1 &&
613  "Cannot remove all incoming blocks in a MemoryPhi.");
614  }
615 
616  // After deleting incoming block BB, the incoming blocks order may be changed.
617  void unorderedDeleteIncomingBlock(const BasicBlock *BB) {
618  unorderedDeleteIncomingIf(
619  [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; });
620  }
621 
622  // After deleting incoming memory access MA, the incoming accesses order may
623  // be changed.
624  void unorderedDeleteIncomingValue(const MemoryAccess *MA) {
625  unorderedDeleteIncomingIf(
626  [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; });
627  }
628 
629  static bool classof(const Value *V) {
630  return V->getValueID() == MemoryPhiVal;
631  }
632 
633  void print(raw_ostream &OS) const;
634 
635  unsigned getID() const { return ID; }
636 
637 protected:
638  friend class MemorySSA;
639 
640  /// this is more complicated than the generic
641  /// User::allocHungoffUses, because we have to allocate Uses for the incoming
642  /// values and pointers to the incoming blocks, all in one allocation.
643  void allocHungoffUses(unsigned N) {
644  User::allocHungoffUses(N, /* IsPhi */ true);
645  }
646 
647 private:
648  // For debugging only
649  const unsigned ID;
650  unsigned ReservedSpace;
651 
652  /// This grows the operand list in response to a push_back style of
653  /// operation. This grows the number of ops by 1.5 times.
654  void growOperands() {
655  unsigned E = getNumOperands();
656  // 2 op PHI nodes are VERY common, so reserve at least enough for that.
657  ReservedSpace = std::max(E + E / 2, 2u);
658  growHungoffUses(ReservedSpace, /* IsPhi */ true);
659  }
660 
661  static void deleteMe(DerivedUser *Self);
662 };
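// A minimal sketch of iterating a MemoryPhi's incoming values together with
// their predecessor blocks via the accessors above; `printPhiIncoming`, `Phi`,
// and `OS` are illustrative assumptions:
//
//   void printPhiIncoming(const llvm::MemoryPhi &Phi, llvm::raw_ostream &OS) {
//     for (unsigned I = 0, E = Phi.getNumIncomingValues(); I != E; ++I) {
//       OS << Phi.getIncomingBlock(I)->getName() << " -> ";
//       Phi.getIncomingValue(I)->print(OS); // the reaching MemoryDef/Phi
//       OS << "\n";
//     }
//   }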
663 
664 inline unsigned MemoryAccess::getID() const {
665  assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
666  "only memory defs and phis have ids");
667  if (const auto *MD = dyn_cast<MemoryDef>(this))
668  return MD->getID();
669  return cast<MemoryPhi>(this)->getID();
670 }
671 
672 inline bool MemoryUseOrDef::isOptimized() const {
673  if (const auto *MD = dyn_cast<MemoryDef>(this))
674  return MD->isOptimized();
675  return cast<MemoryUse>(this)->isOptimized();
676 }
677 
678 inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
679  if (const auto *MD = dyn_cast<MemoryDef>(this))
680  return MD->getOptimized();
681  return cast<MemoryUse>(this)->getOptimized();
682 }
683 
684 inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
685  if (auto *MD = dyn_cast<MemoryDef>(this))
686  MD->setOptimized(MA);
687  else
688  cast<MemoryUse>(this)->setOptimized(MA);
689 }
690 
691 inline void MemoryUseOrDef::resetOptimized() {
692  if (auto *MD = dyn_cast<MemoryDef>(this))
693  MD->resetOptimized();
694  else
695  cast<MemoryUse>(this)->resetOptimized();
696 }
697 
698 template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
699 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
700 
701 /// Encapsulates MemorySSA, including all data associated with memory
702 /// accesses.
703 class MemorySSA {
704 public:
705  MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
706 
707  // MemorySSA must remain where it's constructed; Walkers it creates store
708  // pointers to it.
709  MemorySSA(MemorySSA &&) = delete;
710 
711  ~MemorySSA();
712 
713  MemorySSAWalker *getWalker();
714  MemorySSAWalker *getSkipSelfWalker();
715 
716  /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
717  /// access associated with it. If passed a basic block gets the memory phi
718  /// node that exists for that block, if there is one. Otherwise, this will get
719  /// a MemoryUseOrDef.
720  MemoryUseOrDef *getMemoryAccess(const Instruction *I) const {
721  return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
722  }
723 
724  MemoryPhi *getMemoryAccess(const BasicBlock *BB) const {
725  return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
726  }
727 
728  void dump() const;
729  void print(raw_ostream &) const;
730 
731  /// Return true if \p MA represents the live on entry value
732  ///
733  /// Loads and stores from pointer arguments and other global values may be
734  /// defined by memory operations that do not occur in the current function, so
735  /// they may be live on entry to the function. MemorySSA represents such
736  /// memory state by the live on entry definition, which is guaranteed to occur
737  /// before any other memory access in the function.
738  inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
739  return MA == LiveOnEntryDef.get();
740  }
741 
742  inline MemoryAccess *getLiveOnEntryDef() const {
743  return LiveOnEntryDef.get();
744  }
745 
746  // Sadly, iplists, by default, owns and deletes pointers added to the
747  // list. It's not currently possible to have two iplists for the same type,
748  // where one owns the pointers, and one does not. This is because the traits
749  // are per-type, not per-tag. If this ever changes, we should make the
750  // DefList an iplist.
751  using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
752  using DefsList =
753  simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
754 
755  /// Return the list of MemoryAccess's for a given basic block.
756  ///
757  /// This list is not modifiable by the user.
758  const AccessList *getBlockAccesses(const BasicBlock *BB) const {
759  return getWritableBlockAccesses(BB);
760  }
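// A minimal sketch of walking every MemoryAccess recorded for one basic block
// via getBlockAccesses() above; `printAccessesIn`, `MSSA`, `BB`, and `OS` are
// illustrative assumptions (the returned list may be null):
//
//   void printAccessesIn(const llvm::MemorySSA &MSSA,
//                        const llvm::BasicBlock &BB, llvm::raw_ostream &OS) {
//     if (const auto *Accesses = MSSA.getBlockAccesses(&BB))
//       for (const llvm::MemoryAccess &MA : *Accesses) {
//         MA.print(OS);
//         OS << "\n";
//       }
//   }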
761 
762  /// Return the list of MemoryDef's and MemoryPhi's for a given basic
763  /// block.
764  ///
765  /// This list is not modifiable by the user.
766  const DefsList *getBlockDefs(const BasicBlock *BB) const {
767  return getWritableBlockDefs(BB);
768  }
769 
770  /// Given two memory accesses in the same basic block, determine
771  /// whether MemoryAccess \p A dominates MemoryAccess \p B.
772  bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
773 
774  /// Given two memory accesses in potentially different blocks,
775  /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
776  bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
777 
778  /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
779  /// dominates Use \p B.
780  bool dominates(const MemoryAccess *A, const Use &B) const;
781 
782  /// Verify that MemorySSA is self consistent (IE definitions dominate
783  /// all uses, uses appear in the right places). This is used by unit tests.
784  void verifyMemorySSA() const;
785 
786  /// Used in various insertion functions to specify whether we are talking
787  /// about the beginning or end of a block.
788  enum InsertionPlace { Beginning, End };
789 
790 protected:
791  // Used by Memory SSA annotator, dumpers, and wrapper pass
792  friend class MemorySSAAnnotatedWriter;
793  friend class MemorySSAPrinterLegacyPass;
794  friend class MemorySSAUpdater;
795 
796  void verifyPrevDefInPhis(Function &F) const;
797  void verifyDefUses(Function &F) const;
798  void verifyDomination(Function &F) const;
799  void verifyOrdering(Function &F) const;
800  void verifyDominationNumbers(const Function &F) const;
801 
802  // This is used by the use optimizer and updater.
803  AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
804  auto It = PerBlockAccesses.find(BB);
805  return It == PerBlockAccesses.end() ? nullptr : It->second.get();
806  }
807 
808  // This is used by the use optimizer and updater.
809  DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
810  auto It = PerBlockDefs.find(BB);
811  return It == PerBlockDefs.end() ? nullptr : It->second.get();
812  }
813 
814  // These are used by the updater to perform various internal MemorySSA
815  // machinations. They do not always leave the IR in a correct state, and
816  // rely on the updater to fix up what they break, so they are not public.
817 
818  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
819  void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point);
820 
821  // Rename the dominator tree branch rooted at BB.
822  void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
823  SmallPtrSetImpl<BasicBlock *> &Visited) {
824  renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
825  }
826 
827  void removeFromLookups(MemoryAccess *);
828  void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
829  void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
830  InsertionPlace);
831  void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
832  AccessList::iterator);
833  MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *,
834  const MemoryUseOrDef *Template = nullptr,
835  bool CreationMustSucceed = true);
836 
837 private:
838  template <class AliasAnalysisType> class ClobberWalkerBase;
839  template <class AliasAnalysisType> class CachingWalker;
840  template <class AliasAnalysisType> class SkipSelfWalker;
841  class OptimizeUses;
842 
843  CachingWalker<AliasAnalysis> *getWalkerImpl();
844  void buildMemorySSA(BatchAAResults &BAA);
845  void optimizeUses();
846 
847  void prepareForMoveTo(MemoryAccess *, BasicBlock *);
848  void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;
849 
850  using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
851  using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;
852 
853  void
854  determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks);
855  void markUnreachableAsLiveOnEntry(BasicBlock *BB);
856  bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
857  MemoryPhi *createMemoryPhi(BasicBlock *BB);
858  template <typename AliasAnalysisType>
859  MemoryUseOrDef *createNewAccess(Instruction *, AliasAnalysisType *,
860  const MemoryUseOrDef *Template = nullptr);
861  MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
862  void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
863  MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
864  void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
865  void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
866  SmallPtrSetImpl<BasicBlock *> &Visited,
867  bool SkipVisited = false, bool RenameAllUses = false);
868  AccessList *getOrCreateAccessList(const BasicBlock *);
869  DefsList *getOrCreateDefsList(const BasicBlock *);
870  void renumberBlock(const BasicBlock *) const;
871  AliasAnalysis *AA;
872  DominatorTree *DT;
873  Function &F;
874 
875  // Memory SSA mappings
876  DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;
877 
878  // These two mappings contain the main block to access/def mappings for
879  // MemorySSA. The list contained in PerBlockAccesses really owns all the
880  // MemoryAccesses.
881  // Both maps maintain the invariant that if a block is found in them, the
882  // corresponding list is not empty, and if a block is not found in them, the
883  // corresponding list is empty.
884  AccessMap PerBlockAccesses;
885  DefsMap PerBlockDefs;
886  std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef;
887 
888  // Domination mappings
889  // Note that the numbering is local to a block, even though the map is
890  // global.
891  mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
892  mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
893 
894  // Memory SSA building info
895  std::unique_ptr<ClobberWalkerBase<AliasAnalysis>> WalkerBase;
896  std::unique_ptr<CachingWalker<AliasAnalysis>> Walker;
897  std::unique_ptr<SkipSelfWalker<AliasAnalysis>> SkipWalker;
898  unsigned NextID;
899 };
900 
901 // Internal MemorySSA utils, for use by MemorySSA classes and walkers
902 class MemorySSAUtil {
903 protected:
904  friend class GVNHoist;
905  friend class MemorySSAWalker;
906 
907  // This function should not be used by new passes.
908  static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
909  AliasAnalysis &AA);
910 };
911 
912 // This pass does eager building and then printing of MemorySSA. It is used by
913 // the tests to be able to build, dump, and verify Memory SSA.
914 class MemorySSAPrinterLegacyPass : public FunctionPass {
915 public:
916  MemorySSAPrinterLegacyPass();
917 
918  bool runOnFunction(Function &) override;
919  void getAnalysisUsage(AnalysisUsage &AU) const override;
920 
921  static char ID;
922 };
923 
924 /// An analysis that produces \c MemorySSA for a function.
925 ///
926 class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
927  friend AnalysisInfoMixin<MemorySSAAnalysis>;
928 
929  static AnalysisKey Key;
930 
931 public:
932  // Wrap MemorySSA result to ensure address stability of internal MemorySSA
933  // pointers after construction. Use a wrapper class instead of plain
934  // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
935  struct Result {
936  Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}
937 
938  MemorySSA &getMSSA() { return *MSSA.get(); }
939 
940  std::unique_ptr<MemorySSA> MSSA;
941 
942  bool invalidate(Function &F, const PreservedAnalyses &PA,
943  FunctionAnalysisManager::Invalidator &Inv);
944  };
945 
946  Result run(Function &F, FunctionAnalysisManager &AM);
947 };
948 
949 /// Printer pass for \c MemorySSA.
950 class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
951  raw_ostream &OS;
952 
953 public:
954  explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}
955 
956  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
957 };
958 
959 /// Verifier pass for \c MemorySSA.
960 struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
961  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
962 };
963 
964 /// Legacy analysis pass which computes \c MemorySSA.
965 class MemorySSAWrapperPass : public FunctionPass {
966 public:
967  MemorySSAWrapperPass();
968 
969  static char ID;
970 
971  bool runOnFunction(Function &) override;
972  void releaseMemory() override;
973  MemorySSA &getMSSA() { return *MSSA; }
974  const MemorySSA &getMSSA() const { return *MSSA; }
975 
976  void getAnalysisUsage(AnalysisUsage &AU) const override;
977 
978  void verifyAnalysis() const override;
979  void print(raw_ostream &OS, const Module *M = nullptr) const override;
980 
981 private:
982  std::unique_ptr<MemorySSA> MSSA;
983 };
984 
985 /// This is the generic walker interface for walkers of MemorySSA.
986 /// Walkers are used to further disambiguate the def-use chains MemorySSA
987 /// gives you, or otherwise produce better information than MemorySSA alone
988 /// provides.
989 /// In particular, while the def-use chains provide basic information, and are
990 /// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
991 /// MemoryUse as AliasAnalysis considers it, a user may want better or other
992 /// information. For example, they may want to use SCEV info to further
993 /// disambiguate memory accesses, or they may want the nearest dominating
994 /// may-aliasing MemoryDef for a call or a store. This API provides a
995 /// standardized interface for getting and using that info.
996 class MemorySSAWalker {
997 public:
998  MemorySSAWalker(MemorySSA *);
999  virtual ~MemorySSAWalker() = default;
1000 
1001  using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;
1002 
1003  /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
1004  /// will give you the nearest dominating MemoryAccess that Mod's the location
1005  /// the instruction accesses (by skipping any def which AA can prove does not
1006  /// alias the location(s) accessed by the instruction given).
1007  ///
1008  /// Note that this will return a single access, and it must dominate the
1009  /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
1010  /// this will return the MemoryPhi, not the operand. This means that
1011  /// given:
1012  /// if (a) {
1013  /// 1 = MemoryDef(liveOnEntry)
1014  /// store %a
1015  /// } else {
1016  /// 2 = MemoryDef(liveOnEntry)
1017  /// store %b
1018  /// }
1019  /// 3 = MemoryPhi(2, 1)
1020  /// MemoryUse(3)
1021  /// load %a
1022  ///
1023  /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
1024  /// in the if (a) branch.
1025  MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
1026  MemoryAccess *MA = MSSA->getMemoryAccess(I);
1027  assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
1028  return getClobberingMemoryAccess(MA);
1029  }
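// A minimal sketch of querying a walker for the nearest dominating clobber of
// a memory instruction; `nearestClobber`, `MSSA`, and `I` are illustrative
// assumptions, and `I` must already have an associated MemoryAccess:
//
//   llvm::MemoryAccess *nearestClobber(llvm::MemorySSA &MSSA,
//                                      llvm::Instruction &I) {
//     llvm::MemorySSAWalker *Walker = MSSA.getWalker();
//     // Skips defs that AA can prove do not alias the location(s) I accesses.
//     return Walker->getClobberingMemoryAccess(&I);
//   }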
1030 
1031  /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
1032  /// but takes a MemoryAccess instead of an Instruction.
1033  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;
1034 
1035  /// Given a potentially clobbering memory access and a new location,
1036  /// calling this will give you the nearest dominating clobbering MemoryAccess
1037  /// (by skipping non-aliasing def links).
1038  ///
1039  /// This version of the function is mainly used to disambiguate phi translated
1040  /// pointers, where the value of a pointer may have changed from the initial
1041  /// memory access. Note that this expects to be handed either a MemoryUse,
1042  /// or an already potentially clobbering access. Unlike the above API, if
1043  /// given a MemoryDef that clobbers the pointer as the starting access, it
1044  /// will return that MemoryDef, whereas the above would return the clobber
1045  /// starting from the use side of the memory def.
1046  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
1047  const MemoryLocation &) = 0;
1048 
1049  /// Given a memory access, invalidate anything this walker knows about
1050  /// that access.
1051  /// This API is used by walkers that store information to perform basic cache
1052  /// invalidation. This will be called by MemorySSA at appropriate times for
1053  /// the walker it uses or returns.
1054  virtual void invalidateInfo(MemoryAccess *) {}
1055 
1056 protected:
1057  friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
1058  // constructor.
1059  MemorySSA *MSSA;
1060 };
1061 
1062 /// A MemorySSAWalker that does no alias queries, or anything else. It
1063 /// simply returns the links as they were constructed by the builder.
1064 class DoNothingMemorySSAWalker final : public MemorySSAWalker {
1065 public:
1066  // Keep the overrides below from hiding the Instruction overload of
1067  // getClobberingMemoryAccess.
1068  using MemorySSAWalker::getClobberingMemoryAccess;
1069 
1070  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
1071  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
1072  const MemoryLocation &) override;
1073 };
1074 
1075 using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
1076 using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
1077 
1078 /// Iterator base class used to implement const and non-const iterators
1079 /// over the defining accesses of a MemoryAccess.
1080 template <class T>
1081 class memoryaccess_def_iterator_base
1082  : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
1083  std::forward_iterator_tag, T, ptrdiff_t, T *,
1084  T *> {
1085  using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;
1086 
1087 public:
1088  memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
1089  memoryaccess_def_iterator_base() = default;
1090 
1091  bool operator==(const memoryaccess_def_iterator_base &Other) const {
1092  return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
1093  }
1094 
1095  // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
1096  // block from the operand in constant time (In a PHINode, the uselist has
1097  // both, so it's just subtraction). We provide it as part of the
1098  // iterator to avoid callers having to linear walk to get the block.
1099  // If the operation becomes constant time on MemoryPHI's, this bit of
1100  // abstraction breaking should be removed.
1101  BasicBlock *getPhiArgBlock() const {
1102  MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
1103  assert(MP && "Tried to get phi arg block when not iterating over a PHI");
1104  return MP->getIncomingBlock(ArgNo);
1105  }
1106 
1107  typename BaseT::iterator::pointer operator*() const {
1108  assert(Access && "Tried to access past the end of our iterator");
1109  // Go to the first argument for phis, and the defining access for everything
1110  // else.
1111  if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
1112  return MP->getIncomingValue(ArgNo);
1113  return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
1114  }
1115 
1116  using BaseT::operator++;
1117  memoryaccess_def_iterator_base &operator++() {
1118  assert(Access && "Hit end of iterator");
1119  if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
1120  if (++ArgNo >= MP->getNumIncomingValues()) {
1121  ArgNo = 0;
1122  Access = nullptr;
1123  }
1124  } else {
1125  Access = nullptr;
1126  }
1127  return *this;
1128  }
1129 
1130 private:
1131  T *Access = nullptr;
1132  unsigned ArgNo = 0;
1133 };
1134 
1135 inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
1136  return memoryaccess_def_iterator(this);
1137 }
1138 
1139 inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
1140  return const_memoryaccess_def_iterator(this);
1141 }
1142 
1143 inline memoryaccess_def_iterator MemoryAccess::defs_end() {
1144  return memoryaccess_def_iterator();
1145 }
1146 
1147 inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
1148  return const_memoryaccess_def_iterator();
1149 }
1150 
1151 /// GraphTraits for a MemoryAccess, which walks defs in the normal case,
1152 /// and uses in the inverse case.
1153 template <> struct GraphTraits<MemoryAccess *> {
1154  using NodeRef = MemoryAccess *;
1155  using ChildIteratorType = memoryaccess_def_iterator;
1156 
1157  static NodeRef getEntryNode(NodeRef N) { return N; }
1158  static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
1159  static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
1160 };
1161 
1162 template <> struct GraphTraits<Inverse<MemoryAccess *>> {
1163  using NodeRef = MemoryAccess *;
1164  using ChildIteratorType = MemoryAccess::iterator;
1165 
1166  static NodeRef getEntryNode(NodeRef N) { return N; }
1167  static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
1168  static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
1169 };
1170 
1171 /// Provide an iterator that walks defs, giving both the memory access,
1172 /// and the current pointer location, updating the pointer location as it
1173 /// changes due to phi node translation.
1174 ///
1175 /// This iterator, while somewhat specialized, is what most clients actually
1176 /// want when walking upwards through MemorySSA def chains. It takes a pair of
1177 /// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
1178 /// memory location through phi nodes for the user.
1179 class upward_defs_iterator
1180  : public iterator_facade_base<upward_defs_iterator,
1181  std::forward_iterator_tag,
1182  const MemoryAccessPair> {
1183  using BaseT = upward_defs_iterator::iterator_facade_base;
1184 
1185 public:
1186  upward_defs_iterator(const MemoryAccessPair &Info)
1187  : DefIterator(Info.first), Location(Info.second),
1188  OriginalAccess(Info.first) {
1189  CurrentPair.first = nullptr;
1190 
1191  WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
1192  fillInCurrentPair();
1193  }
1194 
1195  upward_defs_iterator() { CurrentPair.first = nullptr; }
1196 
1197  bool operator==(const upward_defs_iterator &Other) const {
1198  return DefIterator == Other.DefIterator;
1199  }
1200 
1201  BaseT::iterator::reference operator*() const {
1202  assert(DefIterator != OriginalAccess->defs_end() &&
1203  "Tried to access past the end of our iterator");
1204  return CurrentPair;
1205  }
1206 
1207  using BaseT::operator++;
1208  upward_defs_iterator &operator++() {
1209  assert(DefIterator != OriginalAccess->defs_end() &&
1210  "Tried to access past the end of the iterator");
1211  ++DefIterator;
1212  if (DefIterator != OriginalAccess->defs_end())
1213  fillInCurrentPair();
1214  return *this;
1215  }
1216 
1217  BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }
1218 
1219 private:
1220  void fillInCurrentPair() {
1221  CurrentPair.first = *DefIterator;
1222  if (WalkingPhi && Location.Ptr) {
1223  PHITransAddr Translator(
1224  const_cast<Value *>(Location.Ptr),
1225  OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
1226  if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
1227  DefIterator.getPhiArgBlock(), nullptr,
1228  false))
1229  if (Translator.getAddr() != Location.Ptr) {
1230  CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
1231  return;
1232  }
1233  }
1234  CurrentPair.second = Location;
1235  }
1236 
1237  MemoryAccessPair CurrentPair;
1238  memoryaccess_def_iterator DefIterator;
1239  MemoryLocation Location;
1240  MemoryAccess *OriginalAccess = nullptr;
1241  bool WalkingPhi = false;
1242 };
1243 
1244 inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
1245  return upward_defs_iterator(Pair);
1246 }
1247 
1248 inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
1249 
1250 inline iterator_range<upward_defs_iterator>
1251 upward_defs(const MemoryAccessPair &Pair) {
1252  return make_range(upward_defs_begin(Pair), upward_defs_end());
1253 }
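// A minimal sketch of using upward_defs() above to visit each defining access
// of an access paired with a phi-translated location; `visitUpwardDefs`,
// `Start`, and `Loc` are illustrative assumptions:
//
//   void visitUpwardDefs(llvm::MemoryAccess *Start,
//                        const llvm::MemoryLocation &Loc) {
//     for (const llvm::MemoryAccessPair &P : llvm::upward_defs({Start, Loc})) {
//       // P.first is a defining access (one per incoming edge for a phi);
//       // P.second is Loc, translated through any phi into that predecessor.
//       (void)P;
//     }
//   }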
1254 
1255 /// Walks the defining accesses of MemoryDefs. Stops after we hit something that
1256 /// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
1257 /// comparing against a null def_chain_iterator, this will compare equal only
1258 /// after walking said Phi/liveOnEntry.
1259 ///
1260 /// The UseOptimizedChain flag specifies whether to walk the clobbering
1261 /// access chain, or all the accesses.
1262 ///
1263 /// Normally, MemoryDefs are all just def/use linked together, so a def_chain on
1264 /// a MemoryDef will walk all MemoryDefs above it in the program until it hits
1265 /// a phi node. The optimized chain walks the clobbering access of a store.
1266 /// So if you are just trying to find, given a store, what the next
1267 /// thing that would clobber the same memory is, you want the optimized chain.
1268 template <class T, bool UseOptimizedChain = false>
1269 struct def_chain_iterator
1270  : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
1271  std::forward_iterator_tag, MemoryAccess *> {
1272  def_chain_iterator() : MA(nullptr) {}
1273  def_chain_iterator(T MA) : MA(MA) {}
1274 
1275  T operator*() const { return MA; }
1276 
1277  def_chain_iterator &operator++() {
1278  // N.B. liveOnEntry has a null defining access.
1279  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
1280  if (UseOptimizedChain && MUD->isOptimized())
1281  MA = MUD->getOptimized();
1282  else
1283  MA = MUD->getDefiningAccess();
1284  } else {
1285  MA = nullptr;
1286  }
1287 
1288  return *this;
1289  }
1290 
1291  bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }
1292 
1293 private:
1294  T MA;
1295 };
1296 
1297 template <class T>
1298 inline iterator_range<def_chain_iterator<T>>
1299 def_chain(T MA, MemoryAccess *UpTo = nullptr) {
1300 #ifdef EXPENSIVE_CHECKS
1301  assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
1302  "UpTo isn't in the def chain!");
1303 #endif
1304  return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
1305 }
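// A minimal sketch of walking def_chain() to count the accesses visited
// starting at `MA` (the walk stops after the first MemoryPhi or liveOnEntry);
// `defsAbove` and `MA` are illustrative assumptions:
//
//   unsigned defsAbove(llvm::MemoryAccess *MA) {
//     unsigned N = 0;
//     for (llvm::MemoryAccess *A : llvm::def_chain(MA)) {
//       (void)A; // each step follows getDefiningAccess()
//       ++N;
//     }
//     return N;
//   }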
1306 
1307 template <class T>
1308 inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
1309  return make_range(def_chain_iterator<T, true>(MA),
1310  def_chain_iterator<T, true>(nullptr));
1311 }
1312 
1313 } // end namespace llvm
1314 
1315 #endif // LLVM_ANALYSIS_MEMORYSSA_H