LCOV - code coverage report
Current view: top level - include/llvm/Transforms/Utils - MemorySSA.h (source / functions)
Test:         llvm-toolchain.info
Date:         2017-04-12 01:37:12

Coverage:     Lines:     147 hit / 159 total  (92.5 %)
              Functions:  31 hit /  42 total  (73.8 %)

Legend: Lines: hit | not hit

          Line data    Source code
       1             : //===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
       2             : //
       3             : //                     The LLVM Compiler Infrastructure
       4             : //
       5             : // This file is distributed under the University of Illinois Open Source
       6             : // License. See LICENSE.TXT for details.
       7             : //
       8             : //===----------------------------------------------------------------------===//
       9             : ///
      10             : /// \file
      11             : /// \brief This file exposes an interface to building/using memory SSA to
      12             : /// walk memory instructions using a use/def graph.
      13             : ///
      14             : /// Memory SSA class builds an SSA form that links together memory access
      15             : /// instructions such as loads, stores, atomics, and calls. Additionally, it
       16             : /// does a trivial form of "heap versioning": every time the memory state changes
       17             : /// in the program, we generate a new heap version. It generates
       18             : /// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
      19             : ///
      20             : /// As a trivial example,
      21             : /// define i32 @main() #0 {
      22             : /// entry:
      23             : ///   %call = call noalias i8* @_Znwm(i64 4) #2
      24             : ///   %0 = bitcast i8* %call to i32*
      25             : ///   %call1 = call noalias i8* @_Znwm(i64 4) #2
      26             : ///   %1 = bitcast i8* %call1 to i32*
      27             : ///   store i32 5, i32* %0, align 4
      28             : ///   store i32 7, i32* %1, align 4
      29             : ///   %2 = load i32* %0, align 4
      30             : ///   %3 = load i32* %1, align 4
      31             : ///   %add = add nsw i32 %2, %3
      32             : ///   ret i32 %add
      33             : /// }
      34             : ///
      35             : /// Will become
      36             : /// define i32 @main() #0 {
      37             : /// entry:
      38             : ///   ; 1 = MemoryDef(0)
      39             : ///   %call = call noalias i8* @_Znwm(i64 4) #3
      40             : ///   %2 = bitcast i8* %call to i32*
      41             : ///   ; 2 = MemoryDef(1)
      42             : ///   %call1 = call noalias i8* @_Znwm(i64 4) #3
      43             : ///   %4 = bitcast i8* %call1 to i32*
      44             : ///   ; 3 = MemoryDef(2)
      45             : ///   store i32 5, i32* %2, align 4
      46             : ///   ; 4 = MemoryDef(3)
      47             : ///   store i32 7, i32* %4, align 4
      48             : ///   ; MemoryUse(3)
      49             : ///   %7 = load i32* %2, align 4
      50             : ///   ; MemoryUse(4)
      51             : ///   %8 = load i32* %4, align 4
      52             : ///   %add = add nsw i32 %7, %8
      53             : ///   ret i32 %add
      54             : /// }
      55             : ///
       56             : /// Given this form, all the stores that could ever affect the load at %8 can be
       57             : /// found by using the MemoryUse associated with it, and walking from use to
      58             : /// def until you hit the top of the function.
      59             : ///
      60             : /// Each def also has a list of users associated with it, so you can walk from
      61             : /// both def to users, and users to defs. Note that we disambiguate MemoryUses,
      62             : /// but not the RHS of MemoryDefs. You can see this above at %7, which would
      63             : /// otherwise be a MemoryUse(4). Being disambiguated means that for a given
      64             : /// store, all the MemoryUses on its use lists are may-aliases of that store
      65             : /// (but the MemoryDefs on its use list may not be).
      66             : ///
      67             : /// MemoryDefs are not disambiguated because it would require multiple reaching
       68             : /// definitions, which would require multiple phis, and multiple MemoryAccesses
      69             : /// per instruction.
      70             : //===----------------------------------------------------------------------===//
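
As a hedged illustration of the walk described above (an editor's sketch, not part of the header): follow the def chain upward from a load, assuming a MemorySSA instance has already been built for the enclosing function and that the caller includes this header plus "llvm/IR/Instructions.h". The helper name walkDefChain is hypothetical.

    // Walk from the load's MemoryUse toward the top of the function, stopping
    // at the live-on-entry definition or at the first MemoryPhi encountered.
    static void walkDefChain(llvm::MemorySSA &MSSA, llvm::LoadInst *Load) {
      using namespace llvm;
      MemoryAccess *MA = MSSA.getMemoryAccess(Load);
      if (!MA)
        return; // MemorySSA does not model this instruction's memory behavior
      while (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(MA)) {
        MemoryAccess *Def = UseOrDef->getDefiningAccess();
        if (MSSA.isLiveOnEntryDef(Def))
          break;   // reached the top of the function
        MA = Def;  // another MemoryDef, or a MemoryPhi (the loop then exits)
      }
    }
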
      71             : 
      72             : #ifndef LLVM_TRANSFORMS_UTILS_MEMORYSSA_H
      73             : #define LLVM_TRANSFORMS_UTILS_MEMORYSSA_H
      74             : 
      75             : #include "llvm/ADT/DenseMap.h"
      76             : #include "llvm/ADT/GraphTraits.h"
      77             : #include "llvm/ADT/SmallPtrSet.h"
      78             : #include "llvm/ADT/SmallVector.h"
      79             : #include "llvm/ADT/ilist.h"
      80             : #include "llvm/ADT/ilist_node.h"
      81             : #include "llvm/ADT/iterator.h"
      82             : #include "llvm/ADT/iterator_range.h"
      83             : #include "llvm/Analysis/AliasAnalysis.h"
      84             : #include "llvm/Analysis/MemoryLocation.h"
      85             : #include "llvm/Analysis/PHITransAddr.h"
      86             : #include "llvm/IR/BasicBlock.h"
      87             : #include "llvm/IR/Dominators.h"
      88             : #include "llvm/IR/Module.h"
      89             : #include "llvm/IR/OperandTraits.h"
      90             : #include "llvm/IR/Type.h"
      91             : #include "llvm/IR/Use.h"
      92             : #include "llvm/IR/User.h"
      93             : #include "llvm/IR/Value.h"
      94             : #include "llvm/Pass.h"
      95             : #include "llvm/Support/Casting.h"
      96             : #include "llvm/Support/ErrorHandling.h"
      97             : #include <algorithm>
      98             : #include <cassert>
      99             : #include <cstddef>
     100             : #include <iterator>
     101             : #include <memory>
     102             : #include <utility>
     103             : 
     104             : namespace llvm {
     105             : 
     106             : class Function;
     107             : class Instruction;
     108             : class MemoryAccess;
     109             : class LLVMContext;
     110             : class raw_ostream;
     111             : namespace MSSAHelpers {
     112             : struct AllAccessTag {};
     113             : struct DefsOnlyTag {};
     114             : }
     115             : 
     116             : enum {
     117             :   // Used to signify what the default invalid ID is for MemoryAccess's
     118             :   // getID()
     119             :   INVALID_MEMORYACCESS_ID = 0
     120             : };
     121             : 
     122             : template <class T> class memoryaccess_def_iterator_base;
     123             : using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
     124             : using const_memoryaccess_def_iterator =
     125             :     memoryaccess_def_iterator_base<const MemoryAccess>;
     126             : 
      127             : /// \brief The base for all memory accesses. All memory accesses in a block are
      128             : /// linked together using an intrusive list.
     129             : class MemoryAccess
     130             :     : public User,
     131             :       public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
     132             :       public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
     133             : public:
     134             :   using AllAccessType =
     135             :       ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
     136             :   using DefsOnlyType =
     137             :       ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
     138             : 
     139             :   // Methods for support type inquiry through isa, cast, and
     140             :   // dyn_cast
     141             :   static inline bool classof(const Value *V) {
     142             :     unsigned ID = V->getValueID();
     143             :     return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
     144             :   }
     145             : 
     146             :   MemoryAccess(const MemoryAccess &) = delete;
     147             :   MemoryAccess &operator=(const MemoryAccess &) = delete;
     148             :   ~MemoryAccess() override;
     149             : 
     150             :   void *operator new(size_t, unsigned) = delete;
     151             :   void *operator new(size_t) = delete;
     152             : 
     153             :   BasicBlock *getBlock() const { return Block; }
     154             : 
     155             :   virtual void print(raw_ostream &OS) const = 0;
     156             :   virtual void dump() const;
     157             : 
     158             :   /// \brief The user iterators for a memory access
     159             :   typedef user_iterator iterator;
     160             :   typedef const_user_iterator const_iterator;
     161             : 
     162             :   /// \brief This iterator walks over all of the defs in a given
     163             :   /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
     164             :   /// MemoryUse/MemoryDef, this walks the defining access.
     165             :   memoryaccess_def_iterator defs_begin();
     166             :   const_memoryaccess_def_iterator defs_begin() const;
     167             :   memoryaccess_def_iterator defs_end();
     168             :   const_memoryaccess_def_iterator defs_end() const;
     169             : 
     170             :   /// \brief Get the iterators for the all access list and the defs only list
     171             :   /// We default to the all access list.
     172             :   AllAccessType::self_iterator getIterator() {
     173          12 :     return this->AllAccessType::getIterator();
     174             :   }
     175             :   AllAccessType::const_self_iterator getIterator() const {
     176             :     return this->AllAccessType::getIterator();
     177             :   }
     178             :   AllAccessType::reverse_self_iterator getReverseIterator() {
     179           0 :     return this->AllAccessType::getReverseIterator();
     180             :   }
     181             :   AllAccessType::const_reverse_self_iterator getReverseIterator() const {
     182             :     return this->AllAccessType::getReverseIterator();
     183             :   }
     184             :   DefsOnlyType::self_iterator getDefsIterator() {
     185          14 :     return this->DefsOnlyType::getIterator();
     186             :   }
     187             :   DefsOnlyType::const_self_iterator getDefsIterator() const {
     188             :     return this->DefsOnlyType::getIterator();
     189             :   }
     190             :   DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
     191          22 :     return this->DefsOnlyType::getReverseIterator();
     192             :   }
     193             :   DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
     194             :     return this->DefsOnlyType::getReverseIterator();
     195             :   }
     196             : 
     197             : protected:
     198             :   friend class MemorySSA;
     199             :   friend class MemoryUseOrDef;
     200             :   friend class MemoryUse;
     201             :   friend class MemoryDef;
     202             :   friend class MemoryPhi;
     203             : 
     204             :   /// \brief Used by MemorySSA to change the block of a MemoryAccess when it is
     205             :   /// moved.
     206           5 :   void setBlock(BasicBlock *BB) { Block = BB; }
     207             : 
     208             :   /// \brief Used for debugging and tracking things about MemoryAccesses.
     209             :   /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
     210             :   virtual unsigned getID() const = 0;
     211             : 
     212      267242 :   MemoryAccess(LLVMContext &C, unsigned Vty, BasicBlock *BB,
     213             :                unsigned NumOperands)
     214     1068968 :       : User(Type::getVoidTy(C), Vty, nullptr, NumOperands), Block(BB) {}
     215             : 
     216             : private:
     217             :   BasicBlock *Block;
     218             : };
     219             : 
     220             : inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
     221         432 :   MA.print(OS);
     222             :   return OS;
     223             : }
     224             : 
     225             : /// \brief Class that has the common methods + fields of memory uses/defs. It's
     226             : /// a little awkward to have, but there are many cases where we want either a
     227             : /// use or def, and there are many cases where uses are needed (defs aren't
     228             : /// acceptable), and vice-versa.
     229             : ///
     230             : /// This class should never be instantiated directly; make a MemoryUse or
     231             : /// MemoryDef instead.
     232      241538 : class MemoryUseOrDef : public MemoryAccess {
     233             : public:
     234             :   void *operator new(size_t, unsigned) = delete;
     235             :   void *operator new(size_t) = delete;
     236             : 
     237             :   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
     238             : 
     239             :   /// \brief Get the instruction that this MemoryUse represents.
     240             :   Instruction *getMemoryInst() const { return MemoryInst; }
     241             : 
     242             :   /// \brief Get the access that produces the memory state used by this Use.
     243      227333 :   MemoryAccess *getDefiningAccess() const { return getOperand(0); }
     244             : 
     245             :   static inline bool classof(const Value *MA) {
     246      707064 :     return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
     247             :   }
     248             : 
     249             :   // Sadly, these have to be public because they are needed in some of the
     250             :   // iterators.
     251             :   virtual bool isOptimized() const = 0;
     252             :   virtual MemoryAccess *getOptimized() const = 0;
     253             :   virtual void setOptimized(MemoryAccess *) = 0;
     254             : 
     255             :   /// \brief Reset the ID of what this MemoryUse was optimized to, causing it to
     256             :   /// be rewalked by the walker if necessary.
     257             :   /// This really should only be called by tests.
     258             :   virtual void resetOptimized() = 0;
     259             : 
     260             : protected:
     261             :   friend class MemorySSA;
     262             :   friend class MemorySSAUpdater;
     263             :   MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
     264             :                  Instruction *MI, BasicBlock *BB)
     265      241538 :       : MemoryAccess(C, Vty, BB, 1), MemoryInst(MI) {
     266      241538 :     setDefiningAccess(DMA);
     267             :   }
     268             :   void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false) {
     269             :     if (!Optimized) {
     270             :       setOperand(0, DMA);
     271             :       return;
     272             :     }
     273       20073 :     setOptimized(DMA);
     274             :   }
     275             : 
     276             : private:
     277             :   Instruction *MemoryInst;
     278             : };
     279             : 
     280             : template <>
     281             : struct OperandTraits<MemoryUseOrDef>
     282             :     : public FixedNumOperandTraits<MemoryUseOrDef, 1> {};
     283     1433166 : DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
     284             : 
     285             : /// \brief Represents read-only accesses to memory
     286             : ///
     287             : /// In particular, the set of Instructions that will be represented by
     288             : /// MemoryUse's is exactly the set of Instructions for which
     289             : /// AliasAnalysis::getModRefInfo returns "Ref".
     290       50198 : class MemoryUse final : public MemoryUseOrDef {
     291             : public:
     292             :   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
     293             : 
     294       25099 :   MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
     295       50198 :       : MemoryUseOrDef(C, DMA, MemoryUseVal, MI, BB), OptimizedID(0) {}
     296             : 
     297             :   // allocate space for exactly one operand
     298       25099 :   void *operator new(size_t s) { return User::operator new(s, 1); }
     299             :   void *operator new(size_t, unsigned) = delete;
     300             : 
     301             :   static inline bool classof(const Value *MA) {
     302      242442 :     return MA->getValueID() == MemoryUseVal;
     303             :   }
     304             : 
     305             :   void print(raw_ostream &OS) const override;
     306             : 
     307       22314 :   virtual void setOptimized(MemoryAccess *DMA) override {
     308       22314 :     OptimizedID = DMA->getID();
     309       22314 :     setOperand(0, DMA);
     310       22314 :   }
     311             : 
     312        2662 :   virtual bool isOptimized() const override {
     313        7986 :     return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
     314             :   }
     315             : 
     316         421 :   virtual MemoryAccess *getOptimized() const override {
     317         842 :     return getDefiningAccess();
     318             :   }
     319           8 :   virtual void resetOptimized() override {
     320           9 :     OptimizedID = INVALID_MEMORYACCESS_ID;
     321           8 :   }
     322             : 
     323             : protected:
     324             :   friend class MemorySSA;
     325             : 
     326           0 :   unsigned getID() const override {
     327           0 :     llvm_unreachable("MemoryUses do not have IDs");
     328             :   }
     329             : 
     330             : private:
     331             :   unsigned int OptimizedID;
     332             : };
     333             : 
     334             : template <>
     335             : struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
     336       44628 : DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
     337             : 
     338             : /// \brief Represents a read-write access to memory, whether it is a must-alias,
     339             : /// or a may-alias.
     340             : ///
     341             : /// In particular, the set of Instructions that will be represented by
     342             : /// MemoryDef's is exactly the set of Instructions for which
     343             : /// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
     344             : /// Note that, in order to provide def-def chains, all defs also have a use
     345             : /// associated with them. This use points to the nearest reaching
     346             : /// MemoryDef/MemoryPhi.
     347      432878 : class MemoryDef final : public MemoryUseOrDef {
     348             : public:
     349             :   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
     350             : 
     351      216439 :   MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
     352             :             unsigned Ver)
     353      216439 :       : MemoryUseOrDef(C, DMA, MemoryDefVal, MI, BB), ID(Ver),
     354      432878 :         Optimized(nullptr), OptimizedID(INVALID_MEMORYACCESS_ID) {}
     355             : 
     356             :   // allocate space for exactly one operand
     357      216439 :   void *operator new(size_t s) { return User::operator new(s, 1); }
     358             :   void *operator new(size_t, unsigned) = delete;
     359             : 
     360             :   static inline bool classof(const Value *MA) {
     361      529030 :     return MA->getValueID() == MemoryDefVal;
     362             :   }
     363             : 
     364           6 :   virtual void setOptimized(MemoryAccess *MA) override {
     365           6 :     Optimized = MA;
     366          12 :     OptimizedID = getDefiningAccess()->getID();
     367           6 :   }
     368          12 :   virtual MemoryAccess *getOptimized() const override { return Optimized; }
     369          12 :   virtual bool isOptimized() const override {
     370          12 :     return getOptimized() && getDefiningAccess() &&
     371          12 :            OptimizedID == getDefiningAccess()->getID();
     372             :   }
     373         145 :   virtual void resetOptimized() override {
     374         145 :     OptimizedID = INVALID_MEMORYACCESS_ID;
     375         145 :   }
     376             : 
     377             :   void print(raw_ostream &OS) const override;
     378             : 
     379             : protected:
     380             :   friend class MemorySSA;
     381             : 
     382       19561 :   unsigned getID() const override { return ID; }
     383             : 
     384             : private:
     385             :   const unsigned ID;
     386             :   MemoryAccess *Optimized;
     387             :   unsigned int OptimizedID;
     388             : };
     389             : 
     390             : template <>
     391             : struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 1> {};
     392             : DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
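
A hedged sketch of how client code commonly dispatches over the three access kinds (MemoryUse, MemoryDef, and the MemoryPhi defined below); describeAccess is a hypothetical helper, and "llvm/Support/raw_ostream.h" is assumed to be included by the caller.

    static void describeAccess(llvm::MemoryAccess *MA) {
      using namespace llvm;
      if (auto *Def = dyn_cast<MemoryDef>(MA))
        errs() << "writes memory: " << *Def->getMemoryInst() << "\n";
      else if (auto *Use = dyn_cast<MemoryUse>(MA))
        errs() << "reads memory:  " << *Use->getMemoryInst() << "\n";
      else
        errs() << "merge point:   " << *MA << "\n"; // a MemoryPhi
    }
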
     393             : 
     394             : /// \brief Represents phi nodes for memory accesses.
     395             : ///
     396             : /// These have the same semantic as regular phi nodes, with the exception that
     397             : /// only one phi will ever exist in a given basic block.
     398             : /// Guaranteeing one phi per block means guaranteeing there is only ever one
     399             : /// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
     400             : /// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
     401             : /// a MemoryPhi's operands.
     402             : /// That is, given
     403             : /// if (a) {
     404             : ///   store %a
     405             : ///   store %b
     406             : /// }
     407             : /// it *must* be transformed into
     408             : /// if (a) {
     409             : ///    1 = MemoryDef(liveOnEntry)
     410             : ///    store %a
     411             : ///    2 = MemoryDef(1)
     412             : ///    store %b
     413             : /// }
     414             : /// and *not*
     415             : /// if (a) {
     416             : ///    1 = MemoryDef(liveOnEntry)
     417             : ///    store %a
     418             : ///    2 = MemoryDef(liveOnEntry)
     419             : ///    store %b
     420             : /// }
     421             : /// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
     422             : /// end of the branch, and if there are not two phi nodes, one will be
     423             : /// disconnected completely from the SSA graph below that point.
     424             : /// Because MemoryUse's do not generate new definitions, they do not have this
     425             : /// issue.
     426       25704 : class MemoryPhi final : public MemoryAccess {
     427             :   // allocate space for exactly zero operands
     428       25704 :   void *operator new(size_t s) { return User::operator new(s); }
     429             : 
     430             : public:
     431             :   /// Provide fast operand accessors
     432             :   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
     433             : 
     434       25704 :   MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
     435       25704 :       : MemoryAccess(C, MemoryPhiVal, BB, 0), ID(Ver), ReservedSpace(NumPreds) {
     436       51408 :     allocHungoffUses(ReservedSpace);
     437       25704 :   }
     438             : 
     439             :   void *operator new(size_t, unsigned) = delete;
     440             : 
     441             :   // Block iterator interface. This provides access to the list of incoming
     442             :   // basic blocks, which parallels the list of incoming values.
     443             :   typedef BasicBlock **block_iterator;
     444             :   typedef BasicBlock *const *const_block_iterator;
     445             : 
     446             :   block_iterator block_begin() {
     447       58693 :     auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace);
     448           0 :     return reinterpret_cast<block_iterator>(Ref + 1);
     449             :   }
     450             : 
     451             :   const_block_iterator block_begin() const {
     452             :     const auto *Ref =
     453       63258 :         reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace);
     454             :     return reinterpret_cast<const_block_iterator>(Ref + 1);
     455             :   }
     456             : 
     457           4 :   block_iterator block_end() { return block_begin() + getNumOperands(); }
     458             : 
     459             :   const_block_iterator block_end() const {
     460             :     return block_begin() + getNumOperands();
     461             :   }
     462             : 
     463             :   iterator_range<block_iterator> blocks() {
     464             :     return make_range(block_begin(), block_end());
     465             :   }
     466             : 
     467             :   iterator_range<const_block_iterator> blocks() const {
     468             :     return make_range(block_begin(), block_end());
     469             :   }
     470             : 
     471          34 :   op_range incoming_values() { return operands(); }
     472             : 
     473             :   const_op_range incoming_values() const { return operands(); }
     474             : 
     475             :   /// \brief Return the number of incoming edges
     476       62999 :   unsigned getNumIncomingValues() const { return getNumOperands(); }
     477             : 
     478             :   /// \brief Return incoming value number x
     479         208 :   MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
     480             :   void setIncomingValue(unsigned I, MemoryAccess *V) {
     481             :     assert(V && "PHI node got a null value!");
     482       58693 :     setOperand(I, V);
     483             :   }
     484             :   static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
     485             :   static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }
     486             : 
      487             :   /// \brief Return incoming basic block number @p I.
     488       63253 :   BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }
     489             : 
     490             :   /// \brief Return incoming basic block corresponding
     491             :   /// to an operand of the PHI.
     492             :   BasicBlock *getIncomingBlock(const Use &U) const {
     493             :     assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
     494        1197 :     return getIncomingBlock(unsigned(&U - op_begin()));
     495             :   }
     496             : 
     497             :   /// \brief Return incoming basic block corresponding
     498             :   /// to value use iterator.
     499             :   BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
     500             :     return getIncomingBlock(I.getUse());
     501             :   }
     502             : 
     503             :   void setIncomingBlock(unsigned I, BasicBlock *BB) {
     504             :     assert(BB && "PHI node got a null basic block!");
     505       58690 :     block_begin()[I] = BB;
     506             :   }
     507             : 
     508             :   /// \brief Add an incoming value to the end of the PHI list
     509       58690 :   void addIncoming(MemoryAccess *V, BasicBlock *BB) {
     510       58690 :     if (getNumOperands() == ReservedSpace)
     511       31146 :       growOperands(); // Get more space!
     512             :     // Initialize some new operands.
     513      117380 :     setNumHungOffUseOperands(getNumOperands() + 1);
     514      117380 :     setIncomingValue(getNumOperands() - 1, V);
     515      117380 :     setIncomingBlock(getNumOperands() - 1, BB);
     516       58690 :   }
     517             : 
     518             :   /// \brief Return the first index of the specified basic
     519             :   /// block in the value list for this PHI.  Returns -1 if no instance.
     520           3 :   int getBasicBlockIndex(const BasicBlock *BB) const {
     521           8 :     for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
     522           5 :       if (block_begin()[I] == BB)
     523           3 :         return I;
     524             :     return -1;
     525             :   }
     526             : 
     527             :   Value *getIncomingValueForBlock(const BasicBlock *BB) const {
     528             :     int Idx = getBasicBlockIndex(BB);
     529             :     assert(Idx >= 0 && "Invalid basic block argument!");
     530             :     return getIncomingValue(Idx);
     531             :   }
     532             : 
     533             :   static inline bool classof(const Value *V) {
     534      373270 :     return V->getValueID() == MemoryPhiVal;
     535             :   }
     536             : 
     537             :   void print(raw_ostream &OS) const override;
     538             : 
     539             : protected:
     540             :   friend class MemorySSA;
     541             : 
      542             :   /// \brief This is more complicated than the generic
     543             :   /// User::allocHungoffUses, because we have to allocate Uses for the incoming
     544             :   /// values and pointers to the incoming blocks, all in one allocation.
     545             :   void allocHungoffUses(unsigned N) {
     546       25704 :     User::allocHungoffUses(N, /* IsPhi */ true);
     547             :   }
     548             : 
     549        6494 :   unsigned getID() const final { return ID; }
     550             : 
     551             : private:
     552             :   // For debugging only
     553             :   const unsigned ID;
     554             :   unsigned ReservedSpace;
     555             : 
     556             :   /// \brief This grows the operand list in response to a push_back style of
     557             :   /// operation.  This grows the number of ops by 1.5 times.
     558       31146 :   void growOperands() {
     559       31146 :     unsigned E = getNumOperands();
     560             :     // 2 op PHI nodes are VERY common, so reserve at least enough for that.
     561       62292 :     ReservedSpace = std::max(E + E / 2, 2u);
     562       31146 :     growHungoffUses(ReservedSpace, /* IsPhi */ true);
     563       31146 :   }
     564             : };
     565             : 
     566             : template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
     567     1205061 : DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
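
A minimal sketch of inspecting a MemoryPhi's incoming state, one entry per predecessor edge; Phi is assumed to come from MemorySSA::getMemoryAccess(BB) for a block that has a phi, and inspectPhi is a hypothetical helper.

    static void inspectPhi(const llvm::MemoryPhi *Phi) {
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
        llvm::BasicBlock *Pred = Phi->getIncomingBlock(I);       // incoming edge
        llvm::MemoryAccess *Reaching = Phi->getIncomingValue(I); // state on that edge
        (void)Pred;
        (void)Reaching;
      }
    }
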
     568             : 
     569             : class MemorySSAWalker;
     570             : 
     571             : /// \brief Encapsulates MemorySSA, including all data associated with memory
     572             : /// accesses.
     573             : class MemorySSA {
     574             : public:
     575             :   MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
     576             :   ~MemorySSA();
     577             : 
     578             :   MemorySSAWalker *getWalker();
     579             : 
     580             :   /// \brief Given a memory Mod/Ref'ing instruction, get the MemorySSA
      581             :   /// access associated with it. If passed a basic block, this gets the memory phi
     582             :   /// node that exists for that block, if there is one. Otherwise, this will get
     583             :   /// a MemoryUseOrDef.
     584             :   MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
     585             :   MemoryPhi *getMemoryAccess(const BasicBlock *BB) const;
     586             : 
     587             :   void dump() const;
     588             :   void print(raw_ostream &) const;
     589             : 
     590             :   /// \brief Return true if \p MA represents the live on entry value
     591             :   ///
     592             :   /// Loads and stores from pointer arguments and other global values may be
     593             :   /// defined by memory operations that do not occur in the current function, so
     594             :   /// they may be live on entry to the function. MemorySSA represents such
     595             :   /// memory state by the live on entry definition, which is guaranteed to occur
     596             :   /// before any other memory access in the function.
     597             :   inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
     598      176094 :     return MA == LiveOnEntryDef.get();
     599             :   }
     600             : 
     601             :   inline MemoryAccess *getLiveOnEntryDef() const {
     602      101718 :     return LiveOnEntryDef.get();
     603             :   }
     604             : 
      605             :   // Sadly, iplists, by default, own and delete pointers added to the
     606             :   // list. It's not currently possible to have two iplists for the same type,
     607             :   // where one owns the pointers, and one does not. This is because the traits
     608             :   // are per-type, not per-tag.  If this ever changes, we should make the
     609             :   // DefList an iplist.
     610             :   using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
     611             :   using DefsList =
     612             :       simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
     613             : 
     614             :   /// \brief Return the list of MemoryAccess's for a given basic block.
     615             :   ///
     616             :   /// This list is not modifiable by the user.
     617             :   const AccessList *getBlockAccesses(const BasicBlock *BB) const {
     618         317 :     return getWritableBlockAccesses(BB);
     619             :   }
     620             : 
     621             :   /// \brief Return the list of MemoryDef's and MemoryPhi's for a given basic
     622             :   /// block.
     623             :   ///
     624             :   /// This list is not modifiable by the user.
     625             :   const DefsList *getBlockDefs(const BasicBlock *BB) const {
     626       18032 :     return getWritableBlockDefs(BB);
     627             :   }
     628             : 
     629             :   /// \brief Given two memory accesses in the same basic block, determine
     630             :   /// whether MemoryAccess \p A dominates MemoryAccess \p B.
     631             :   bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
     632             : 
     633             :   /// \brief Given two memory accesses in potentially different blocks,
     634             :   /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
     635             :   bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
     636             : 
     637             :   /// \brief Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
     638             :   /// dominates Use \p B.
     639             :   bool dominates(const MemoryAccess *A, const Use &B) const;
     640             : 
     641             :   /// \brief Verify that MemorySSA is self consistent (IE definitions dominate
     642             :   /// all uses, uses appear in the right places).  This is used by unit tests.
     643             :   void verifyMemorySSA() const;
     644             : 
     645             :   /// Used in various insertion functions to specify whether we are talking
     646             :   /// about the beginning or end of a block.
     647             :   enum InsertionPlace { Beginning, End };
     648             : 
     649             : protected:
      650             :   // Used by the Memory SSA annotator, dumpers, and wrapper pass
     651             :   friend class MemorySSAAnnotatedWriter;
     652             :   friend class MemorySSAPrinterLegacyPass;
     653             :   friend class MemorySSAUpdater;
     654             : 
     655             :   void verifyDefUses(Function &F) const;
     656             :   void verifyDomination(Function &F) const;
     657             :   void verifyOrdering(Function &F) const;
     658             : 
     659             :   // This is used by the use optimizer and updater.
     660      120256 :   AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
     661      120256 :     auto It = PerBlockAccesses.find(BB);
     662      337474 :     return It == PerBlockAccesses.end() ? nullptr : It->second.get();
     663             :   }
     664             : 
     665             :   // This is used by the use optimizer and updater.
     666       18076 :   DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
     667       18076 :     auto It = PerBlockDefs.find(BB);
     668       52452 :     return It == PerBlockDefs.end() ? nullptr : It->second.get();
     669             :   }
     670             : 
      671             :   // These are used by the updater to perform various internal MemorySSA
      672             :   // machinations.  They do not always leave the IR in a correct state, and
      673             :   // rely on the updater to fix up what they break, so they are not public.
     674             : 
     675             :   void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
     676             :   void moveTo(MemoryUseOrDef *What, BasicBlock *BB, InsertionPlace Point);
     677             :   // Rename the dominator tree branch rooted at BB.
     678           1 :   void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
     679             :                   SmallPtrSetImpl<BasicBlock *> &Visited) {
     680           1 :     renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
     681           1 :   }
     682             :   void removeFromLookups(MemoryAccess *);
     683             :   void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
     684             :   void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
     685             :                                InsertionPlace);
     686             :   void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
     687             :                              AccessList::iterator);
     688             :   MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *);
     689             : 
     690             : private:
     691             :   class CachingWalker;
     692             :   class OptimizeUses;
     693             : 
     694             :   CachingWalker *getWalkerImpl();
     695             :   void buildMemorySSA();
     696             :   void optimizeUses();
     697             : 
     698             :   void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;
     699             :   using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
     700             :   using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;
     701             : 
     702             :   void
     703             :   determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks);
     704             :   void markUnreachableAsLiveOnEntry(BasicBlock *BB);
     705             :   bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
     706             :   MemoryPhi *createMemoryPhi(BasicBlock *BB);
     707             :   MemoryUseOrDef *createNewAccess(Instruction *);
     708             :   MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
     709             :   void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &,
     710             :                      const DenseMap<const BasicBlock *, unsigned int> &);
     711             :   MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
     712             :   void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
     713             :   void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
     714             :                   SmallPtrSetImpl<BasicBlock *> &Visited,
     715             :                   bool SkipVisited = false, bool RenameAllUses = false);
     716             :   AccessList *getOrCreateAccessList(const BasicBlock *);
     717             :   DefsList *getOrCreateDefsList(const BasicBlock *);
     718             :   void renumberBlock(const BasicBlock *) const;
     719             :   AliasAnalysis *AA;
     720             :   DominatorTree *DT;
     721             :   Function &F;
     722             : 
     723             :   // Memory SSA mappings
     724             :   DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;
     725             :   // These two mappings contain the main block to access/def mappings for
     726             :   // MemorySSA. The list contained in PerBlockAccesses really owns all the
     727             :   // MemoryAccesses.
     728             :   // Both maps maintain the invariant that if a block is found in them, the
     729             :   // corresponding list is not empty, and if a block is not found in them, the
     730             :   // corresponding list is empty.
     731             :   AccessMap PerBlockAccesses;
     732             :   DefsMap PerBlockDefs;
     733             :   std::unique_ptr<MemoryAccess> LiveOnEntryDef;
     734             : 
     735             :   // Domination mappings
     736             :   // Note that the numbering is local to a block, even though the map is
     737             :   // global.
     738             :   mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
     739             :   mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
     740             : 
     741             :   // Memory SSA building info
     742             :   std::unique_ptr<CachingWalker> Walker;
     743             :   unsigned NextID;
     744             : };
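
A hedged sketch of visiting every MemoryAccess in a single basic block via the per-block lists above; note that getBlockAccesses may return null for blocks that contain no memory accesses. visitBlockAccesses is a hypothetical helper.

    static void visitBlockAccesses(const llvm::MemorySSA &MSSA,
                                   const llvm::BasicBlock *BB) {
      if (const auto *Accesses = MSSA.getBlockAccesses(BB))
        for (const llvm::MemoryAccess &MA : *Accesses)
          MA.dump(); // a MemoryPhi first (if any), then uses/defs in order
    }
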
     745             : 
     746             : // Internal MemorySSA utils, for use by MemorySSA classes and walkers
     747             : class MemorySSAUtil {
     748             : protected:
     749             :   friend class MemorySSAWalker;
     750             :   friend class GVNHoist;
     751             :   // This function should not be used by new passes.
     752             :   static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
     753             :                                   AliasAnalysis &AA);
     754             : };
     755             : 
     756             : // This pass does eager building and then printing of MemorySSA. It is used by
     757             : // the tests to be able to build, dump, and verify Memory SSA.
     758          40 : class MemorySSAPrinterLegacyPass : public FunctionPass {
     759             : public:
     760             :   MemorySSAPrinterLegacyPass();
     761             : 
     762             :   bool runOnFunction(Function &) override;
     763             :   void getAnalysisUsage(AnalysisUsage &AU) const override;
     764             : 
     765             :   static char ID;
     766             : };
     767             : 
     768             : /// An analysis that produces \c MemorySSA for a function.
     769             : ///
     770             : class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
     771             :   friend AnalysisInfoMixin<MemorySSAAnalysis>;
     772             : 
     773             :   static AnalysisKey Key;
     774             : 
     775             : public:
     776             :   // Wrap MemorySSA result to ensure address stability of internal MemorySSA
     777             :   // pointers after construction.  Use a wrapper class instead of plain
     778             :   // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
     779         707 :   struct Result {
     780         202 :     Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}
     781         268 :     MemorySSA &getMSSA() { return *MSSA.get(); }
     782             : 
     783             :     std::unique_ptr<MemorySSA> MSSA;
     784             :   };
     785             : 
     786             :   Result run(Function &F, FunctionAnalysisManager &AM);
     787             : };
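
A hedged sketch of consuming this analysis from a new-pass-manager function pass; MyPass is hypothetical and not part of this header, and "llvm/IR/PassManager.h" is assumed to be included.

    llvm::PreservedAnalyses MyPass::run(llvm::Function &F,
                                        llvm::FunctionAnalysisManager &AM) {
      // Request (and, if necessary, compute) MemorySSA for F.
      llvm::MemorySSA &MSSA = AM.getResult<llvm::MemorySSAAnalysis>(F).getMSSA();
      (void)MSSA;
      return llvm::PreservedAnalyses::all();
    }
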
     788             : 
     789             : /// \brief Printer pass for \c MemorySSA.
     790             : class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
     791             :   raw_ostream &OS;
     792             : 
     793             : public:
     794          19 :   explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}
     795             : 
     796             :   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
     797             : };
     798             : 
     799             : /// \brief Verifier pass for \c MemorySSA.
     800             : struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
     801             :   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
     802             : };
     803             : 
     804             : /// \brief Legacy analysis pass which computes \c MemorySSA.
     805        1582 : class MemorySSAWrapperPass : public FunctionPass {
     806             : public:
     807             :   MemorySSAWrapperPass();
     808             : 
     809             :   static char ID;
     810             : 
     811             :   bool runOnFunction(Function &) override;
     812             :   void releaseMemory() override;
     813       65326 :   MemorySSA &getMSSA() { return *MSSA; }
     814             :   const MemorySSA &getMSSA() const { return *MSSA; }
     815             : 
     816             :   void getAnalysisUsage(AnalysisUsage &AU) const override;
     817             : 
     818             :   void verifyAnalysis() const override;
     819             :   void print(raw_ostream &OS, const Module *M = nullptr) const override;
     820             : 
     821             : private:
     822             :   std::unique_ptr<MemorySSA> MSSA;
     823             : };
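
For the legacy pass manager, a hedged sketch using a hypothetical pass MyLegacyPass that requires the wrapper above.

    void MyLegacyPass::getAnalysisUsage(llvm::AnalysisUsage &AU) const {
      AU.addRequired<llvm::MemorySSAWrapperPass>();
      AU.setPreservesAll();
    }

    bool MyLegacyPass::runOnFunction(llvm::Function &F) {
      llvm::MemorySSA &MSSA = getAnalysis<llvm::MemorySSAWrapperPass>().getMSSA();
      (void)MSSA;
      return false; // this sketch does not modify the IR
    }
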
     824             : 
     825             : /// \brief This is the generic walker interface for walkers of MemorySSA.
      826             : /// Walkers are used to further disambiguate the def-use chains
     827             : /// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
     828             : /// you.
     829             : /// In particular, while the def-use chains provide basic information, and are
     830             : /// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
      831             : /// MemoryUse as AliasAnalysis considers it, a user may want better or other
     832             : /// information. In particular, they may want to use SCEV info to further
     833             : /// disambiguate memory accesses, or they may want the nearest dominating
     834             : /// may-aliasing MemoryDef for a call or a store. This API enables a
     835             : /// standardized interface to getting and using that info.
     836             : class MemorySSAWalker {
     837             : public:
     838             :   MemorySSAWalker(MemorySSA *);
     839             :   virtual ~MemorySSAWalker() = default;
     840             : 
     841             :   using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;
     842             : 
     843             :   /// \brief Given a memory Mod/Ref/ModRef'ing instruction, calling this
     844             :   /// will give you the nearest dominating MemoryAccess that Mod's the location
     845             :   /// the instruction accesses (by skipping any def which AA can prove does not
     846             :   /// alias the location(s) accessed by the instruction given).
     847             :   ///
     848             :   /// Note that this will return a single access, and it must dominate the
     849             :   /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
     850             :   /// this will return the MemoryPhi, not the operand. This means that
     851             :   /// given:
     852             :   /// if (a) {
     853             :   ///   1 = MemoryDef(liveOnEntry)
     854             :   ///   store %a
     855             :   /// } else {
     856             :   ///   2 = MemoryDef(liveOnEntry)
     857             :   ///   store %b
     858             :   /// }
     859             :   /// 3 = MemoryPhi(2, 1)
     860             :   /// MemoryUse(3)
     861             :   /// load %a
     862             :   ///
     863             :   /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
     864             :   /// in the if (a) branch.
     865        2670 :   MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
     866        2670 :     MemoryAccess *MA = MSSA->getMemoryAccess(I);
     867             :     assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
     868        2670 :     return getClobberingMemoryAccess(MA);
     869             :   }
     870             : 
     871             :   /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
     872             :   /// but takes a MemoryAccess instead of an Instruction.
     873             :   virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;
     874             : 
     875             :   /// \brief Given a potentially clobbering memory access and a new location,
     876             :   /// calling this will give you the nearest dominating clobbering MemoryAccess
     877             :   /// (by skipping non-aliasing def links).
     878             :   ///
     879             :   /// This version of the function is mainly used to disambiguate phi translated
     880             :   /// pointers, where the value of a pointer may have changed from the initial
     881             :   /// memory access. Note that this expects to be handed either a MemoryUse,
     882             :   /// or an already potentially clobbering access. Unlike the above API, if
     883             :   /// given a MemoryDef that clobbers the pointer as the starting access, it
     884             :   /// will return that MemoryDef, whereas the above would return the clobber
      885             :   /// starting from the use side of the memory def.
     886             :   virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
     887             :                                                   const MemoryLocation &) = 0;
     888             : 
     889             :   /// \brief Given a memory access, invalidate anything this walker knows about
     890             :   /// that access.
     891             :   /// This API is used by walkers that store information to perform basic cache
     892             :   /// invalidation.  This will be called by MemorySSA at appropriate times for
     893             :   /// the walker it uses or returns.
     894           0 :   virtual void invalidateInfo(MemoryAccess *) {}
     895             : 
     896           0 :   virtual void verify(const MemorySSA *MSSA) { assert(MSSA == this->MSSA); }
     897             : 
     898             : protected:
     899             :   friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
     900             :                           // constructor.
     901             :   MemorySSA *MSSA;
     902             : };
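
A minimal sketch of the clobber query described above; MSSA is assumed to have been built for the function containing Load, and nearestClobber is a hypothetical helper.

    static llvm::MemoryAccess *nearestClobber(llvm::MemorySSA &MSSA,
                                              llvm::Instruction *Load) {
      llvm::MemorySSAWalker *Walker = MSSA.getWalker();
      // Skips defs the walker can prove do not alias the location Load reads;
      // the result is a MemoryDef, a MemoryPhi, or the live-on-entry access.
      return Walker->getClobberingMemoryAccess(Load);
    }
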
     903             : 
     904             : /// \brief A MemorySSAWalker that does no alias queries, or anything else. It
     905             : /// simply returns the links as they were constructed by the builder.
     906           0 : class DoNothingMemorySSAWalker final : public MemorySSAWalker {
     907             : public:
     908             :   // Keep the overrides below from hiding the Instruction overload of
     909             :   // getClobberingMemoryAccess.
     910             :   using MemorySSAWalker::getClobberingMemoryAccess;
     911             : 
     912             :   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
     913             :   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
     914             :                                           const MemoryLocation &) override;
     915             : };
     916             : 
     917             : using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
     918             : using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
     919             : 
     920             : /// \brief Iterator base class used to implement const and non-const iterators
     921             : /// over the defining accesses of a MemoryAccess.
     922             : template <class T>
     923             : class memoryaccess_def_iterator_base
     924             :     : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
     925             :                                   std::forward_iterator_tag, T, ptrdiff_t, T *,
     926             :                                   T *> {
     927             :   using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;
     928             : 
     929             : public:
     930       62508 :   memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
     931       94172 :   memoryaccess_def_iterator_base() = default;
     932             : 
     933             :   bool operator==(const memoryaccess_def_iterator_base &Other) const {
     934      157090 :     return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
     935             :   }
     936             : 
     937             :   // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
     938             :   // block from the operand in constant time (In a PHINode, the uselist has
     939             :   // both, so it's just subtraction). We provide it as part of the
     940             :   // iterator to avoid callers having to linear walk to get the block.
     941             :   // If the operation becomes constant time on MemoryPHI's, this bit of
     942             :   // abstraction breaking should be removed.
     943       62854 :   BasicBlock *getPhiArgBlock() const {
     944      125708 :     MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
     945             :     assert(MP && "Tried to get phi arg block when not iterating over a PHI");
     946      125708 :     return MP->getIncomingBlock(ArgNo);
     947             :   }
     948       62918 :   typename BaseT::iterator::pointer operator*() const {
     949             :     assert(Access && "Tried to access past the end of our iterator");
     950             :     // Go to the first argument for phis, and the defining access for everything
     951             :     // else.
     952      125836 :     if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
     953       62918 :       return MP->getIncomingValue(ArgNo);
     954           0 :     return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
     955             :   }
     956             :   using BaseT::operator++;
     957             :   memoryaccess_def_iterator &operator++() {
     958             :     assert(Access && "Hit end of iterator");
     959      125836 :     if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
     960      125836 :       if (++ArgNo >= MP->getNumIncomingValues()) {
     961       31254 :         ArgNo = 0;
     962       31254 :         Access = nullptr;
     963             :       }
     964             :     } else {
     965           0 :       Access = nullptr;
     966             :     }
     967             :     return *this;
     968             :   }
     969             : 
     970             : private:
     971             :   T *Access = nullptr;
     972             :   unsigned ArgNo = 0;
     973             : };
     974             : 
     975             : inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
     976             :   return memoryaccess_def_iterator(this);
     977             : }
     978             : 
     979             : inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
     980             :   return const_memoryaccess_def_iterator(this);
     981             : }
     982             : 
     983             : inline memoryaccess_def_iterator MemoryAccess::defs_end() {
     984      125836 :   return memoryaccess_def_iterator();
     985             : }
     986             : 
     987             : inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
     988             :   return const_memoryaccess_def_iterator();
     989             : }
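
A hedged sketch of how these iterators are typically consumed, assuming `MA` is any valid MemoryAccess in the analyzed function. A MemoryPhi yields one entry per incoming value; a MemoryUse or MemoryDef yields just its single defining access.

    for (memoryaccess_def_iterator DI = MA->defs_begin(), DE = MA->defs_end();
         DI != DE; ++DI) {
      MemoryAccess *Def = *DI;
      if (isa<MemoryPhi>(MA)) {
        // For phis the incoming block is available directly from the iterator,
        // avoiding a linear walk over the phi's operands.
        BasicBlock *IncomingBB = DI.getPhiArgBlock();
        (void)IncomingBB;
      }
      (void)Def;
    }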
     990             : 
     991             : /// \brief GraphTraits for a MemoryAccess, which walks defs in the normal case,
     992             : /// and uses in the inverse case.
     993             : template <> struct GraphTraits<MemoryAccess *> {
     994             :   using NodeRef = MemoryAccess *;
     995             :   using ChildIteratorType = memoryaccess_def_iterator;
     996             : 
     997             :   static NodeRef getEntryNode(NodeRef N) { return N; }
     998             :   static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
     999             :   static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
    1000             : };
    1001             : 
    1002             : template <> struct GraphTraits<Inverse<MemoryAccess *>> {
    1003             :   using NodeRef = MemoryAccess *;
    1004             :   using ChildIteratorType = MemoryAccess::iterator;
    1005             : 
    1006             :   static NodeRef getEntryNode(NodeRef N) { return N; }
    1007             :   static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
    1008             :   static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
    1009             : };
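
With these specializations in place, LLVM's generic graph iterators (for example depth_first from llvm/ADT/DepthFirstIterator.h) apply directly to MemoryAccess pointers; the Inverse specialization walks users instead of defs. A hedged sketch, assuming `Start` is a MemoryAccess * in the analyzed function:

    for (MemoryAccess *MA : depth_first(Start)) {
      // Visits Start and, transitively, everything reachable upwards through
      // defining accesses (phi incoming values included).
      (void)MA;
    }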
    1010             : 
    1011             : /// \brief Provide an iterator that walks defs, giving both the memory access,
    1012             : /// and the current pointer location, updating the pointer location as it
    1013             : /// changes due to phi node translation.
    1014             : ///
    1015             : /// This iterator, while somewhat specialized, is what most clients actually
    1016             : /// want when walking upwards through MemorySSA def chains. It takes a pair of
    1017             : /// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
    1018             : /// memory location through phi nodes for the user.
    1019             : class upward_defs_iterator
    1020             :     : public iterator_facade_base<upward_defs_iterator,
    1021             :                                   std::forward_iterator_tag,
    1022             :                                   const MemoryAccessPair> {
    1023             :   using BaseT = upward_defs_iterator::iterator_facade_base;
    1024             : 
    1025             : public:
    1026       31254 :   upward_defs_iterator(const MemoryAccessPair &Info)
    1027       62508 :       : DefIterator(Info.first), Location(Info.second),
    1028      125016 :         OriginalAccess(Info.first) {
    1029             :     CurrentPair.first = nullptr;
    1030             : 
    1031       62508 :     WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
    1032       31254 :     fillInCurrentPair();
    1033       31254 :   }
    1034             : 
    1035      125016 :   upward_defs_iterator() { CurrentPair.first = nullptr; }
    1036             : 
    1037             :   bool operator==(const upward_defs_iterator &Other) const {
    1038      188344 :     return DefIterator == Other.DefIterator;
    1039             :   }
    1040             : 
    1041             :   BaseT::iterator::reference operator*() const {
    1042             :     assert(DefIterator != OriginalAccess->defs_end() &&
    1043             :            "Tried to access past the end of our iterator");
    1044             :     return CurrentPair;
    1045             :   }
    1046             : 
    1047             :   using BaseT::operator++;
    1048       62918 :   upward_defs_iterator &operator++() {
    1049             :     assert(DefIterator != OriginalAccess->defs_end() &&
    1050             :            "Tried to access past the end of the iterator");
    1051      125836 :     ++DefIterator;
    1052      188754 :     if (DefIterator != OriginalAccess->defs_end())
    1053       31664 :       fillInCurrentPair();
    1054       62918 :     return *this;
    1055             :   }
    1056             : 
    1057             :   BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }
    1058             : 
    1059             : private:
    1060       62918 :   void fillInCurrentPair() {
    1061       62918 :     CurrentPair.first = *DefIterator;
    1062       62918 :     if (WalkingPhi && Location.Ptr) {
    1063             :       PHITransAddr Translator(
    1064       62854 :           const_cast<Value *>(Location.Ptr),
    1065      251416 :           OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
    1066       62854 :       if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
    1067             :                                         DefIterator.getPhiArgBlock(), nullptr,
    1068             :                                         false))
    1069           0 :         if (Translator.getAddr() != Location.Ptr) {
    1070           0 :           CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
    1071           0 :           return;
    1072             :         }
    1073             :     }
    1074       62918 :     CurrentPair.second = Location;
    1075             :   }
    1076             : 
    1077             :   MemoryAccessPair CurrentPair;
    1078             :   memoryaccess_def_iterator DefIterator;
    1079             :   MemoryLocation Location;
    1080             :   MemoryAccess *OriginalAccess = nullptr;
    1081             :   bool WalkingPhi = false;
    1082             : };
    1083             : 
    1084             : inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
    1085       31254 :   return upward_defs_iterator(Pair);
    1086             : }
    1087             : 
    1088       31254 : inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
    1089             : 
    1090             : inline iterator_range<upward_defs_iterator>
    1091             : upward_defs(const MemoryAccessPair &Pair) {
    1092             :   return make_range(upward_defs_begin(Pair), upward_defs_end());
    1093             : }
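
A hedged sketch of the intended use, assuming `MSSA` is a MemorySSA analysis result and `LI` is a LoadInst * from the analyzed function:

    MemoryAccess *Start =
        cast<MemoryUseOrDef>(MSSA.getMemoryAccess(LI))->getDefiningAccess();
    MemoryLocation Loc = MemoryLocation::get(LI);
    for (const MemoryAccessPair &P : upward_defs({Start, Loc})) {
      MemoryAccess *Def = P.first;              // A def or a phi incoming value.
      const MemoryLocation &DefLoc = P.second;  // Phi-translated when crossing phis.
      (void)Def;
      (void)DefLoc;
    }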
    1094             : 
    1095             : /// Walks the defining accesses of MemoryDefs. Stops after we hit something that
    1096             : /// has no defining access (e.g. a MemoryPhi or liveOnEntry). Note that, when
    1097             : /// comparing against a null def_chain_iterator, this will compare equal only
    1098             : /// after walking said Phi/liveOnEntry.
    1099             : ///
    1100             : /// The UseOptimizedChain flag specifies whether to walk the clobbering
    1101             : /// access chain, or all the accesses.
    1102             : ///
    1103             : /// Normally, MemoryDefs are all just def/use linked together, so a def_chain on
    1104             : /// a MemoryDef will walk all MemoryDefs above it in the program until it hits
    1105             : /// a phi node.  The optimized chain instead walks the clobbering access of a
    1106             : /// store, so if, given a store, you just want to find the next thing that
    1107             : /// would clobber the same memory, you want the optimized chain.
    1108             : template <class T, bool UseOptimizedChain = false>
    1109             : struct def_chain_iterator
    1110             :     : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
    1111             :                                   std::forward_iterator_tag, MemoryAccess *> {
    1112             :   def_chain_iterator() : MA(nullptr) {}
    1113      252096 :   def_chain_iterator(T MA) : MA(MA) {}
    1114             : 
    1115             :   T operator*() const { return MA; }
    1116             : 
    1117             :   def_chain_iterator &operator++() {
    1118             :     // N.B. liveOnEntry has a null defining access.
    1119      214280 :     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
    1120             :       if (UseOptimizedChain && MUD->isOptimized())
    1121             :         MA = MUD->getOptimized();
    1122             :       else
    1123             :         MA = MUD->getDefiningAccess();
    1124             :     } else {
    1125             :       MA = nullptr;
    1126             :     }
    1127             : 
    1128             :     return *this;
    1129             :   }
    1130             : 
    1131             :   bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }
    1132             : 
    1133             : private:
    1134             :   T MA;
    1135             : };
    1136             : 
    1137             : template <class T>
    1138             : inline iterator_range<def_chain_iterator<T>>
    1139             : def_chain(T MA, MemoryAccess *UpTo = nullptr) {
    1140             : #ifdef EXPENSIVE_CHECKS
    1141             :   assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
    1142             :          "UpTo isn't in the def chain!");
    1143             : #endif
    1144      189072 :   return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
    1145             : }
    1146             : 
    1147             : template <class T>
    1148             : inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
    1149             :   return make_range(def_chain_iterator<T, true>(MA),
    1150             :                     def_chain_iterator<T, true>(nullptr));
    1151             : }
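
A hedged sketch of walking upwards from a store, assuming `MSSA` is a MemorySSA analysis result and `SI` is a StoreInst * in the analyzed function:

    MemoryAccess *StoreAccess = MSSA.getMemoryAccess(SI);
    for (MemoryAccess *MA : def_chain(StoreAccess)) {
      // Visits the store's MemoryDef and each MemoryDef above it; the chain
      // ends once a MemoryPhi or liveOnEntry has been visited.
      if (isa<MemoryPhi>(MA) || MSSA.isLiveOnEntryDef(MA))
        break;
    }

optimized_def_chain performs the same walk but, for defs whose clobber has already been computed, follows the cached clobbering access instead of the plain defining access, skipping over defs that do not clobber the store's memory.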
    1152             : 
    1153             : } // end namespace llvm
    1154             : 
    1155             : #endif // LLVM_TRANSFORMS_UTILS_MEMORYSSA_H

Generated by: LCOV version 1.13