LCOV - code coverage report
Current view: top level - include/llvm/Analysis - MemorySSA.h (source / functions) Hit Total Coverage
Test: llvm-toolchain.info Lines: 119 139 85.6 %
Date: 2018-07-13 00:08:38 Functions: 20 28 71.4 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : //===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
       2             : //
       3             : //                     The LLVM Compiler Infrastructure
       4             : //
       5             : // This file is distributed under the University of Illinois Open Source
       6             : // License. See LICENSE.TXT for details.
       7             : //
       8             : //===----------------------------------------------------------------------===//
       9             : //
      10             : /// \file
      11             : /// This file exposes an interface to building/using memory SSA to
      12             : /// walk memory instructions using a use/def graph.
      13             : ///
      14             : /// Memory SSA class builds an SSA form that links together memory access
      15             : /// instructions such as loads, stores, atomics, and calls. Additionally, it
       16             : /// does a trivial form of "heap versioning". Every time the memory state changes
      17             : /// in the program, we generate a new heap version. It generates
       18             : /// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
      19             : ///
      20             : /// As a trivial example,
      21             : /// define i32 @main() #0 {
      22             : /// entry:
      23             : ///   %call = call noalias i8* @_Znwm(i64 4) #2
      24             : ///   %0 = bitcast i8* %call to i32*
      25             : ///   %call1 = call noalias i8* @_Znwm(i64 4) #2
      26             : ///   %1 = bitcast i8* %call1 to i32*
      27             : ///   store i32 5, i32* %0, align 4
      28             : ///   store i32 7, i32* %1, align 4
      29             : ///   %2 = load i32* %0, align 4
      30             : ///   %3 = load i32* %1, align 4
      31             : ///   %add = add nsw i32 %2, %3
      32             : ///   ret i32 %add
      33             : /// }
      34             : ///
      35             : /// Will become
      36             : /// define i32 @main() #0 {
      37             : /// entry:
      38             : ///   ; 1 = MemoryDef(0)
      39             : ///   %call = call noalias i8* @_Znwm(i64 4) #3
      40             : ///   %2 = bitcast i8* %call to i32*
      41             : ///   ; 2 = MemoryDef(1)
      42             : ///   %call1 = call noalias i8* @_Znwm(i64 4) #3
      43             : ///   %4 = bitcast i8* %call1 to i32*
      44             : ///   ; 3 = MemoryDef(2)
      45             : ///   store i32 5, i32* %2, align 4
      46             : ///   ; 4 = MemoryDef(3)
      47             : ///   store i32 7, i32* %4, align 4
      48             : ///   ; MemoryUse(3)
      49             : ///   %7 = load i32* %2, align 4
      50             : ///   ; MemoryUse(4)
      51             : ///   %8 = load i32* %4, align 4
      52             : ///   %add = add nsw i32 %7, %8
      53             : ///   ret i32 %add
      54             : /// }
      55             : ///
       56             : /// Given this form, all the stores that could ever affect the load at %8 can be
      57             : /// gotten by using the MemoryUse associated with it, and walking from use to
      58             : /// def until you hit the top of the function.
      59             : ///
      60             : /// Each def also has a list of users associated with it, so you can walk from
      61             : /// both def to users, and users to defs. Note that we disambiguate MemoryUses,
      62             : /// but not the RHS of MemoryDefs. You can see this above at %7, which would
      63             : /// otherwise be a MemoryUse(4). Being disambiguated means that for a given
      64             : /// store, all the MemoryUses on its use lists are may-aliases of that store
      65             : /// (but the MemoryDefs on its use list may not be).
      66             : ///
      67             : /// MemoryDefs are not disambiguated because it would require multiple reaching
      68             : /// definitions, which would require multiple phis, and multiple memoryaccesses
      69             : /// per instruction.
      70             : //
      71             : //===----------------------------------------------------------------------===//
      72             : 
      73             : #ifndef LLVM_ANALYSIS_MEMORYSSA_H
      74             : #define LLVM_ANALYSIS_MEMORYSSA_H
      75             : 
      76             : #include "llvm/ADT/DenseMap.h"
      77             : #include "llvm/ADT/GraphTraits.h"
      78             : #include "llvm/ADT/SmallPtrSet.h"
      79             : #include "llvm/ADT/SmallVector.h"
      80             : #include "llvm/ADT/ilist.h"
      81             : #include "llvm/ADT/ilist_node.h"
      82             : #include "llvm/ADT/iterator.h"
      83             : #include "llvm/ADT/iterator_range.h"
      84             : #include "llvm/ADT/simple_ilist.h"
      85             : #include "llvm/Analysis/AliasAnalysis.h"
      86             : #include "llvm/Analysis/MemoryLocation.h"
      87             : #include "llvm/Analysis/PHITransAddr.h"
      88             : #include "llvm/IR/BasicBlock.h"
      89             : #include "llvm/IR/DerivedUser.h"
      90             : #include "llvm/IR/Dominators.h"
      91             : #include "llvm/IR/Module.h"
      92             : #include "llvm/IR/Type.h"
      93             : #include "llvm/IR/Use.h"
      94             : #include "llvm/IR/User.h"
      95             : #include "llvm/IR/Value.h"
      96             : #include "llvm/IR/ValueHandle.h"
      97             : #include "llvm/Pass.h"
      98             : #include "llvm/Support/Casting.h"
      99             : #include <algorithm>
     100             : #include <cassert>
     101             : #include <cstddef>
     102             : #include <iterator>
     103             : #include <memory>
     104             : #include <utility>
     105             : 
     106             : namespace llvm {
     107             : 
     108             : class Function;
     109             : class Instruction;
     110             : class MemoryAccess;
     111             : class MemorySSAWalker;
     112             : class LLVMContext;
     113             : class raw_ostream;
     114             : 
     115             : namespace MSSAHelpers {
     116             : 
     117             : struct AllAccessTag {};
     118             : struct DefsOnlyTag {};
     119             : 
     120             : } // end namespace MSSAHelpers
     121             : 
     122             : enum : unsigned {
     123             :   // Used to signify what the default invalid ID is for MemoryAccess's
     124             :   // getID()
     125             :   INVALID_MEMORYACCESS_ID = -1U
     126             : };
     127             : 
     128             : template <class T> class memoryaccess_def_iterator_base;
     129             : using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
     130             : using const_memoryaccess_def_iterator =
     131             :     memoryaccess_def_iterator_base<const MemoryAccess>;
     132             : 
     133             : // The base for all memory accesses. All memory accesses in a block are
     134             : // linked together using an intrusive list.
     135             : class MemoryAccess
     136             :     : public DerivedUser,
     137             :       public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
     138             :       public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
     139             : public:
     140             :   using AllAccessType =
     141             :       ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
     142             :   using DefsOnlyType =
     143             :       ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
     144             : 
     145             :   MemoryAccess(const MemoryAccess &) = delete;
     146             :   MemoryAccess &operator=(const MemoryAccess &) = delete;
     147             : 
     148             :   void *operator new(size_t) = delete;
     149             : 
     150             :   // Methods for support type inquiry through isa, cast, and
     151             :   // dyn_cast
     152             :   static bool classof(const Value *V) {
     153             :     unsigned ID = V->getValueID();
     154             :     return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
     155             :   }
     156             : 
     157             :   BasicBlock *getBlock() const { return Block; }
     158             : 
     159             :   void print(raw_ostream &OS) const;
     160             :   void dump() const;
     161             : 
     162             :   /// The user iterators for a memory access
     163             :   using iterator = user_iterator;
     164             :   using const_iterator = const_user_iterator;
     165             : 
     166             :   /// This iterator walks over all of the defs in a given
     167             :   /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
     168             :   /// MemoryUse/MemoryDef, this walks the defining access.
     169             :   memoryaccess_def_iterator defs_begin();
     170             :   const_memoryaccess_def_iterator defs_begin() const;
     171             :   memoryaccess_def_iterator defs_end();
     172             :   const_memoryaccess_def_iterator defs_end() const;
     173             : 
     174             :   /// Get the iterators for the all access list and the defs only list
     175             :   /// We default to the all access list.
     176             :   AllAccessType::self_iterator getIterator() {
     177           6 :     return this->AllAccessType::getIterator();
     178             :   }
     179             :   AllAccessType::const_self_iterator getIterator() const {
     180             :     return this->AllAccessType::getIterator();
     181             :   }
     182             :   AllAccessType::reverse_self_iterator getReverseIterator() {
     183          19 :     return this->AllAccessType::getReverseIterator();
     184             :   }
     185             :   AllAccessType::const_reverse_self_iterator getReverseIterator() const {
     186             :     return this->AllAccessType::getReverseIterator();
     187             :   }
     188             :   DefsOnlyType::self_iterator getDefsIterator() {
     189          10 :     return this->DefsOnlyType::getIterator();
     190             :   }
     191             :   DefsOnlyType::const_self_iterator getDefsIterator() const {
     192             :     return this->DefsOnlyType::getIterator();
     193             :   }
     194             :   DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
     195          28 :     return this->DefsOnlyType::getReverseIterator();
     196             :   }
     197             :   DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
     198             :     return this->DefsOnlyType::getReverseIterator();
     199             :   }
     200             : 
     201             : protected:
     202             :   friend class MemoryDef;
     203             :   friend class MemoryPhi;
     204             :   friend class MemorySSA;
     205             :   friend class MemoryUse;
     206             :   friend class MemoryUseOrDef;
     207             : 
     208             :   /// Used by MemorySSA to change the block of a MemoryAccess when it is
     209             :   /// moved.
     210          58 :   void setBlock(BasicBlock *BB) { Block = BB; }
     211             : 
     212             :   /// Used for debugging and tracking things about MemoryAccesses.
     213             :   /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
     214             :   inline unsigned getID() const;
     215             : 
     216             :   MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
     217             :                BasicBlock *BB, unsigned NumOperands)
     218     1389966 :       : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
     219     2779932 :         Block(BB) {}
     220             : 
     221             :   // Use deleteValue() to delete a generic MemoryAccess.
     222             :   ~MemoryAccess() = default;
     223             : 
     224             : private:
     225             :   BasicBlock *Block;
     226             : };
     227             : 
     228             : template <>
     229             : struct ilist_alloc_traits<MemoryAccess> {
     230     1353587 :   static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); }
     231             : };
     232             : 
     233             : inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
     234         472 :   MA.print(OS);
     235             :   return OS;
     236             : }
     237             : 
     238             : /// Class that has the common methods + fields of memory uses/defs. It's
     239             : /// a little awkward to have, but there are many cases where we want either a
     240             : /// use or def, and there are many cases where uses are needed (defs aren't
     241             : /// acceptable), and vice-versa.
     242             : ///
     243             : /// This class should never be instantiated directly; make a MemoryUse or
     244             : /// MemoryDef instead.
     245             : class MemoryUseOrDef : public MemoryAccess {
     246             : public:
     247             :   void *operator new(size_t) = delete;
     248             : 
     249             :   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
     250             : 
     251             :   /// Get the instruction that this MemoryUse represents.
     252             :   Instruction *getMemoryInst() const { return MemoryInstruction; }
     253             : 
     254             :   /// Get the access that produces the memory state used by this Use.
     255             :   MemoryAccess *getDefiningAccess() const { return getOperand(0); }
     256             : 
     257             :   static bool classof(const Value *MA) {
     258     4812766 :     return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
     259             :   }
     260             : 
     261             :   // Sadly, these have to be public because they are needed in some of the
     262             :   // iterators.
     263             :   inline bool isOptimized() const;
     264             :   inline MemoryAccess *getOptimized() const;
     265             :   inline void setOptimized(MemoryAccess *);
     266             : 
     267             :   // Retrieve AliasResult type of the optimized access. Ideally this would be
     268             :   // returned by the caching walker and may go away in the future.
     269             :   Optional<AliasResult> getOptimizedAccessType() const {
     270             :     return OptimizedAccessAlias;
     271             :   }
     272             : 
     273             :   /// Reset the ID of what this MemoryUse was optimized to, causing it to
     274             :   /// be rewalked by the walker if necessary.
     275             :   /// This really should only be called by tests.
     276             :   inline void resetOptimized();
     277             : 
     278             : protected:
     279             :   friend class MemorySSA;
     280             :   friend class MemorySSAUpdater;
     281             : 
     282     1345886 :   MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
     283             :                  DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB)
     284     1345886 :       : MemoryAccess(C, Vty, DeleteValue, BB, 1), MemoryInstruction(MI),
     285     1345886 :         OptimizedAccessAlias(MayAlias) {
     286     1345886 :     setDefiningAccess(DMA);
     287     1345886 :   }
     288             : 
     289             :   // Use deleteValue() to delete a generic MemoryUseOrDef.
     290             :   ~MemoryUseOrDef() = default;
     291             : 
     292             :   void setOptimizedAccessType(Optional<AliasResult> AR) {
     293             :     OptimizedAccessAlias = AR;
     294             :   }
     295             : 
     296     3033698 :   void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false,
     297             :                          Optional<AliasResult> AR = MayAlias) {
     298     3033698 :     if (!Optimized) {
     299             :       setOperand(0, DMA);
     300             :       return;
     301             :     }
     302      358184 :     setOptimized(DMA);
     303             :     setOptimizedAccessType(AR);
     304             :   }
     305             : 
     306             : private:
     307             :   Instruction *MemoryInstruction;
     308             :   Optional<AliasResult> OptimizedAccessAlias;
     309             : };
     310             : 
     311             : template <>
     312             : struct OperandTraits<MemoryUseOrDef>
     313             :     : public FixedNumOperandTraits<MemoryUseOrDef, 1> {};
     314     5054782 : DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
     315             : 
     316             : /// Represents read-only accesses to memory
     317             : ///
     318             : /// In particular, the set of Instructions that will be represented by
     319             : /// MemoryUse's is exactly the set of Instructions for which
     320             : /// AliasAnalysis::getModRefInfo returns "Ref".
     321      462823 : class MemoryUse final : public MemoryUseOrDef {
     322             : public:
     323             :   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
     324             : 
     325             :   MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
     326      462823 :       : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB) {}
     327             : 
     328             :   // allocate space for exactly one operand
     329      462823 :   void *operator new(size_t s) { return User::operator new(s, 1); }
     330             : 
     331             :   static bool classof(const Value *MA) {
     332     1490029 :     return MA->getValueID() == MemoryUseVal;
     333             :   }
     334             : 
     335             :   void print(raw_ostream &OS) const;
     336             : 
     337             :   void setOptimized(MemoryAccess *DMA) {
     338      457193 :     OptimizedID = DMA->getID();
     339             :     setOperand(0, DMA);
     340             :   }
     341             : 
     342             :   bool isOptimized() const {
     343      315628 :     return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
     344             :   }
     345             : 
     346             :   MemoryAccess *getOptimized() const {
     347             :     return getDefiningAccess();
     348             :   }
     349             : 
     350             :   void resetOptimized() {
     351           9 :     OptimizedID = INVALID_MEMORYACCESS_ID;
     352             :   }
     353             : 
     354             : protected:
     355             :   friend class MemorySSA;
     356             : 
     357             : private:
     358             :   static void deleteMe(DerivedUser *Self);
     359             : 
     360             :   unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
     361             : };
     362             : 
     363             : template <>
     364             : struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
     365      457193 : DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
     366             : 
     367             : /// Represents a read-write access to memory, whether it is a must-alias,
     368             : /// or a may-alias.
     369             : ///
     370             : /// In particular, the set of Instructions that will be represented by
     371             : /// MemoryDef's is exactly the set of Instructions for which
     372             : /// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
     373             : /// Note that, in order to provide def-def chains, all defs also have a use
     374             : /// associated with them. This use points to the nearest reaching
     375             : /// MemoryDef/MemoryPhi.
     376     1766126 : class MemoryDef final : public MemoryUseOrDef {
     377             : public:
     378             :   friend class MemorySSA;
     379             : 
     380             :   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
     381             : 
     382             :   MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
     383             :             unsigned Ver)
     384      883063 :       : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB), ID(Ver) {}
     385             : 
     386             :   // allocate space for exactly one operand
     387      883063 :   void *operator new(size_t s) { return User::operator new(s, 1); }
     388             : 
     389             :   static bool classof(const Value *MA) {
     390     4807387 :     return MA->getValueID() == MemoryDefVal;
     391             :   }
     392             : 
     393          28 :   void setOptimized(MemoryAccess *MA) {
     394             :     Optimized = MA;
     395          28 :     OptimizedID = getDefiningAccess()->getID();
     396          28 :   }
     397             : 
     398             :   MemoryAccess *getOptimized() const {
     399             :     return cast_or_null<MemoryAccess>(Optimized);
     400             :   }
     401             : 
     402             :   bool isOptimized() const {
     403          14 :     return getOptimized() && getDefiningAccess() &&
     404          14 :            OptimizedID == getDefiningAccess()->getID();
     405             :   }
     406             : 
     407             :   void resetOptimized() {
     408         640 :     OptimizedID = INVALID_MEMORYACCESS_ID;
     409             :   }
     410             : 
     411             :   void print(raw_ostream &OS) const;
     412             : 
     413             :   unsigned getID() const { return ID; }
     414             : 
     415             : private:
     416             :   static void deleteMe(DerivedUser *Self);
     417             : 
     418             :   const unsigned ID;
     419             :   unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
     420             :   WeakVH Optimized;
     421             : };
     422             : 
     423             : template <>
     424             : struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 1> {};
     425             : DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
     426             : 
     427             : /// Represents phi nodes for memory accesses.
     428             : ///
     429             : /// These have the same semantic as regular phi nodes, with the exception that
     430             : /// only one phi will ever exist in a given basic block.
     431             : /// Guaranteeing one phi per block means guaranteeing there is only ever one
     432             : /// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
     433             : /// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
     434             : /// a MemoryPhi's operands.
     435             : /// That is, given
     436             : /// if (a) {
     437             : ///   store %a
     438             : ///   store %b
     439             : /// }
     440             : /// it *must* be transformed into
     441             : /// if (a) {
     442             : ///    1 = MemoryDef(liveOnEntry)
     443             : ///    store %a
     444             : ///    2 = MemoryDef(1)
     445             : ///    store %b
     446             : /// }
     447             : /// and *not*
     448             : /// if (a) {
     449             : ///    1 = MemoryDef(liveOnEntry)
     450             : ///    store %a
     451             : ///    2 = MemoryDef(liveOnEntry)
     452             : ///    store %b
     453             : /// }
     454             : /// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
     455             : /// end of the branch, and if there are not two phi nodes, one will be
     456             : /// disconnected completely from the SSA graph below that point.
     457             : /// Because MemoryUse's do not generate new definitions, they do not have this
     458             : /// issue.
     459       44080 : class MemoryPhi final : public MemoryAccess {
     460             :   // allocate space for exactly zero operands
     461       44080 :   void *operator new(size_t s) { return User::operator new(s); }
     462             : 
     463             : public:
     464             :   /// Provide fast operand accessors
     465             :   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
     466             : 
     467       44080 :   MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
     468       44080 :       : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
     469       44080 :         ReservedSpace(NumPreds) {
     470             :     allocHungoffUses(ReservedSpace);
     471       44080 :   }
     472             : 
     473             :   // Block iterator interface. This provides access to the list of incoming
     474             :   // basic blocks, which parallels the list of incoming values.
     475             :   using block_iterator = BasicBlock **;
     476             :   using const_block_iterator = BasicBlock *const *;
     477             : 
     478             :   block_iterator block_begin() {
     479       99034 :     auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace);
     480           0 :     return reinterpret_cast<block_iterator>(Ref + 1);
     481             :   }
     482             : 
     483             :   const_block_iterator block_begin() const {
     484             :     const auto *Ref =
     485      248612 :         reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace);
     486             :     return reinterpret_cast<const_block_iterator>(Ref + 1);
     487             :   }
     488             : 
     489           6 :   block_iterator block_end() { return block_begin() + getNumOperands(); }
     490             : 
     491             :   const_block_iterator block_end() const {
     492             :     return block_begin() + getNumOperands();
     493             :   }
     494             : 
     495             :   iterator_range<block_iterator> blocks() {
     496             :     return make_range(block_begin(), block_end());
     497             :   }
     498             : 
     499             :   iterator_range<const_block_iterator> blocks() const {
     500             :     return make_range(block_begin(), block_end());
     501             :   }
     502             : 
     503          12 :   op_range incoming_values() { return operands(); }
     504             : 
     505             :   const_op_range incoming_values() const { return operands(); }
     506             : 
     507             :   /// Return the number of incoming edges
     508             :   unsigned getNumIncomingValues() const { return getNumOperands(); }
     509             : 
     510             :   /// Return incoming value number x
     511             :   MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
     512             :   void setIncomingValue(unsigned I, MemoryAccess *V) {
     513             :     assert(V && "PHI node got a null value!");
     514       99030 :     setOperand(I, V);
     515             :   }
     516             : 
     517             :   static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
     518             :   static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }
     519             : 
     520             :   /// Return incoming basic block number @p i.
     521      248603 :   BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }
     522             : 
     523             :   /// Return incoming basic block corresponding
     524             :   /// to an operand of the PHI.
     525             :   BasicBlock *getIncomingBlock(const Use &U) const {
     526             :     assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
     527        1126 :     return getIncomingBlock(unsigned(&U - op_begin()));
     528             :   }
     529             : 
     530             :   /// Return incoming basic block corresponding
     531             :   /// to value use iterator.
     532             :   BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
     533             :     return getIncomingBlock(I.getUse());
     534             :   }
     535             : 
     536             :   void setIncomingBlock(unsigned I, BasicBlock *BB) {
     537             :     assert(BB && "PHI node got a null basic block!");
     538       99025 :     block_begin()[I] = BB;
     539             :   }
     540             : 
     541             :   /// Add an incoming value to the end of the PHI list
     542       99025 :   void addIncoming(MemoryAccess *V, BasicBlock *BB) {
     543       99025 :     if (getNumOperands() == ReservedSpace)
     544       53480 :       growOperands(); // Get more space!
     545             :     // Initialize some new operands.
     546       99025 :     setNumHungOffUseOperands(getNumOperands() + 1);
     547       99025 :     setIncomingValue(getNumOperands() - 1, V);
     548       99025 :     setIncomingBlock(getNumOperands() - 1, BB);
     549       99025 :   }
     550             : 
     551             :   /// Return the first index of the specified basic
     552             :   /// block in the value list for this PHI.  Returns -1 if no instance.
     553           5 :   int getBasicBlockIndex(const BasicBlock *BB) const {
     554          13 :     for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
     555           9 :       if (block_begin()[I] == BB)
     556           5 :         return I;
     557             :     return -1;
     558             :   }
     559             : 
     560             :   MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const {
     561             :     int Idx = getBasicBlockIndex(BB);
     562             :     assert(Idx >= 0 && "Invalid basic block argument!");
     563             :     return getIncomingValue(Idx);
     564             :   }
     565             : 
     566             :   // After deleting incoming position I, the order of incoming may be changed.
     567           0 :   void unorderedDeleteIncoming(unsigned I) {
     568             :     unsigned E = getNumOperands();
     569             :     assert(I < E && "Cannot remove out of bounds Phi entry.");
     570             :     // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi
     571             :     // itself should be deleted.
     572             :     assert(E >= 2 && "Cannot only remove incoming values in MemoryPhis with "
     573             :                      "at least 2 values.");
     574           0 :     setIncomingValue(I, getIncomingValue(E - 1));
     575           0 :     setIncomingBlock(I, block_begin()[E - 1]);
     576           0 :     setOperand(E - 1, nullptr);
     577           0 :     block_begin()[E - 1] = nullptr;
     578           0 :     setNumHungOffUseOperands(getNumOperands() - 1);
     579           0 :   }
     580             : 
     581             :   // After deleting incoming block BB, the incoming blocks order may be changed.
     582           0 :   void unorderedDeleteIncomingBlock(const BasicBlock *BB) {
     583           0 :     for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
     584           0 :       if (block_begin()[I] == BB) {
     585           0 :         unorderedDeleteIncoming(I);
     586             :         E = getNumOperands();
     587           0 :         --I;
     588             :       }
     589             :     assert(getNumOperands() >= 1 &&
     590             :            "Cannot remove all incoming blocks in a MemoryPhi.");
     591           0 :   }
     592             : 
     593             :   // After deleting incoming memory access MA, the incoming accesses order may
     594             :   // be changed.
     595             :   void unorderedDeleteIncomingValue(const MemoryAccess *MA) {
     596             :     for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
     597             :       if (getIncomingValue(I) == MA) {
     598             :         unorderedDeleteIncoming(I);
     599             :         E = getNumOperands();
     600             :         --I;
     601             :       }
     602             :     assert(getNumOperands() >= 1 &&
     603             :            "Cannot remove all incoming values in a MemoryPhi.");
     604             :   }
     605             : 
     606             :   static bool classof(const Value *V) {
     607     2564811 :     return V->getValueID() == MemoryPhiVal;
     608             :   }
     609             : 
     610             :   void print(raw_ostream &OS) const;
     611             : 
     612             :   unsigned getID() const { return ID; }
     613             : 
     614             : protected:
     615             :   friend class MemorySSA;
     616             : 
     617             :   /// this is more complicated than the generic
     618             :   /// User::allocHungoffUses, because we have to allocate Uses for the incoming
     619             :   /// values and pointers to the incoming blocks, all in one allocation.
     620             :   void allocHungoffUses(unsigned N) {
     621       44080 :     User::allocHungoffUses(N, /* IsPhi */ true);
     622             :   }
     623             : 
     624             : private:
     625             :   // For debugging only
     626             :   const unsigned ID;
     627             :   unsigned ReservedSpace;
     628             : 
     629             :   /// This grows the operand list in response to a push_back style of
     630             :   /// operation.  This grows the number of ops by 1.5 times.
     631       53480 :   void growOperands() {
     632             :     unsigned E = getNumOperands();
     633             :     // 2 op PHI nodes are VERY common, so reserve at least enough for that.
     634      106960 :     ReservedSpace = std::max(E + E / 2, 2u);
     635       53480 :     growHungoffUses(ReservedSpace, /* IsPhi */ true);
     636       53480 :   }
     637             : 
     638             :   static void deleteMe(DerivedUser *Self);
     639             : };
     640             : 
     641             : inline unsigned MemoryAccess::getID() const {
     642             :   assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
     643             :          "only memory defs and phis have ids");
     644             :   if (const auto *MD = dyn_cast<MemoryDef>(this))
     645      446525 :     return MD->getID();
     646      169090 :   return cast<MemoryPhi>(this)->getID();
     647             : }
     648             : 
     649      157843 : inline bool MemoryUseOrDef::isOptimized() const {
     650             :   if (const auto *MD = dyn_cast<MemoryDef>(this))
     651             :     return MD->isOptimized();
     652             :   return cast<MemoryUse>(this)->isOptimized();
     653             : }
     654             : 
     655             : inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
     656             :   if (const auto *MD = dyn_cast<MemoryDef>(this))
     657             :     return MD->getOptimized();
     658             :   return cast<MemoryUse>(this)->getOptimized();
     659             : }
     660             : 
     661      457221 : inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
     662             :   if (auto *MD = dyn_cast<MemoryDef>(this))
     663          28 :     MD->setOptimized(MA);
     664             :   else
     665             :     cast<MemoryUse>(this)->setOptimized(MA);
     666      457221 : }
     667             : 
     668             : inline void MemoryUseOrDef::resetOptimized() {
     669             :   if (auto *MD = dyn_cast<MemoryDef>(this))
     670             :     MD->resetOptimized();
     671             :   else
     672             :     cast<MemoryUse>(this)->resetOptimized();
     673             : }
     674             : 
     675             : template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
     676     1142194 : DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
     677             : 
     678             : /// Encapsulates MemorySSA, including all data associated with memory
     679             : /// accesses.
     680             : class MemorySSA {
     681             : public:
     682             :   MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
     683             :   ~MemorySSA();
     684             : 
     685             :   MemorySSAWalker *getWalker();
     686             : 
     687             :   /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
     688             :   /// access associated with it. If passed a basic block gets the memory phi
     689             :   /// node that exists for that block, if there is one. Otherwise, this will get
     690             :   /// a MemoryUseOrDef.
     691             :   MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
     692             :   MemoryPhi *getMemoryAccess(const BasicBlock *BB) const;
     693             : 
     694             :   void dump() const;
     695             :   void print(raw_ostream &) const;
     696             : 
     697             :   /// Return true if \p MA represents the live on entry value
     698             :   ///
     699             :   /// Loads and stores from pointer arguments and other global values may be
     700             :   /// defined by memory operations that do not occur in the current function, so
     701             :   /// they may be live on entry to the function. MemorySSA represents such
     702             :   /// memory state by the live on entry definition, which is guaranteed to occur
     703             :   /// before any other memory access in the function.
     704             :   inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
     705          11 :     return MA == LiveOnEntryDef.get();
     706             :   }
     707             : 
     708             :   inline MemoryAccess *getLiveOnEntryDef() const {
     709             :     return LiveOnEntryDef.get();
     710             :   }
     711             : 
     712             :   // Sadly, iplists, by default, owns and deletes pointers added to the
     713             :   // list. It's not currently possible to have two iplists for the same type,
     714             :   // where one owns the pointers, and one does not. This is because the traits
     715             :   // are per-type, not per-tag.  If this ever changes, we should make the
     716             :   // DefList an iplist.
     717             :   using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
     718             :   using DefsList =
     719             :       simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
     720             : 
     721             :   /// Return the list of MemoryAccess's for a given basic block.
     722             :   ///
     723             :   /// This list is not modifiable by the user.
     724             :   const AccessList *getBlockAccesses(const BasicBlock *BB) const {
     725             :     return getWritableBlockAccesses(BB);
     726             :   }
     727             : 
     728             :   /// Return the list of MemoryDef's and MemoryPhi's for a given basic
     729             :   /// block.
     730             :   ///
     731             :   /// This list is not modifiable by the user.
     732             :   const DefsList *getBlockDefs(const BasicBlock *BB) const {
     733             :     return getWritableBlockDefs(BB);
     734             :   }
     735             : 
     736             :   /// Given two memory accesses in the same basic block, determine
     737             :   /// whether MemoryAccess \p A dominates MemoryAccess \p B.
     738             :   bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
     739             : 
     740             :   /// Given two memory accesses in potentially different blocks,
     741             :   /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
     742             :   bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
     743             : 
     744             :   /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
     745             :   /// dominates Use \p B.
     746             :   bool dominates(const MemoryAccess *A, const Use &B) const;
     747             : 
     748             :   /// Verify that MemorySSA is self consistent (IE definitions dominate
     749             :   /// all uses, uses appear in the right places).  This is used by unit tests.
     750             :   void verifyMemorySSA() const;
     751             : 
     752             :   /// Used in various insertion functions to specify whether we are talking
     753             :   /// about the beginning or end of a block.
     754             :   enum InsertionPlace { Beginning, End };
     755             : 
     756             : protected:
     757             :   // Used by Memory SSA annotater, dumpers, and wrapper pass
     758             :   friend class MemorySSAAnnotatedWriter;
     759             :   friend class MemorySSAPrinterLegacyPass;
     760             :   friend class MemorySSAUpdater;
     761             : 
     762             :   void verifyDefUses(Function &F) const;
     763             :   void verifyDomination(Function &F) const;
     764             :   void verifyOrdering(Function &F) const;
     765             :   void verifyDominationNumbers(const Function &F) const;
     766             : 
     767             :   // This is used by the use optimizer and updater.
     768             :   AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
     769      203245 :     auto It = PerBlockAccesses.find(BB);
     770      203245 :     return It == PerBlockAccesses.end() ? nullptr : It->second.get();
     771             :   }
     772             : 
     773             :   // This is used by the use optimizer and updater.
     774             :   DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
     775       90459 :     auto It = PerBlockDefs.find(BB);
     776       90459 :     return It == PerBlockDefs.end() ? nullptr : It->second.get();
     777             :   }
     778             : 
     779             :   // These is used by the updater to perform various internal MemorySSA
     780             :   // machinsations.  They do not always leave the IR in a correct state, and
     781             :   // relies on the updater to fixup what it breaks, so it is not public.
     782             : 
     783             :   void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
     784             :   void moveTo(MemoryUseOrDef *What, BasicBlock *BB, InsertionPlace Point);
     785             : 
     786             :   // Rename the dominator tree branch rooted at BB.
     787           1 :   void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
     788             :                   SmallPtrSetImpl<BasicBlock *> &Visited) {
     789           2 :     renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
     790           1 :   }
     791             : 
     792             :   void removeFromLookups(MemoryAccess *);
     793             :   void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
     794             :   void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
     795             :                                InsertionPlace);
     796             :   void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
     797             :                              AccessList::iterator);
     798             :   MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *);
     799             : 
     800             : private:
     801             :   class CachingWalker;
     802             :   class OptimizeUses;
     803             : 
     804             :   CachingWalker *getWalkerImpl();
     805             :   void buildMemorySSA();
     806             :   void optimizeUses();
     807             : 
     808             :   void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;
     809             : 
     810             :   using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
     811             :   using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;
     812             : 
     813             :   void
     814             :   determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks);
     815             :   void markUnreachableAsLiveOnEntry(BasicBlock *BB);
     816             :   bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
     817             :   MemoryPhi *createMemoryPhi(BasicBlock *BB);
     818             :   MemoryUseOrDef *createNewAccess(Instruction *);
     819             :   MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
     820             :   void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
     821             :   MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
     822             :   void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
     823             :   void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
     824             :                   SmallPtrSetImpl<BasicBlock *> &Visited,
     825             :                   bool SkipVisited = false, bool RenameAllUses = false);
     826             :   AccessList *getOrCreateAccessList(const BasicBlock *);
     827             :   DefsList *getOrCreateDefsList(const BasicBlock *);
     828             :   void renumberBlock(const BasicBlock *) const;
     829             :   AliasAnalysis *AA;
     830             :   DominatorTree *DT;
     831             :   Function &F;
     832             : 
     833             :   // Memory SSA mappings
     834             :   DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;
     835             : 
     836             :   // These two mappings contain the main block to access/def mappings for
     837             :   // MemorySSA. The list contained in PerBlockAccesses really owns all the
     838             :   // MemoryAccesses.
     839             :   // Both maps maintain the invariant that if a block is found in them, the
     840             :   // corresponding list is not empty, and if a block is not found in them, the
     841             :   // corresponding list is empty.
     842             :   AccessMap PerBlockAccesses;
     843             :   DefsMap PerBlockDefs;
     844             :   std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef;
     845             : 
     846             :   // Domination mappings
     847             :   // Note that the numbering is local to a block, even though the map is
     848             :   // global.
     849             :   mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
     850             :   mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
     851             : 
     852             :   // Memory SSA building info
     853             :   std::unique_ptr<CachingWalker> Walker;
     854             :   unsigned NextID;
     855             : };
     856             : 
     857             : // Internal MemorySSA utils, for use by MemorySSA classes and walkers
     858             : class MemorySSAUtil {
     859             : protected:
     860             :   friend class GVNHoist;
     861             :   friend class MemorySSAWalker;
     862             : 
     863             :   // This function should not be used by new passes.
     864             :   static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
     865             :                                   AliasAnalysis &AA);
     866             : };
     867             : 
     868             : // This pass does eager building and then printing of MemorySSA. It is used by
     869             : // the tests to be able to build, dump, and verify Memory SSA.
     870          40 : class MemorySSAPrinterLegacyPass : public FunctionPass {
     871             : public:
     872             :   MemorySSAPrinterLegacyPass();
     873             : 
     874             :   bool runOnFunction(Function &) override;
     875             :   void getAnalysisUsage(AnalysisUsage &AU) const override;
     876             : 
     877             :   static char ID;
     878             : };
     879             : 
     880             : /// An analysis that produces \c MemorySSA for a function.
     881             : ///
     882             : class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
     883             :   friend AnalysisInfoMixin<MemorySSAAnalysis>;
     884             : 
     885             :   static AnalysisKey Key;
     886             : 
     887             : public:
     888             :   // Wrap MemorySSA result to ensure address stability of internal MemorySSA
     889             :   // pointers after construction.  Use a wrapper class instead of plain
     890             :   // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
     891         543 :   struct Result {
     892             :     Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}
     893             : 
     894             :     MemorySSA &getMSSA() { return *MSSA.get(); }
     895             : 
     896             :     std::unique_ptr<MemorySSA> MSSA;
     897             :   };
     898             : 
     899             :   Result run(Function &F, FunctionAnalysisManager &AM);
     900             : };
     901             : 
     902             : /// Printer pass for \c MemorySSA.
     903             : class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
     904             :   raw_ostream &OS;
     905             : 
     906             : public:
     907          20 :   explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}
     908             : 
     909             :   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
     910             : };
     911             : 
     912             : /// Verifier pass for \c MemorySSA.
     913             : struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
     914             :   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
     915             : };
     916             : 
     917             : /// Legacy analysis pass which computes \c MemorySSA.
     918        2462 : class MemorySSAWrapperPass : public FunctionPass {
     919             : public:
     920             :   MemorySSAWrapperPass();
     921             : 
     922             :   static char ID;
     923             : 
     924             :   bool runOnFunction(Function &) override;
     925             :   void releaseMemory() override;
     926             :   MemorySSA &getMSSA() { return *MSSA; }
     927             :   const MemorySSA &getMSSA() const { return *MSSA; }
     928             : 
     929             :   void getAnalysisUsage(AnalysisUsage &AU) const override;
     930             : 
     931             :   void verifyAnalysis() const override;
     932             :   void print(raw_ostream &OS, const Module *M = nullptr) const override;
     933             : 
     934             : private:
     935             :   std::unique_ptr<MemorySSA> MSSA;
     936             : };
     937             : 
     938             : /// This is the generic walker interface for walkers of MemorySSA.
     939             : /// Walkers are used to be able to further disambiguate the def-use chains
     940             : /// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
     941             : /// you.
     942             : /// In particular, while the def-use chains provide basic information, and are
     943             : /// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
     944             : /// MemoryUse as AliasAnalysis considers it, a user mant want better or other
     945             : /// information. In particular, they may want to use SCEV info to further
     946             : /// disambiguate memory accesses, or they may want the nearest dominating
     947             : /// may-aliasing MemoryDef for a call or a store. This API enables a
     948             : /// standardized interface to getting and using that info.
     949             : class MemorySSAWalker {
     950             : public:
     951             :   MemorySSAWalker(MemorySSA *);
     952             :   virtual ~MemorySSAWalker() = default;
     953             : 
     954             :   using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;
     955             : 
     956             :   /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
     957             :   /// will give you the nearest dominating MemoryAccess that Mod's the location
     958             :   /// the instruction accesses (by skipping any def which AA can prove does not
     959             :   /// alias the location(s) accessed by the instruction given).
     960             :   ///
     961             :   /// Note that this will return a single access, and it must dominate the
     962             :   /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
     963             :   /// this will return the MemoryPhi, not the operand. This means that
     964             :   /// given:
     965             :   /// if (a) {
     966             :   ///   1 = MemoryDef(liveOnEntry)
     967             :   ///   store %a
     968             :   /// } else {
     969             :   ///   2 = MemoryDef(liveOnEntry)
     970             :   ///   store %b
     971             :   /// }
     972             :   /// 3 = MemoryPhi(2, 1)
     973             :   /// MemoryUse(3)
     974             :   /// load %a
     975             :   ///
     976             :   /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
     977             :   /// in the if (a) branch.
     978      157356 :   MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
     979      157356 :     MemoryAccess *MA = MSSA->getMemoryAccess(I);
     980             :     assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
     981      157356 :     return getClobberingMemoryAccess(MA);
     982             :   }
     983             : 
     984             :   /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
     985             :   /// but takes a MemoryAccess instead of an Instruction.
     986             :   virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;
     987             : 
     988             :   /// Given a potentially clobbering memory access and a new location,
     989             :   /// calling this will give you the nearest dominating clobbering MemoryAccess
     990             :   /// (by skipping non-aliasing def links).
     991             :   ///
     992             :   /// This version of the function is mainly used to disambiguate phi translated
     993             :   /// pointers, where the value of a pointer may have changed from the initial
     994             :   /// memory access. Note that this expects to be handed either a MemoryUse,
     995             :   /// or an already potentially clobbering access. Unlike the above API, if
     996             :   /// given a MemoryDef that clobbers the pointer as the starting access, it
     997             :   /// will return that MemoryDef, whereas the above would return the clobber
     998             :   /// starting from the use side of  the memory def.
     999             :   virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
    1000             :                                                   const MemoryLocation &) = 0;
    1001             : 
    1002             :   /// Given a memory access, invalidate anything this walker knows about
    1003             :   /// that access.
    1004             :   /// This API is used by walkers that store information to perform basic cache
    1005             :   /// invalidation.  This will be called by MemorySSA at appropriate times for
    1006             :   /// the walker it uses or returns.
    1007           0 :   virtual void invalidateInfo(MemoryAccess *) {}
    1008             : 
    1009           0 :   virtual void verify(const MemorySSA *MSSA) { assert(MSSA == this->MSSA); }
    1010             : 
    1011             : protected:
    1012             :   friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
    1013             :                           // constructor.
    1014             :   MemorySSA *MSSA;
    1015             : };
    1016             : 
    1017             : /// A MemorySSAWalker that does no alias queries, or anything else. It
    1018             : /// simply returns the links as they were constructed by the builder.
    1019           0 : class DoNothingMemorySSAWalker final : public MemorySSAWalker {
    1020             : public:
    1021             :   // Keep the overrides below from hiding the Instruction overload of
    1022             :   // getClobberingMemoryAccess.
    1023             :   using MemorySSAWalker::getClobberingMemoryAccess;
    1024             : 
    1025             :   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
    1026             :   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
    1027             :                                           const MemoryLocation &) override;
    1028             : };
    1029             : 
    1030             : using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
    1031             : using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
    1032             : 
    1033             : /// Iterator base class used to implement const and non-const iterators
    1034             : /// over the defining accesses of a MemoryAccess.
    1035             : template <class T>
    1036             : class memoryaccess_def_iterator_base
    1037             :     : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
    1038             :                                   std::forward_iterator_tag, T, ptrdiff_t, T *,
    1039             :                                   T *> {
    1040             :   using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;
    1041             : 
    1042             : public:
    1043      116598 :   memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
    1044             :   memoryaccess_def_iterator_base() = default;
    1045             : 
    1046             :   bool operator==(const memoryaccess_def_iterator_base &Other) const {
    1047      613802 :     return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
    1048             :   }
    1049             : 
    1050             :   // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
    1051             :   // block from the operand in constant time (In a PHINode, the uselist has
    1052             :   // both, so it's just subtraction). We provide it as part of the
    1053             :   // iterator to avoid callers having to linear walk to get the block.
    1054             :   // If the operation becomes constant time on MemoryPHI's, this bit of
    1055             :   // abstraction breaking should be removed.
    1056      248040 :   BasicBlock *getPhiArgBlock() const {
    1057      248040 :     MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
    1058             :     assert(MP && "Tried to get phi arg block when not iterating over a PHI");
    1059      496080 :     return MP->getIncomingBlock(ArgNo);
    1060             :   }
    1061             : 
    1062      248602 :   typename BaseT::iterator::pointer operator*() const {
    1063             :     assert(Access && "Tried to access past the end of our iterator");
    1064             :     // Go to the first argument for phis, and the defining access for everything
    1065             :     // else.
    1066      248602 :     if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
    1067      248602 :       return MP->getIncomingValue(ArgNo);
    1068             :     return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
    1069             :   }
    1070             : 
    1071             :   using BaseT::operator++;
    1072             :   memoryaccess_def_iterator &operator++() {
    1073             :     assert(Access && "Hit end of iterator");
    1074      248602 :     if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
    1075      497204 :       if (++ArgNo >= MP->getNumIncomingValues()) {
    1076      116598 :         ArgNo = 0;
    1077      116598 :         Access = nullptr;
    1078             :       }
    1079             :     } else {
    1080           0 :       Access = nullptr;
    1081             :     }
    1082             :     return *this;
    1083             :   }
    1084             : 
    1085             : private:
    1086             :   T *Access = nullptr;
    1087             :   unsigned ArgNo = 0;
    1088             : };
    1089             : 
    1090             : inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
    1091             :   return memoryaccess_def_iterator(this);
    1092             : }
    1093             : 
    1094             : inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
    1095             :   return const_memoryaccess_def_iterator(this);
    1096             : }
    1097             : 
    1098             : inline memoryaccess_def_iterator MemoryAccess::defs_end() {
    1099             :   return memoryaccess_def_iterator();
    1100             : }
    1101             : 
    1102             : inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
    1103             :   return const_memoryaccess_def_iterator();
    1104             : }
    1105             : 
    1106             : /// GraphTraits for a MemoryAccess, which walks defs in the normal case,
    1107             : /// and uses in the inverse case.
    1108             : template <> struct GraphTraits<MemoryAccess *> {
    1109             :   using NodeRef = MemoryAccess *;
    1110             :   using ChildIteratorType = memoryaccess_def_iterator;
    1111             : 
    1112             :   static NodeRef getEntryNode(NodeRef N) { return N; }
    1113             :   static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
    1114             :   static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
    1115             : };
    1116             : 
    1117             : template <> struct GraphTraits<Inverse<MemoryAccess *>> {
    1118             :   using NodeRef = MemoryAccess *;
    1119             :   using ChildIteratorType = MemoryAccess::iterator;
    1120             : 
    1121             :   static NodeRef getEntryNode(NodeRef N) { return N; }
    1122             :   static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
    1123             :   static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
    1124             : };
    1125             : 
    1126             : /// Provide an iterator that walks defs, giving both the memory access,
    1127             : /// and the current pointer location, updating the pointer location as it
    1128             : /// changes due to phi node translation.
    1129             : ///
    1130             : /// This iterator, while somewhat specialized, is what most clients actually
    1131             : /// want when walking upwards through MemorySSA def chains. It takes a pair of
    1132             : /// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
    1133             : /// memory location through phi nodes for the user.
class upward_defs_iterator
    : public iterator_facade_base<upward_defs_iterator,
                                  std::forward_iterator_tag,
                                  const MemoryAccessPair> {
  using BaseT = upward_defs_iterator::iterator_facade_base;

public:
  /// Start walking upward defs from Info.first, tracking the memory location
  /// Info.second (translated across phi edges as we go).
  upward_defs_iterator(const MemoryAccessPair &Info)
      : DefIterator(Info.first), Location(Info.second),
        OriginalAccess(Info.first) {
    CurrentPair.first = nullptr;

    // Phi translation of the location only applies while iterating a phi's
    // incoming edges; record that up front.
    WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
    fillInCurrentPair();
  }

  /// End iterator: DefIterator is default-constructed, so it compares equal
  /// to any exhausted def iterator.
  upward_defs_iterator() { CurrentPair.first = nullptr; }

  // Equality is defined solely by the position of the underlying def
  // iterator; Location/CurrentPair are derived state.
  bool operator==(const upward_defs_iterator &Other) const {
    return DefIterator == Other.DefIterator;
  }

  /// Returns the current <access, location> pair.
  BaseT::iterator::reference operator*() const {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of our iterator");
    return CurrentPair;
  }

  using BaseT::operator++;
  upward_defs_iterator &operator++() {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of the iterator");
    ++DefIterator;
    // Only recompute the pair while there is a def left to look at.
    if (DefIterator != OriginalAccess->defs_end())
      fillInCurrentPair();
    return *this;
  }

  /// Incoming block of the phi operand the iterator currently points at.
  BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }

private:
  // Recompute CurrentPair for the def DefIterator currently points at,
  // translating the tracked pointer across the phi edge when possible.
  void fillInCurrentPair() {
    CurrentPair.first = *DefIterator;
    if (WalkingPhi && Location.Ptr) {
      PHITransAddr Translator(
          const_cast<Value *>(Location.Ptr),
          OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
      // NOTE(review): a false return from PHITranslateValue is treated here
      // as successful translation — confirm against the PHITransAddr API.
      if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
                                        DefIterator.getPhiArgBlock(), nullptr,
                                        false))
        if (Translator.getAddr() != Location.Ptr) {
          // The pointer changed across this edge; pair the def with the
          // translated location.
          CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
          return;
        }
    }
    // No translation needed (or none possible): keep the original location.
    CurrentPair.second = Location;
  }

  MemoryAccessPair CurrentPair;          // value yielded by operator*
  memoryaccess_def_iterator DefIterator; // position in the def walk
  MemoryLocation Location;               // location being tracked upward
  MemoryAccess *OriginalAccess = nullptr;
  bool WalkingPhi = false;               // true iff OriginalAccess is a phi
};
    1198             : 
    1199             : inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
    1200      116598 :   return upward_defs_iterator(Pair);
    1201             : }
    1202             : 
    1203             : inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
    1204             : 
    1205             : inline iterator_range<upward_defs_iterator>
    1206             : upward_defs(const MemoryAccessPair &Pair) {
    1207             :   return make_range(upward_defs_begin(Pair), upward_defs_end());
    1208             : }
    1209             : 
    1210             : /// Walks the defining accesses of MemoryDefs. Stops after we hit something that
    1211             : /// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
    1212             : /// comparing against a null def_chain_iterator, this will compare equal only
    1213             : /// after walking said Phi/liveOnEntry.
    1214             : ///
    1215             : /// The UseOptimizedChain flag specifies whether to walk the clobbering
    1216             : /// access chain, or all the accesses.
    1217             : ///
    1218             : /// Normally, MemoryDef are all just def/use linked together, so a def_chain on
    1219             : /// a MemoryDef will walk all MemoryDefs above it in the program until it hits
    1220             : /// a phi node.  The optimized chain walks the clobbering access of a store.
    1221             : /// So if you are just trying to find, given a store, what the next
    1222             : /// thing that would clobber the same memory is, you want the optimized chain.
    1223             : template <class T, bool UseOptimizedChain = false>
    1224             : struct def_chain_iterator
    1225             :     : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
    1226             :                                   std::forward_iterator_tag, MemoryAccess *> {
    1227             :   def_chain_iterator() : MA(nullptr) {}
    1228             :   def_chain_iterator(T MA) : MA(MA) {}
    1229             : 
    1230             :   T operator*() const { return MA; }
    1231             : 
    1232             :   def_chain_iterator &operator++() {
    1233             :     // N.B. liveOnEntry has a null defining access.
    1234             :     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
    1235             :       if (UseOptimizedChain && MUD->isOptimized())
    1236             :         MA = MUD->getOptimized();
    1237             :       else
    1238             :         MA = MUD->getDefiningAccess();
    1239             :     } else {
    1240             :       MA = nullptr;
    1241             :     }
    1242             : 
    1243             :     return *this;
    1244             :   }
    1245             : 
    1246             :   bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }
    1247             : 
    1248             : private:
    1249             :   T MA;
    1250             : };
    1251             : 
    1252             : template <class T>
    1253             : inline iterator_range<def_chain_iterator<T>>
    1254             : def_chain(T MA, MemoryAccess *UpTo = nullptr) {
    1255             : #ifdef EXPENSIVE_CHECKS
    1256             :   assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
    1257             :          "UpTo isn't in the def chain!");
    1258             : #endif
    1259             :   return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
    1260             : }
    1261             : 
    1262             : template <class T>
    1263             : inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
    1264             :   return make_range(def_chain_iterator<T, true>(MA),
    1265             :                     def_chain_iterator<T, true>(nullptr));
    1266             : }
    1267             : 
    1268             : } // end namespace llvm
    1269             : 
    1270             : #endif // LLVM_ANALYSIS_MEMORYSSA_H

Generated by: LCOV version 1.13