LCOV - code coverage report
Current view: top level - include/llvm/Analysis - MemorySSA.h (source / functions) Hit Total Coverage
Test: llvm-toolchain.info Lines: 119 126 94.4 %
Date: 2018-05-20 00:06:23 Functions: 20 26 76.9 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : //===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
       2             : //
       3             : //                     The LLVM Compiler Infrastructure
       4             : //
       5             : // This file is distributed under the University of Illinois Open Source
       6             : // License. See LICENSE.TXT for details.
       7             : //
       8             : //===----------------------------------------------------------------------===//
       9             : //
      10             : /// \file
      11             : /// This file exposes an interface to building/using memory SSA to
      12             : /// walk memory instructions using a use/def graph.
      13             : ///
      14             : /// Memory SSA class builds an SSA form that links together memory access
      15             : /// instructions such as loads, stores, atomics, and calls. Additionally, it
/// does a trivial form of "heap versioning". Every time the memory state changes
      17             : /// in the program, we generate a new heap version. It generates
      18             : /// MemoryDef/Uses/Phis that are overlayed on top of the existing instructions.
      19             : ///
      20             : /// As a trivial example,
      21             : /// define i32 @main() #0 {
      22             : /// entry:
      23             : ///   %call = call noalias i8* @_Znwm(i64 4) #2
      24             : ///   %0 = bitcast i8* %call to i32*
      25             : ///   %call1 = call noalias i8* @_Znwm(i64 4) #2
      26             : ///   %1 = bitcast i8* %call1 to i32*
      27             : ///   store i32 5, i32* %0, align 4
      28             : ///   store i32 7, i32* %1, align 4
      29             : ///   %2 = load i32* %0, align 4
      30             : ///   %3 = load i32* %1, align 4
      31             : ///   %add = add nsw i32 %2, %3
      32             : ///   ret i32 %add
      33             : /// }
      34             : ///
      35             : /// Will become
      36             : /// define i32 @main() #0 {
      37             : /// entry:
      38             : ///   ; 1 = MemoryDef(0)
      39             : ///   %call = call noalias i8* @_Znwm(i64 4) #3
      40             : ///   %2 = bitcast i8* %call to i32*
      41             : ///   ; 2 = MemoryDef(1)
      42             : ///   %call1 = call noalias i8* @_Znwm(i64 4) #3
      43             : ///   %4 = bitcast i8* %call1 to i32*
      44             : ///   ; 3 = MemoryDef(2)
      45             : ///   store i32 5, i32* %2, align 4
      46             : ///   ; 4 = MemoryDef(3)
      47             : ///   store i32 7, i32* %4, align 4
      48             : ///   ; MemoryUse(3)
      49             : ///   %7 = load i32* %2, align 4
      50             : ///   ; MemoryUse(4)
      51             : ///   %8 = load i32* %4, align 4
      52             : ///   %add = add nsw i32 %7, %8
      53             : ///   ret i32 %add
      54             : /// }
      55             : ///
/// Given this form, all the stores that could ever affect the load at %8 can be
      57             : /// gotten by using the MemoryUse associated with it, and walking from use to
      58             : /// def until you hit the top of the function.
      59             : ///
      60             : /// Each def also has a list of users associated with it, so you can walk from
      61             : /// both def to users, and users to defs. Note that we disambiguate MemoryUses,
      62             : /// but not the RHS of MemoryDefs. You can see this above at %7, which would
      63             : /// otherwise be a MemoryUse(4). Being disambiguated means that for a given
      64             : /// store, all the MemoryUses on its use lists are may-aliases of that store
      65             : /// (but the MemoryDefs on its use list may not be).
      66             : ///
      67             : /// MemoryDefs are not disambiguated because it would require multiple reaching
      68             : /// definitions, which would require multiple phis, and multiple memoryaccesses
      69             : /// per instruction.
      70             : //
      71             : //===----------------------------------------------------------------------===//
      72             : 
      73             : #ifndef LLVM_ANALYSIS_MEMORYSSA_H
      74             : #define LLVM_ANALYSIS_MEMORYSSA_H
      75             : 
      76             : #include "llvm/ADT/DenseMap.h"
      77             : #include "llvm/ADT/GraphTraits.h"
      78             : #include "llvm/ADT/SmallPtrSet.h"
      79             : #include "llvm/ADT/SmallVector.h"
      80             : #include "llvm/ADT/ilist.h"
      81             : #include "llvm/ADT/ilist_node.h"
      82             : #include "llvm/ADT/iterator.h"
      83             : #include "llvm/ADT/iterator_range.h"
      84             : #include "llvm/ADT/simple_ilist.h"
      85             : #include "llvm/Analysis/AliasAnalysis.h"
      86             : #include "llvm/Analysis/MemoryLocation.h"
      87             : #include "llvm/Analysis/PHITransAddr.h"
      88             : #include "llvm/IR/BasicBlock.h"
      89             : #include "llvm/IR/DerivedUser.h"
      90             : #include "llvm/IR/Dominators.h"
      91             : #include "llvm/IR/Module.h"
      92             : #include "llvm/IR/Type.h"
      93             : #include "llvm/IR/Use.h"
      94             : #include "llvm/IR/User.h"
      95             : #include "llvm/IR/Value.h"
      96             : #include "llvm/IR/ValueHandle.h"
      97             : #include "llvm/Pass.h"
      98             : #include "llvm/Support/Casting.h"
      99             : #include <algorithm>
     100             : #include <cassert>
     101             : #include <cstddef>
     102             : #include <iterator>
     103             : #include <memory>
     104             : #include <utility>
     105             : 
     106             : namespace llvm {
     107             : 
     108             : class Function;
     109             : class Instruction;
     110             : class MemoryAccess;
     111             : class MemorySSAWalker;
     112             : class LLVMContext;
     113             : class raw_ostream;
     114             : 
namespace MSSAHelpers {

// Empty tag types used to distinguish the two intrusive lists a
// MemoryAccess participates in: the per-block list of all accesses, and
// the per-block list of defs (MemoryDef/MemoryPhi) only.
struct AllAccessTag {};
struct DefsOnlyTag {};

} // end namespace MSSAHelpers
     121             : 
enum : unsigned {
  // Used to signify what the default invalid ID is for MemoryAccess's
  // getID(). Also the initial/reset value of the OptimizedID caches in
  // MemoryUse and MemoryDef below.
  INVALID_MEMORYACCESS_ID = -1U
};
     127             : 
     128             : template <class T> class memoryaccess_def_iterator_base;
     129             : using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
     130             : using const_memoryaccess_def_iterator =
     131             :     memoryaccess_def_iterator_base<const MemoryAccess>;
     132             : 
     133             : // The base for all memory accesses. All memory accesses in a block are
     134             : // linked together using an intrusive list.
class MemoryAccess
    : public DerivedUser,
      // Hook for the per-block list of all accesses.
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
      // Hook for the per-block list of defs only.
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
public:
  using AllAccessType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsOnlyType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  MemoryAccess(const MemoryAccess &) = delete;
  MemoryAccess &operator=(const MemoryAccess &) = delete;

  // Plain operator new is disallowed; the derived classes provide their own
  // operator new to allocate the right amount of operand space.
  void *operator new(size_t) = delete;

  // Methods for support type inquiry through isa, cast, and
  // dyn_cast
  static bool classof(const Value *V) {
    unsigned ID = V->getValueID();
    return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
  }

  /// Return the basic block this access belongs to.
  BasicBlock *getBlock() const { return Block; }

  void print(raw_ostream &OS) const;
  void dump() const;

  /// The user iterators for a memory access
  using iterator = user_iterator;
  using const_iterator = const_user_iterator;

  /// This iterator walks over all of the defs in a given
  /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
  /// MemoryUse/MemoryDef, this walks the defining access.
  memoryaccess_def_iterator defs_begin();
  const_memoryaccess_def_iterator defs_begin() const;
  memoryaccess_def_iterator defs_end();
  const_memoryaccess_def_iterator defs_end() const;

  /// Get the iterators for the all access list and the defs only list
  /// We default to the all access list.
  AllAccessType::self_iterator getIterator() {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::const_self_iterator getIterator() const {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::reverse_self_iterator getReverseIterator() {
    return this->AllAccessType::getReverseIterator();
  }
  AllAccessType::const_reverse_self_iterator getReverseIterator() const {
    return this->AllAccessType::getReverseIterator();
  }
  DefsOnlyType::self_iterator getDefsIterator() {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::const_self_iterator getDefsIterator() const {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
    return this->DefsOnlyType::getReverseIterator();
  }
  DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
    return this->DefsOnlyType::getReverseIterator();
  }

protected:
  friend class MemoryDef;
  friend class MemoryPhi;
  friend class MemorySSA;
  friend class MemoryUse;
  friend class MemoryUseOrDef;

  /// Used by MemorySSA to change the block of a MemoryAccess when it is
  /// moved.
  void setBlock(BasicBlock *BB) { Block = BB; }

  /// Used for debugging and tracking things about MemoryAccesses.
  /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
  inline unsigned getID() const;

  // All MemoryAccesses are void-typed: they model memory state, not a
  // first-class IR value. DeleteValue is the subclass-specific deleter
  // invoked by deleteValue() (see ilist_alloc_traits below).
  MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
               BasicBlock *BB, unsigned NumOperands)
      : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
        Block(BB) {}

  // Use deleteValue() to delete a generic MemoryAccess.
  ~MemoryAccess() = default;

private:
  BasicBlock *Block;
};
     227             : 
// Teach the intrusive list how to destroy a MemoryAccess: route through
// deleteValue() so the subclass-specific deleter registered with
// DerivedUser runs (~MemoryAccess is non-virtual and defaulted).
template <>
struct ilist_alloc_traits<MemoryAccess> {
  static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); }
};
     232             : 
     233             : inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
     234         462 :   MA.print(OS);
     235             :   return OS;
     236             : }
     237             : 
     238             : /// Class that has the common methods + fields of memory uses/defs. It's
     239             : /// a little awkward to have, but there are many cases where we want either a
     240             : /// use or def, and there are many cases where uses are needed (defs aren't
     241             : /// acceptable), and vice-versa.
     242             : ///
     243             : /// This class should never be instantiated directly; make a MemoryUse or
     244             : /// MemoryDef instead.
class MemoryUseOrDef : public MemoryAccess {
public:
  // Instances are only created through MemoryUse/MemoryDef, which supply
  // their own operator new.
  void *operator new(size_t) = delete;

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  /// Get the instruction that this MemoryUse represents.
  Instruction *getMemoryInst() const { return MemoryInstruction; }

  /// Get the access that produces the memory state used by this Use.
  MemoryAccess *getDefiningAccess() const { return getOperand(0); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
  }

  // Sadly, these have to be public because they are needed in some of the
  // iterators.
  inline bool isOptimized() const;
  inline MemoryAccess *getOptimized() const;
  inline void setOptimized(MemoryAccess *);

  // Retrieve AliasResult type of the optimized access. Ideally this would be
  // returned by the caching walker and may go away in the future.
  Optional<AliasResult> getOptimizedAccessType() const {
    return OptimizedAccessAlias;
  }

  /// Reset the ID of what this MemoryUse was optimized to, causing it to
  /// be rewalked by the walker if necessary.
  /// This really should only be called by tests.
  inline void resetOptimized();

protected:
  friend class MemorySSA;
  friend class MemorySSAUpdater;

  // Starts with the conservative MayAlias; a more precise result is
  // recorded later via setOptimizedAccessType().
  MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
                 DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB)
      : MemoryAccess(C, Vty, DeleteValue, BB, 1), MemoryInstruction(MI),
        OptimizedAccessAlias(MayAlias) {
    setDefiningAccess(DMA);
  }

  // Use deleteValue() to delete a generic MemoryUseOrDef.
  ~MemoryUseOrDef() = default;

  void setOptimizedAccessType(Optional<AliasResult> AR) {
    OptimizedAccessAlias = AR;
  }

  /// Set the defining access. When \p Optimized is true, \p DMA is
  /// installed through the subclass's setOptimized() (which also stamps an
  /// ID for staleness detection) and the alias result \p AR is cached;
  /// otherwise operand 0 is simply replaced.
  void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false,
                         Optional<AliasResult> AR = MayAlias) {
    if (!Optimized) {
      setOperand(0, DMA);
      return;
    }
    setOptimized(DMA);
    setOptimizedAccessType(AR);
  }

private:
  // The IR instruction this memory access annotates.
  Instruction *MemoryInstruction;
  // Cached alias result for the optimized access; see
  // getOptimizedAccessType().
  Optional<AliasResult> OptimizedAccessAlias;
};
     310             : 
// MemoryUseOrDef has exactly one operand: its defining access (operand 0).
template <>
struct OperandTraits<MemoryUseOrDef>
    : public FixedNumOperandTraits<MemoryUseOrDef, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
     315             : 
     316             : /// Represents read-only accesses to memory
     317             : ///
     318             : /// In particular, the set of Instructions that will be represented by
     319             : /// MemoryUse's is exactly the set of Instructions for which
     320             : /// AliasAnalysis::getModRefInfo returns "Ref".
class MemoryUse final : public MemoryUseOrDef {
public:
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
      : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB) {}

  // allocate space for exactly one operand
  void *operator new(size_t s) { return User::operator new(s, 1); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal;
  }

  void print(raw_ostream &OS) const;

  /// Record \p DMA as the optimized clobbering access: it becomes the
  /// defining access, and its ID is stamped so staleness can be detected.
  void setOptimized(MemoryAccess *DMA) {
    OptimizedID = DMA->getID();
    setOperand(0, DMA);
  }

  /// The cached result is valid only while the defining access still
  /// carries the ID stamped by setOptimized().
  bool isOptimized() const {
    return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
  }

  // For a MemoryUse, the optimized access *is* the defining access.
  MemoryAccess *getOptimized() const {
    return getDefiningAccess();
  }

  void resetOptimized() {
    OptimizedID = INVALID_MEMORYACCESS_ID;
  }

protected:
  friend class MemorySSA;

private:
  static void deleteMe(DerivedUser *Self);

  // ID stamped by setOptimized(); INVALID_MEMORYACCESS_ID when this use
  // has not been (or is no longer) optimized.
  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
};
     362             : 
// MemoryUse has exactly one operand: its defining access (operand 0).
template <>
struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
     366             : 
     367             : /// Represents a read-write access to memory, whether it is a must-alias,
     368             : /// or a may-alias.
     369             : ///
     370             : /// In particular, the set of Instructions that will be represented by
     371             : /// MemoryDef's is exactly the set of Instructions for which
     372             : /// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
     373             : /// Note that, in order to provide def-def chains, all defs also have a use
     374             : /// associated with them. This use points to the nearest reaching
     375             : /// MemoryDef/MemoryPhi.
class MemoryDef final : public MemoryUseOrDef {
public:
  friend class MemorySSA;

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
            unsigned Ver)
      : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB), ID(Ver) {}

  // allocate space for exactly one operand
  void *operator new(size_t s) { return User::operator new(s, 1); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryDefVal;
  }

  /// Cache \p MA as the optimized clobber. Note the validity stamp is the
  /// *defining* access's ID, not MA's: if the defining access later
  /// changes, the cached result is treated as stale (see isOptimized()).
  void setOptimized(MemoryAccess *MA) {
    Optimized = MA;
    OptimizedID = getDefiningAccess()->getID();
  }

  MemoryAccess *getOptimized() const {
    // cast_or_null: the WeakVH may have been cleared if the optimized
    // access was deleted.
    return cast_or_null<MemoryAccess>(Optimized);
  }

  bool isOptimized() const {
    return getOptimized() && getDefiningAccess() &&
           OptimizedID == getDefiningAccess()->getID();
  }

  void resetOptimized() {
    OptimizedID = INVALID_MEMORYACCESS_ID;
  }

  void print(raw_ostream &OS) const;

  unsigned getID() const { return ID; }

private:
  static void deleteMe(DerivedUser *Self);

  // Unique, immutable ID; see MemoryAccess::getID().
  const unsigned ID;
  // Defining access's ID at the time setOptimized() ran.
  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
  // The cached optimized clobber, held weakly so deletion nulls it out.
  WeakVH Optimized;
};
     422             : 
// MemoryDef has exactly one operand: its defining access (operand 0).
template <>
struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
     426             : 
     427             : /// Represents phi nodes for memory accesses.
     428             : ///
     429             : /// These have the same semantic as regular phi nodes, with the exception that
     430             : /// only one phi will ever exist in a given basic block.
     431             : /// Guaranteeing one phi per block means guaranteeing there is only ever one
     432             : /// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
     433             : /// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
     434             : /// a MemoryPhi's operands.
     435             : /// That is, given
     436             : /// if (a) {
     437             : ///   store %a
     438             : ///   store %b
     439             : /// }
     440             : /// it *must* be transformed into
     441             : /// if (a) {
     442             : ///    1 = MemoryDef(liveOnEntry)
     443             : ///    store %a
     444             : ///    2 = MemoryDef(1)
     445             : ///    store %b
     446             : /// }
     447             : /// and *not*
     448             : /// if (a) {
     449             : ///    1 = MemoryDef(liveOnEntry)
     450             : ///    store %a
     451             : ///    2 = MemoryDef(liveOnEntry)
     452             : ///    store %b
     453             : /// }
     454             : /// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
     455             : /// end of the branch, and if there are not two phi nodes, one will be
     456             : /// disconnected completely from the SSA graph below that point.
     457             : /// Because MemoryUse's do not generate new definitions, they do not have this
     458             : /// issue.
     459       43048 : class MemoryPhi final : public MemoryAccess {
     460             :   // allocate space for exactly zero operands
     461       43048 :   void *operator new(size_t s) { return User::operator new(s); }
     462             : 
     463             : public:
     464             :   /// Provide fast operand accessors
     465             :   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
     466             : 
     467       43048 :   MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
     468       43048 :       : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
     469       43048 :         ReservedSpace(NumPreds) {
     470             :     allocHungoffUses(ReservedSpace);
     471       43048 :   }
     472             : 
     473             :   // Block iterator interface. This provides access to the list of incoming
     474             :   // basic blocks, which parallels the list of incoming values.
     475             :   using block_iterator = BasicBlock **;
     476             :   using const_block_iterator = BasicBlock *const *;
     477             : 
     478             :   block_iterator block_begin() {
     479       96762 :     auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace);
     480           0 :     return reinterpret_cast<block_iterator>(Ref + 1);
     481             :   }
     482             : 
     483             :   const_block_iterator block_begin() const {
     484             :     const auto *Ref =
     485      244718 :         reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace);
     486             :     return reinterpret_cast<const_block_iterator>(Ref + 1);
     487             :   }
     488             : 
     489           6 :   block_iterator block_end() { return block_begin() + getNumOperands(); }
     490             : 
     491             :   const_block_iterator block_end() const {
     492             :     return block_begin() + getNumOperands();
     493             :   }
     494             : 
     495             :   iterator_range<block_iterator> blocks() {
     496             :     return make_range(block_begin(), block_end());
     497             :   }
     498             : 
     499             :   iterator_range<const_block_iterator> blocks() const {
     500             :     return make_range(block_begin(), block_end());
     501             :   }
     502             : 
     503          12 :   op_range incoming_values() { return operands(); }
     504             : 
     505             :   const_op_range incoming_values() const { return operands(); }
     506             : 
     507             :   /// Return the number of incoming edges
     508             :   unsigned getNumIncomingValues() const { return getNumOperands(); }
     509             : 
     510             :   /// Return incoming value number x
     511             :   MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
     512             :   void setIncomingValue(unsigned I, MemoryAccess *V) {
     513             :     assert(V && "PHI node got a null value!");
     514       96758 :     setOperand(I, V);
     515             :   }
     516             : 
     517             :   static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
     518             :   static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }
     519             : 
     520             :   /// Return incoming basic block number @p i.
     521      244709 :   BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }
     522             : 
     523             :   /// Return incoming basic block corresponding
     524             :   /// to an operand of the PHI.
     525             :   BasicBlock *getIncomingBlock(const Use &U) const {
     526             :     assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
     527        1126 :     return getIncomingBlock(unsigned(&U - op_begin()));
     528             :   }
     529             : 
     530             :   /// Return incoming basic block corresponding
     531             :   /// to value use iterator.
     532             :   BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
     533             :     return getIncomingBlock(I.getUse());
     534             :   }
     535             : 
     536             :   void setIncomingBlock(unsigned I, BasicBlock *BB) {
     537             :     assert(BB && "PHI node got a null basic block!");
     538       96753 :     block_begin()[I] = BB;
     539             :   }
     540             : 
     541             :   /// Add an incoming value to the end of the PHI list
     542       96753 :   void addIncoming(MemoryAccess *V, BasicBlock *BB) {
     543       96753 :     if (getNumOperands() == ReservedSpace)
     544       52268 :       growOperands(); // Get more space!
     545             :     // Initialize some new operands.
     546       96753 :     setNumHungOffUseOperands(getNumOperands() + 1);
     547       96753 :     setIncomingValue(getNumOperands() - 1, V);
     548       96753 :     setIncomingBlock(getNumOperands() - 1, BB);
     549       96753 :   }
     550             : 
     551             :   /// Return the first index of the specified basic
     552             :   /// block in the value list for this PHI.  Returns -1 if no instance.
     553           5 :   int getBasicBlockIndex(const BasicBlock *BB) const {
     554          13 :     for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
     555           9 :       if (block_begin()[I] == BB)
     556           5 :         return I;
     557             :     return -1;
     558             :   }
     559             : 
     560             :   MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const {
     561             :     int Idx = getBasicBlockIndex(BB);
     562             :     assert(Idx >= 0 && "Invalid basic block argument!");
     563             :     return getIncomingValue(Idx);
     564             :   }
     565             : 
     566             :   static bool classof(const Value *V) {
     567     2502569 :     return V->getValueID() == MemoryPhiVal;
     568             :   }
     569             : 
  /// Print this MemoryPhi to \p OS (defined out of line).
  void print(raw_ostream &OS) const;

  /// Return this phi's ID (see the ID member below; used for debugging).
  unsigned getID() const { return ID; }
     573             : 
protected:
  friend class MemorySSA;

  /// This is more complicated than the generic
  /// User::allocHungoffUses, because we have to allocate Uses for the incoming
  /// values and pointers to the incoming blocks, all in one allocation.
  void allocHungoffUses(unsigned N) {
    User::allocHungoffUses(N, /* IsPhi */ true);
  }
     583             : 
private:
  // For debugging only
  const unsigned ID;
  // Number of operand slots currently allocated; grown by growOperands().
  unsigned ReservedSpace;
     588             : 
     589             :   /// This grows the operand list in response to a push_back style of
     590             :   /// operation.  This grows the number of ops by 1.5 times.
     591       52268 :   void growOperands() {
     592             :     unsigned E = getNumOperands();
     593             :     // 2 op PHI nodes are VERY common, so reserve at least enough for that.
     594      104536 :     ReservedSpace = std::max(E + E / 2, 2u);
     595       52268 :     growHungoffUses(ReservedSpace, /* IsPhi */ true);
     596       52268 :   }
     597             : 
     598             :   static void deleteMe(DerivedUser *Self);
     599             : };
     600             : 
     601             : inline unsigned MemoryAccess::getID() const {
     602             :   assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
     603             :          "only memory defs and phis have ids");
     604             :   if (const auto *MD = dyn_cast<MemoryDef>(this))
     605      432790 :     return MD->getID();
     606      164741 :   return cast<MemoryPhi>(this)->getID();
     607             : }
     608             : 
     609      154405 : inline bool MemoryUseOrDef::isOptimized() const {
     610             :   if (const auto *MD = dyn_cast<MemoryDef>(this))
     611             :     return MD->isOptimized();
     612             :   return cast<MemoryUse>(this)->isOptimized();
     613             : }
     614             : 
     615             : inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
     616             :   if (const auto *MD = dyn_cast<MemoryDef>(this))
     617             :     return MD->getOptimized();
     618             :   return cast<MemoryUse>(this)->getOptimized();
     619             : }
     620             : 
     621      442589 : inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
     622             :   if (auto *MD = dyn_cast<MemoryDef>(this))
     623          28 :     MD->setOptimized(MA);
     624             :   else
     625             :     cast<MemoryUse>(this)->setOptimized(MA);
     626      442589 : }
     627             : 
/// Clear any cached optimized access on the underlying MemoryDef or
/// MemoryUse.
inline void MemoryUseOrDef::resetOptimized() {
  if (auto *MD = dyn_cast<MemoryDef>(this))
    MD->resetOptimized();
  else
    cast<MemoryUse>(this)->resetOptimized();
}
     634             : 
// MemoryPhi operands are hung off the object (via allocHungoffUses above);
// wire up the standard transparent operand accessors for them.
template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
     637             : 
/// Encapsulates MemorySSA, including all data associated with memory
/// accesses.
class MemorySSA {
public:
  MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
  ~MemorySSA();

  MemorySSAWalker *getWalker();

  /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
  /// access associated with it. If passed a basic block gets the memory phi
  /// node that exists for that block, if there is one. Otherwise, this will get
  /// a MemoryUseOrDef.
  MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
  MemoryPhi *getMemoryAccess(const BasicBlock *BB) const;

  void dump() const;
  void print(raw_ostream &) const;

  /// Return true if \p MA represents the live on entry value
  ///
  /// Loads and stores from pointer arguments and other global values may be
  /// defined by memory operations that do not occur in the current function, so
  /// they may be live on entry to the function. MemorySSA represents such
  /// memory state by the live on entry definition, which is guaranteed to occur
  /// before any other memory access in the function.
  inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
    return MA == LiveOnEntryDef.get();
  }

  inline MemoryAccess *getLiveOnEntryDef() const {
    return LiveOnEntryDef.get();
  }

  // Sadly, iplists, by default, owns and deletes pointers added to the
  // list. It's not currently possible to have two iplists for the same type,
  // where one owns the pointers, and one does not. This is because the traits
  // are per-type, not per-tag.  If this ever changes, we should make the
  // DefList an iplist.
  using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsList =
      simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  /// Return the list of MemoryAccess's for a given basic block.
  ///
  /// This list is not modifiable by the user.
  const AccessList *getBlockAccesses(const BasicBlock *BB) const {
    return getWritableBlockAccesses(BB);
  }

  /// Return the list of MemoryDef's and MemoryPhi's for a given basic
  /// block.
  ///
  /// This list is not modifiable by the user.
  const DefsList *getBlockDefs(const BasicBlock *BB) const {
    return getWritableBlockDefs(BB);
  }

  /// Given two memory accesses in the same basic block, determine
  /// whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// Given two memory accesses in potentially different blocks,
  /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
  /// dominates Use \p B.
  bool dominates(const MemoryAccess *A, const Use &B) const;

  /// Verify that MemorySSA is self consistent (IE definitions dominate
  /// all uses, uses appear in the right places).  This is used by unit tests.
  void verifyMemorySSA() const;

  /// Used in various insertion functions to specify whether we are talking
  /// about the beginning or end of a block.
  enum InsertionPlace { Beginning, End };

protected:
  // Used by Memory SSA annotator, dumpers, and wrapper pass
  friend class MemorySSAAnnotatedWriter;
  friend class MemorySSAPrinterLegacyPass;
  friend class MemorySSAUpdater;

  void verifyDefUses(Function &F) const;
  void verifyDomination(Function &F) const;
  void verifyOrdering(Function &F) const;

  // This is used by the use optimizer and updater.
  AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
    auto It = PerBlockAccesses.find(BB);
    return It == PerBlockAccesses.end() ? nullptr : It->second.get();
  }

  // This is used by the use optimizer and updater.
  DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
    auto It = PerBlockDefs.find(BB);
    return It == PerBlockDefs.end() ? nullptr : It->second.get();
  }

  // These are used by the updater to perform various internal MemorySSA
  // machinations.  They do not always leave the IR in a correct state, and
  // rely on the updater to fix up what they break, so they are not public.

  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, InsertionPlace Point);

  // Rename the dominator tree branch rooted at BB.
  void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
                  SmallPtrSetImpl<BasicBlock *> &Visited) {
    renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
  }

  void removeFromLookups(MemoryAccess *);
  void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
  void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
                               InsertionPlace);
  void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
                             AccessList::iterator);
  MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *);

private:
  class CachingWalker;
  class OptimizeUses;

  CachingWalker *getWalkerImpl();
  void buildMemorySSA();
  void optimizeUses();

  void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;

  using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
  using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;

  void
  determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks);
  void markUnreachableAsLiveOnEntry(BasicBlock *BB);
  bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
  MemoryPhi *createMemoryPhi(BasicBlock *BB);
  MemoryUseOrDef *createNewAccess(Instruction *);
  MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
  void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
  MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
  void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
  void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
                  SmallPtrSetImpl<BasicBlock *> &Visited,
                  bool SkipVisited = false, bool RenameAllUses = false);
  AccessList *getOrCreateAccessList(const BasicBlock *);
  DefsList *getOrCreateDefsList(const BasicBlock *);
  void renumberBlock(const BasicBlock *) const;
  AliasAnalysis *AA;
  DominatorTree *DT;
  Function &F;

  // Memory SSA mappings
  DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;

  // These two mappings contain the main block to access/def mappings for
  // MemorySSA. The list contained in PerBlockAccesses really owns all the
  // MemoryAccesses.
  // Both maps maintain the invariant that if a block is found in them, the
  // corresponding list is not empty, and if a block is not found in them, the
  // corresponding list is empty.
  AccessMap PerBlockAccesses;
  DefsMap PerBlockDefs;
  std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef;

  // Domination mappings
  // Note that the numbering is local to a block, even though the map is
  // global.
  mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
  mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;

  // Memory SSA building info
  std::unique_ptr<CachingWalker> Walker;
  unsigned NextID;
};
     815             : 
// Internal MemorySSA utils, for use by MemorySSA classes and walkers.
// Not part of the public API; access is granted only to the friends below.
class MemorySSAUtil {
protected:
  friend class GVNHoist;
  friend class MemorySSAWalker;

  // This function should not be used by new passes.
  static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                  AliasAnalysis &AA);
};
     826             : 
// This pass does eager building and then printing of MemorySSA. It is used by
// the tests to be able to build, dump, and verify Memory SSA.
class MemorySSAPrinterLegacyPass : public FunctionPass {
public:
  MemorySSAPrinterLegacyPass();

  bool runOnFunction(Function &) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  // Pass identification (address used as the unique pass ID).
  static char ID;
};
     838             : 
/// An analysis that produces \c MemorySSA for a function.
///
class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
  friend AnalysisInfoMixin<MemorySSAAnalysis>;

  static AnalysisKey Key;

public:
  // Wrap MemorySSA result to ensure address stability of internal MemorySSA
  // pointers after construction.  Use a wrapper class instead of plain
  // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
  struct Result {
    Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}

    MemorySSA &getMSSA() { return *MSSA.get(); }

    std::unique_ptr<MemorySSA> MSSA;
  };

  Result run(Function &F, FunctionAnalysisManager &AM);
};
     860             : 
/// Printer pass for \c MemorySSA.
class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
  // Destination stream for the printed MemorySSA; owned by the caller.
  raw_ostream &OS;

public:
  explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
     870             : 
/// Verifier pass for \c MemorySSA.
struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
     875             : 
/// Legacy analysis pass which computes \c MemorySSA.
class MemorySSAWrapperPass : public FunctionPass {
public:
  MemorySSAWrapperPass();

  // Pass identification (address used as the unique pass ID).
  static char ID;

  bool runOnFunction(Function &) override;
  void releaseMemory() override;
  MemorySSA &getMSSA() { return *MSSA; }
  const MemorySSA &getMSSA() const { return *MSSA; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  void verifyAnalysis() const override;
  void print(raw_ostream &OS, const Module *M = nullptr) const override;

private:
  std::unique_ptr<MemorySSA> MSSA;
};
     896             : 
/// This is the generic walker interface for walkers of MemorySSA.
/// Walkers are used to be able to further disambiguate the def-use chains
/// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
/// you.
/// In particular, while the def-use chains provide basic information, and are
/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
/// MemoryUse as AliasAnalysis considers it, a user may want better or other
/// information. In particular, they may want to use SCEV info to further
/// disambiguate memory accesses, or they may want the nearest dominating
/// may-aliasing MemoryDef for a call or a store. This API enables a
/// standardized interface to getting and using that info.
class MemorySSAWalker {
public:
  MemorySSAWalker(MemorySSA *);
  virtual ~MemorySSAWalker() = default;

  using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;

  /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
  /// will give you the nearest dominating MemoryAccess that Mod's the location
  /// the instruction accesses (by skipping any def which AA can prove does not
  /// alias the location(s) accessed by the instruction given).
  ///
  /// Note that this will return a single access, and it must dominate the
  /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
  /// this will return the MemoryPhi, not the operand. This means that
  /// given:
  /// if (a) {
  ///   1 = MemoryDef(liveOnEntry)
  ///   store %a
  /// } else {
  ///   2 = MemoryDef(liveOnEntry)
  ///   store %b
  /// }
  /// 3 = MemoryPhi(2, 1)
  /// MemoryUse(3)
  /// load %a
  ///
  /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
  /// in the if (a) branch.
  MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
    MemoryAccess *MA = MSSA->getMemoryAccess(I);
    assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
    return getClobberingMemoryAccess(MA);
  }

  /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
  /// but takes a MemoryAccess instead of an Instruction.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;

  /// Given a potentially clobbering memory access and a new location,
  /// calling this will give you the nearest dominating clobbering MemoryAccess
  /// (by skipping non-aliasing def links).
  ///
  /// This version of the function is mainly used to disambiguate phi translated
  /// pointers, where the value of a pointer may have changed from the initial
  /// memory access. Note that this expects to be handed either a MemoryUse,
  /// or an already potentially clobbering access. Unlike the above API, if
  /// given a MemoryDef that clobbers the pointer as the starting access, it
  /// will return that MemoryDef, whereas the above would return the clobber
  /// starting from the use side of the memory def.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                                  const MemoryLocation &) = 0;

  /// Given a memory access, invalidate anything this walker knows about
  /// that access.
  /// This API is used by walkers that store information to perform basic cache
  /// invalidation.  This will be called by MemorySSA at appropriate times for
  /// the walker it uses or returns.
  virtual void invalidateInfo(MemoryAccess *) {}

  virtual void verify(const MemorySSA *MSSA) { assert(MSSA == this->MSSA); }

protected:
  friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
                          // constructor.
  MemorySSA *MSSA;
};
     975             : 
/// A MemorySSAWalker that does no alias queries, or anything else. It
/// simply returns the links as they were constructed by the builder.
class DoNothingMemorySSAWalker final : public MemorySSAWalker {
public:
  // Keep the overrides below from hiding the Instruction overload of
  // getClobberingMemoryAccess.
  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          const MemoryLocation &) override;
};
     988             : 
// A memory access paired with the location being tracked; used by the
// upward-defs iterator below to carry the (possibly phi-translated) location.
using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
     991             : 
/// Iterator base class used to implement const and non-const iterators
/// over the defining accesses of a MemoryAccess.
///
/// For a MemoryPhi this walks the incoming values (tracked by ArgNo); for a
/// MemoryUseOrDef it yields the single defining access. The end iterator is
/// represented by Access == nullptr.
template <class T>
class memoryaccess_def_iterator_base
    : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
                                  std::forward_iterator_tag, T, ptrdiff_t, T *,
                                  T *> {
  using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;

public:
  memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
  memoryaccess_def_iterator_base() = default;

  bool operator==(const memoryaccess_def_iterator_base &Other) const {
    // Two end iterators (null Access) compare equal regardless of ArgNo.
    return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
  }

  // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
  // block from the operand in constant time (In a PHINode, the uselist has
  // both, so it's just subtraction). We provide it as part of the
  // iterator to avoid callers having to linear walk to get the block.
  // If the operation becomes constant time on MemoryPHI's, this bit of
  // abstraction breaking should be removed.
  BasicBlock *getPhiArgBlock() const {
    MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
    assert(MP && "Tried to get phi arg block when not iterating over a PHI");
    return MP->getIncomingBlock(ArgNo);
  }

  typename BaseT::iterator::pointer operator*() const {
    assert(Access && "Tried to access past the end of our iterator");
    // Go to the first argument for phis, and the defining access for everything
    // else.
    if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
      return MP->getIncomingValue(ArgNo);
    return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
  }

  using BaseT::operator++;
  memoryaccess_def_iterator &operator++() {
    assert(Access && "Hit end of iterator");
    if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
      // Advance to the next incoming value; past the last one, become end.
      if (++ArgNo >= MP->getNumIncomingValues()) {
        ArgNo = 0;
        Access = nullptr;
      }
    } else {
      // Non-phis have exactly one defining access, so one step reaches end.
      Access = nullptr;
    }
    return *this;
  }

private:
  T *Access = nullptr;
  unsigned ArgNo = 0;
};
    1048             : 
// Begin/end accessors for iterating over the defining accesses of a
// MemoryAccess (see memoryaccess_def_iterator_base above); the end iterator
// is default-constructed.
inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
  return memoryaccess_def_iterator(this);
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
  return const_memoryaccess_def_iterator(this);
}

inline memoryaccess_def_iterator MemoryAccess::defs_end() {
  return memoryaccess_def_iterator();
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
  return const_memoryaccess_def_iterator();
}
    1064             : 
/// GraphTraits for a MemoryAccess, which walks defs in the normal case,
/// and uses in the inverse case.
template <> struct GraphTraits<MemoryAccess *> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = memoryaccess_def_iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
};
    1075             : 
// Inverse graph: children are the users of the access rather than its defs.
template <> struct GraphTraits<Inverse<MemoryAccess *>> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = MemoryAccess::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
};
    1084             : 
    1085             : /// Provide an iterator that walks defs, giving both the memory access,
    1086             : /// and the current pointer location, updating the pointer location as it
    1087             : /// changes due to phi node translation.
    1088             : ///
    1089             : /// This iterator, while somewhat specialized, is what most clients actually
    1090             : /// want when walking upwards through MemorySSA def chains. It takes a pair of
    1091             : /// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
    1092             : /// memory location through phi nodes for the user.
class upward_defs_iterator
    : public iterator_facade_base<upward_defs_iterator,
                                  std::forward_iterator_tag,
                                  const MemoryAccessPair> {
  using BaseT = upward_defs_iterator::iterator_facade_base;

public:
  /// Begin a walk at \p Info: iterates the defs of Info.first, pairing each
  /// with Info.second (phi-translated per predecessor when walking a phi).
  upward_defs_iterator(const MemoryAccessPair &Info)
      : DefIterator(Info.first), Location(Info.second),
        OriginalAccess(Info.first) {
    CurrentPair.first = nullptr;

    // Location translation only applies when stepping through the incoming
    // values of a MemoryPhi.
    WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
    fillInCurrentPair();
  }

  /// Default-constructed iterator is the end sentinel.
  upward_defs_iterator() { CurrentPair.first = nullptr; }

  // Only the underlying def iterator is compared; the location in
  // CurrentPair is derived state and need not participate.
  bool operator==(const upward_defs_iterator &Other) const {
    return DefIterator == Other.DefIterator;
  }

  BaseT::iterator::reference operator*() const {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of our iterator");
    return CurrentPair;
  }

  using BaseT::operator++;
  upward_defs_iterator &operator++() {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of the iterator");
    ++DefIterator;
    // Recompute CurrentPair unless we just stepped off the end.
    if (DefIterator != OriginalAccess->defs_end())
      fillInCurrentPair();
    return *this;
  }

  /// Predecessor block supplying the current phi argument (only meaningful
  /// while walking a MemoryPhi's incoming values).
  BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }

private:
  /// Set CurrentPair to the access DefIterator points at; when walking a
  /// phi with a known pointer, try to translate that pointer into the
  /// current predecessor block so the location stays accurate on that path.
  void fillInCurrentPair() {
    CurrentPair.first = *DefIterator;
    if (WalkingPhi && Location.Ptr) {
      PHITransAddr Translator(
          const_cast<Value *>(Location.Ptr),
          OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
      // A false return from PHITranslateValue is treated as successful
      // translation here; if it yielded a different pointer, pair the def
      // with the translated location instead of the original one.
      if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
                                        DefIterator.getPhiArgBlock(), nullptr,
                                        false))
        if (Translator.getAddr() != Location.Ptr) {
          CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
          return;
        }
    }
    // Not a phi walk, no pointer, or translation left the pointer unchanged.
    CurrentPair.second = Location;
  }

  MemoryAccessPair CurrentPair;           // Value returned by operator*.
  memoryaccess_def_iterator DefIterator;  // Underlying def walk.
  MemoryLocation Location;                // Location being tracked upward.
  MemoryAccess *OriginalAccess = nullptr; // Access the walk started from.
  bool WalkingPhi = false;                // True iff OriginalAccess is a phi.
};
    1157             : 
    1158             : inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
    1159      114675 :   return upward_defs_iterator(Pair);
    1160             : }
    1161             : 
    1162             : inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
    1163             : 
    1164             : inline iterator_range<upward_defs_iterator>
    1165             : upward_defs(const MemoryAccessPair &Pair) {
    1166             :   return make_range(upward_defs_begin(Pair), upward_defs_end());
    1167             : }
    1168             : 
    1169             : /// Walks the defining accesses of MemoryDefs. Stops after we hit something that
    1170             : /// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
    1171             : /// comparing against a null def_chain_iterator, this will compare equal only
    1172             : /// after walking said Phi/liveOnEntry.
    1173             : ///
    1174             : /// The UseOptimizedChain flag specifies whether to walk the clobbering
    1175             : /// access chain, or all the accesses.
    1176             : ///
    1177             : /// Normally, MemoryDef are all just def/use linked together, so a def_chain on
    1178             : /// a MemoryDef will walk all MemoryDefs above it in the program until it hits
    1179             : /// a phi node.  The optimized chain walks the clobbering access of a store.
    1180             : /// So if you are just trying to find, given a store, what the next
    1181             : /// thing that would clobber the same memory is, you want the optimized chain.
    1182             : template <class T, bool UseOptimizedChain = false>
    1183             : struct def_chain_iterator
    1184             :     : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
    1185             :                                   std::forward_iterator_tag, MemoryAccess *> {
    1186             :   def_chain_iterator() : MA(nullptr) {}
    1187             :   def_chain_iterator(T MA) : MA(MA) {}
    1188             : 
    1189             :   T operator*() const { return MA; }
    1190             : 
    1191             :   def_chain_iterator &operator++() {
    1192             :     // N.B. liveOnEntry has a null defining access.
    1193             :     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
    1194             :       if (UseOptimizedChain && MUD->isOptimized())
    1195             :         MA = MUD->getOptimized();
    1196             :       else
    1197             :         MA = MUD->getDefiningAccess();
    1198             :     } else {
    1199             :       MA = nullptr;
    1200             :     }
    1201             : 
    1202             :     return *this;
    1203             :   }
    1204             : 
    1205             :   bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }
    1206             : 
    1207             : private:
    1208             :   T MA;
    1209             : };
    1210             : 
    1211             : template <class T>
    1212             : inline iterator_range<def_chain_iterator<T>>
    1213             : def_chain(T MA, MemoryAccess *UpTo = nullptr) {
    1214             : #ifdef EXPENSIVE_CHECKS
    1215             :   assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
    1216             :          "UpTo isn't in the def chain!");
    1217             : #endif
    1218             :   return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
    1219             : }
    1220             : 
    1221             : template <class T>
    1222             : inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
    1223             :   return make_range(def_chain_iterator<T, true>(MA),
    1224             :                     def_chain_iterator<T, true>(nullptr));
    1225             : }
    1226             : 
    1227             : } // end namespace llvm
    1228             : 
    1229             : #endif // LLVM_ANALYSIS_MEMORYSSA_H

Generated by: LCOV version 1.13