LLVM  6.0.0svn
MemorySSA.h
1 //===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// \brief This file exposes an interface to building/using memory SSA to
12 /// walk memory instructions using a use/def graph.
13 ///
14 /// The MemorySSA class builds an SSA form that links together memory access
15 /// instructions such as loads, stores, atomics, and calls. Additionally, it
16 /// does a trivial form of "heap versioning": every time the memory state
17 /// changes in the program, we generate a new heap version. It generates
18 /// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
19 ///
20 /// As a trivial example,
21 /// define i32 @main() #0 {
22 /// entry:
23 /// %call = call noalias i8* @_Znwm(i64 4) #2
24 /// %0 = bitcast i8* %call to i32*
25 /// %call1 = call noalias i8* @_Znwm(i64 4) #2
26 /// %1 = bitcast i8* %call1 to i32*
27 /// store i32 5, i32* %0, align 4
28 /// store i32 7, i32* %1, align 4
29 /// %2 = load i32* %0, align 4
30 /// %3 = load i32* %1, align 4
31 /// %add = add nsw i32 %2, %3
32 /// ret i32 %add
33 /// }
34 ///
35 /// Will become
36 /// define i32 @main() #0 {
37 /// entry:
38 /// ; 1 = MemoryDef(0)
39 /// %call = call noalias i8* @_Znwm(i64 4) #3
40 /// %2 = bitcast i8* %call to i32*
41 /// ; 2 = MemoryDef(1)
42 /// %call1 = call noalias i8* @_Znwm(i64 4) #3
43 /// %4 = bitcast i8* %call1 to i32*
44 /// ; 3 = MemoryDef(2)
45 /// store i32 5, i32* %2, align 4
46 /// ; 4 = MemoryDef(3)
47 /// store i32 7, i32* %4, align 4
48 /// ; MemoryUse(3)
49 /// %7 = load i32* %2, align 4
50 /// ; MemoryUse(4)
51 /// %8 = load i32* %4, align 4
52 /// %add = add nsw i32 %7, %8
53 /// ret i32 %add
54 /// }
55 ///
56 /// Given this form, all the stores that could ever affect the load at %8 can be
57 /// found by taking the MemoryUse associated with it, and walking from use to
58 /// def until you hit the top of the function.
59 ///
60 /// Each def also has a list of users associated with it, so you can walk from
61 /// both def to users, and users to defs. Note that we disambiguate MemoryUses,
62 /// but not the RHS of MemoryDefs. You can see this above at %7, which would
63 /// otherwise be a MemoryUse(4). Being disambiguated means that for a given
64 /// store, all the MemoryUses on its use lists are may-aliases of that store
65 /// (but the MemoryDefs on its use list may not be).
66 ///
67 /// MemoryDefs are not disambiguated because it would require multiple reaching
68 /// definitions, which would require multiple phis, and multiple MemoryAccesses
69 /// per instruction.
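///
/// As a rough usage sketch only (the MemorySSA object MSSA and the load
/// instruction LI below are illustrative names, not part of the example IR),
/// one can start at a load's MemoryUse and follow the use->def links:
/// \code
///   MemoryUseOrDef *MA = MSSA.getMemoryAccess(LI); // the MemoryUse for LI
///   MemoryAccess *Def = MA->getDefiningAccess();
///   while (!MSSA.isLiveOnEntryDef(Def) && !isa<MemoryPhi>(Def))
///     Def = cast<MemoryUseOrDef>(Def)->getDefiningAccess();
/// \endcode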
70 //
71 //===----------------------------------------------------------------------===//
72 
73 #ifndef LLVM_ANALYSIS_MEMORYSSA_H
74 #define LLVM_ANALYSIS_MEMORYSSA_H
75 
76 #include "llvm/ADT/DenseMap.h"
77 #include "llvm/ADT/GraphTraits.h"
78 #include "llvm/ADT/SmallPtrSet.h"
79 #include "llvm/ADT/SmallVector.h"
80 #include "llvm/ADT/ilist.h"
81 #include "llvm/ADT/ilist_node.h"
82 #include "llvm/ADT/iterator.h"
83 #include "llvm/ADT/iterator_range.h"
84 #include "llvm/ADT/simple_ilist.h"
85 #include "llvm/Analysis/AliasAnalysis.h"
86 #include "llvm/Analysis/MemoryLocation.h"
87 #include "llvm/Analysis/PHITransAddr.h"
88 #include "llvm/IR/BasicBlock.h"
89 #include "llvm/IR/DerivedUser.h"
90 #include "llvm/IR/Dominators.h"
91 #include "llvm/IR/Module.h"
92 #include "llvm/IR/Type.h"
93 #include "llvm/IR/Use.h"
94 #include "llvm/IR/User.h"
95 #include "llvm/IR/Value.h"
96 #include "llvm/Pass.h"
97 #include "llvm/Support/Casting.h"
98 #include <algorithm>
99 #include <cassert>
100 #include <cstddef>
101 #include <iterator>
102 #include <memory>
103 #include <utility>
104 
105 namespace llvm {
106 
107 class Function;
108 class Instruction;
109 class MemoryAccess;
110 class MemorySSAWalker;
111 class LLVMContext;
112 class raw_ostream;
113 
114 namespace MSSAHelpers {
115 
116 struct AllAccessTag {};
117 struct DefsOnlyTag {};
118 
119 } // end namespace MSSAHelpers
120 
121 enum {
122  // Used to signify what the default invalid ID is for MemoryAccess's
123  // getID()
124   INVALID_MEMORYACCESS_ID = 0
125 };
126 
127 template <class T> class memoryaccess_def_iterator_base;
128 using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
129 using const_memoryaccess_def_iterator =
130     memoryaccess_def_iterator_base<const MemoryAccess>;
131 
132 /// \brief The base for all memory accesses. All memory accesses in a block are
133 /// linked together using an intrusive list.
134 class MemoryAccess
135     : public DerivedUser,
136  public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
137  public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
138 public:
139   using AllAccessType =
140       ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
141   using DefsOnlyType =
142       ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
143 
144  MemoryAccess(const MemoryAccess &) = delete;
145  MemoryAccess &operator=(const MemoryAccess &) = delete;
146 
147  void *operator new(size_t) = delete;
148 
149  // Methods for support type inquiry through isa, cast, and
150  // dyn_cast
151  static bool classof(const Value *V) {
152  unsigned ID = V->getValueID();
153  return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
154  }
155 
156  BasicBlock *getBlock() const { return Block; }
157 
158  void print(raw_ostream &OS) const;
159  void dump() const;
160 
161  /// \brief The user iterators for a memory access
162   using iterator = user_iterator;
163   using const_iterator = const_user_iterator;
164 
165  /// \brief This iterator walks over all of the defs in a given
166  /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
167  /// MemoryUse/MemoryDef, this walks the defining access.
168  memoryaccess_def_iterator defs_begin();
169  const_memoryaccess_def_iterator defs_begin() const;
170  memoryaccess_def_iterator defs_end();
171  const_memoryaccess_def_iterator defs_end() const;
172 
173  /// \brief Get the iterators for the all access list and the defs only list
174  /// We default to the all access list.
175   AllAccessType::self_iterator getIterator() {
176     return this->AllAccessType::getIterator();
177   }
178   AllAccessType::const_self_iterator getIterator() const {
179     return this->AllAccessType::getIterator();
180   }
181   AllAccessType::reverse_self_iterator getReverseIterator() {
182     return this->AllAccessType::getReverseIterator();
183   }
184   AllAccessType::const_reverse_self_iterator getReverseIterator() const {
185     return this->AllAccessType::getReverseIterator();
186   }
187   DefsOnlyType::self_iterator getDefsIterator() {
188     return this->DefsOnlyType::getIterator();
189   }
190   DefsOnlyType::const_self_iterator getDefsIterator() const {
191     return this->DefsOnlyType::getIterator();
192   }
193   DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
194     return this->DefsOnlyType::getReverseIterator();
195   }
196   DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
197     return this->DefsOnlyType::getReverseIterator();
198   }
199 
200 protected:
201  friend class MemoryDef;
202  friend class MemoryPhi;
203  friend class MemorySSA;
204  friend class MemoryUse;
205  friend class MemoryUseOrDef;
206 
207  /// \brief Used by MemorySSA to change the block of a MemoryAccess when it is
208  /// moved.
209  void setBlock(BasicBlock *BB) { Block = BB; }
210 
211  /// \brief Used for debugging and tracking things about MemoryAccesses.
212  /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
213  inline unsigned getID() const;
214 
215  MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
216  BasicBlock *BB, unsigned NumOperands)
217  : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
218  Block(BB) {}
219 
220 private:
221  BasicBlock *Block;
222 };
223 
224 inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
225   MA.print(OS);
226  return OS;
227 }
228 
229 /// \brief Class that has the common methods + fields of memory uses/defs. It's
230 /// a little awkward to have, but there are many cases where we want either a
231 /// use or def, and there are many cases where uses are needed (defs aren't
232 /// acceptable), and vice-versa.
233 ///
234 /// This class should never be instantiated directly; make a MemoryUse or
235 /// MemoryDef instead.
236 class MemoryUseOrDef : public MemoryAccess {
237 public:
238  void *operator new(size_t) = delete;
239 
240   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
241 
242  /// \brief Get the instruction that this MemoryUse represents.
243  Instruction *getMemoryInst() const { return MemoryInst; }
244 
245  /// \brief Get the access that produces the memory state used by this Use.
246  MemoryAccess *getDefiningAccess() const { return getOperand(0); }
247 
248  static bool classof(const Value *MA) {
249  return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
250  }
251 
252  // Sadly, these have to be public because they are needed in some of the
253  // iterators.
254  inline bool isOptimized() const;
255  inline MemoryAccess *getOptimized() const;
256  inline void setOptimized(MemoryAccess *);
257 
258  /// \brief Reset the ID of what this MemoryUse was optimized to, causing it to
259  /// be rewalked by the walker if necessary.
260  /// This really should only be called by tests.
261  inline void resetOptimized();
262 
263 protected:
264  friend class MemorySSA;
265  friend class MemorySSAUpdater;
266 
267  MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
268  DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB)
269  : MemoryAccess(C, Vty, DeleteValue, BB, 1), MemoryInst(MI) {
270  setDefiningAccess(DMA);
271  }
272 
273  void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false) {
274  if (!Optimized) {
275  setOperand(0, DMA);
276  return;
277  }
278  setOptimized(DMA);
279  }
280 
281 private:
282  Instruction *MemoryInst;
283 };
284 
285 template <>
286 struct OperandTraits<MemoryUseOrDef>
287     : public FixedNumOperandTraits<MemoryUseOrDef, 1> {};
288 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
289 
290 /// \brief Represents read-only accesses to memory
291 ///
292 /// In particular, the set of Instructions that will be represented by
293 /// MemoryUse's is exactly the set of Instructions for which
294 /// AliasAnalysis::getModRefInfo returns "Ref".
295 class MemoryUse final : public MemoryUseOrDef {
296 public:
297   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
298 
299   MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
300       : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB) {}
301 
302  // allocate space for exactly one operand
303  void *operator new(size_t s) { return User::operator new(s, 1); }
304 
305  static bool classof(const Value *MA) {
306  return MA->getValueID() == MemoryUseVal;
307  }
308 
309  void print(raw_ostream &OS) const;
310 
311   void setOptimized(MemoryAccess *DMA) {
312     OptimizedID = DMA->getID();
313  setOperand(0, DMA);
314  }
315 
316  bool isOptimized() const {
317  return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
318  }
319 
320   MemoryAccess *getOptimized() const {
321     return getDefiningAccess();
322  }
323 
324  void resetOptimized() {
325  OptimizedID = INVALID_MEMORYACCESS_ID;
326  }
327 
328 protected:
329  friend class MemorySSA;
330 
331 private:
332  static void deleteMe(DerivedUser *Self);
333 
334  unsigned int OptimizedID = 0;
335 };
336 
337 template <>
338 struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
339 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
340 
341 /// \brief Represents a read-write access to memory, whether it is a must-alias,
342 /// or a may-alias.
343 ///
344 /// In particular, the set of Instructions that will be represented by
345 /// MemoryDef's is exactly the set of Instructions for which
346 /// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
347 /// Note that, in order to provide def-def chains, all defs also have a use
348 /// associated with them. This use points to the nearest reaching
349 /// MemoryDef/MemoryPhi.
350 class MemoryDef final : public MemoryUseOrDef {
351 public:
352  friend class MemorySSA;
353 
354   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
355 
356   MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
357             unsigned Ver)
358  : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB), ID(Ver) {}
359 
360  // allocate space for exactly one operand
361  void *operator new(size_t s) { return User::operator new(s, 1); }
362 
363  static bool classof(const Value *MA) {
364  return MA->getValueID() == MemoryDefVal;
365  }
366 
367   void setOptimized(MemoryAccess *MA) {
368     Optimized = MA;
369  OptimizedID = getDefiningAccess()->getID();
370  }
371 
372  MemoryAccess *getOptimized() const { return Optimized; }
373 
374  bool isOptimized() const {
375  return getOptimized() && getDefiningAccess() &&
376  OptimizedID == getDefiningAccess()->getID();
377  }
378 
379  void resetOptimized() {
380  OptimizedID = INVALID_MEMORYACCESS_ID;
381  }
382 
383  void print(raw_ostream &OS) const;
384 
385  unsigned getID() const { return ID; }
386 
387 private:
388  static void deleteMe(DerivedUser *Self);
389 
390  const unsigned ID;
391  MemoryAccess *Optimized = nullptr;
392  unsigned int OptimizedID = INVALID_MEMORYACCESS_ID;
393 };
394 
395 template <>
396 struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 1> {};
397 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
398 
399 /// \brief Represents phi nodes for memory accesses.
400 ///
401 /// These have the same semantic as regular phi nodes, with the exception that
402 /// only one phi will ever exist in a given basic block.
403 /// Guaranteeing one phi per block means guaranteeing there is only ever one
404 /// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
405 /// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
406 /// a MemoryPhi's operands.
407 /// That is, given
408 /// if (a) {
409 /// store %a
410 /// store %b
411 /// }
412 /// it *must* be transformed into
413 /// if (a) {
414 /// 1 = MemoryDef(liveOnEntry)
415 /// store %a
416 /// 2 = MemoryDef(1)
417 /// store %b
418 /// }
419 /// and *not*
420 /// if (a) {
421 /// 1 = MemoryDef(liveOnEntry)
422 /// store %a
423 /// 2 = MemoryDef(liveOnEntry)
424 /// store %b
425 /// }
426 /// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
427 /// end of the branch, and if there are not two phi nodes, one will be
428 /// disconnected completely from the SSA graph below that point.
429 /// Because MemoryUse's do not generate new definitions, they do not have this
430 /// issue.
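///
/// For the same reason, if the branch above rejoins, the join block would get
/// a single MemoryPhi merging the memory state from each predecessor. As a
/// sketch only (the block names here are illustrative):
/// if.end:
/// ; 3 = MemoryPhi({if.then, 2}, {entry, liveOnEntry})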
431 class MemoryPhi final : public MemoryAccess {
432  // allocate space for exactly zero operands
433  void *operator new(size_t s) { return User::operator new(s); }
434 
435 public:
436  /// Provide fast operand accessors
437   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
438 
439  MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
440  : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
441  ReservedSpace(NumPreds) {
442  allocHungoffUses(ReservedSpace);
443  }
444 
445  // Block iterator interface. This provides access to the list of incoming
446  // basic blocks, which parallels the list of incoming values.
447   using block_iterator = BasicBlock **;
448   using const_block_iterator = BasicBlock *const *;
449 
450   block_iterator block_begin() {
451     auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace);
452  return reinterpret_cast<block_iterator>(Ref + 1);
453  }
454 
455   const_block_iterator block_begin() const {
456     const auto *Ref =
457  reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace);
458  return reinterpret_cast<const_block_iterator>(Ref + 1);
459  }
460 
461  block_iterator block_end() { return block_begin() + getNumOperands(); }
462 
463   const_block_iterator block_end() const {
464     return block_begin() + getNumOperands();
465  }
466 
467   iterator_range<block_iterator> blocks() {
468     return make_range(block_begin(), block_end());
469  }
470 
471   iterator_range<const_block_iterator> blocks() const {
472     return make_range(block_begin(), block_end());
473  }
474 
475  op_range incoming_values() { return operands(); }
476 
477  const_op_range incoming_values() const { return operands(); }
478 
479  /// \brief Return the number of incoming edges
480  unsigned getNumIncomingValues() const { return getNumOperands(); }
481 
482  /// \brief Return incoming value number x
483  MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
484  void setIncomingValue(unsigned I, MemoryAccess *V) {
485  assert(V && "PHI node got a null value!");
486  setOperand(I, V);
487  }
488 
489  static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
490  static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }
491 
492  /// \brief Return incoming basic block number @p i.
493  BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }
494 
495  /// \brief Return incoming basic block corresponding
496  /// to an operand of the PHI.
497  BasicBlock *getIncomingBlock(const Use &U) const {
498  assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
499  return getIncomingBlock(unsigned(&U - op_begin()));
500  }
501 
502  /// \brief Return incoming basic block corresponding
503  /// to value use iterator.
504   BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
505     return getIncomingBlock(I.getUse());
506  }
507 
508  void setIncomingBlock(unsigned I, BasicBlock *BB) {
509  assert(BB && "PHI node got a null basic block!");
510  block_begin()[I] = BB;
511  }
512 
513  /// \brief Add an incoming value to the end of the PHI list
514   void addIncoming(MemoryAccess *V, BasicBlock *BB) {
515     if (getNumOperands() == ReservedSpace)
516  growOperands(); // Get more space!
517  // Initialize some new operands.
518  setNumHungOffUseOperands(getNumOperands() + 1);
519  setIncomingValue(getNumOperands() - 1, V);
520  setIncomingBlock(getNumOperands() - 1, BB);
521  }
522 
523  /// \brief Return the first index of the specified basic
524  /// block in the value list for this PHI. Returns -1 if no instance.
525  int getBasicBlockIndex(const BasicBlock *BB) const {
526  for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
527  if (block_begin()[I] == BB)
528  return I;
529  return -1;
530  }
531 
533  int Idx = getBasicBlockIndex(BB);
534  assert(Idx >= 0 && "Invalid basic block argument!");
535  return getIncomingValue(Idx);
536  }
537 
538  static bool classof(const Value *V) {
539  return V->getValueID() == MemoryPhiVal;
540  }
541 
542  void print(raw_ostream &OS) const;
543 
544  unsigned getID() const { return ID; }
545 
546 protected:
547  friend class MemorySSA;
548 
549  /// \brief this is more complicated than the generic
550  /// User::allocHungoffUses, because we have to allocate Uses for the incoming
551  /// values and pointers to the incoming blocks, all in one allocation.
552  void allocHungoffUses(unsigned N) {
553  User::allocHungoffUses(N, /* IsPhi */ true);
554  }
555 
556 private:
557  // For debugging only
558  const unsigned ID;
559  unsigned ReservedSpace;
560 
561  /// \brief This grows the operand list in response to a push_back style of
562  /// operation. This grows the number of ops by 1.5 times.
563  void growOperands() {
564  unsigned E = getNumOperands();
565  // 2 op PHI nodes are VERY common, so reserve at least enough for that.
566  ReservedSpace = std::max(E + E / 2, 2u);
567  growHungoffUses(ReservedSpace, /* IsPhi */ true);
568  }
569 
570  static void deleteMe(DerivedUser *Self);
571 };
572 
573 inline unsigned MemoryAccess::getID() const {
574  assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
575  "only memory defs and phis have ids");
576  if (const auto *MD = dyn_cast<MemoryDef>(this))
577  return MD->getID();
578  return cast<MemoryPhi>(this)->getID();
579 }
580 
581 inline bool MemoryUseOrDef::isOptimized() const {
582  if (const auto *MD = dyn_cast<MemoryDef>(this))
583  return MD->isOptimized();
584  return cast<MemoryUse>(this)->isOptimized();
585 }
586 
587 inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
588   if (const auto *MD = dyn_cast<MemoryDef>(this))
589  return MD->getOptimized();
590  return cast<MemoryUse>(this)->getOptimized();
591 }
592 
593 inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
594   if (auto *MD = dyn_cast<MemoryDef>(this))
595  MD->setOptimized(MA);
596  else
597  cast<MemoryUse>(this)->setOptimized(MA);
598 }
599 
600 inline void MemoryUseOrDef::resetOptimized() {
601   if (auto *MD = dyn_cast<MemoryDef>(this))
602  MD->resetOptimized();
603  else
604  cast<MemoryUse>(this)->resetOptimized();
605 }
606 
607 template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
608 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
609 
610 /// \brief Encapsulates MemorySSA, including all data associated with memory
611 /// accesses.
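///
/// A minimal usage sketch, assuming an already-built MemorySSA object MSSA
/// and an instruction I / basic block BB (the names are illustrative):
/// \code
///   if (MemoryUseOrDef *MA = MSSA.getMemoryAccess(&I))
///     MemoryAccess *Def = MA->getDefiningAccess();   // follow the use->def link
///   if (MemoryPhi *Phi = MSSA.getMemoryAccess(&BB))
///     unsigned NumIncoming = Phi->getNumIncomingValues();
/// \endcode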
612 class MemorySSA {
613 public:
614   MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
615   ~MemorySSA();
616 
617  MemorySSAWalker *getWalker();
618 
619  /// \brief Given a memory Mod/Ref'ing instruction, get the MemorySSA
620  /// access associated with it. If passed a basic block gets the memory phi
621  /// node that exists for that block, if there is one. Otherwise, this will get
622  /// a MemoryUseOrDef.
623  MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
624  MemoryPhi *getMemoryAccess(const BasicBlock *BB) const;
625 
626  void dump() const;
627  void print(raw_ostream &) const;
628 
629  /// \brief Return true if \p MA represents the live on entry value
630  ///
631  /// Loads and stores from pointer arguments and other global values may be
632  /// defined by memory operations that do not occur in the current function, so
633  /// they may be live on entry to the function. MemorySSA represents such
634  /// memory state by the live on entry definition, which is guaranteed to occur
635  /// before any other memory access in the function.
636  inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
637  return MA == LiveOnEntryDef.get();
638  }
639 
640  inline MemoryAccess *getLiveOnEntryDef() const {
641  return LiveOnEntryDef.get();
642  }
643 
644  // Sadly, iplists, by default, owns and deletes pointers added to the
645  // list. It's not currently possible to have two iplists for the same type,
646  // where one owns the pointers, and one does not. This is because the traits
647  // are per-type, not per-tag. If this ever changes, we should make the
648  // DefList an iplist.
649   using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
650   using DefsList =
651       simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
652 
653  /// \brief Return the list of MemoryAccess's for a given basic block.
654  ///
655  /// This list is not modifiable by the user.
656  const AccessList *getBlockAccesses(const BasicBlock *BB) const {
657  return getWritableBlockAccesses(BB);
658  }
659 
660  /// \brief Return the list of MemoryDef's and MemoryPhi's for a given basic
661  /// block.
662  ///
663  /// This list is not modifiable by the user.
664  const DefsList *getBlockDefs(const BasicBlock *BB) const {
665  return getWritableBlockDefs(BB);
666  }
667 
668  /// \brief Given two memory accesses in the same basic block, determine
669  /// whether MemoryAccess \p A dominates MemoryAccess \p B.
670  bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
671 
672  /// \brief Given two memory accesses in potentially different blocks,
673  /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
674  bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
675 
676  /// \brief Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
677  /// dominates Use \p B.
678  bool dominates(const MemoryAccess *A, const Use &B) const;
679 
680  /// \brief Verify that MemorySSA is self consistent (IE definitions dominate
681  /// all uses, uses appear in the right places). This is used by unit tests.
682  void verifyMemorySSA() const;
683 
684  /// Used in various insertion functions to specify whether we are talking
685  /// about the beginning or end of a block.
686  enum InsertionPlace { Beginning, End };
687 
688 protected:
689   // Used by Memory SSA annotator, dumpers, and wrapper pass
690   friend class MemorySSAAnnotatedWriter;
691   friend class MemorySSAPrinterLegacyPass;
692  friend class MemorySSAUpdater;
693 
694  void verifyDefUses(Function &F) const;
695  void verifyDomination(Function &F) const;
696  void verifyOrdering(Function &F) const;
697 
698  // This is used by the use optimizer and updater.
699   AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
700     auto It = PerBlockAccesses.find(BB);
701  return It == PerBlockAccesses.end() ? nullptr : It->second.get();
702  }
703 
704  // This is used by the use optimizer and updater.
705   DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
706     auto It = PerBlockDefs.find(BB);
707  return It == PerBlockDefs.end() ? nullptr : It->second.get();
708  }
709 
710   // These are used by the updater to perform various internal MemorySSA
711   // machinations. They do not always leave the IR in a correct state, and
712   // rely on the updater to fix up what they break, so they are not public.
713 
714  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
715  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, InsertionPlace Point);
716 
717  // Rename the dominator tree branch rooted at BB.
718  void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
719                   SmallPtrSetImpl<BasicBlock *> &Visited) {
720     renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
721  }
722 
723  void removeFromLookups(MemoryAccess *);
724  void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
725   void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
726                                InsertionPlace);
727  void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
728  AccessList::iterator);
729  MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *);
730 
731 private:
732  class CachingWalker;
733  class OptimizeUses;
734 
735  CachingWalker *getWalkerImpl();
736  void buildMemorySSA();
737  void optimizeUses();
738 
739  void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;
740 
741   using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
742   using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;
743 
744  void
745  determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks);
746  void markUnreachableAsLiveOnEntry(BasicBlock *BB);
747  bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
748  MemoryPhi *createMemoryPhi(BasicBlock *BB);
749  MemoryUseOrDef *createNewAccess(Instruction *);
750  MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
751  void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &,
753  MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
754  void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
755  void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
756                   SmallPtrSetImpl<BasicBlock *> &Visited,
757                   bool SkipVisited = false, bool RenameAllUses = false);
758  AccessList *getOrCreateAccessList(const BasicBlock *);
759  DefsList *getOrCreateDefsList(const BasicBlock *);
760  void renumberBlock(const BasicBlock *) const;
761  AliasAnalysis *AA;
762  DominatorTree *DT;
763  Function &F;
764 
765  // Memory SSA mappings
766  DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;
767 
768  // These two mappings contain the main block to access/def mappings for
769  // MemorySSA. The list contained in PerBlockAccesses really owns all the
770  // MemoryAccesses.
771  // Both maps maintain the invariant that if a block is found in them, the
772  // corresponding list is not empty, and if a block is not found in them, the
773  // corresponding list is empty.
774  AccessMap PerBlockAccesses;
775  DefsMap PerBlockDefs;
776  std::unique_ptr<MemoryAccess> LiveOnEntryDef;
777 
778  // Domination mappings
779  // Note that the numbering is local to a block, even though the map is
780  // global.
781  mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
782  mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
783 
784  // Memory SSA building info
785  std::unique_ptr<CachingWalker> Walker;
786  unsigned NextID;
787 };
788 
789 // Internal MemorySSA utils, for use by MemorySSA classes and walkers
790 class MemorySSAUtil {
791 protected:
792  friend class GVNHoist;
793  friend class MemorySSAWalker;
794 
795  // This function should not be used by new passes.
796  static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
797  AliasAnalysis &AA);
798 };
799 
800 // This pass does eager building and then printing of MemorySSA. It is used by
801 // the tests to be able to build, dump, and verify Memory SSA.
802 class MemorySSAPrinterLegacyPass : public FunctionPass {
803 public:
804   MemorySSAPrinterLegacyPass();
805 
806  bool runOnFunction(Function &) override;
807  void getAnalysisUsage(AnalysisUsage &AU) const override;
808 
809  static char ID;
810 };
811 
812 /// An analysis that produces \c MemorySSA for a function.
813 ///
814 class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
815   friend AnalysisInfoMixin<MemorySSAAnalysis>;
816 
817  static AnalysisKey Key;
818 
819 public:
820  // Wrap MemorySSA result to ensure address stability of internal MemorySSA
821  // pointers after construction. Use a wrapper class instead of plain
822  // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
823  struct Result {
824  Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}
825 
826  MemorySSA &getMSSA() { return *MSSA.get(); }
827 
828  std::unique_ptr<MemorySSA> MSSA;
829  };
830 
831   Result run(Function &F, FunctionAnalysisManager &AM);
832 };
833 
834 /// \brief Printer pass for \c MemorySSA.
835 class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
836  raw_ostream &OS;
837 
838 public:
839  explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}
840 
841   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
842 };
843 
844 /// \brief Verifier pass for \c MemorySSA.
845 struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
846   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
847 };
848 
849 /// \brief Legacy analysis pass which computes \c MemorySSA.
850 class MemorySSAWrapperPass : public FunctionPass {
851 public:
852   MemorySSAWrapperPass();
853 
854  static char ID;
855 
856  bool runOnFunction(Function &) override;
857  void releaseMemory() override;
858  MemorySSA &getMSSA() { return *MSSA; }
859  const MemorySSA &getMSSA() const { return *MSSA; }
860 
861  void getAnalysisUsage(AnalysisUsage &AU) const override;
862 
863  void verifyAnalysis() const override;
864  void print(raw_ostream &OS, const Module *M = nullptr) const override;
865 
866 private:
867  std::unique_ptr<MemorySSA> MSSA;
868 };
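// A sketch of how a transform pass would typically request MemorySSA (the
// surrounding pass boilerplate is omitted and the names are illustrative):
//
//   // Legacy pass manager, after AU.addRequired<MemorySSAWrapperPass>():
//   MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
//
//   // New pass manager:
//   MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();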
869 
870 /// \brief This is the generic walker interface for walkers of MemorySSA.
871 /// Walkers are used to be able to further disambiguate the def-use chains
872 /// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
873 /// you.
874 /// In particular, while the def-use chains provide basic information, and are
875 /// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
876 /// MemoryUse as AliasAnalysis considers it, a user may want better or other
877 /// information. In particular, they may want to use SCEV info to further
878 /// disambiguate memory accesses, or they may want the nearest dominating
879 /// may-aliasing MemoryDef for a call or a store. This API enables a
880 /// standardized interface to getting and using that info.
881 class MemorySSAWalker {
882 public:
883   MemorySSAWalker(MemorySSA *);
884   virtual ~MemorySSAWalker() = default;
885 
887 
888  /// \brief Given a memory Mod/Ref/ModRef'ing instruction, calling this
889  /// will give you the nearest dominating MemoryAccess that Mod's the location
890  /// the instruction accesses (by skipping any def which AA can prove does not
891  /// alias the location(s) accessed by the instruction given).
892  ///
893  /// Note that this will return a single access, and it must dominate the
894  /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
895  /// this will return the MemoryPhi, not the operand. This means that
896  /// given:
897  /// if (a) {
898  /// 1 = MemoryDef(liveOnEntry)
899  /// store %a
900  /// } else {
901  /// 2 = MemoryDef(liveOnEntry)
902  /// store %b
903  /// }
904  /// 3 = MemoryPhi(2, 1)
905  /// MemoryUse(3)
906  /// load %a
907  ///
908  /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
909  /// in the if (a) branch.
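  ///
  /// A short usage sketch (MSSA and LI are illustrative names for a built
  /// MemorySSA object and a load instruction):
  /// \code
  ///   MemorySSAWalker *Walker = MSSA.getWalker();
  ///   MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(LI);
  ///   if (MSSA.isLiveOnEntryDef(Clobber))
  ///     ; // no store in this function clobbers the loaded location
  /// \endcode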
910   MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
911     MemoryAccess *MA = MSSA->getMemoryAccess(I);
912  assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
913  return getClobberingMemoryAccess(MA);
914  }
915 
916  /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
917  /// but takes a MemoryAccess instead of an Instruction.
918  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;
919 
920  /// \brief Given a potentially clobbering memory access and a new location,
921  /// calling this will give you the nearest dominating clobbering MemoryAccess
922  /// (by skipping non-aliasing def links).
923  ///
924  /// This version of the function is mainly used to disambiguate phi translated
925  /// pointers, where the value of a pointer may have changed from the initial
926  /// memory access. Note that this expects to be handed either a MemoryUse,
927  /// or an already potentially clobbering access. Unlike the above API, if
928  /// given a MemoryDef that clobbers the pointer as the starting access, it
929  /// will return that MemoryDef, whereas the above would return the clobber
930  /// starting from the use side of the memory def.
931  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
932  const MemoryLocation &) = 0;
933 
934  /// \brief Given a memory access, invalidate anything this walker knows about
935  /// that access.
936  /// This API is used by walkers that store information to perform basic cache
937  /// invalidation. This will be called by MemorySSA at appropriate times for
938  /// the walker it uses or returns.
939  virtual void invalidateInfo(MemoryAccess *) {}
940 
941  virtual void verify(const MemorySSA *MSSA) { assert(MSSA == this->MSSA); }
942 
943 protected:
944  friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
945  // constructor.
946   MemorySSA *MSSA;
947 };
948 
949 /// \brief A MemorySSAWalker that does no alias queries, or anything else. It
950 /// simply returns the links as they were constructed by the builder.
951 class DoNothingMemorySSAWalker final : public MemorySSAWalker {
952 public:
953  // Keep the overrides below from hiding the Instruction overload of
954  // getClobberingMemoryAccess.
955   using MemorySSAWalker::getClobberingMemoryAccess;
956 
957  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
958  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
959  const MemoryLocation &) override;
960 };
961 
962 using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
963 using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
964 
965 /// \brief Iterator base class used to implement const and non-const iterators
966 /// over the defining accesses of a MemoryAccess.
967 template <class T>
968 class memoryaccess_def_iterator_base
969     : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
970  std::forward_iterator_tag, T, ptrdiff_t, T *,
971  T *> {
972  using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;
973 
974 public:
975  memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
976  memoryaccess_def_iterator_base() = default;
977 
978  bool operator==(const memoryaccess_def_iterator_base &Other) const {
979  return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
980  }
981 
982  // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
983  // block from the operand in constant time (In a PHINode, the uselist has
984  // both, so it's just subtraction). We provide it as part of the
985  // iterator to avoid callers having to linear walk to get the block.
986  // If the operation becomes constant time on MemoryPHI's, this bit of
987  // abstraction breaking should be removed.
988   BasicBlock *getPhiArgBlock() const {
989     MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
990  assert(MP && "Tried to get phi arg block when not iterating over a PHI");
991  return MP->getIncomingBlock(ArgNo);
992  }
993 
994  typename BaseT::iterator::pointer operator*() const {
995  assert(Access && "Tried to access past the end of our iterator");
996  // Go to the first argument for phis, and the defining access for everything
997  // else.
998  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
999  return MP->getIncomingValue(ArgNo);
1000  return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
1001  }
1002 
1003  using BaseT::operator++;
1004   memoryaccess_def_iterator_base &operator++() {
1005     assert(Access && "Hit end of iterator");
1006  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
1007  if (++ArgNo >= MP->getNumIncomingValues()) {
1008  ArgNo = 0;
1009  Access = nullptr;
1010  }
1011  } else {
1012  Access = nullptr;
1013  }
1014  return *this;
1015  }
1016 
1017 private:
1018  T *Access = nullptr;
1019  unsigned ArgNo = 0;
1020 };
1021 
1022 inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
1023   return memoryaccess_def_iterator(this);
1024 }
1025 
1026 inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
1027   return const_memoryaccess_def_iterator(this);
1028 }
1029 
1030 inline memoryaccess_def_iterator MemoryAccess::defs_end() {
1031   return memoryaccess_def_iterator();
1032 }
1033 
1034 inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
1035   return const_memoryaccess_def_iterator();
1036 }
1037 
1038 /// \brief GraphTraits for a MemoryAccess, which walks defs in the normal case,
1039 /// and uses in the inverse case.
1040 template <> struct GraphTraits<MemoryAccess *> {
1041   using NodeRef = MemoryAccess *;
1042   using ChildIteratorType = memoryaccess_def_iterator;
1043 
1044  static NodeRef getEntryNode(NodeRef N) { return N; }
1045   static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
1046   static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
1047 };
1048 
1049 template <> struct GraphTraits<Inverse<MemoryAccess *>> {
1050   using NodeRef = MemoryAccess *;
1051   using ChildIteratorType = MemoryAccess::iterator;
1052 
1053  static NodeRef getEntryNode(NodeRef N) { return N; }
1054   static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
1055   static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
1056 };
1057 
1058 /// \brief Provide an iterator that walks defs, giving both the memory access,
1059 /// and the current pointer location, updating the pointer location as it
1060 /// changes due to phi node translation.
1061 ///
1062 /// This iterator, while somewhat specialized, is what most clients actually
1063 /// want when walking upwards through MemorySSA def chains. It takes a pair of
1064 /// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
1065 /// memory location through phi nodes for the user.
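///
/// A rough usage sketch (MA and Loc stand for a starting MemoryAccess and its
/// MemoryLocation; the names are illustrative):
/// \code
///   for (const MemoryAccessPair &P : upward_defs({MA, Loc})) {
///     MemoryAccess *Def = P.first;                     // next defining access
///     const MemoryLocation &TranslatedLoc = P.second;  // phi-translated location
///   }
/// \endcode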
1066 class upward_defs_iterator
1067     : public iterator_facade_base<upward_defs_iterator,
1068  std::forward_iterator_tag,
1069  const MemoryAccessPair> {
1070  using BaseT = upward_defs_iterator::iterator_facade_base;
1071 
1072 public:
1073   upward_defs_iterator(const MemoryAccessPair &Info)
1074       : DefIterator(Info.first), Location(Info.second),
1075  OriginalAccess(Info.first) {
1076  CurrentPair.first = nullptr;
1077 
1078  WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
1079  fillInCurrentPair();
1080  }
1081 
1082  upward_defs_iterator() { CurrentPair.first = nullptr; }
1083 
1084  bool operator==(const upward_defs_iterator &Other) const {
1085  return DefIterator == Other.DefIterator;
1086  }
1087 
1088  BaseT::iterator::reference operator*() const {
1089  assert(DefIterator != OriginalAccess->defs_end() &&
1090  "Tried to access past the end of our iterator");
1091  return CurrentPair;
1092  }
1093 
1094  using BaseT::operator++;
1095   upward_defs_iterator &operator++() {
1096     assert(DefIterator != OriginalAccess->defs_end() &&
1097  "Tried to access past the end of the iterator");
1098  ++DefIterator;
1099  if (DefIterator != OriginalAccess->defs_end())
1100  fillInCurrentPair();
1101  return *this;
1102  }
1103 
1104  BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }
1105 
1106 private:
1107  void fillInCurrentPair() {
1108  CurrentPair.first = *DefIterator;
1109  if (WalkingPhi && Location.Ptr) {
1110  PHITransAddr Translator(
1111  const_cast<Value *>(Location.Ptr),
1112  OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
1113  if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
1114  DefIterator.getPhiArgBlock(), nullptr,
1115  false))
1116  if (Translator.getAddr() != Location.Ptr) {
1117  CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
1118  return;
1119  }
1120  }
1121  CurrentPair.second = Location;
1122  }
1123 
1124  MemoryAccessPair CurrentPair;
1125  memoryaccess_def_iterator DefIterator;
1126  MemoryLocation Location;
1127  MemoryAccess *OriginalAccess = nullptr;
1128  bool WalkingPhi = false;
1129 };
1130 
1131 inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
1132   return upward_defs_iterator(Pair);
1133 }
1134 
1135 inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
1136 
1137 inline iterator_range<upward_defs_iterator>
1138 upward_defs(const MemoryAccessPair &Pair) {
1139   return make_range(upward_defs_begin(Pair), upward_defs_end());
1140 }
1141 
1142 /// Walks the defining accesses of MemoryDefs. Stops after we hit something that
1143 /// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
1144 /// comparing against a null def_chain_iterator, this will compare equal only
1145 /// after walking said Phi/liveOnEntry.
1146 ///
1147 /// The UseOptimizedChain flag specifies whether to walk the clobbering
1148 /// access chain, or all the accesses.
1149 ///
1150 /// Normally, MemoryDef are all just def/use linked together, so a def_chain on
1151 /// a MemoryDef will walk all MemoryDefs above it in the program until it hits
1152 /// a phi node. The optimized chain walks the clobbering access of a store.
1153 /// So if you are just trying to find, given a store, what the next
1154 /// thing that would clobber the same memory is, you want the optimized chain.
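///
/// A small sketch (Start is an illustrative name for some MemoryAccess *):
/// \code
///   for (MemoryAccess *MA : def_chain(Start))
///     if (auto *D = dyn_cast<MemoryDef>(MA))
///       ; // visits each MemoryDef above Start, ending at a phi or liveOnEntry
/// \endcode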
1155 template <class T, bool UseOptimizedChain = false>
1156 struct def_chain_iterator
1157     : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
1158  std::forward_iterator_tag, MemoryAccess *> {
1159  def_chain_iterator() : MA(nullptr) {}
1160  def_chain_iterator(T MA) : MA(MA) {}
1161 
1162  T operator*() const { return MA; }
1163 
1164   def_chain_iterator &operator++() {
1165     // N.B. liveOnEntry has a null defining access.
1166  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
1167  if (UseOptimizedChain && MUD->isOptimized())
1168  MA = MUD->getOptimized();
1169  else
1170  MA = MUD->getDefiningAccess();
1171  } else {
1172  MA = nullptr;
1173  }
1174 
1175  return *this;
1176  }
1177 
1178  bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }
1179 
1180 private:
1181  T MA;
1182 };
1183 
1184 template <class T>
1185 inline iterator_range<def_chain_iterator<T>>
1186 def_chain(T MA, MemoryAccess *UpTo = nullptr) {
1187 #ifdef EXPENSIVE_CHECKS
1188  assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
1189  "UpTo isn't in the def chain!");
1190 #endif
1191   return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
1192 }
1193 
1194 template <class T>
1195 inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
1196   return make_range(def_chain_iterator<T, true>(MA),
1197                     def_chain_iterator<T, true>(nullptr));
1198 }
1199 
1200 } // end namespace llvm
1201 
1202 #endif // LLVM_ANALYSIS_MEMORYSSA_H