//===-- MemorySSAUpdater.cpp - Memory SSA Updater--------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------===//
//
// This file implements the MemorySSAUpdater class.
//
//===----------------------------------------------------------------===//
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include <algorithm>

#define DEBUG_TYPE "memoryssa"
using namespace llvm;

// This is the marker algorithm from "Simple and Efficient Construction of
// Static Single Assignment Form".
// The simple, non-marker algorithm places phi nodes at any join.
// Here, we place markers, and only place phi nodes if they end up necessary.
// They are only necessary if they break a cycle (i.e., we recursively visit
// ourselves again), or we discover, while getting the value of the operands,
// that there are two or more definitions needing to be merged.
// This will still leave non-minimal form in the case of irreducible control
// flow, where unnecessary phi nodes may form cycles with themselves.
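//
// Illustrative example (not from the original source): in a diamond CFG
//   A -> B, A -> C, B -> D, C -> D
// where only A contains a MemoryDef, the recursive walk from D finds A's def
// along both predecessors, so tryRemoveTrivialPhi folds the would-be phi at D
// into that single def and no MemoryPhi is created.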
MemoryAccess *MemorySSAUpdater::getPreviousDefRecursive(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  // First, do a cache lookup. Without this cache, certain CFG structures
  // (like a series of if statements) take exponential time to visit.
  auto Cached = CachedPreviousDef.find(BB);
  if (Cached != CachedPreviousDef.end()) {
    return Cached->second;
  }

  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
    // Single predecessor case, just recurse, we can only have one definition.
    MemoryAccess *Result = getPreviousDefFromEnd(Pred, CachedPreviousDef);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.count(BB)) {
    // We hit our node again, meaning we had a cycle, we must insert a phi
    // node to break it so we have an operand. The only case this will
    // insert useless phis is if we have irreducible control flow.
    MemoryAccess *Result = MSSA->createMemoryPhi(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.insert(BB).second) {
    // Mark us visited so we can detect a cycle.
    SmallVector<TrackingVH<MemoryAccess>, 8> PhiOps;

    // Recurse to get the values in our predecessors for placement of a
    // potential phi node. This will insert phi nodes if we cycle in order to
    // break the cycle and have an operand.
    for (auto *Pred : predecessors(BB))
      if (MSSA->DT->isReachableFromEntry(Pred))
        PhiOps.push_back(getPreviousDefFromEnd(Pred, CachedPreviousDef));
      else
        PhiOps.push_back(MSSA->getLiveOnEntryDef());

    // Now try to simplify the ops to avoid placing a phi.
    // This may return null if we never created a phi yet; that's okay.
    MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MSSA->getMemoryAccess(BB));

    // See if we can avoid the phi by simplifying it.
    auto *Result = tryRemoveTrivialPhi(Phi, PhiOps);
    // If we couldn't simplify, we may have to create a phi.
    if (Result == Phi) {
      if (!Phi)
        Phi = MSSA->createMemoryPhi(BB);

      // See if the existing phi operands match what we need.
      // Unlike normal SSA, we only allow one phi node per block, so we can't
      // just create a new one.
      if (Phi->getNumOperands() != 0) {
        // FIXME: Figure out whether this is dead code and if so remove it.
        if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.begin())) {
          // These will have been filled in by the recursive read we did above.
          llvm::copy(PhiOps, Phi->op_begin());
          std::copy(pred_begin(BB), pred_end(BB), Phi->block_begin());
        }
      } else {
        unsigned i = 0;
        for (auto *Pred : predecessors(BB))
          Phi->addIncoming(&*PhiOps[i++], Pred);
        InsertedPHIs.push_back(Phi);
      }
      Result = Phi;
    }

    // Set ourselves up for the next variable by resetting visited state.
    VisitedBlocks.erase(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }
  llvm_unreachable("Should have hit one of the three cases above");
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If a definition is not found in the block of the
// access, it continues globally, creating phi nodes to ensure we have a single
// definition.
MemoryAccess *MemorySSAUpdater::getPreviousDef(MemoryAccess *MA) {
  if (auto *LocalResult = getPreviousDefInBlock(MA))
    return LocalResult;
  DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
  return getPreviousDefRecursive(MA->getBlock(), CachedPreviousDef);
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If the definition is not found in the block of the
// access, it returns nullptr.
MemoryAccess *MemorySSAUpdater::getPreviousDefInBlock(MemoryAccess *MA) {
  auto *Defs = MSSA->getWritableBlockDefs(MA->getBlock());

  // It's possible there are no defs, or we got handed the first def to start.
  if (Defs) {
    // If this is a def, we can just use the def iterators.
    if (!isa<MemoryUse>(MA)) {
      auto Iter = MA->getReverseDefsIterator();
      ++Iter;
      if (Iter != Defs->rend())
        return &*Iter;
    } else {
      // Otherwise, have to walk the all-access iterator.
      auto End = MSSA->getWritableBlockAccesses(MA->getBlock())->rend();
      for (auto &U : make_range(++MA->getReverseIterator(), End))
        if (!isa<MemoryUse>(U))
          return cast<MemoryAccess>(&U);
      // Note that if MA comes before Defs->begin(), we won't hit a def.
      return nullptr;
    }
  }
  return nullptr;
}

// This starts at the end of the block.
MemoryAccess *MemorySSAUpdater::getPreviousDefFromEnd(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  auto *Defs = MSSA->getWritableBlockDefs(BB);

  if (Defs) {
    CachedPreviousDef.insert({BB, &*Defs->rbegin()});
    return &*Defs->rbegin();
  }

  return getPreviousDefRecursive(BB, CachedPreviousDef);
}

// Recurse over a set of phi uses to eliminate the trivial ones.
MemoryAccess *MemorySSAUpdater::recursePhi(MemoryAccess *Phi) {
  if (!Phi)
    return nullptr;
  TrackingVH<MemoryAccess> Res(Phi);
  SmallVector<TrackingVH<Value>, 8> Uses;
  std::copy(Phi->user_begin(), Phi->user_end(), std::back_inserter(Uses));
  for (auto &U : Uses) {
    if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U)) {
      auto OperRange = UsePhi->operands();
      tryRemoveTrivialPhi(UsePhi, OperRange);
    }
  }
  return Res;
}

// Eliminate trivial phis.
// Phis are trivial if they are defined either by themselves, or all the same
// argument.
// I.e., phi(a, a) or b = phi(a, b) or c = phi(a, a, c).
// We recursively try to remove them.
template <class RangeType>
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi,
                                                    RangeType &Operands) {
  // Bail out on non-opt Phis.
  if (NonOptPhis.count(Phi))
    return Phi;

  // Detect equal or self arguments.
  MemoryAccess *Same = nullptr;
  for (auto &Op : Operands) {
    // If the same or self, good so far.
    if (Op == Phi || Op == Same)
      continue;
    // Not the same; return the phi since it's not eliminatable by us.
    if (Same)
      return Phi;
    Same = cast<MemoryAccess>(&*Op);
  }
  // Never found a non-self reference, the phi is undef.
  if (Same == nullptr)
    return MSSA->getLiveOnEntryDef();
  if (Phi) {
    Phi->replaceAllUsesWith(Same);
    removeMemoryAccess(Phi);
  }

  // We should only end up recursing in case we replaced something, in which
  // case, we may have made other Phis trivial.
  return recursePhi(Same);
}

void MemorySSAUpdater::insertUse(MemoryUse *MU) {
  InsertedPHIs.clear();
  MU->setDefiningAccess(getPreviousDef(MU));
  // Unlike for defs, there is no extra work to do. Because uses do not create
  // new may-defs, there are only two cases:
  //
  // 1. There was a def already below us, and therefore, we should not have
  // created a phi node because it was already needed for the def.
  //
  // 2. There is no def below us, and therefore, there is no extra renaming
  // work to do.
}

// Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef.
static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB,
                                      MemoryAccess *NewDef) {
  // Replace any operand whose incoming block is BB with the new defining
  // access.
  int i = MP->getBasicBlockIndex(BB);
  assert(i != -1 && "Should have found the basic block in the phi");
  // We can't just compare i against getNumOperands since one is signed and the
  // other not. So use it to index into the block iterator.
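  // Note (illustrative, not from the original source): a phi may have several
  // consecutive entries for the same incoming block, e.g. when a switch in BB
  // has two cases that both branch to MP->getBlock(); the loop below rewrites
  // every one of those entries.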
  for (auto BBIter = MP->block_begin() + i; BBIter != MP->block_end();
       ++BBIter) {
    if (*BBIter != BB)
      break;
    MP->setIncomingValue(i, NewDef);
    ++i;
  }
}

// A brief description of the algorithm:
// First, we compute what should define the new def, using the SSA
// construction algorithm.
// Then, we update the defs below us (and any new phi nodes) in the graph to
// point to the correct new defs, to ensure we only have one variable, and no
// disconnected stores.
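//
// Illustrative usage (hypothetical caller, sketch only; `NewSI` and `BB` are
// invented names): after materializing a new store in BB, a pass might do
//   MemoryDef *NewMD = cast<MemoryDef>(MSSAU.createMemoryAccessInBB(
//       NewSI, /*Definition=*/nullptr, BB, MemorySSA::End));
//   MSSAU.insertDef(NewMD, /*RenameUses=*/true);
// insertDef then computes the real defining access and fixes up any defs and
// phis downstream of the insertion point.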
void MemorySSAUpdater::insertDef(MemoryDef *MD, bool RenameUses) {
  InsertedPHIs.clear();

  // See if we had a local def, and if not, go hunting.
  MemoryAccess *DefBefore = getPreviousDef(MD);
  bool DefBeforeSameBlock = DefBefore->getBlock() == MD->getBlock();

  // There is a def before us, which means we can replace any store/phi uses
  // of that thing with us, since we are in the way of whatever was there
  // before.
  // We now define that def's memorydefs and memoryphis.
  if (DefBeforeSameBlock) {
    for (auto UI = DefBefore->use_begin(), UE = DefBefore->use_end();
         UI != UE;) {
      Use &U = *UI++;
      // Leave the MemoryUses alone.
      // Also make sure we skip ourselves to avoid self references.
      if (isa<MemoryUse>(U.getUser()) || U.getUser() == MD)
        continue;
      // Defs are automatically unoptimized when the user is set to MD below,
      // because the isOptimized() call will fail to find the same ID.
      U.set(MD);
    }
  }

  // And that def is now our defining access.
  MD->setDefiningAccess(DefBefore);

  // Remember the index where we may insert new phis below.
  unsigned NewPhiIndex = InsertedPHIs.size();

  SmallVector<WeakVH, 8> FixupList(InsertedPHIs.begin(), InsertedPHIs.end());
  if (!DefBeforeSameBlock) {
    // If there was a local def before us, we must have the same effect it
    // did. Because every may-def is the same, any phis/etc we would create, it
    // would also have created. If there was no local def before us, we
    // performed a global update, and have to search all successors and make
    // sure we update the first def in each of them (following all paths until
    // we hit the first def along each path). This may also insert phi nodes.
    // TODO: There are other cases we can skip this work, such as when we have
    // a single successor, and only used a straight line of single pred blocks
    // backwards to find the def. To make that work, we'd have to track whether
    // getDefRecursive only ever used the single predecessor case. These types
    // of paths also only exist in between CFG simplifications.

    // If this is the first def in the block and this insert is in an arbitrary
    // place, compute IDF and place phis.
    auto Iter = MD->getDefsIterator();
    ++Iter;
    auto IterEnd = MSSA->getBlockDefs(MD->getBlock())->end();
    if (Iter == IterEnd) {
      ForwardIDFCalculator IDFs(*MSSA->DT);
      SmallVector<BasicBlock *, 32> IDFBlocks;
      SmallPtrSet<BasicBlock *, 2> DefiningBlocks;
      DefiningBlocks.insert(MD->getBlock());
      IDFs.setDefiningBlocks(DefiningBlocks);
      IDFs.calculate(IDFBlocks);
      SmallVector<AssertingVH<MemoryPhi>, 4> NewInsertedPHIs;
      for (auto *BBIDF : IDFBlocks)
        if (!MSSA->getMemoryAccess(BBIDF)) {
          auto *MPhi = MSSA->createMemoryPhi(BBIDF);
          NewInsertedPHIs.push_back(MPhi);
          // Add the phis created into the IDF blocks to NonOptPhis, so they
          // are not optimized out as trivial by the call to
          // getPreviousDefFromEnd below. Once they are complete, all these
          // Phis are added to the FixupList, and removed from NonOptPhis
          // inside fixupDefs().
          NonOptPhis.insert(MPhi);
        }

      for (auto &MPhi : NewInsertedPHIs) {
        auto *BBIDF = MPhi->getBlock();
        for (auto *Pred : predecessors(BBIDF)) {
          DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
          MPhi->addIncoming(getPreviousDefFromEnd(Pred, CachedPreviousDef),
                            Pred);
        }
      }

      // Re-take the index where we're adding the new phis, because the above
      // call to getPreviousDefFromEnd may have inserted into InsertedPHIs.
      NewPhiIndex = InsertedPHIs.size();
      for (auto &MPhi : NewInsertedPHIs) {
        InsertedPHIs.push_back(&*MPhi);
        FixupList.push_back(&*MPhi);
      }
    }

    FixupList.push_back(MD);
  }

  // Remember the index where we stopped inserting new phis above, since the
  // fixupDefs call in the loop below may insert more, that are already
  // minimal.
  unsigned NewPhiIndexEnd = InsertedPHIs.size();

  while (!FixupList.empty()) {
    unsigned StartingPHISize = InsertedPHIs.size();
    fixupDefs(FixupList);
    FixupList.clear();
    // Put any new phis on the fixup list, and process them.
    FixupList.append(InsertedPHIs.begin() + StartingPHISize,
                     InsertedPHIs.end());
  }

  // Optimize potentially non-minimal phis added in this method.
  unsigned NewPhiSize = NewPhiIndexEnd - NewPhiIndex;
  if (NewPhiSize)
    tryRemoveTrivialPhis(ArrayRef<WeakVH>(&InsertedPHIs[NewPhiIndex],
                                          NewPhiSize));

  // Now that all fixups are done, rename all uses if we are asked.
  if (RenameUses) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    BasicBlock *StartBlock = MD->getBlock();
    // We are guaranteed there is a def in the block, because we just got it
    // handed to us in this function.
    MemoryAccess *FirstDef = &*MSSA->getWritableBlockDefs(StartBlock)->begin();
    // Convert to incoming value if it's a memorydef. A phi *is* already an
    // incoming value.
    if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
      FirstDef = MD->getDefiningAccess();

    MSSA->renamePass(MD->getBlock(), FirstDef, Visited);
    // We just inserted a phi into this block, so the incoming value will
    // become the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs) {
      MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
      if (Phi)
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
    }
  }
}

void MemorySSAUpdater::fixupDefs(const SmallVectorImpl<WeakVH> &Vars) {
  SmallPtrSet<const BasicBlock *, 8> Seen;
  SmallVector<const BasicBlock *, 16> Worklist;
  for (auto &Var : Vars) {
    MemoryAccess *NewDef = dyn_cast_or_null<MemoryAccess>(Var);
    if (!NewDef)
      continue;
    // First, see if there is a local def after the operand.
    auto *Defs = MSSA->getWritableBlockDefs(NewDef->getBlock());
    auto DefIter = NewDef->getDefsIterator();

    // The temporary Phi is being fixed; remove it from the do-not-optimize
    // set.
    if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(NewDef))
      NonOptPhis.erase(Phi);

    // If there is a local def after us, we only have to rename that.
    if (++DefIter != Defs->end()) {
      cast<MemoryDef>(DefIter)->setDefiningAccess(NewDef);
      continue;
    }

    // Otherwise, we need to search down through the CFG.
    // For each of our successors, handle it directly if there is a phi, or
    // place on the fixup worklist.
    for (const auto *S : successors(NewDef->getBlock())) {
      if (auto *MP = MSSA->getMemoryAccess(S))
        setMemoryPhiValueForBlock(MP, NewDef->getBlock(), NewDef);
      else
        Worklist.push_back(S);
    }

    while (!Worklist.empty()) {
      const BasicBlock *FixupBlock = Worklist.back();
      Worklist.pop_back();

      // Get the first def in the block that isn't a phi node.
      if (auto *Defs = MSSA->getWritableBlockDefs(FixupBlock)) {
        auto *FirstDef = &*Defs->begin();
        // The loop above and below should have taken care of phi nodes.
        assert(!isa<MemoryPhi>(FirstDef) &&
               "Should have already handled phi nodes!");
        // We are now this def's defining access, make sure we actually
        // dominate it.
        assert(MSSA->dominates(NewDef, FirstDef) &&
               "Should have dominated the new access");

        // This may insert new phi nodes, because we are not guaranteed the
        // block we are processing has a single pred, and depending where the
        // store was inserted, it may require phi nodes below it.
        cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef));
        return;
      }
      // We didn't find a def, so we must continue.
      for (const auto *S : successors(FixupBlock)) {
        // If there is a phi node, handle it.
        // Otherwise, put the block on the worklist.
        if (auto *MP = MSSA->getMemoryAccess(S))
          setMemoryPhiValueForBlock(MP, FixupBlock, NewDef);
        else {
          // If we cycle, we should have ended up at a phi node that we already
          // processed. FIXME: Double check this.
          if (!Seen.insert(S).second)
            continue;
          Worklist.push_back(S);
        }
      }
    }
  }
}

void MemorySSAUpdater::removeEdge(BasicBlock *From, BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    MPhi->unorderedDeleteIncomingBlock(From);
    if (MPhi->getNumIncomingValues() == 1)
      removeMemoryAccess(MPhi);
  }
}
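
// Illustrative scenario (not from the original source): if a conditional
// branch in From with both successors equal to To is folded to a single
// unconditional branch, To's MemoryPhi is left with two entries for From;
// removeDuplicatePhiEdgesBetween below trims that down to one.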
void MemorySSAUpdater::removeDuplicatePhiEdgesBetween(const BasicBlock *From,
                                                      const BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    bool Found = false;
    MPhi->unorderedDeleteIncomingIf([&](const MemoryAccess *, BasicBlock *B) {
      if (From != B)
        return false;
      if (Found)
        return true;
      Found = true;
      return false;
    });
    if (MPhi->getNumIncomingValues() == 1)
      removeMemoryAccess(MPhi);
  }
}

void MemorySSAUpdater::cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
                                        const ValueToValueMapTy &VMap,
                                        PhiToDefMap &MPhiMap) {
  auto GetNewDefiningAccess = [&](MemoryAccess *MA) -> MemoryAccess * {
    MemoryAccess *InsnDefining = MA;
    if (MemoryUseOrDef *DefMUD = dyn_cast<MemoryUseOrDef>(InsnDefining)) {
      if (!MSSA->isLiveOnEntryDef(DefMUD)) {
        Instruction *DefMUDI = DefMUD->getMemoryInst();
        assert(DefMUDI && "Found MemoryUseOrDef with no Instruction.");
        if (Instruction *NewDefMUDI =
                cast_or_null<Instruction>(VMap.lookup(DefMUDI)))
          InsnDefining = MSSA->getMemoryAccess(NewDefMUDI);
      }
    } else {
      MemoryPhi *DefPhi = cast<MemoryPhi>(InsnDefining);
      if (MemoryAccess *NewDefPhi = MPhiMap.lookup(DefPhi))
        InsnDefining = NewDefPhi;
    }
    assert(InsnDefining && "Defining instruction cannot be nullptr.");
    return InsnDefining;
  };

  const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
  if (!Acc)
    return;
  for (const MemoryAccess &MA : *Acc) {
    if (const MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&MA)) {
      Instruction *Insn = MUD->getMemoryInst();
      // Entry does not exist if the clone of the block did not clone all
      // instructions. This occurs in LoopRotate when cloning instructions
      // from the old header to the old preheader. The cloned instruction may
      // also be a simplified Value, not an Instruction (see LoopRotate).
      if (Instruction *NewInsn =
              dyn_cast_or_null<Instruction>(VMap.lookup(Insn))) {
        MemoryAccess *NewUseOrDef = MSSA->createDefinedAccess(
            NewInsn, GetNewDefiningAccess(MUD->getDefiningAccess()), MUD);
        MSSA->insertIntoListsForBlock(NewUseOrDef, NewBB, MemorySSA::End);
      }
    }
  }
}

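// Illustrative scenario (not from the original source): LoopSimplify may give
// a loop whose header H has backedges from two latches L1 and L2 a single new
// backedge block BE, so that H's predecessors become {Preheader, BE} and BE's
// predecessors become {L1, L2}. The function below mirrors that rewrite on
// the header's MemoryPhi.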
void MemorySSAUpdater::updatePhisWhenInsertingUniqueBackedgeBlock(
    BasicBlock *Header, BasicBlock *Preheader, BasicBlock *BEBlock) {
  auto *MPhi = MSSA->getMemoryAccess(Header);
  if (!MPhi)
    return;

  // Create phi node in the backedge block and populate it with the same
  // incoming values as MPhi. Skip incoming values coming from Preheader.
  auto *NewMPhi = MSSA->createMemoryPhi(BEBlock);
  bool HasUniqueIncomingValue = true;
  MemoryAccess *UniqueValue = nullptr;
  for (unsigned I = 0, E = MPhi->getNumIncomingValues(); I != E; ++I) {
    BasicBlock *IBB = MPhi->getIncomingBlock(I);
    MemoryAccess *IV = MPhi->getIncomingValue(I);
    if (IBB != Preheader) {
      NewMPhi->addIncoming(IV, IBB);
      if (HasUniqueIncomingValue) {
        if (!UniqueValue)
          UniqueValue = IV;
        else if (UniqueValue != IV)
          HasUniqueIncomingValue = false;
      }
    }
  }

  // Update incoming edges into MPhi. Remove all but the incoming edge from
  // Preheader. Add an edge from NewMPhi.
  auto *AccFromPreheader = MPhi->getIncomingValueForBlock(Preheader);
  MPhi->setIncomingValue(0, AccFromPreheader);
  MPhi->setIncomingBlock(0, Preheader);
  for (unsigned I = MPhi->getNumIncomingValues() - 1; I >= 1; --I)
    MPhi->unorderedDeleteIncoming(I);
  MPhi->addIncoming(NewMPhi, BEBlock);

  // If NewMPhi is a trivial phi, remove it. Its use in the header MPhi will be
  // replaced with the unique value.
  if (HasUniqueIncomingValue)
    removeMemoryAccess(NewMPhi);
}

void MemorySSAUpdater::updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
                                           ArrayRef<BasicBlock *> ExitBlocks,
                                           const ValueToValueMapTy &VMap,
                                           bool IgnoreIncomingWithNoClones) {
  PhiToDefMap MPhiMap;

  auto FixPhiIncomingValues = [&](MemoryPhi *Phi, MemoryPhi *NewPhi) {
    assert(Phi && NewPhi && "Invalid Phi nodes.");
    BasicBlock *NewPhiBB = NewPhi->getBlock();
    SmallPtrSet<BasicBlock *, 4> NewPhiBBPreds(pred_begin(NewPhiBB),
                                               pred_end(NewPhiBB));
    for (unsigned It = 0, E = Phi->getNumIncomingValues(); It < E; ++It) {
      MemoryAccess *IncomingAccess = Phi->getIncomingValue(It);
      BasicBlock *IncBB = Phi->getIncomingBlock(It);

      if (BasicBlock *NewIncBB = cast_or_null<BasicBlock>(VMap.lookup(IncBB)))
        IncBB = NewIncBB;
      else if (IgnoreIncomingWithNoClones)
        continue;

      // Now we have IncBB, and will need to add incoming from it to NewPhi.

      // If IncBB is not a predecessor of NewPhiBB, then do not add it.
      // NewPhiBB was cloned without that edge.
      if (!NewPhiBBPreds.count(IncBB))
        continue;

      // Determine incoming value and add it as incoming from IncBB.
      if (MemoryUseOrDef *IncMUD = dyn_cast<MemoryUseOrDef>(IncomingAccess)) {
        if (!MSSA->isLiveOnEntryDef(IncMUD)) {
          Instruction *IncI = IncMUD->getMemoryInst();
          assert(IncI && "Found MemoryUseOrDef with no Instruction.");
          if (Instruction *NewIncI =
                  cast_or_null<Instruction>(VMap.lookup(IncI))) {
            IncMUD = MSSA->getMemoryAccess(NewIncI);
            assert(IncMUD &&
                   "MemoryUseOrDef cannot be null, all preds processed.");
          }
        }
        NewPhi->addIncoming(IncMUD, IncBB);
      } else {
        MemoryPhi *IncPhi = cast<MemoryPhi>(IncomingAccess);
        if (MemoryAccess *NewDefPhi = MPhiMap.lookup(IncPhi))
          NewPhi->addIncoming(NewDefPhi, IncBB);
        else
          NewPhi->addIncoming(IncPhi, IncBB);
      }
    }
  };

  auto ProcessBlock = [&](BasicBlock *BB) {
    BasicBlock *NewBlock = cast_or_null<BasicBlock>(VMap.lookup(BB));
    if (!NewBlock)
      return;

    assert(!MSSA->getWritableBlockAccesses(NewBlock) &&
           "Cloned block should have no accesses");

    // Add MemoryPhi.
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB)) {
      MemoryPhi *NewPhi = MSSA->createMemoryPhi(NewBlock);
      MPhiMap[MPhi] = NewPhi;
    }
    // Update Uses and Defs.
    cloneUsesAndDefs(BB, NewBlock, VMap, MPhiMap);
  };

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    ProcessBlock(BB);

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
      if (MemoryAccess *NewPhi = MPhiMap.lookup(MPhi))
        FixPhiIncomingValues(MPhi, cast<MemoryPhi>(NewPhi));
}

void MemorySSAUpdater::updateForClonedBlockIntoPred(
    BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM) {
  // All defs/phis from outside BB that are used in BB are valid uses in P1,
  // since those defs/phis must have dominated BB, and also dominate P1.
  // Defs from BB being used in BB will be replaced with the cloned defs from
  // VM. The uses of BB's Phi (if it exists) in BB will be replaced by the
  // incoming def into the Phi from P1.
  PhiToDefMap MPhiMap;
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
    MPhiMap[MPhi] = MPhi->getIncomingValueForBlock(P1);
  cloneUsesAndDefs(BB, P1, VM, MPhiMap);
}

template <typename Iter>
void MemorySSAUpdater::privateUpdateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, Iter ValuesBegin, Iter ValuesEnd,
    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> Updates;
  // Update/insert phis in all successors of exit blocks.
  for (auto *Exit : ExitBlocks)
    for (const ValueToValueMapTy *VMap : make_range(ValuesBegin, ValuesEnd))
      if (BasicBlock *NewExit = cast_or_null<BasicBlock>(VMap->lookup(Exit))) {
        BasicBlock *ExitSucc = NewExit->getTerminator()->getSuccessor(0);
        Updates.push_back({DT.Insert, NewExit, ExitSucc});
      }
  applyInsertUpdates(Updates, DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, const ValueToValueMapTy &VMap,
    DominatorTree &DT) {
  const ValueToValueMapTy *const Arr[] = {&VMap};
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, std::begin(Arr),
                                       std::end(Arr), DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks,
    ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT) {
  auto GetPtr = [&](const std::unique_ptr<ValueToValueMapTy> &I) {
    return I.get();
  };
  using MappedIteratorType =
      mapped_iterator<const std::unique_ptr<ValueToValueMapTy> *,
                      decltype(GetPtr)>;
  auto MapBegin = MappedIteratorType(VMaps.begin(), GetPtr);
  auto MapEnd = MappedIteratorType(VMaps.end(), GetPtr);
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, MapBegin, MapEnd, DT);
}

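// Illustrative usage (hypothetical caller, sketch only; `A`, `B`, `C` are
// invented block names): after redirecting the IR edge A->B to A->C, a pass
// could hand the same edge changes to both MemorySSA and the DominatorTree:
//   SmallVector<MemorySSAUpdater::CFGUpdate, 2> Updates;
//   Updates.push_back({DominatorTree::Delete, A, B});
//   Updates.push_back({DominatorTree::Insert, A, C});
//   MSSAU.applyUpdates(Updates, DT);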
void MemorySSAUpdater::applyUpdates(ArrayRef<CFGUpdate> Updates,
                                    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> RevDeleteUpdates;
  SmallVector<CFGUpdate, 4> InsertUpdates;
  for (auto &Update : Updates) {
    if (Update.getKind() == DT.Insert)
      InsertUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
    else
      RevDeleteUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
  }

  if (!RevDeleteUpdates.empty()) {
    // Update for inserted edges: use newDT and snapshot CFG as if deletes had
    // not occurred.
    // FIXME: This creates a new DT, so it's more expensive to mix
    // deletes/inserts than to do just inserts. We can do an incremental update
    // on the DT to revert deletes, then re-delete the edges. Teaching DT to do
    // this is part of a pending cleanup.
    DominatorTree NewDT(DT, RevDeleteUpdates);
    GraphDiff<BasicBlock *> GD(RevDeleteUpdates);
    applyInsertUpdates(InsertUpdates, NewDT, &GD);
  } else {
    GraphDiff<BasicBlock *> GD;
    applyInsertUpdates(InsertUpdates, DT, &GD);
  }

  // Update for deleted edges.
  for (auto &Update : RevDeleteUpdates)
    removeEdge(Update.getFrom(), Update.getTo());
}

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT) {
  GraphDiff<BasicBlock *> GD;
  applyInsertUpdates(Updates, DT, &GD);
}

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT,
                                          const GraphDiff<BasicBlock *> *GD) {
  // Get recursive last Def, assuming well formed MSSA and updated DT.
  auto GetLastDef = [&](BasicBlock *BB) -> MemoryAccess * {
    while (true) {
      MemorySSA::DefsList *Defs = MSSA->getWritableBlockDefs(BB);
      // Return last Def or Phi in BB, if it exists.
      if (Defs)
        return &*(--Defs->end());

      // Check number of predecessors, we only care if there's more than one.
      unsigned Count = 0;
      BasicBlock *Pred = nullptr;
      for (auto &Pair : children<GraphDiffInvBBPair>({GD, BB})) {
        Pred = Pair.second;
        Count++;
        if (Count == 2)
          break;
      }

      // If BB has multiple predecessors, get last definition from IDom.
      if (Count != 1) {
        // [SimpleLoopUnswitch] If BB is a dead block, about to be deleted, its
        // DT is invalidated. Return LoE as its last def. This will be added to
        // MemoryPhi node, and later deleted when the block is deleted.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        if (auto *IDom = DT.getNode(BB)->getIDom())
          if (IDom->getBlock() != BB) {
            BB = IDom->getBlock();
            continue;
          }
        return MSSA->getLiveOnEntryDef();
      } else {
        // Single predecessor, BB cannot be dead. GetLastDef of Pred.
        assert(Count == 1 && Pred && "Single predecessor expected.");
        BB = Pred;
      }
    }
    llvm_unreachable("Unable to get last definition.");
  };

  // Get nearest IDom given a set of blocks.
  // TODO: this can be optimized by starting the search at the node with the
  // lowest level (highest in the tree).
  auto FindNearestCommonDominator =
      [&](const SmallSetVector<BasicBlock *, 2> &BBSet) -> BasicBlock * {
    BasicBlock *PrevIDom = *BBSet.begin();
    for (auto *BB : BBSet)
      PrevIDom = DT.findNearestCommonDominator(PrevIDom, BB);
    return PrevIDom;
  };

  // Get all blocks that dominate PrevIDom, stop when reaching CurrIDom. Do
  // not include CurrIDom.
  auto GetNoLongerDomBlocks =
      [&](BasicBlock *PrevIDom, BasicBlock *CurrIDom,
          SmallVectorImpl<BasicBlock *> &BlocksPrevDom) {
        if (PrevIDom == CurrIDom)
          return;
        BlocksPrevDom.push_back(PrevIDom);
        BasicBlock *NextIDom = PrevIDom;
        while (BasicBlock *UpIDom =
                   DT.getNode(NextIDom)->getIDom()->getBlock()) {
          if (UpIDom == CurrIDom)
            break;
          BlocksPrevDom.push_back(UpIDom);
          NextIDom = UpIDom;
        }
      };

  // Map a BB to its predecessors: added + previously existing. To get a
  // deterministic order, store predecessors as SetVectors. The order in each
  // will be defined by the order in Updates (fixed) and the order given by
  // children<> (also fixed). Since we further iterate over these ordered sets,
  // we lose the information of multiple edges possibly existing between two
  // blocks, so we'll keep an EdgeCount map for that.
  // An alternate implementation could keep an unordered set for the
  // predecessors, traverse either Updates or children<> each time to get the
  // deterministic order, and drop the usage of EdgeCount. This alternate
  // approach would still require querying the maps for each predecessor, and
  // the children<> call has additional computation inside for creating the
  // snapshot-graph predecessors. As such, we favor using a little additional
  // storage and less compute time. This decision can be revisited if we find
  // the alternative more favorable.

  struct PredInfo {
    SmallSetVector<BasicBlock *, 2> Added;
    SmallSetVector<BasicBlock *, 2> Prev;
  };
  SmallDenseMap<BasicBlock *, PredInfo> PredMap;

  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    auto &AddedBlockSet = PredMap[BB].Added;
    AddedBlockSet.insert(Edge.getFrom());
  }

  // Store all existing predecessors for each BB, at least one must exist.
  SmallDenseMap<std::pair<BasicBlock *, BasicBlock *>, int> EdgeCountMap;
  SmallPtrSet<BasicBlock *, 2> NewBlocks;
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    auto &PrevBlockSet = BBPredPair.second.Prev;
    for (auto &Pair : children<GraphDiffInvBBPair>({GD, BB})) {
      BasicBlock *Pi = Pair.second;
      if (!AddedBlockSet.count(Pi))
        PrevBlockSet.insert(Pi);
      EdgeCountMap[{Pi, BB}]++;
    }

    if (PrevBlockSet.empty()) {
      assert(pred_size(BB) == AddedBlockSet.size() && "Duplicate edges added.");
      LLVM_DEBUG(
          dbgs()
          << "Adding a predecessor to a block with no predecessors. "
             "This must be an edge added to a new, likely cloned, block. "
             "Its memory accesses must be already correct, assuming completed "
             "via the updateExitBlocksForClonedLoop API. "
             "Assert a single such edge is added so no phi addition or "
             "additional processing is required.\n");
      assert(AddedBlockSet.size() == 1 &&
             "Can only handle adding one predecessor to a new block.");
      // Need to remove new blocks from PredMap. Remove below to not invalidate
      // iterator here.
      NewBlocks.insert(BB);
    }
  }
  // Nothing to process for new/cloned blocks.
  for (auto *BB : NewBlocks)
    PredMap.erase(BB);

  SmallVector<BasicBlock *, 8> BlocksToProcess;
  SmallVector<BasicBlock *, 16> BlocksWithDefsToReplace;

  // First create MemoryPhis in all blocks that don't have one. Create in the
  // order found in Updates, not in PredMap, to get deterministic numbering.
  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    if (PredMap.count(BB) && !MSSA->getMemoryAccess(BB))
      MSSA->createMemoryPhi(BB);
  }

  // Now we'll fill in the MemoryPhis with the right incoming values.
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &PrevBlockSet = BBPredPair.second.Prev;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    assert(!PrevBlockSet.empty() &&
           "At least one previous predecessor must exist.");

    // TODO: if this becomes a bottleneck, we can save on GetLastDef calls by
    // keeping this map before the loop. We can reuse already populated entries
    // if an edge is added from the same predecessor to two different blocks,
    // and this does happen in rotate. Note that the map needs to be updated
    // when deleting non-necessary phis below, if the phi is in the map, by
    // replacing the value with DefP1.
    SmallDenseMap<BasicBlock *, MemoryAccess *, 8> LastDefAddedPred;
    for (auto *AddedPred : AddedBlockSet) {
      auto *DefPn = GetLastDef(AddedPred);
      assert(DefPn != nullptr && "Unable to find last definition.");
      LastDefAddedPred[AddedPred] = DefPn;
    }

    MemoryPhi *NewPhi = MSSA->getMemoryAccess(BB);
    // If Phi is not empty, add an incoming edge from each added pred. Must
    // still compute blocks with defs to replace for this block below.
    if (NewPhi->getNumOperands()) {
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
    } else {
      // Pick any existing predecessor and get its definition. All other
      // existing predecessors should have the same one, since no phi existed.
      auto *P1 = *PrevBlockSet.begin();
      MemoryAccess *DefP1 = GetLastDef(P1);

      // Check DefP1 against all Defs in LastDefPredPair. If all the same,
      // nothing to add.
      bool InsertPhi = false;
      for (auto LastDefPredPair : LastDefAddedPred)
        if (DefP1 != LastDefPredPair.second) {
          InsertPhi = true;
          break;
        }
      if (!InsertPhi) {
        // Since NewPhi may be used in other newly added Phis, replace all uses
        // of NewPhi with the definition coming from all predecessors (DefP1),
        // before deleting it.
        NewPhi->replaceAllUsesWith(DefP1);
        removeMemoryAccess(NewPhi);
        continue;
      }

      // Update Phi with new values for new predecessors and old value for all
      // other predecessors. Since AddedBlockSet and PrevBlockSet are ordered
      // sets, the order of entries in NewPhi is deterministic.
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
      for (auto *Pred : PrevBlockSet)
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(DefP1, Pred);

      // Insert BB in the set of blocks that now have definition. We'll use
      // this to compute IDF and add Phis there next.
      BlocksToProcess.push_back(BB);
    }

    // Get all blocks that used to dominate BB and no longer do after adding
    // AddedBlockSet, where PrevBlockSet are the previously known predecessors.
    assert(DT.getNode(BB)->getIDom() && "BB does not have valid idom");
    BasicBlock *PrevIDom = FindNearestCommonDominator(PrevBlockSet);
    assert(PrevIDom && "Previous IDom should exist");
    BasicBlock *NewIDom = DT.getNode(BB)->getIDom()->getBlock();
    assert(NewIDom && "BB should have a new valid idom");
    assert(DT.dominates(NewIDom, PrevIDom) &&
           "New idom should dominate old idom");
    GetNoLongerDomBlocks(PrevIDom, NewIDom, BlocksWithDefsToReplace);
  }

  // Compute IDF and add Phis in all IDF blocks that do not have one.
  SmallVector<BasicBlock *, 32> IDFBlocks;
  if (!BlocksToProcess.empty()) {
    ForwardIDFCalculator IDFs(DT);
    SmallPtrSet<BasicBlock *, 16> DefiningBlocks(BlocksToProcess.begin(),
                                                 BlocksToProcess.end());
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);
    for (auto *BBIDF : IDFBlocks) {
      if (auto *IDFPhi = MSSA->getMemoryAccess(BBIDF)) {
        // Update existing Phi.
        // FIXME: some updates may be redundant, try to optimize and skip some.
        for (unsigned I = 0, E = IDFPhi->getNumIncomingValues(); I < E; ++I)
          IDFPhi->setIncomingValue(I, GetLastDef(IDFPhi->getIncomingBlock(I)));
      } else {
        IDFPhi = MSSA->createMemoryPhi(BBIDF);
        for (auto &Pair : children<GraphDiffInvBBPair>({GD, BBIDF})) {
          BasicBlock *Pi = Pair.second;
          IDFPhi->addIncoming(GetLastDef(Pi), Pi);
        }
      }
    }
  }

  // Now for all defs in BlocksWithDefsToReplace, if there are uses they no
  // longer dominate, replace those with the closest dominating def.
  // This will also update optimized accesses, as they're also uses.
  for (auto *BlockWithDefsToReplace : BlocksWithDefsToReplace) {
    if (auto DefsList = MSSA->getWritableBlockDefs(BlockWithDefsToReplace)) {
      for (auto &DefToReplaceUses : *DefsList) {
        BasicBlock *DominatingBlock = DefToReplaceUses.getBlock();
        Value::use_iterator UI = DefToReplaceUses.use_begin(),
                            E = DefToReplaceUses.use_end();
        for (; UI != E;) {
          Use &U = *UI;
          ++UI;
          MemoryAccess *Usr = cast<MemoryAccess>(U.getUser());
          if (MemoryPhi *UsrPhi = dyn_cast<MemoryPhi>(Usr)) {
            BasicBlock *DominatedBlock = UsrPhi->getIncomingBlock(U);
            if (!DT.dominates(DominatingBlock, DominatedBlock))
              U.set(GetLastDef(DominatedBlock));
          } else {
            BasicBlock *DominatedBlock = Usr->getBlock();
            if (!DT.dominates(DominatingBlock, DominatedBlock)) {
              if (auto *DomBlPhi = MSSA->getMemoryAccess(DominatedBlock))
                U.set(DomBlPhi);
              else {
                auto *IDom = DT.getNode(DominatedBlock)->getIDom();
                assert(IDom && "Block must have a valid IDom.");
                U.set(GetLastDef(IDom->getBlock()));
              }
              cast<MemoryUseOrDef>(Usr)->resetOptimized();
            }
          }
        }
      }
    }
  }
}

// Move What before Where in the MemorySSA IR.
template <class WhereType>
void MemorySSAUpdater::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                              WhereType Where) {
  // Mark MemoryPhi users of What not to be optimized.
  for (auto *U : What->users())
    if (MemoryPhi *PhiUser = dyn_cast<MemoryPhi>(U))
      NonOptPhis.insert(PhiUser);

  // Replace all our users with our defining access.
  What->replaceAllUsesWith(What->getDefiningAccess());

  // Let MemorySSA take care of moving it around in the lists.
  MSSA->moveTo(What, BB, Where);

  // Now reinsert it into the IR and do whatever fixups are needed.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    insertDef(MD);
  else
    insertUse(cast<MemoryUse>(What));

  // Clear dangling pointers. We added all MemoryPhi users, but not all
  // of them are removed by fixupDefs().
  NonOptPhis.clear();
}

// Move What before Where in the MemorySSA IR.
void MemorySSAUpdater::moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), Where->getIterator());
}

// Move What after Where in the MemorySSA IR.
void MemorySSAUpdater::moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), ++Where->getIterator());
}

void MemorySSAUpdater::moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
                                   MemorySSA::InsertionPlace Where) {
  return moveTo(What, BB, Where);
}

// All accesses in To used to be in From. Move to end and update access lists.
void MemorySSAUpdater::moveAllAccesses(BasicBlock *From, BasicBlock *To,
                                       Instruction *Start) {

  MemorySSA::AccessList *Accs = MSSA->getWritableBlockAccesses(From);
  if (!Accs)
    return;

  MemoryAccess *FirstInNew = nullptr;
  for (Instruction &I : make_range(Start->getIterator(), To->end()))
    if ((FirstInNew = MSSA->getMemoryAccess(&I)))
      break;
  if (!FirstInNew)
    return;

  auto *MUD = cast<MemoryUseOrDef>(FirstInNew);
  do {
    auto NextIt = ++MUD->getIterator();
    MemoryUseOrDef *NextMUD = (!Accs || NextIt == Accs->end())
                                  ? nullptr
                                  : cast<MemoryUseOrDef>(&*NextIt);
    MSSA->moveTo(MUD, To, MemorySSA::End);
    // Moving MUD from Accs in the moveTo above may delete Accs, so we need to
    // retrieve it again.
    Accs = MSSA->getWritableBlockAccesses(From);
    MUD = NextMUD;
  } while (MUD);
}

void MemorySSAUpdater::moveAllAfterSpliceBlocks(BasicBlock *From,
                                                BasicBlock *To,
                                                Instruction *Start) {
  assert(MSSA->getBlockAccesses(To) == nullptr &&
         "To block is expected to be free of MemoryAccesses.");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(To))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

void MemorySSAUpdater::moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
                                               Instruction *Start) {
  assert(From->getSinglePredecessor() == To &&
         "From block is expected to have a single predecessor (To).");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(From))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

/// If all arguments of a MemoryPHI are defined by the same incoming
/// argument, return that argument.
static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
  MemoryAccess *MA = nullptr;

  for (auto &Arg : MP->operands()) {
    if (!MA)
      MA = cast<MemoryAccess>(Arg);
    else if (MA != Arg)
      return nullptr;
  }
  return MA;
}

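// Illustrative scenario (not from the original source): when a SplitBlock-
// style transform reroutes a subset `Preds` of Old's predecessors through a
// new block New (so each edge P->Old, for P in Preds, becomes P->New->Old),
// the function below moves or splits Old's MemoryPhi to match.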
void MemorySSAUpdater::wireOldPredecessorsToNewImmediatePredecessor(
    BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
    bool IdenticalEdgesWereMerged) {
  assert(!MSSA->getWritableBlockAccesses(New) &&
         "Access list should be null for a new block.");
  MemoryPhi *Phi = MSSA->getMemoryAccess(Old);
  if (!Phi)
    return;
  if (Old->hasNPredecessors(1)) {
    assert(pred_size(New) == Preds.size() &&
           "Should have moved all predecessors.");
    MSSA->moveTo(Phi, New, MemorySSA::Beginning);
  } else {
    assert(!Preds.empty() && "Must be moving at least one predecessor to the "
                             "new immediate predecessor.");
    MemoryPhi *NewPhi = MSSA->createMemoryPhi(New);
    SmallPtrSet<BasicBlock *, 16> PredsSet(Preds.begin(), Preds.end());
    // Currently only support the case of removing a single incoming edge when
    // identical edges were not merged.
    if (!IdenticalEdgesWereMerged)
      assert(PredsSet.size() == Preds.size() &&
             "If identical edges were not merged, we cannot have duplicate "
             "blocks in the predecessors");
    Phi->unorderedDeleteIncomingIf([&](MemoryAccess *MA, BasicBlock *B) {
      if (PredsSet.count(B)) {
        NewPhi->addIncoming(MA, B);
        if (!IdenticalEdgesWereMerged)
          PredsSet.erase(B);
        return true;
      }
      return false;
    });
    Phi->addIncoming(NewPhi, New);
    if (onlySingleValue(NewPhi))
      removeMemoryAccess(NewPhi);
  }
}

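// Illustrative usage (hypothetical caller, sketch only; `DeadSI` is an
// invented name): a pass deleting a dead store would drop its MemorySSA
// access before erasing the instruction itself:
//   MSSAU.removeMemoryAccess(MSSA.getMemoryAccess(DeadSI));
//   DeadSI->eraseFromParent();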
void MemorySSAUpdater::removeMemoryAccess(MemoryAccess *MA, bool OptimizePhis) {
  assert(!MSSA->isLiveOnEntryDef(MA) &&
         "Trying to remove the live on entry def");
  // We can only delete phi nodes if they have no uses, or we can replace all
  // uses with a single definition.
  MemoryAccess *NewDefTarget = nullptr;
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
    // Note that it is sufficient to know that all edges of the phi node have
    // the same argument. If they do, by the definition of dominance frontiers
    // (which we used to place this phi), that argument must dominate this phi,
    // and thus, must dominate the phi's uses, and so we will not hit the
    // assert below.
    NewDefTarget = onlySingleValue(MP);
    assert((NewDefTarget || MP->use_empty()) &&
           "We can't delete this memory phi");
  } else {
    NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
  }

  SmallSetVector<MemoryPhi *, 4> PhisToCheck;

  // Re-point the uses at our defining access.
  if (!isa<MemoryUse>(MA) && !MA->use_empty()) {
    // Reset optimized on users of this store, and reset the uses.
    // A few notes:
    // 1. This is a slightly modified version of RAUW to avoid walking the
    // uses twice here.
    // 2. If we wanted to be complete, we would have to reset the optimized
    // flags on users of phi nodes if doing the below makes a phi node have all
    // the same arguments. Instead, we expect users to call removeMemoryAccess
    // on those phi nodes, because doing it here would be N^3.
    if (MA->hasValueHandle())
      ValueHandleBase::ValueIsRAUWd(MA, NewDefTarget);
    // Note: We assume MemorySSA is not used in metadata since it's not really
    // part of the IR.

    while (!MA->use_empty()) {
      Use &U = *MA->use_begin();
      if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
        MUD->resetOptimized();
      if (OptimizePhis)
        if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U.getUser()))
          PhisToCheck.insert(MP);
      U.set(NewDefTarget);
    }
  }

  // The call below to erase will destroy MA, so we can't change the order we
  // are doing things here.
  MSSA->removeFromLookups(MA);
  MSSA->removeFromLists(MA);

  // Optionally optimize Phi uses. This will recursively remove trivial phis.
  if (!PhisToCheck.empty()) {
    SmallVector<WeakVH, 16> PhisToOptimize{PhisToCheck.begin(),
                                           PhisToCheck.end()};
    PhisToCheck.clear();

    unsigned PhisSize = PhisToOptimize.size();
    while (PhisSize-- > 0)
      if (MemoryPhi *MP =
              cast_or_null<MemoryPhi>(PhisToOptimize.pop_back_val())) {
        auto OperRange = MP->operands();
        tryRemoveTrivialPhi(MP, OperRange);
      }
  }
}

void MemorySSAUpdater::removeBlocks(
    const SmallPtrSetImpl<BasicBlock *> &DeadBlocks) {
  // First delete all uses of BB in MemoryPhis.
  for (BasicBlock *BB : DeadBlocks) {
    Instruction *TI = BB->getTerminator();
    assert(TI && "Basic block expected to have a terminator instruction");
    for (BasicBlock *Succ : successors(TI))
      if (!DeadBlocks.count(Succ))
        if (MemoryPhi *MP = MSSA->getMemoryAccess(Succ)) {
          MP->unorderedDeleteIncomingBlock(BB);
          if (MP->getNumIncomingValues() == 1)
            removeMemoryAccess(MP);
        }
    // Drop all references of all accesses in BB.
    if (MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB))
      for (MemoryAccess &MA : *Acc)
        MA.dropAllReferences();
  }

  // Next, delete all memory accesses in each block.
  for (BasicBlock *BB : DeadBlocks) {
    MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB);
    if (!Acc)
      continue;
    for (auto AB = Acc->begin(), AE = Acc->end(); AB != AE;) {
      MemoryAccess *MA = &*AB;
      ++AB;
      MSSA->removeFromLookups(MA);
      MSSA->removeFromLists(MA);
    }
  }
}

void MemorySSAUpdater::tryRemoveTrivialPhis(ArrayRef<WeakVH> UpdatedPHIs) {
  for (auto &VH : UpdatedPHIs)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH)) {
      auto OperRange = MPhi->operands();
      tryRemoveTrivialPhi(MPhi, OperRange);
    }
}

void MemorySSAUpdater::changeToUnreachable(const Instruction *I) {
  const BasicBlock *BB = I->getParent();
  // Remove memory accesses in BB for I and all following instructions.
  auto BBI = I->getIterator(), BBE = BB->end();
  // FIXME: If this becomes too expensive, iterate until the first instruction
  // with a memory access, then iterate over MemoryAccesses.
  while (BBI != BBE)
    removeMemoryAccess(&*(BBI++));
  // Update phis in BB's successors to remove BB.
  SmallVector<WeakVH, 16> UpdatedPHIs;
  for (const BasicBlock *Successor : successors(BB)) {
    removeDuplicatePhiEdgesBetween(BB, Successor);
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Successor)) {
      MPhi->unorderedDeleteIncomingBlock(BB);
      UpdatedPHIs.push_back(MPhi);
    }
  }
  // Optimize trivial phis.
  tryRemoveTrivialPhis(UpdatedPHIs);
}

void MemorySSAUpdater::changeCondBranchToUnconditionalTo(const BranchInst *BI,
                                                         const BasicBlock *To) {
  const BasicBlock *BB = BI->getParent();
  SmallVector<WeakVH, 16> UpdatedPHIs;
  for (const BasicBlock *Succ : successors(BB)) {
    removeDuplicatePhiEdgesBetween(BB, Succ);
    if (Succ != To)
      if (auto *MPhi = MSSA->getMemoryAccess(Succ)) {
        MPhi->unorderedDeleteIncomingBlock(BB);
        UpdatedPHIs.push_back(MPhi);
      }
  }
  // Optimize trivial phis.
  tryRemoveTrivialPhis(UpdatedPHIs);
}

MemoryAccess *MemorySSAUpdater::createMemoryAccessInBB(
    Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
    MemorySSA::InsertionPlace Point) {
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsForBlock(NewAccess, BB, Point);
  return NewAccess;
}

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessBefore(
    Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              InsertPt->getIterator());
  return NewAccess;
}

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessAfter(
    Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              ++InsertPt->getIterator());
  return NewAccess;
}
use_iterator use_end()
Definition: Value.h:346
const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:233
AccessList * getWritableBlockAccesses(const BasicBlock *BB) const
Definition: MemorySSA.h:802
bool hasNPredecessors(unsigned N) const
Return true if this block has exactly N predecessors.
Definition: BasicBlock.cpp:260
static MemoryAccess * onlySingleValue(MemoryPhi *MP)
If all arguments of a MemoryPHI are defined by the same incoming argument, return that argument...
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:224
void dropAllReferences()
Drop all references to operands.
Definition: User.h:294
This class represents lattice values for constants.
Definition: AllocatorList.h:23
AllAccessType::self_iterator getIterator()
Get the iterators for the all access list and the defs only list We default to the all access list...
Definition: MemorySSA.h:178
bool dominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in potentially different blocks, determine whether MemoryAccess A dominates...
Definition: MemorySSA.cpp:2082
MemoryAccess * getDefiningAccess() const
Get the access that produces the memory state used by this Use.
Definition: MemorySSA.h:257
void updateExitBlocksForClonedLoop(ArrayRef< BasicBlock *> ExitBlocks, const ValueToValueMapTy &VMap, DominatorTree &DT)
Update phi nodes in exit block successors following cloning.
iterator begin() const
Definition: ArrayRef.h:136
BasicBlock * getSuccessor(unsigned Idx) const
Return the specified successor. This instruction must be a terminator.
void applyInsertUpdates(ArrayRef< CFGUpdate > Updates, DominatorTree &DT)
Apply CFG insert updates, analogous with the DT edge updates.
const AccessList * getBlockAccesses(const BasicBlock *BB) const
Return the list of MemoryAccess&#39;s for a given basic block.
Definition: MemorySSA.h:758
void changeToUnreachable(const Instruction *I)
Instruction I will be changed to an unreachable.
void removeBlocks(const SmallPtrSetImpl< BasicBlock *> &DeadBlocks)
Remove all MemoryAcceses in a set of BasicBlocks about to be deleted.
This file contains the declarations for metadata subclasses.
bool hasValueHandle() const
Return true if there is a value handle associated with this value.
Definition: Value.h:485
void moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where)
Represents a read-write access to memory, whether it is a must-alias, or a may-alias.
Definition: MemorySSA.h:375
NodeT * findNearestCommonDominator(NodeT *A, NodeT *B) const
findNearestCommonDominator - Find nearest common dominator basic block for basic block A and B...
void insertUse(MemoryUse *Use)
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
Definition: MemorySSA.h:575
reverse_iterator rbegin()
Definition: BasicBlock.h:273
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:137
DefsOnlyType::self_iterator getDefsIterator()
Definition: MemorySSA.h:190
iterator end()
Get an iterator to the end of the SetVector.
Definition: SetVector.h:92
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:299
op_iterator op_begin()
Definition: User.h:229
void setIncomingValue(unsigned I, MemoryAccess *V)
Definition: MemorySSA.h:534
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:343
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:268
Represents read-only accesses to memory.
Definition: MemorySSA.h:319
void removeEdge(BasicBlock *From, BasicBlock *To)
Update the MemoryPhi in To following an edge deletion between From and To.
void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal, SmallPtrSetImpl< BasicBlock *> &Visited)
Definition: MemorySSA.h:821
block_iterator block_begin()
Definition: MemorySSA.h:500
A Use represents the edge between a Value definition and its users.
Definition: Use.h:55
void setDefiningBlocks(const SmallPtrSetImpl< BasicBlock *> &Blocks)
Give the IDF calculator the set of blocks in which the value is defined.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
void applyUpdates(ArrayRef< CFGUpdate > Updates, DominatorTree &DT)
Apply CFG updates, analogous with the DT edge updates.
const DefsList * getBlockDefs(const BasicBlock *BB) const
Return the list of MemoryDef&#39;s and MemoryPhi&#39;s for a given basic block.
Definition: MemorySSA.h:766
void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *, InsertionPlace)
Definition: MemorySSA.cpp:1581
A simple intrusive list implementation.
Definition: simple_ilist.h:78
User * getUser() const LLVM_READONLY
Returns the User that contains this Use.
Definition: Use.cpp:40
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:141
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref&#39;ing instruction, get the MemorySSA access associated with it.
Definition: MemorySSA.h:720
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition: SetVector.h:82
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:429
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: ValueMap.h:170
static void ValueIsRAUWd(Value *Old, Value *New)
Definition: Value.cpp:889
static bool ProcessBlock(BasicBlock &BB, DominatorTree &DT, LoopInfo &LI, AAResults &AA)
Definition: Sink.cpp:198
MemoryUseOrDef * createMemoryAccessBefore(Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt)
Create a MemoryAccess in MemorySSA before or after an existing MemoryAccess.
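A minimal sketch of the "before" variant, with hypothetical names NewSI and OldSI: a new store NewSI was placed immediately before an existing memory-writing instruction OldSI, so its access can reuse OldSI's defining access and then be wired in with insertDef:

  // Register NewSI's access directly before OldSI's access.
  MemoryUseOrDef *OldAcc = MSSA->getMemoryAccess(OldSI);
  MemoryUseOrDef *NewAcc =
      MSSAU.createMemoryAccessBefore(NewSI, OldAcc->getDefiningAccess(), OldAcc);
  MSSAU.insertDef(cast<MemoryDef>(NewAcc), /*RenameUses=*/true);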
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:144
MemoryAccess * getIncomingValue(unsigned I) const
Return incoming value number I.
Definition: MemorySSA.h:533
use_iterator_impl< Use > use_iterator
Definition: Value.h:331
NodeT * getBlock() const
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Definition: BasicBlock.cpp:233
static constexpr UpdateKind Insert
void updatePhisWhenInsertingUniqueBackedgeBlock(BasicBlock *LoopHeader, BasicBlock *LoopPreheader, BasicBlock *BackedgeBlock)
Update MemorySSA when inserting a unique backedge block for a loop.
void set(Value *Val)
Definition: Value.h:670
AllAccessType::reverse_self_iterator getReverseIterator()
Definition: MemorySSA.h:184
LLVM Basic Block Representation.
Definition: BasicBlock.h:57
static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB, MemoryAccess *NewDef)
Conditional or Unconditional Branch instruction.
Value handle that tracks a Value across RAUW.
Definition: ValueHandle.h:327
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:148
DomTreeNodeBase * getIDom() const
void removeDuplicatePhiEdgesBetween(const BasicBlock *From, const BasicBlock *To)
Update the MemoryPhi in To to have a single incoming edge from From, following a CFG change that replaced multiple edges (switch) with a direct branch.
void addIncoming(MemoryAccess *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
Definition: MemorySSA.h:564
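A minimal sketch with hypothetical names: when a new CFG edge NewPred->BB is added and BB already has a MemoryPhi MP, the phi needs one operand for the new edge, here the def reaching the end of NewPred:

  // MemoryPhis keep one incoming value per incoming edge.
  MP->addIncoming(ReachingDef, NewPred);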
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:370
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the pred_* functions.
Definition: Interval.h:112
InsertionPlace
Used in various insertion functions to specify whether we are talking about the beginning or end of a block.
Definition: MemorySSA.h:788
void moveAllAfterSpliceBlocks(BasicBlock *From, BasicBlock *To, Instruction *Start)
Block From was spliced into From and To; all memory accesses from Start onward are moved from From into To.
op_iterator op_end()
Definition: User.h:231
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:115
op_range operands()
Definition: User.h:237
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:381
self_iterator getIterator()
Definition: ilist_node.h:81
void updateForClonedLoop(const LoopBlocksRPO &LoopBlocks, ArrayRef< BasicBlock *> ExitBlocks, const ValueToValueMapTy &VM, bool IgnoreIncomingWithNoClones=false)
Update MemorySSA after a loop was cloned, given the blocks in RPO order, the exit blocks and a 1:1 map of the originals to the clones.
MemoryAccess * createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition, const BasicBlock *BB, MemorySSA::InsertionPlace Point)
Create a MemoryAccess in MemorySSA at a specified point in a block, with a specified clobbering definition.
Compute iterated dominance frontiers using a linear time algorithm.
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
An intrusive list with ownership and callbacks specified/controlled by ilist_traits, only with API safe for polymorphic types.
Definition: ilist.h:388
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB, MemorySSA::InsertionPlace Where)
MemoryUseOrDef * createDefinedAccess(Instruction *, MemoryAccess *, const MemoryUseOrDef *Template=nullptr)
Definition: MemorySSA.cpp:1688
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:297
void setDefiningAccess(MemoryAccess *DMA, bool Optimized=false, Optional< AliasResult > AR=MayAlias)
Definition: MemorySSA.h:299
unsigned getNumOperands() const
Definition: User.h:191
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or fewer elements.
Definition: SmallPtrSet.h:417
iterator end()
Definition: BasicBlock.h:270
void calculate(SmallVectorImpl< BasicBlock *> &IDFBlocks)
Calculate iterated dominance frontiers.
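A minimal sketch of this IDF interface, mirroring how MemorySSA itself places MemoryPhis; DT and DefiningBlocks are assumed to exist in the caller:

  ForwardIDFCalculator IDFs(DT);
  IDFs.setDefiningBlocks(DefiningBlocks);   // SmallPtrSet of blocks containing defs
  SmallVector<BasicBlock *, 32> IDFBlocks;
  IDFs.calculate(IDFBlocks);                // blocks that need a MemoryPhi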
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:841
bool dominates(const Instruction *Def, const Use &U) const
Return true if Def dominates a use in User.
Definition: Dominators.cpp:248
Module.h - This file contains the declarations for the Module class.
iterator end() const
Definition: ArrayRef.h:137
void wireOldPredecessorsToNewImmediatePredecessor(BasicBlock *Old, BasicBlock *New, ArrayRef< BasicBlock *> Preds, bool IdenticalEdgesWereMerged=true)
A new empty BasicBlock (New) now branches directly to Old.
BasicBlock * getBlock() const
Definition: MemorySSA.h:159
pred_range predecessors(BasicBlock *BB)
Definition: CFG.h:124
MemoryAccess * getLiveOnEntryDef() const
Definition: MemorySSA.h:742
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
Class that has the common methods + fields of memory uses/defs.
Definition: MemorySSA.h:247
iterator_range< user_iterator > users()
Definition: Value.h:399
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Definition: MemorySSA.h:530
BasicBlock * getIncomingBlock(unsigned I) const
Return incoming basic block number I.
Definition: MemorySSA.h:543
use_iterator use_begin()
Definition: Value.h:338
block_iterator block_end()
Definition: MemorySSA.h:511
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:55
unsigned pred_size(const BasicBlock *BB)
Get the number of predecessors of BB.
Definition: CFG.h:121
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:72
MemoryUseOrDef * createMemoryAccessAfter(Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt)
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:332
void moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To, Instruction *Start)
From block was merged into To.
bool isLiveOnEntryDef(const MemoryAccess *MA) const
Return true if MA represents the live on entry value.
Definition: MemorySSA.h:738
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exists.
Definition: DenseMap.h:211
iterator insert(iterator I, reference Node)
Insert a node by reference; never copies.
Definition: simple_ilist.h:159
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse post-order traversal of the blocks in a loop body.
Definition: LoopIterator.h:172
Determine the iterated dominance frontier, given a set of defining blocks, and optionally, a set of live-in blocks.
void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
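A minimal sketch of the usual deletion order, assuming MSSA and MSSAU are in scope: the access must be removed while the instruction I still exists:

  if (MemoryUseOrDef *MA = MSSA->getMemoryAccess(I))
    MSSAU.removeMemoryAccess(MA);
  I->eraseFromParent();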
user_iterator user_begin()
Definition: Value.h:375
void insertDef(MemoryDef *Def, bool RenameUses=false)
Insert a definition into the MemorySSA IR.
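A minimal sketch combining this with createMemoryAccessInBB above, with hypothetical names NewSI and BB: the new access starts with no defining access, and insertDef then computes it, updates downstream uses, and adds any MemoryPhis that become necessary:

  MemoryAccess *NewAcc = MSSAU.createMemoryAccessInBB(
      NewSI, /*Definition=*/nullptr, BB, MemorySSA::Beginning);
  MSSAU.insertDef(cast<MemoryDef>(NewAcc), /*RenameUses=*/true);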
succ_range successors(Instruction *I)
Definition: CFG.h:259
DefsOnlyType::reverse_self_iterator getReverseDefsIterator()
Definition: MemorySSA.h:196
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/def graph.
Represents phi nodes for memory accesses.
Definition: MemorySSA.h:481
DefsList * getWritableBlockDefs(const BasicBlock *BB) const
Definition: MemorySSA.h:808
#define LLVM_DEBUG(X)
Definition: Debug.h:122
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1237
void moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where)
void updateForClonedBlockIntoPred(BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM)
bool use_empty() const
Definition: Value.h:322
void changeCondBranchToUnconditionalTo(const BranchInst *BI, const BasicBlock *To)
Conditional branch BI is changed or replaced with an unconditional branch to To.
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:143
const BasicBlock * getParent() const
Definition: Instruction.h:66
user_iterator user_end()
Definition: Value.h:383