//===-- MemorySSAUpdater.cpp - Memory SSA Updater -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the MemorySSAUpdater class.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include <algorithm>

#define DEBUG_TYPE "memoryssa"
using namespace llvm;

// This is the marker algorithm from "Simple and Efficient Construction of
// Static Single Assignment Form".
// The simple, non-marker algorithm places phi nodes at any join.
// Here, we place markers, and only place phi nodes if they end up necessary.
// They are only necessary if they break a cycle (IE we recursively visit
// ourselves again), or we discover, while getting the value of the operands,
// that there are two or more definitions needing to be merged.
// This still will leave non-minimal form in the case of irreducible control
// flow, where phi nodes may be in cycles with themselves, but unnecessary.
MemoryAccess *MemorySSAUpdater::getPreviousDefRecursive(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  // First, do a cache lookup. Without this cache, certain CFG structures
  // (like a series of if statements) take exponential time to visit.
  auto Cached = CachedPreviousDef.find(BB);
  if (Cached != CachedPreviousDef.end()) {
    return Cached->second;
  }

  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
    // Single predecessor case, just recurse, we can only have one definition.
    MemoryAccess *Result = getPreviousDefFromEnd(Pred, CachedPreviousDef);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.count(BB)) {
    // We hit our node again, meaning we had a cycle, we must insert a phi
    // node to break it so we have an operand. The only case this will
    // insert useless phis is if we have irreducible control flow.
    MemoryAccess *Result = MSSA->createMemoryPhi(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.insert(BB).second) {
    // Mark us visited so we can detect a cycle.
    SmallVector<TrackingVH<Value>, 8> PhiOps;

    // Recurse to get the values in our predecessors for placement of a
    // potential phi node. This will insert phi nodes if we cycle in order to
    // break the cycle and have an operand.
    for (auto *Pred : predecessors(BB))
      PhiOps.push_back(getPreviousDefFromEnd(Pred, CachedPreviousDef));

    // Now try to simplify the ops to avoid placing a phi.
    // This may return null if we never created a phi yet, that's okay.
    MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MSSA->getMemoryAccess(BB));

    // See if we can avoid the phi by simplifying it.
    auto *Result = tryRemoveTrivialPhi(Phi, PhiOps);
    // If we couldn't simplify, we may have to create a phi.
    if (Result == Phi) {
      if (!Phi)
        Phi = MSSA->createMemoryPhi(BB);

      // See if the existing phi operands match what we need.
      // Unlike normal SSA, we only allow one phi node per block, so we can't
      // just create a new one.
      if (Phi->getNumOperands() != 0) {
        // FIXME: Figure out whether this is dead code and if so remove it.
        if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.begin())) {
          // These will have been filled in by the recursive read we did above.
          llvm::copy(PhiOps, Phi->op_begin());
          std::copy(pred_begin(BB), pred_end(BB), Phi->block_begin());
        }
      } else {
        unsigned i = 0;
        for (auto *Pred : predecessors(BB))
          Phi->addIncoming(&*PhiOps[i++], Pred);
        InsertedPHIs.push_back(Phi);
      }
      Result = Phi;
    }

    // Set ourselves up for the next variable by resetting visited state.
    VisitedBlocks.erase(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }
  llvm_unreachable("Should have hit one of the three cases above");
}
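
// Worked example (editorial note, not part of the original source): consider a
// ladder of if-then-else diamonds
//
//   A -> {B, C} -> D -> {E, F} -> G -> ...
//
// A naive backwards walk from the bottom revisits each diamond once per path,
// which is exponential in the number of diamonds; CachedPreviousDef collapses
// each join to a single visit. getPreviousDef below seeds exactly this
// per-query cache (a sketch of the pattern):
//
//   DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> Cache;
//   MemoryAccess *Reaching = getPreviousDefRecursive(BB, Cache);
//
// The cache is per-query: the phis recorded in it are only meaningful for the
// VisitedBlocks state of this particular walk.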

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If a definition is not found in the block of the
// access, it continues globally, creating phi nodes to ensure we have a single
// definition.
MemoryAccess *MemorySSAUpdater::getPreviousDef(MemoryAccess *MA) {
  if (auto *LocalResult = getPreviousDefInBlock(MA))
    return LocalResult;
  DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
  return getPreviousDefRecursive(MA->getBlock(), CachedPreviousDef);
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If the definition is not found in the block of the
// access, it returns nullptr.
MemoryAccess *MemorySSAUpdater::getPreviousDefInBlock(MemoryAccess *MA) {
  auto *Defs = MSSA->getWritableBlockDefs(MA->getBlock());

  // It's possible there are no defs, or we got handed the first def to start.
  if (Defs) {
    // If this is a def, we can just use the def iterators.
    if (!isa<MemoryUse>(MA)) {
      auto Iter = MA->getReverseDefsIterator();
      ++Iter;
      if (Iter != Defs->rend())
        return &*Iter;
    } else {
      // Otherwise, have to walk the all access iterator.
      auto End = MSSA->getWritableBlockAccesses(MA->getBlock())->rend();
      for (auto &U : make_range(++MA->getReverseIterator(), End))
        if (!isa<MemoryUse>(U))
          return cast<MemoryAccess>(&U);
      // Note that if MA comes before Defs->begin(), we won't hit a def.
      return nullptr;
    }
  }
  return nullptr;
}

// This starts at the end of the block.
MemoryAccess *MemorySSAUpdater::getPreviousDefFromEnd(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  auto *Defs = MSSA->getWritableBlockDefs(BB);

  if (Defs)
    return &*Defs->rbegin();

  return getPreviousDefRecursive(BB, CachedPreviousDef);
}

// Recurse over a set of phi uses to eliminate the trivial ones.
MemoryAccess *MemorySSAUpdater::recursePhi(MemoryAccess *Phi) {
  if (!Phi)
    return nullptr;
  TrackingVH<MemoryAccess> Res(Phi);
  SmallVector<TrackingVH<Value>, 8> Uses;
  std::copy(Phi->user_begin(), Phi->user_end(), std::back_inserter(Uses));
  for (auto &U : Uses) {
    if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U)) {
      auto OperRange = UsePhi->operands();
      tryRemoveTrivialPhi(UsePhi, OperRange);
    }
  }
  return Res;
}

// Eliminate trivial phis
// Phis are trivial if they are defined either by themselves, or all the same
// argument.
// IE phi(a, a) or b = phi(a, b) or c = phi(a, a, c)
// We recursively try to remove them.
template <class RangeType>
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi,
                                                    RangeType &Operands) {
  // Bail out on non-opt Phis.
  if (NonOptPhis.count(Phi))
    return Phi;

  // Detect equal or self arguments
  MemoryAccess *Same = nullptr;
  for (auto &Op : Operands) {
    // If the same or self, good so far
    if (Op == Phi || Op == Same)
      continue;
    // not the same, return the phi since it's not eliminatable by us
    if (Same)
      return Phi;
    Same = cast<MemoryAccess>(&*Op);
  }
  // Never found a non-self reference, the phi is undef
  if (Same == nullptr)
    return MSSA->getLiveOnEntryDef();
  if (Phi) {
    Phi->replaceAllUsesWith(Same);
    removeMemoryAccess(Phi);
  }

  // We should only end up recursing in case we replaced something, in which
  // case, we may have made other Phis trivial.
  return recursePhi(Same);
}
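
// Worked example (editorial): using MemorySSA's printed notation, with
//   1 = MemoryDef(liveOnEntry)
// the phi 2 = MemoryPhi({L, 1}, {R, 1}) is phi(a, a) and folds to 1, and a
// loop phi 3 = MemoryPhi({Preheader, 1}, {Latch, 3}) is b = phi(a, b) and
// also folds to 1. Folding 2 may turn a user like MemoryPhi({X, 2}, {Y, 1})
// into phi(1, 1), which is why recursePhi re-examines the users of Same.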

void MemorySSAUpdater::insertUse(MemoryUse *MU) {
  InsertedPHIs.clear();
  MU->setDefiningAccess(getPreviousDef(MU));
  // Unlike for defs, there is no extra work to do. Because uses do not create
  // new may-defs, there are only two cases:
  //
  // 1. There was a def already below us, and therefore, we should not have
  // created a phi node because it was already needed for the def.
  //
  // 2. There is no def below us, and therefore, there is no extra renaming work
  // to do.
}
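
// Editorial usage sketch (hypothetical names): a pass that clones or sinks a
// load `LI` into block `BB` can create the access with a null placeholder and
// let insertUse compute the real defining access:
//
//   MemoryAccess *NewMA = Updater.createMemoryAccessInBB(
//       LI, /*Definition=*/nullptr, BB, MemorySSA::End);
//   Updater.insertUse(cast<MemoryUse>(NewMA));
//
// setDefiningAccess(getPreviousDef(MU)) above then overwrites the placeholder.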

// Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef.
static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB,
                                      MemoryAccess *NewDef) {
  // Replace any operand whose incoming block is BB with the new defining
  // access.
  int i = MP->getBasicBlockIndex(BB);
  assert(i != -1 && "Should have found the basic block in the phi");
  // We can't just compare i against getNumOperands since one is signed and the
  // other not. So use it to index into the block iterator.
  for (auto BBIter = MP->block_begin() + i; BBIter != MP->block_end();
       ++BBIter) {
    if (*BBIter != BB)
      break;
    MP->setIncomingValue(i, NewDef);
    ++i;
  }
}

// A brief description of the algorithm:
// First, we compute what should define the new def, using the SSA
// construction algorithm.
// Then, we update the defs below us (and any new phi nodes) in the graph to
// point to the correct new defs, to ensure we only have one variable, and no
// disconnected stores.
void MemorySSAUpdater::insertDef(MemoryDef *MD, bool RenameUses) {
  InsertedPHIs.clear();

  // See if we had a local def, and if not, go hunting.
  MemoryAccess *DefBefore = getPreviousDef(MD);
  bool DefBeforeSameBlock = DefBefore->getBlock() == MD->getBlock();

  // There is a def before us, which means we can replace any store/phi uses
  // of that thing with us, since we are in the way of whatever was there
  // before.
  // We now define that def's memorydefs and memoryphis
  if (DefBeforeSameBlock) {
    for (auto UI = DefBefore->use_begin(), UE = DefBefore->use_end();
         UI != UE;) {
      Use &U = *UI++;
      // Leave the MemoryUses alone.
      // Also make sure we skip ourselves to avoid self references.
      if (isa<MemoryUse>(U.getUser()) || U.getUser() == MD)
        continue;
      U.set(MD);
    }
  }

  // and that def is now our defining access.
  MD->setDefiningAccess(DefBefore);

  SmallVector<WeakVH, 8> FixupList(InsertedPHIs.begin(), InsertedPHIs.end());
  if (!DefBeforeSameBlock) {
    // If there was a local def before us, we must have the same effect it
    // did. Because every may-def is the same, any phis/etc we would create, it
    // would also have created. If there was no local def before us, we
    // performed a global update, and have to search all successors and make
    // sure we update the first def in each of them (following all paths until
    // we hit the first def along each path). This may also insert phi nodes.
    // TODO: There are other cases we can skip this work, such as when we have a
    // single successor, and only used a straight line of single pred blocks
    // backwards to find the def. To make that work, we'd have to track whether
    // getDefRecursive only ever used the single predecessor case. These types
    // of paths also only exist in between CFG simplifications.
    FixupList.push_back(MD);
  }

  while (!FixupList.empty()) {
    unsigned StartingPHISize = InsertedPHIs.size();
    fixupDefs(FixupList);
    FixupList.clear();
    // Put any new phis on the fixup list, and process them.
    FixupList.append(InsertedPHIs.begin() + StartingPHISize,
                     InsertedPHIs.end());
  }
  // Now that all fixups are done, rename all uses if we are asked.
  if (RenameUses) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    BasicBlock *StartBlock = MD->getBlock();
    // We are guaranteed there is a def in the block, because we just got it
    // handed to us in this function.
    MemoryAccess *FirstDef = &*MSSA->getWritableBlockDefs(StartBlock)->begin();
    // Convert to incoming value if it's a memorydef. A phi *is* already an
    // incoming value.
    if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
      FirstDef = MD->getDefiningAccess();

    MSSA->renamePass(MD->getBlock(), FirstDef, Visited);
    // We just inserted a phi into this block, so the incoming value will
    // become the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs) {
      MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
      if (Phi)
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
    }
  }
}
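
// Editorial usage sketch (hypothetical names): this mirrors how a hoisting
// pass might drive the updater when it materializes a new store `SI` in
// block `BB`:
//
//   MemoryAccess *NewMA = Updater.createMemoryAccessInBB(
//       SI, /*Definition=*/nullptr, BB, MemorySSA::Beginning);
//   Updater.insertDef(cast<MemoryDef>(NewMA), /*RenameUses=*/true);
//
// insertDef then recomputes the defining access, re-points downstream
// defs/phis at the new def, and renames uses when asked.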

void MemorySSAUpdater::fixupDefs(const SmallVectorImpl<WeakVH> &Vars) {
  SmallPtrSet<const BasicBlock *, 8> Seen;
  SmallVector<const BasicBlock *, 16> Worklist;
  for (auto &Var : Vars) {
    MemoryAccess *NewDef = dyn_cast_or_null<MemoryAccess>(Var);
    if (!NewDef)
      continue;
    // First, see if there is a local def after the operand.
    auto *Defs = MSSA->getWritableBlockDefs(NewDef->getBlock());
    auto DefIter = NewDef->getDefsIterator();

    // The temporary Phi is being fixed up, so unmark it; it may be optimized
    // again.
    if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(NewDef))
      NonOptPhis.erase(Phi);

    // If there is a local def after us, we only have to rename that.
    if (++DefIter != Defs->end()) {
      cast<MemoryDef>(&*DefIter)->setDefiningAccess(NewDef);
      continue;
    }

    // Otherwise, we need to search down through the CFG.
    // For each of our successors, handle it directly if there is a phi, or
    // place on the fixup worklist.
    for (const auto *S : successors(NewDef->getBlock())) {
      if (auto *MP = MSSA->getMemoryAccess(S))
        setMemoryPhiValueForBlock(MP, NewDef->getBlock(), NewDef);
      else
        Worklist.push_back(S);
    }

    while (!Worklist.empty()) {
      const BasicBlock *FixupBlock = Worklist.back();
      Worklist.pop_back();

      // Get the first def in the block that isn't a phi node.
      if (auto *Defs = MSSA->getWritableBlockDefs(FixupBlock)) {
        auto *FirstDef = &*Defs->begin();
        // The loop above and below should have taken care of phi nodes.
        assert(!isa<MemoryPhi>(FirstDef) &&
               "Should have already handled phi nodes!");
        // We are now this def's defining access, make sure we actually
        // dominate it.
        assert(MSSA->dominates(NewDef, FirstDef) &&
               "Should have dominated the new access");

        // This may insert new phi nodes, because we are not guaranteed the
        // block we are processing has a single pred, and depending where the
        // store was inserted, it may require phi nodes below it.
        cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef));
        return;
      }
      // We didn't find a def, so we must continue.
      for (const auto *S : successors(FixupBlock)) {
        // If there is a phi node, handle it.
        // Otherwise, put the block on the worklist.
        if (auto *MP = MSSA->getMemoryAccess(S))
          setMemoryPhiValueForBlock(MP, FixupBlock, NewDef);
        else {
          // If we cycle, we should have ended up at a phi node that we already
          // processed. FIXME: Double check this
          if (!Seen.insert(S).second)
            continue;
          Worklist.push_back(S);
        }
      }
    }
  }
}

void MemorySSAUpdater::removeEdge(BasicBlock *From, BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    MPhi->unorderedDeleteIncomingBlock(From);
    if (MPhi->getNumIncomingValues() == 1)
      removeMemoryAccess(MPhi);
  }
}

void MemorySSAUpdater::removeDuplicatePhiEdgesBetween(BasicBlock *From,
                                                      BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    bool Found = false;
    MPhi->unorderedDeleteIncomingIf([&](const MemoryAccess *, BasicBlock *B) {
      if (From != B)
        return false;
      if (Found)
        return true;
      Found = true;
      return false;
    });
    if (MPhi->getNumIncomingValues() == 1)
      removeMemoryAccess(MPhi);
  }
}
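
// Worked example (editorial): if a conditional branch `br %c, label %To,
// label %To` in From is folded to an unconditional branch, the MemoryPhi in
// To briefly carries two incoming entries for From. The predicate above keeps
// the first such entry and deletes the rest; if only one incoming value
// remains afterwards, the phi itself is removed.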

void MemorySSAUpdater::cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
                                        const ValueToValueMapTy &VMap,
                                        PhiToDefMap &MPhiMap) {
  auto GetNewDefiningAccess = [&](MemoryAccess *MA) -> MemoryAccess * {
    MemoryAccess *InsnDefining = MA;
    if (MemoryUseOrDef *DefMUD = dyn_cast<MemoryUseOrDef>(InsnDefining)) {
      if (!MSSA->isLiveOnEntryDef(DefMUD)) {
        Instruction *DefMUDI = DefMUD->getMemoryInst();
        assert(DefMUDI && "Found MemoryUseOrDef with no Instruction.");
        if (Instruction *NewDefMUDI =
                cast_or_null<Instruction>(VMap.lookup(DefMUDI)))
          InsnDefining = MSSA->getMemoryAccess(NewDefMUDI);
      }
    } else {
      MemoryPhi *DefPhi = cast<MemoryPhi>(InsnDefining);
      if (MemoryAccess *NewDefPhi = MPhiMap.lookup(DefPhi))
        InsnDefining = NewDefPhi;
    }
    assert(InsnDefining && "Defining instruction cannot be nullptr.");
    return InsnDefining;
  };

  const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
  if (!Acc)
    return;
  for (const MemoryAccess &MA : *Acc) {
    if (const MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&MA)) {
      Instruction *Insn = MUD->getMemoryInst();
      // Entry does not exist if the clone of the block did not clone all
      // instructions. This occurs in LoopRotate when cloning instructions
      // from the old header to the old preheader. The cloned instruction may
      // also be a simplified Value, not an Instruction (see LoopRotate).
      if (Instruction *NewInsn =
              dyn_cast_or_null<Instruction>(VMap.lookup(Insn))) {
        MemoryAccess *NewUseOrDef = MSSA->createDefinedAccess(
            NewInsn, GetNewDefiningAccess(MUD->getDefiningAccess()), MUD);
        MSSA->insertIntoListsForBlock(NewUseOrDef, NewBB, MemorySSA::End);
      }
    }
  }
}

void MemorySSAUpdater::updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
                                           ArrayRef<BasicBlock *> ExitBlocks,
                                           const ValueToValueMapTy &VMap,
                                           bool IgnoreIncomingWithNoClones) {
  PhiToDefMap MPhiMap;

  auto FixPhiIncomingValues = [&](MemoryPhi *Phi, MemoryPhi *NewPhi) {
    assert(Phi && NewPhi && "Invalid Phi nodes.");
    BasicBlock *NewPhiBB = NewPhi->getBlock();
    SmallPtrSet<BasicBlock *, 4> NewPhiBBPreds(pred_begin(NewPhiBB),
                                               pred_end(NewPhiBB));
    for (unsigned It = 0, E = Phi->getNumIncomingValues(); It < E; ++It) {
      MemoryAccess *IncomingAccess = Phi->getIncomingValue(It);
      BasicBlock *IncBB = Phi->getIncomingBlock(It);

      if (BasicBlock *NewIncBB = cast_or_null<BasicBlock>(VMap.lookup(IncBB)))
        IncBB = NewIncBB;
      else if (IgnoreIncomingWithNoClones)
        continue;

      // Now we have IncBB, and will need to add incoming from it to NewPhi.

      // If IncBB is not a predecessor of NewPhiBB, then do not add it.
      // NewPhiBB was cloned without that edge.
      if (!NewPhiBBPreds.count(IncBB))
        continue;

      // Determine incoming value and add it as incoming from IncBB.
      if (MemoryUseOrDef *IncMUD = dyn_cast<MemoryUseOrDef>(IncomingAccess)) {
        if (!MSSA->isLiveOnEntryDef(IncMUD)) {
          Instruction *IncI = IncMUD->getMemoryInst();
          assert(IncI && "Found MemoryUseOrDef with no Instruction.");
          if (Instruction *NewIncI =
                  cast_or_null<Instruction>(VMap.lookup(IncI))) {
            IncMUD = MSSA->getMemoryAccess(NewIncI);
            assert(IncMUD &&
                   "MemoryUseOrDef cannot be null, all preds processed.");
          }
        }
        NewPhi->addIncoming(IncMUD, IncBB);
      } else {
        MemoryPhi *IncPhi = cast<MemoryPhi>(IncomingAccess);
        if (MemoryAccess *NewDefPhi = MPhiMap.lookup(IncPhi))
          NewPhi->addIncoming(NewDefPhi, IncBB);
        else
          NewPhi->addIncoming(IncPhi, IncBB);
      }
    }
  };

  auto ProcessBlock = [&](BasicBlock *BB) {
    BasicBlock *NewBlock = cast_or_null<BasicBlock>(VMap.lookup(BB));
    if (!NewBlock)
      return;

    assert(!MSSA->getWritableBlockAccesses(NewBlock) &&
           "Cloned block should have no accesses");

    // Add MemoryPhi.
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB)) {
      MemoryPhi *NewPhi = MSSA->createMemoryPhi(NewBlock);
      MPhiMap[MPhi] = NewPhi;
    }
    // Update Uses and Defs.
    cloneUsesAndDefs(BB, NewBlock, VMap, MPhiMap);
  };

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    ProcessBlock(BB);

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
      if (MemoryAccess *NewPhi = MPhiMap.lookup(MPhi))
        FixPhiIncomingValues(MPhi, cast<MemoryPhi>(NewPhi));
}
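
// Editorial usage sketch (hypothetical names): unrolling-style callers clone
// the loop with a ValueToValueMapTy and hand the same map here, followed by
// the exit-block update:
//
//   LoopBlocksRPO RPO(L);
//   RPO.perform(&LI);
//   ValueToValueMapTy VMap;
//   // ... clone the blocks of L, filling VMap ...
//   Updater.updateForClonedLoop(RPO, ExitBlocks, VMap);
//   Updater.updateExitBlocksForClonedLoop(ExitBlocks, VMap, DT);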

void MemorySSAUpdater::updateForClonedBlockIntoPred(
    BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM) {
  // All defs/phis from outside BB that are used in BB are valid uses in P1,
  // since those defs/phis must have dominated BB and so also dominate P1.
  // Defs from BB being used in BB will be replaced with the cloned defs from
  // VM. The uses of BB's Phi (if it exists) in BB will be replaced by the
  // incoming def into the Phi from P1.
  PhiToDefMap MPhiMap;
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
    MPhiMap[MPhi] = MPhi->getIncomingValueForBlock(P1);
  cloneUsesAndDefs(BB, P1, VM, MPhiMap);
}

template <typename Iter>
void MemorySSAUpdater::privateUpdateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, Iter ValuesBegin, Iter ValuesEnd,
    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> Updates;
  // Update/insert phis in all successors of exit blocks.
  for (auto *Exit : ExitBlocks)
    for (const ValueToValueMapTy *VMap : make_range(ValuesBegin, ValuesEnd))
      if (BasicBlock *NewExit = cast_or_null<BasicBlock>(VMap->lookup(Exit))) {
        BasicBlock *ExitSucc = NewExit->getTerminator()->getSuccessor(0);
        Updates.push_back({DT.Insert, NewExit, ExitSucc});
      }
  applyInsertUpdates(Updates, DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, const ValueToValueMapTy &VMap,
    DominatorTree &DT) {
  const ValueToValueMapTy *const Arr[] = {&VMap};
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, std::begin(Arr),
                                       std::end(Arr), DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks,
    ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT) {
  auto GetPtr = [&](const std::unique_ptr<ValueToValueMapTy> &I) {
    return I.get();
  };
  using MappedIteratorType =
      mapped_iterator<const std::unique_ptr<ValueToValueMapTy> *,
                      decltype(GetPtr)>;
  auto MapBegin = MappedIteratorType(VMaps.begin(), GetPtr);
  auto MapEnd = MappedIteratorType(VMaps.end(), GetPtr);
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, MapBegin, MapEnd, DT);
}

void MemorySSAUpdater::applyUpdates(ArrayRef<CFGUpdate> Updates,
                                    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> RevDeleteUpdates;
  SmallVector<CFGUpdate, 4> InsertUpdates;
  for (auto &Update : Updates) {
    if (Update.getKind() == DT.Insert)
      InsertUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
    else
      RevDeleteUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
  }

  if (!RevDeleteUpdates.empty()) {
    // Update for inserted edges: use NewDT and snapshot CFG as if deletes had
    // not occurred.
    // FIXME: This creates a new DT, so it's more expensive to do mixed
    // deletes/inserts vs just inserts. We can do an incremental update on the
    // DT to revert deletes, then re-delete the edges. Teaching DT to do this
    // is part of a pending cleanup.
    DominatorTree NewDT(DT, RevDeleteUpdates);
    GraphDiff<BasicBlock *> GD(RevDeleteUpdates);
    applyInsertUpdates(InsertUpdates, NewDT, &GD);
  } else {
    GraphDiff<BasicBlock *> GD;
    applyInsertUpdates(InsertUpdates, DT, &GD);
  }

  // Update for deleted edges.
  for (auto &Update : RevDeleteUpdates)
    removeEdge(Update.getFrom(), Update.getTo());
}
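
// Editorial usage sketch: CFG edits are batched as CFGUpdate records with the
// same {Kind, From, To} shape the DominatorTree updater consumes. The code
// above assumes an already-updated DT, since GetLastDef walks IDoms:
//
//   SmallVector<CFGUpdate, 4> Updates;
//   Updates.push_back({DominatorTree::Insert, Pred, NewSucc});
//   Updates.push_back({DominatorTree::Delete, Pred, OldSucc});
//   DT.applyUpdates(Updates);
//   Updater.applyUpdates(Updates, DT);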

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT) {
  GraphDiff<BasicBlock *> GD;
  applyInsertUpdates(Updates, DT, &GD);
}

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT,
                                          const GraphDiff<BasicBlock *> *GD) {
  // Get recursive last Def, assuming well formed MSSA and updated DT.
  auto GetLastDef = [&](BasicBlock *BB) -> MemoryAccess * {
    while (true) {
      MemorySSA::DefsList *Defs = MSSA->getWritableBlockDefs(BB);
      // Return last Def or Phi in BB, if it exists.
      if (Defs)
        return &*(--Defs->end());

      // Check number of predecessors, we only care if there's more than one.
      unsigned Count = 0;
      BasicBlock *Pred = nullptr;
      for (auto &Pair : children<GraphDiffInvBBPair>({GD, BB})) {
        Pred = Pair.second;
        Count++;
        if (Count == 2)
          break;
      }

      // If BB has multiple predecessors, get last definition from IDom.
      if (Count != 1) {
        // [SimpleLoopUnswitch] If BB is a dead block, about to be deleted, its
        // DT is invalidated. Return LoE as its last def. This will be added to
        // MemoryPhi node, and later deleted when the block is deleted.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        if (auto *IDom = DT.getNode(BB)->getIDom())
          if (IDom->getBlock() != BB) {
            BB = IDom->getBlock();
            continue;
          }
        return MSSA->getLiveOnEntryDef();
      } else {
        // Single predecessor, BB cannot be dead. GetLastDef of Pred.
        assert(Count == 1 && Pred && "Single predecessor expected.");
        BB = Pred;
      }
    }
    llvm_unreachable("Unable to get last definition.");
  };

  // Get nearest IDom given a set of blocks.
  // TODO: this can be optimized by starting the search at the node with the
  // lowest level (highest in the tree).
  auto FindNearestCommonDominator =
      [&](const SmallSetVector<BasicBlock *, 2> &BBSet) -> BasicBlock * {
    BasicBlock *PrevIDom = *BBSet.begin();
    for (auto *BB : BBSet)
      PrevIDom = DT.findNearestCommonDominator(PrevIDom, BB);
    return PrevIDom;
  };

  // Get all blocks that dominate PrevIDom, stop when reaching CurrIDom. Do not
  // include CurrIDom.
  auto GetNoLongerDomBlocks =
      [&](BasicBlock *PrevIDom, BasicBlock *CurrIDom,
          SmallVectorImpl<BasicBlock *> &BlocksPrevDom) {
        if (PrevIDom == CurrIDom)
          return;
        BlocksPrevDom.push_back(PrevIDom);
        BasicBlock *NextIDom = PrevIDom;
        while (BasicBlock *UpIDom =
                   DT.getNode(NextIDom)->getIDom()->getBlock()) {
          if (UpIDom == CurrIDom)
            break;
          BlocksPrevDom.push_back(UpIDom);
          NextIDom = UpIDom;
        }
      };

  // Map a BB to its predecessors: added + previously existing. To get a
  // deterministic order, store predecessors as SetVectors. The order in each
  // will be defined by the order in Updates (fixed) and the order given by
  // children<> (also fixed). Since we further iterate over these ordered sets,
  // we lose the information of multiple edges possibly existing between two
  // blocks, so we'll keep an EdgeCount map for that.
  // An alternate implementation could keep an unordered set for the
  // predecessors, traverse either Updates or children<> each time to get the
  // deterministic order, and drop the usage of EdgeCount. This alternate
  // approach would still require querying the maps for each predecessor, and
  // the children<> call has additional computation inside for creating the
  // snapshot-graph predecessors. As such, we favor using a little additional
  // storage and less compute time. This decision can be revisited if we find
  // the alternative more favorable.

  struct PredInfo {
    SmallSetVector<BasicBlock *, 2> Added;
    SmallSetVector<BasicBlock *, 2> Prev;
  };
  SmallDenseMap<BasicBlock *, PredInfo> PredMap;

  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    auto &AddedBlockSet = PredMap[BB].Added;
    AddedBlockSet.insert(Edge.getFrom());
  }

  // Store all existing predecessors for each BB, at least one must exist.
  SmallDenseMap<std::pair<BasicBlock *, BasicBlock *>, int> EdgeCountMap;
  SmallPtrSet<BasicBlock *, 2> NewBlocks;
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    auto &PrevBlockSet = BBPredPair.second.Prev;
    for (auto &Pair : children<GraphDiffInvBBPair>({GD, BB})) {
      BasicBlock *Pi = Pair.second;
      if (!AddedBlockSet.count(Pi))
        PrevBlockSet.insert(Pi);
      EdgeCountMap[{Pi, BB}]++;
    }

    if (PrevBlockSet.empty()) {
      assert(pred_size(BB) == AddedBlockSet.size() && "Duplicate edges added.");
      LLVM_DEBUG(
          dbgs()
          << "Adding a predecessor to a block with no predecessors. "
             "This must be an edge added to a new, likely cloned, block. "
             "Its memory accesses must be already correct, assuming completed "
             "via the updateExitBlocksForClonedLoop API. "
             "Assert a single such edge is added so no phi addition or "
             "additional processing is required.\n");
      assert(AddedBlockSet.size() == 1 &&
             "Can only handle adding one predecessor to a new block.");
      // Need to remove new blocks from PredMap. Remove below to not invalidate
      // the iterator here.
      NewBlocks.insert(BB);
    }
  }
  // Nothing to process for new/cloned blocks.
  for (auto *BB : NewBlocks)
    PredMap.erase(BB);

  SmallVector<BasicBlock *, 8> BlocksToProcess;
  SmallVector<BasicBlock *, 16> BlocksWithDefsToReplace;

  // First create MemoryPhis in all blocks that don't have one. Create in the
  // order found in Updates, not in PredMap, to get deterministic numbering.
  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    if (PredMap.count(BB) && !MSSA->getMemoryAccess(BB))
      MSSA->createMemoryPhi(BB);
  }

  // Now we'll fill in the MemoryPhis with the right incoming values.
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &PrevBlockSet = BBPredPair.second.Prev;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    assert(!PrevBlockSet.empty() &&
           "At least one previous predecessor must exist.");

    // TODO: if this becomes a bottleneck, we can save on GetLastDef calls by
    // keeping this map before the loop. We can reuse already populated entries
    // if an edge is added from the same predecessor to two different blocks,
    // and this does happen in rotate. Note that the map needs to be updated
    // when deleting non-necessary phis below, if the phi is in the map, by
    // replacing the value with DefP1.
    SmallDenseMap<BasicBlock *, MemoryAccess *> LastDefAddedPred;
    for (auto *AddedPred : AddedBlockSet) {
      auto *DefPn = GetLastDef(AddedPred);
      assert(DefPn != nullptr && "Unable to find last definition.");
      LastDefAddedPred[AddedPred] = DefPn;
    }

    MemoryPhi *NewPhi = MSSA->getMemoryAccess(BB);
    // If Phi is not empty, add an incoming edge from each added pred. Must
    // still compute blocks with defs to replace for this block below.
    if (NewPhi->getNumOperands()) {
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
    } else {
      // Pick any existing predecessor and get its definition. All other
      // existing predecessors should have the same one, since no phi existed.
      auto *P1 = *PrevBlockSet.begin();
      MemoryAccess *DefP1 = GetLastDef(P1);

      // Check DefP1 against all Defs in LastDefPredPair. If all the same,
      // nothing to add.
      bool InsertPhi = false;
      for (auto LastDefPredPair : LastDefAddedPred)
        if (DefP1 != LastDefPredPair.second) {
          InsertPhi = true;
          break;
        }
      if (!InsertPhi) {
        // Since NewPhi may be used in other newly added Phis, replace all uses
        // of NewPhi with the definition coming from all predecessors (DefP1),
        // before deleting it.
        NewPhi->replaceAllUsesWith(DefP1);
        removeMemoryAccess(NewPhi);
        continue;
      }

      // Update Phi with new values for new predecessors and old value for all
      // other predecessors. Since AddedBlockSet and PrevBlockSet are ordered
      // sets, the order of entries in NewPhi is deterministic.
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
      for (auto *Pred : PrevBlockSet)
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(DefP1, Pred);

      // Insert BB in the set of blocks that now have a definition. We'll use
      // this to compute IDF and add Phis there next.
      BlocksToProcess.push_back(BB);
    }

    // Get all blocks that used to dominate BB and no longer do after adding
    // AddedBlockSet, where PrevBlockSet are the previously known predecessors.
    assert(DT.getNode(BB)->getIDom() && "BB does not have valid idom");
    BasicBlock *PrevIDom = FindNearestCommonDominator(PrevBlockSet);
    assert(PrevIDom && "Previous IDom should exist");
    BasicBlock *NewIDom = DT.getNode(BB)->getIDom()->getBlock();
    assert(NewIDom && "BB should have a new valid idom");
    assert(DT.dominates(NewIDom, PrevIDom) &&
           "New idom should dominate old idom");
    GetNoLongerDomBlocks(PrevIDom, NewIDom, BlocksWithDefsToReplace);
  }

  // Compute IDF and add Phis in all IDF blocks that do not have one.
  SmallVector<BasicBlock *, 32> IDFBlocks;
  if (!BlocksToProcess.empty()) {
    ForwardIDFCalculator IDFs(DT);
    SmallPtrSet<BasicBlock *, 16> DefiningBlocks(BlocksToProcess.begin(),
                                                 BlocksToProcess.end());
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);
    for (auto *BBIDF : IDFBlocks) {
      if (auto *IDFPhi = MSSA->getMemoryAccess(BBIDF)) {
        // Update existing Phi.
        // FIXME: some updates may be redundant, try to optimize and skip some.
        for (unsigned I = 0, E = IDFPhi->getNumIncomingValues(); I < E; ++I)
          IDFPhi->setIncomingValue(I, GetLastDef(IDFPhi->getIncomingBlock(I)));
      } else {
        IDFPhi = MSSA->createMemoryPhi(BBIDF);
        for (auto &Pair : children<GraphDiffInvBBPair>({GD, BBIDF})) {
          BasicBlock *Pi = Pair.second;
          IDFPhi->addIncoming(GetLastDef(Pi), Pi);
        }
      }
    }
  }

  // Now for all defs in BlocksWithDefsToReplace, if there are uses they no
  // longer dominate, replace those with the closest dominating def.
  // This will also update optimized accesses, as they're also uses.
  for (auto *BlockWithDefsToReplace : BlocksWithDefsToReplace) {
    if (auto DefsList = MSSA->getWritableBlockDefs(BlockWithDefsToReplace)) {
      for (auto &DefToReplaceUses : *DefsList) {
        BasicBlock *DominatingBlock = DefToReplaceUses.getBlock();
        Value::use_iterator UI = DefToReplaceUses.use_begin(),
                            E = DefToReplaceUses.use_end();
        for (; UI != E;) {
          Use &U = *UI;
          ++UI;
          MemoryAccess *Usr = dyn_cast<MemoryAccess>(U.getUser());
          if (MemoryPhi *UsrPhi = dyn_cast<MemoryPhi>(Usr)) {
            BasicBlock *DominatedBlock = UsrPhi->getIncomingBlock(U);
            if (!DT.dominates(DominatingBlock, DominatedBlock))
              U.set(GetLastDef(DominatedBlock));
          } else {
            BasicBlock *DominatedBlock = Usr->getBlock();
            if (!DT.dominates(DominatingBlock, DominatedBlock)) {
              if (auto *DomBlPhi = MSSA->getMemoryAccess(DominatedBlock))
                U.set(DomBlPhi);
              else {
                auto *IDom = DT.getNode(DominatedBlock)->getIDom();
                assert(IDom && "Block must have a valid IDom.");
                U.set(GetLastDef(IDom->getBlock()));
              }
              cast<MemoryUseOrDef>(Usr)->resetOptimized();
            }
          }
        }
      }
    }
  }
}
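
// Worked example (editorial): adding edge D->J where J previously had the
// single predecessor P. GetLastDef(D) and GetLastDef(P) are compared; if they
// differ, J's new MemoryPhi gets one incoming entry per edge (EdgeCountMap
// covers D->J appearing twice, e.g. from a switch), J joins BlocksToProcess
// for the IDF walk, and defs in blocks J's old IDom chain no longer dominates
// are re-pointed via BlocksWithDefsToReplace.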

// Move What before Where in the MemorySSA IR.
template <class WhereType>
void MemorySSAUpdater::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                              WhereType Where) {
  // Mark MemoryPhi users of What not to be optimized.
  for (auto *U : What->users())
    if (MemoryPhi *PhiUser = dyn_cast<MemoryPhi>(U))
      NonOptPhis.insert(PhiUser);

  // Replace all our users with our defining access.
  What->replaceAllUsesWith(What->getDefiningAccess());

  // Let MemorySSA take care of moving it around in the lists.
  MSSA->moveTo(What, BB, Where);

  // Now reinsert it into the IR and do whatever fixups are needed.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    insertDef(MD);
  else
    insertUse(cast<MemoryUse>(What));

  // Clear dangling pointers. We added all MemoryPhi users, but not all
  // of them are removed by fixupDefs().
  NonOptPhis.clear();
}
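
// Editorial usage sketch (hypothetical names): a pass that reorders two memory
// instructions keeps MemorySSA in sync by moving the access rather than
// deleting and recreating it:
//
//   MemoryUseOrDef *What = MSSA.getMemoryAccess(SI);
//   MemoryUseOrDef *Where = MSSA.getMemoryAccess(OtherInst);
//   Updater.moveAfter(What, Where);   // or Updater.moveBefore(What, Where);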

// Move What before Where in the MemorySSA IR.
void MemorySSAUpdater::moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), Where->getIterator());
}

// Move What after Where in the MemorySSA IR.
void MemorySSAUpdater::moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), ++Where->getIterator());
}

void MemorySSAUpdater::moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
                                   MemorySSA::InsertionPlace Where) {
  return moveTo(What, BB, Where);
}

// All accesses in To used to be in From. Move to end and update access lists.
void MemorySSAUpdater::moveAllAccesses(BasicBlock *From, BasicBlock *To,
                                       Instruction *Start) {

  MemorySSA::AccessList *Accs = MSSA->getWritableBlockAccesses(From);
  if (!Accs)
    return;

  MemoryAccess *FirstInNew = nullptr;
  for (Instruction &I : make_range(Start->getIterator(), To->end()))
    if ((FirstInNew = MSSA->getMemoryAccess(&I)))
      break;
  if (!FirstInNew)
    return;

  auto *MUD = cast<MemoryUseOrDef>(FirstInNew);
  do {
    auto NextIt = ++MUD->getIterator();
    MemoryUseOrDef *NextMUD = (!Accs || NextIt == Accs->end())
                                  ? nullptr
                                  : cast<MemoryUseOrDef>(&*NextIt);
    MSSA->moveTo(MUD, To, MemorySSA::End);
    // Moving MUD from Accs in the moveTo above may delete Accs, so we need to
    // retrieve it again.
    Accs = MSSA->getWritableBlockAccesses(From);
    MUD = NextMUD;
  } while (MUD);
}

void MemorySSAUpdater::moveAllAfterSpliceBlocks(BasicBlock *From,
                                                BasicBlock *To,
                                                Instruction *Start) {
  assert(MSSA->getBlockAccesses(To) == nullptr &&
         "To block is expected to be free of MemoryAccesses.");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(To))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

void MemorySSAUpdater::moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
                                               Instruction *Start) {
  assert(From->getSinglePredecessor() == To &&
         "From block is expected to have a single predecessor (To).");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(From))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

/// If all arguments of a MemoryPHI are defined by the same incoming
/// argument, return that argument.
static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
  MemoryAccess *MA = nullptr;

  for (auto &Arg : MP->operands()) {
    if (!MA)
      MA = cast<MemoryAccess>(Arg);
    else if (MA != Arg)
      return nullptr;
  }
  return MA;
}
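
// Worked example (editorial): for 3 = MemoryPhi({B1, 1}, {B2, 1}, {B3, 1})
// this returns access 1; for 3 = MemoryPhi({B1, 1}, {B2, 2}) it returns
// nullptr.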

void MemorySSAUpdater::wireOldPredecessorsToNewImmediatePredecessor(
    BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
    bool IdenticalEdgesWereMerged) {
  assert(!MSSA->getWritableBlockAccesses(New) &&
         "Access list should be null for a new block.");
  MemoryPhi *Phi = MSSA->getMemoryAccess(Old);
  if (!Phi)
    return;
  if (Old->hasNPredecessors(1)) {
    assert(pred_size(New) == Preds.size() &&
           "Should have moved all predecessors.");
    MSSA->moveTo(Phi, New, MemorySSA::Beginning);
  } else {
    assert(!Preds.empty() && "Must be moving at least one predecessor to the "
                             "new immediate predecessor.");
    MemoryPhi *NewPhi = MSSA->createMemoryPhi(New);
    SmallPtrSet<BasicBlock *, 16> PredsSet(Preds.begin(), Preds.end());
    // Currently only support the case of removing a single incoming edge when
    // identical edges were not merged.
    if (!IdenticalEdgesWereMerged)
      assert(PredsSet.size() == Preds.size() &&
             "If identical edges were not merged, we cannot have duplicate "
             "blocks in the predecessors");
    Phi->unorderedDeleteIncomingIf([&](MemoryAccess *MA, BasicBlock *B) {
      if (PredsSet.count(B)) {
        NewPhi->addIncoming(MA, B);
        if (!IdenticalEdgesWereMerged)
          PredsSet.erase(B);
        return true;
      }
      return false;
    });
    Phi->addIncoming(NewPhi, New);
    if (onlySingleValue(NewPhi))
      removeMemoryAccess(NewPhi);
  }
}

void MemorySSAUpdater::removeMemoryAccess(MemoryAccess *MA, bool OptimizePhis) {
  assert(!MSSA->isLiveOnEntryDef(MA) &&
         "Trying to remove the live on entry def");
  // We can only delete phi nodes if they have no uses, or we can replace all
  // uses with a single definition.
  MemoryAccess *NewDefTarget = nullptr;
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
    // Note that it is sufficient to know that all edges of the phi node have
    // the same argument. If they do, by the definition of dominance frontiers
    // (which we used to place this phi), that argument must dominate this phi,
    // and thus, must dominate the phi's uses, and so we will not hit the assert
    // below.
    NewDefTarget = onlySingleValue(MP);
    assert((NewDefTarget || MP->use_empty()) &&
           "We can't delete this memory phi");
  } else {
    NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
  }

  SmallSetVector<MemoryPhi *, 4> PhisToCheck;

  // Re-point the uses at our defining access.
  if (!isa<MemoryUse>(MA) && !MA->use_empty()) {
    // Reset optimized on users of this store, and reset the uses.
    // A few notes:
    // 1. This is a slightly modified version of RAUW to avoid walking the
    // uses twice here.
    // 2. If we wanted to be complete, we would have to reset the optimized
    // flags on users of phi nodes if doing the below makes a phi node have all
    // the same arguments. Instead, we prefer users to removeMemoryAccess those
    // phi nodes, because doing it here would be N^3.
    if (MA->hasValueHandle())
      ValueHandleBase::ValueIsRAUWd(MA, NewDefTarget);
    // Note: We assume MemorySSA is not used in metadata since it's not really
    // part of the IR.

    while (!MA->use_empty()) {
      Use &U = *MA->use_begin();
      if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
        MUD->resetOptimized();
      if (OptimizePhis)
        if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U.getUser()))
          PhisToCheck.insert(MP);
      U.set(NewDefTarget);
    }
  }

  // The call below to erase will destroy MA, so we can't change the order we
  // are doing things here.
  MSSA->removeFromLookups(MA);
  MSSA->removeFromLists(MA);

  // Optionally optimize Phi uses. This will recursively remove trivial phis.
  if (!PhisToCheck.empty()) {
    SmallVector<WeakVH, 16> PhisToOptimize{PhisToCheck.begin(),
                                           PhisToCheck.end()};
    PhisToCheck.clear();

    unsigned PhisSize = PhisToOptimize.size();
    while (PhisSize-- > 0)
      if (MemoryPhi *MP =
              cast_or_null<MemoryPhi>(PhisToOptimize.pop_back_val())) {
        auto OperRange = MP->operands();
        tryRemoveTrivialPhi(MP, OperRange);
      }
  }
}
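
// Editorial usage sketch (hypothetical names): dead-store-elimination style
// callers detach the access before erasing the IR instruction:
//
//   if (MemoryAccess *MA = MSSA.getMemoryAccess(SI))
//     Updater.removeMemoryAccess(MA, /*OptimizePhis=*/true);
//   SI->eraseFromParent();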

void MemorySSAUpdater::removeBlocks(
    const SmallPtrSetImpl<BasicBlock *> &DeadBlocks) {
  // First delete all uses of BB in MemoryPhis.
  for (BasicBlock *BB : DeadBlocks) {
    Instruction *TI = BB->getTerminator();
    assert(TI && "Basic block expected to have a terminator instruction");
    for (BasicBlock *Succ : successors(TI))
      if (!DeadBlocks.count(Succ))
        if (MemoryPhi *MP = MSSA->getMemoryAccess(Succ)) {
          MP->unorderedDeleteIncomingBlock(BB);
          if (MP->getNumIncomingValues() == 1)
            removeMemoryAccess(MP);
        }
    // Drop all references of all accesses in BB.
    if (MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB))
      for (MemoryAccess &MA : *Acc)
        MA.dropAllReferences();
  }

  // Next, delete all memory accesses in each block.
  for (BasicBlock *BB : DeadBlocks) {
    MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB);
    if (!Acc)
      continue;
    for (auto AB = Acc->begin(), AE = Acc->end(); AB != AE;) {
      MemoryAccess *MA = &*AB;
      ++AB;
      MSSA->removeFromLookups(MA);
      MSSA->removeFromLists(MA);
    }
  }
}

MemoryAccess *MemorySSAUpdater::createMemoryAccessInBB(
    Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
    MemorySSA::InsertionPlace Point) {
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsForBlock(NewAccess, BB, Point);
  return NewAccess;
}

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessBefore(
    Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              InsertPt->getIterator());
  return NewAccess;
}

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessAfter(
    Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              ++InsertPt->getIterator());
  return NewAccess;
}
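
// Editorial usage sketch (hypothetical names): cloning an instruction next to
// an existing access reuses that access's position and defining access, then
// lets the updater finish the wiring:
//
//   MemoryUseOrDef *OldMA = MSSA.getMemoryAccess(OldInst);
//   MemoryUseOrDef *NewMA = Updater.createMemoryAccessAfter(
//       NewInst, OldMA->getDefiningAccess(), OldMA);
//   if (auto *MD = dyn_cast<MemoryDef>(NewMA))
//     Updater.insertDef(MD);
//   else
//     Updater.insertUse(cast<MemoryUse>(NewMA));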