//===- GVNHoist.cpp - Hoist scalar and load expressions ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass hoists expressions from branches to a common dominator. It uses
// GVN (global value numbering) to discover expressions computing the same
// values. The primary goals of code-hoisting are:
// 1. To reduce the code size.
// 2. In some cases reduce the critical path (by exposing more ILP).
//
// Hoisting may affect performance in some cases. To mitigate that, hoisting
// is disabled in the following cases:
// 1. Scalars across calls.
// 2. geps when the corresponding load/store cannot be hoisted.
//===----------------------------------------------------------------------===//
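//
// Illustrative sketch (not part of the original sources): in
//
//   if (c) { x = a + b; ... } else { y = a + b; ... }
//
// both branches compute the same value number for "a + b", so the addition
// can be hoisted into the common dominator and its result reused on both
// paths, reducing code size and exposing the computation earlier.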

#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/MemorySSA.h"

using namespace llvm;

#define DEBUG_TYPE "gvn-hoist"

STATISTIC(NumHoisted, "Number of instructions hoisted");
STATISTIC(NumRemoved, "Number of instructions removed");
STATISTIC(NumLoadsHoisted, "Number of loads hoisted");
STATISTIC(NumLoadsRemoved, "Number of loads removed");
STATISTIC(NumStoresHoisted, "Number of stores hoisted");
STATISTIC(NumStoresRemoved, "Number of stores removed");
STATISTIC(NumCallsHoisted, "Number of calls hoisted");
STATISTIC(NumCallsRemoved, "Number of calls removed");

static cl::opt<int>
    MaxHoistedThreshold("gvn-max-hoisted", cl::Hidden, cl::init(-1),
                        cl::desc("Max number of instructions to hoist "
                                 "(default unlimited = -1)"));

static cl::opt<int> MaxNumberOfBBSInPath(
    "gvn-hoist-max-bbs", cl::Hidden, cl::init(4),
    cl::desc("Max number of basic blocks on the path between "
             "hoisting locations (default = 4, unlimited = -1)"));

static cl::opt<int> MaxDepthInBB(
    "gvn-hoist-max-depth", cl::Hidden, cl::init(100),
    cl::desc("Hoist instructions from the beginning of the BB up to the "
             "maximum specified depth (default = 100, unlimited = -1)"));

static cl::opt<int>
    MaxChainLength("gvn-hoist-max-chain-length", cl::Hidden, cl::init(10),
                   cl::desc("Maximum length of dependent chains to hoist "
                            "(default = 10, unlimited = -1)"));

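// Usage note (not part of the original sources): the cl::Hidden flags above
// are still accepted by `opt` on the command line, e.g.
//   opt -gvn-hoist -gvn-hoist-max-depth=50 -S input.ll
// They simply do not show up in the default -help listing.
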
namespace {

// Provides a sorting function based on the execution order of two
// instructions.
struct SortByDFSIn {
private:
  DenseMap<const Value *, unsigned> &DFSNumber;

public:
  SortByDFSIn(DenseMap<const Value *, unsigned> &D) : DFSNumber(D) {}

  // Returns true when A executes before B.
  bool operator()(const Instruction *A, const Instruction *B) const {
    // FIXME: libc++ has a std::sort() algorithm that will call the compare
    // function on the same element.  Once PR20837 is fixed and some more years
    // pass by and all the buildbots have moved to a corrected std::sort(),
    // enable the following assert:
    //
    // assert(A != B);

    const BasicBlock *BA = A->getParent();
    const BasicBlock *BB = B->getParent();
    unsigned ADFS, BDFS;
    if (BA == BB) {
      ADFS = DFSNumber.lookup(A);
      BDFS = DFSNumber.lookup(B);
    } else {
      ADFS = DFSNumber.lookup(BA);
      BDFS = DFSNumber.lookup(BB);
    }
    assert(ADFS && BDFS);
    return ADFS < BDFS;
  }
};

// A map from a pair of VNs to all the instructions with those VNs.
typedef DenseMap<std::pair<unsigned, unsigned>, SmallVector<Instruction *, 4>>
    VNtoInsns;
// An invalid value number used when inserting a single value number into
// VNtoInsns.
enum : unsigned { InvalidVN = ~2U };

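// Note on the keys: scalars, loads, and calls are keyed by a single value
// number as {VN, InvalidVN}, while stores are keyed by the pair
// {VN of the pointer, VN of the stored value} (see the insert() methods below).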
// Records all scalar instructions that are candidates for code hoisting.
class InsnInfo {
  VNtoInsns VNtoScalars;

public:
  // Inserts I and its value number in VNtoScalars.
  void insert(Instruction *I, GVN::ValueTable &VN) {
    // Scalar instruction.
    unsigned V = VN.lookupOrAdd(I);
    VNtoScalars[{V, InvalidVN}].push_back(I);
  }

  const VNtoInsns &getVNTable() const { return VNtoScalars; }
};

// Records all load instructions that are candidates for code hoisting.
class LoadInfo {
  VNtoInsns VNtoLoads;

public:
  // Inserts Load and the value number of its memory address in VNtoLoads.
  void insert(LoadInst *Load, GVN::ValueTable &VN) {
    if (Load->isSimple()) {
      unsigned V = VN.lookupOrAdd(Load->getPointerOperand());
      VNtoLoads[{V, InvalidVN}].push_back(Load);
    }
  }

  const VNtoInsns &getVNTable() const { return VNtoLoads; }
};

// Records all store instructions that are candidates for code hoisting.
class StoreInfo {
  VNtoInsns VNtoStores;

public:
  // Inserts the Store and a hash number of the store address and the stored
  // value in VNtoStores.
  void insert(StoreInst *Store, GVN::ValueTable &VN) {
    if (!Store->isSimple())
      return;
    // Hash the store address and the stored value.
    Value *Ptr = Store->getPointerOperand();
    Value *Val = Store->getValueOperand();
    VNtoStores[{VN.lookupOrAdd(Ptr), VN.lookupOrAdd(Val)}].push_back(Store);
  }

  const VNtoInsns &getVNTable() const { return VNtoStores; }
};

// Records all call instructions that are candidates for code hoisting.
class CallInfo {
  VNtoInsns VNtoCallsScalars;
  VNtoInsns VNtoCallsLoads;
  VNtoInsns VNtoCallsStores;

public:
  // Inserts Call and its value number in one of the VNtoCalls* containers.
  void insert(CallInst *Call, GVN::ValueTable &VN) {
    // A call that doesNotAccessMemory is handled as a Scalar,
    // onlyReadsMemory will be handled as a Load instruction,
    // all other calls will be handled as stores.
    unsigned V = VN.lookupOrAdd(Call);
    auto Entry = std::make_pair(V, InvalidVN);

    if (Call->doesNotAccessMemory())
      VNtoCallsScalars[Entry].push_back(Call);
    else if (Call->onlyReadsMemory())
      VNtoCallsLoads[Entry].push_back(Call);
    else
      VNtoCallsStores[Entry].push_back(Call);
  }

  const VNtoInsns &getScalarVNTable() const { return VNtoCallsScalars; }

  const VNtoInsns &getLoadVNTable() const { return VNtoCallsLoads; }

  const VNtoInsns &getStoreVNTable() const { return VNtoCallsStores; }
};

typedef DenseMap<const BasicBlock *, bool> BBSideEffectsSet;
typedef SmallVector<Instruction *, 4> SmallVecInsn;
typedef SmallVectorImpl<Instruction *> SmallVecImplInsn;

// Merge the metadata of I into ReplInst, keeping only the kinds that remain
// valid when one instruction replaces several copies of the computation.
static void combineKnownMetadata(Instruction *ReplInst, Instruction *I) {
  // Note: the list of metadata kinds below reconstructs an elided original; it
  // follows the set commonly merged by GVN-style passes.
  static const unsigned KnownIDs[] = {
      LLVMContext::MD_tbaa,    LLVMContext::MD_alias_scope,
      LLVMContext::MD_noalias, LLVMContext::MD_range,
      LLVMContext::MD_fpmath,  LLVMContext::MD_invariant_load,
      LLVMContext::MD_invariant_group};
  combineMetadata(ReplInst, I, KnownIDs);
}

// This pass hoists common computations across branches sharing a common
// dominator. The primary goal is to reduce code size, and in some cases
// reduce the critical path (by exposing more ILP).
class GVNHoist {
public:
  GVNHoist(DominatorTree *DT, AliasAnalysis *AA, MemoryDependenceResults *MD,
           MemorySSA *MSSA)
      : DT(DT), AA(AA), MD(MD), MSSA(MSSA), HoistingGeps(false),
        HoistedCtr(0) {}

  bool run(Function &F) {
    VN.setDomTree(DT);
    VN.setAliasAnalysis(AA);
    VN.setMemDep(MD);
    bool Res = false;
    // Perform DFS Numbering of instructions.
    unsigned BBI = 0;
    for (const BasicBlock *BB : depth_first(&F.getEntryBlock())) {
      DFSNumber[BB] = ++BBI;
      unsigned I = 0;
      for (auto &Inst : *BB)
        DFSNumber[&Inst] = ++I;
    }
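    // Note on the numbering: blocks get a function-wide DFS-in number, while
    // instruction numbers restart at each block. SortByDFSIn and firstInBB
    // therefore compare instruction numbers only within a single block and
    // fall back to the block numbers otherwise.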

    int ChainLength = 0;

    // FIXME: use lazy evaluation of VN to avoid the fix-point computation.
    while (1) {
      if (MaxChainLength != -1 && ++ChainLength >= MaxChainLength)
        return Res;

      auto HoistStat = hoistExpressions(F);
      if (HoistStat.first + HoistStat.second == 0)
        return Res;

      if (HoistStat.second > 0)
        // To address a limitation of the current GVN, we need to rerun the
        // hoisting after we hoisted loads or stores in order to be able to
        // hoist all scalars dependent on the hoisted ld/st.
        VN.clear();

      Res = true;
    }

    return Res;
  }

private:
  GVN::ValueTable VN;
  DominatorTree *DT;
  AliasAnalysis *AA;
  MemoryDependenceResults *MD;
  MemorySSA *MSSA;
  const bool HoistingGeps;
  DenseMap<const Value *, unsigned> DFSNumber;
  BBSideEffectsSet BBSideEffects;
  int HoistedCtr;

  enum InsKind { Unknown, Scalar, Load, Store };

  // Return true when there is exception handling in BB.
  bool hasEH(const BasicBlock *BB) {
    auto It = BBSideEffects.find(BB);
    if (It != BBSideEffects.end())
      return It->second;

    if (BB->isEHPad() || BB->hasAddressTaken()) {
      BBSideEffects[BB] = true;
      return true;
    }

    if (BB->getTerminator()->mayThrow()) {
      BBSideEffects[BB] = true;
      return true;
    }

    BBSideEffects[BB] = false;
    return false;
  }

  // Return true when a successor of BB dominates A.
  bool successorDominate(const BasicBlock *BB, const BasicBlock *A) {
    for (const BasicBlock *Succ : BB->getTerminator()->successors())
      if (DT->dominates(Succ, A))
        return true;

    return false;
  }

  // Return true when all paths from HoistBB to the end of the function pass
  // through one of the blocks in WL.
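  // This check keeps hoisting from introducing a computation on a path that
  // did not already contain it: the hoisted expression must be anticipated on
  // every path leaving HoistBB.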
  bool hoistingFromAllPaths(const BasicBlock *HoistBB,
                            SmallPtrSetImpl<const BasicBlock *> &WL) {

    // Copy WL as the loop will remove elements from it.
    SmallPtrSet<const BasicBlock *, 2> WorkList(WL.begin(), WL.end());

    for (auto It = df_begin(HoistBB), E = df_end(HoistBB); It != E;) {
      // There exists a path from HoistBB to the exit of the function if we are
      // still iterating in DF traversal and we removed all instructions from
      // the work list.
      if (WorkList.empty())
        return false;

      const BasicBlock *BB = *It;
      if (WorkList.erase(BB)) {
        // Stop DFS traversal when BB is in the work list.
        It.skipChildren();
        continue;
      }

      // Check for end of function, calls that do not return, etc.
      if (!isGuaranteedToTransferExecutionToSuccessor(BB->getTerminator()))
        return false;

      // When reaching the back-edge of a loop, there may be a path through the
      // loop that does not pass through any block in WL before exiting it.
      if (successorDominate(BB, HoistBB))
        return false;

      // Increment DFS traversal when not skipping children.
      ++It;
    }

    return true;
  }

  // Return true when I1 appears before I2 in the instructions of BB.
  bool firstInBB(const Instruction *I1, const Instruction *I2) {
    assert(I1->getParent() == I2->getParent());
    unsigned I1DFS = DFSNumber.lookup(I1);
    unsigned I2DFS = DFSNumber.lookup(I2);
    assert(I1DFS && I2DFS);
    return I1DFS < I2DFS;
  }

  // Return true when there is a memory use in BB that is clobbered by Def,
  // ignoring uses that occur after OldPt in OldPt's block and uses that occur
  // before NewPt in NewPt's block.
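  // This is only relevant when hoisting stores: moving a store above a load
  // that aliases it would change the value observed by that load.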
  bool hasMemoryUse(const Instruction *NewPt, MemoryDef *Def,
                    const BasicBlock *BB) {
    const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
    if (!Acc)
      return false;

    Instruction *OldPt = Def->getMemoryInst();
    const BasicBlock *OldBB = OldPt->getParent();
    const BasicBlock *NewBB = NewPt->getParent();
    bool ReachedNewPt = false;

    for (const MemoryAccess &MA : *Acc)
      if (const MemoryUse *MU = dyn_cast<MemoryUse>(&MA)) {
        Instruction *Insn = MU->getMemoryInst();

        // Do not check whether MU aliases Def when MU occurs after OldPt.
        if (BB == OldBB && firstInBB(OldPt, Insn))
          break;

        // Do not check whether MU aliases Def when MU occurs before NewPt.
        if (BB == NewBB) {
          if (!ReachedNewPt) {
            if (firstInBB(Insn, NewPt))
              continue;
            ReachedNewPt = true;
          }
        }
        if (defClobbersUseOrDef(Def, MU, *AA))
          return true;
      }

    return false;
  }

  // Return true when there is exception handling or a load of memory Def
  // between Def and NewPt. This function is only called for stores: Def is
  // the MemoryDef of the store to be hoisted.

  // Decrement by 1 NBBsOnAllPaths for each block between HoistPt and BB, and
  // return true when the counter NBBsOnAllPaths reaches 0, except when it is
  // initialized to -1 which is unlimited.
  bool hasEHOrLoadsOnPath(const Instruction *NewPt, MemoryDef *Def,
                          int &NBBsOnAllPaths) {
    const BasicBlock *NewBB = NewPt->getParent();
    const BasicBlock *OldBB = Def->getBlock();
    assert(DT->dominates(NewBB, OldBB) && "invalid path");
    assert(DT->dominates(Def->getDefiningAccess()->getBlock(), NewBB) &&
           "def does not dominate new hoisting point");

    // Walk all basic blocks reachable in depth-first iteration on the inverse
    // CFG from OldBB to NewBB. These blocks are all the blocks that may be
    // executed between the execution of NewBB and OldBB. Hoisting an
    // expression from OldBB into NewBB has to be safe on all execution paths.
    for (auto I = idf_begin(OldBB), E = idf_end(OldBB); I != E;) {
      if (*I == NewBB) {
        // Stop traversal when reaching HoistPt.
        I.skipChildren();
        continue;
      }

      // Stop walk once the limit is reached.
      if (NBBsOnAllPaths == 0)
        return true;

      // Impossible to hoist with exceptions on the path.
      if (hasEH(*I))
        return true;

      // Check that we do not move a store past loads.
      if (hasMemoryUse(NewPt, Def, *I))
        return true;

      // -1 is unlimited number of blocks on all paths.
      if (NBBsOnAllPaths != -1)
        --NBBsOnAllPaths;

      ++I;
    }

    return false;
  }

  // Return true when there is exception handling between HoistPt and BB.
  // Decrement by 1 NBBsOnAllPaths for each block between HoistPt and BB, and
  // return true when the counter NBBsOnAllPaths reaches 0, except when it is
  // initialized to -1 which is unlimited.
  bool hasEHOnPath(const BasicBlock *HoistPt, const BasicBlock *BB,
                   int &NBBsOnAllPaths) {
    assert(DT->dominates(HoistPt, BB) && "Invalid path");

    // Walk all basic blocks reachable in depth-first iteration on the inverse
    // CFG from BB to HoistPt. These are all the blocks that may be executed
    // between the execution of HoistPt and BB. Hoisting an expression from BB
    // into HoistPt has to be safe on all execution paths.
    for (auto I = idf_begin(BB), E = idf_end(BB); I != E;) {
      if (*I == HoistPt) {
        // Stop traversal when reaching HoistPt.
        I.skipChildren();
        continue;
      }

      // Stop walk once the limit is reached.
      if (NBBsOnAllPaths == 0)
        return true;

      // Impossible to hoist with exceptions on the path.
      if (hasEH(*I))
        return true;

      // -1 is unlimited number of blocks on all paths.
      if (NBBsOnAllPaths != -1)
        --NBBsOnAllPaths;

      ++I;
    }

    return false;
  }

  // Return true when it is safe to hoist a memory load or store U from OldPt
  // to NewPt.
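  // Three conditions are checked: the defining MemorySSA access of U must
  // dominate NewPt, no block on any path between NewPt and OldPt may have
  // exception handling, and for stores no aliasing load may be crossed.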
  bool safeToHoistLdSt(const Instruction *NewPt, const Instruction *OldPt,
                       MemoryUseOrDef *U, InsKind K, int &NBBsOnAllPaths) {

    // In place hoisting is safe.
    if (NewPt == OldPt)
      return true;

    const BasicBlock *NewBB = NewPt->getParent();
    const BasicBlock *OldBB = OldPt->getParent();
    const BasicBlock *UBB = U->getBlock();

    // Check for dependences on the Memory SSA.
    MemoryAccess *D = U->getDefiningAccess();
    BasicBlock *DBB = D->getBlock();
    if (DT->properlyDominates(NewBB, DBB))
      // Cannot move the load or store to NewBB above its definition in DBB.
      return false;

    if (NewBB == DBB && !MSSA->isLiveOnEntryDef(D))
      if (auto *UD = dyn_cast<MemoryUseOrDef>(D))
        if (firstInBB(NewPt, UD->getMemoryInst()))
          // Cannot move the load or store to NewPt above its definition in D.
          return false;

    // Check for unsafe hoistings due to side effects.
    if (K == InsKind::Store) {
      if (hasEHOrLoadsOnPath(NewPt, dyn_cast<MemoryDef>(U), NBBsOnAllPaths))
        return false;
    } else if (hasEHOnPath(NewBB, OldBB, NBBsOnAllPaths))
      return false;

    if (UBB == NewBB) {
      if (DT->properlyDominates(DBB, NewBB))
        return true;
      assert(UBB == DBB);
      assert(MSSA->locallyDominates(D, U));
    }

    // No side effects: it is safe to hoist.
    return true;
  }

  // Return true when it is safe to hoist scalar instructions from all blocks
  // in WL to HoistBB.
  bool safeToHoistScalar(const BasicBlock *HoistBB,
                         SmallPtrSetImpl<const BasicBlock *> &WL,
                         int &NBBsOnAllPaths) {
    // Check that the hoisted expression is needed on all paths.
    if (!hoistingFromAllPaths(HoistBB, WL))
      return false;

    for (const BasicBlock *BB : WL)
      if (hasEHOnPath(HoistBB, BB, NBBsOnAllPaths))
        return false;

    return true;
  }

  // Each element of a hoisting list contains the basic block into which to
  // hoist and the list of instructions to be hoisted there.
  typedef std::pair<BasicBlock *, SmallVecInsn> HoistingPointInfo;
  typedef SmallVector<HoistingPointInfo, 4> HoistingPointList;

  // Partition InstructionsToHoist into groups of instructions that can share a
  // common hoisting point. The groups are collected in HPL. K tells whether
  // the instructions in InstructionsToHoist are scalars, loads, or stores.
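  // The instructions are visited in DFS order; each one is merged into the
  // current group as long as the nearest common dominator remains a safe
  // hoisting point, otherwise the group is closed and a new one is started.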
  void partitionCandidates(SmallVecImplInsn &InstructionsToHoist,
                           HoistingPointList &HPL, InsKind K) {
    // No need to sort for two instructions.
    if (InstructionsToHoist.size() > 2) {
      SortByDFSIn Pred(DFSNumber);
      std::sort(InstructionsToHoist.begin(), InstructionsToHoist.end(), Pred);
    }

    int NumBBsOnAllPaths = MaxNumberOfBBSInPath;

    SmallVecImplInsn::iterator II = InstructionsToHoist.begin();
    SmallVecImplInsn::iterator Start = II;
    Instruction *HoistPt = *II;
    BasicBlock *HoistBB = HoistPt->getParent();
    MemoryUseOrDef *UD;
    if (K != InsKind::Scalar)
      UD = MSSA->getMemoryAccess(HoistPt);

    for (++II; II != InstructionsToHoist.end(); ++II) {
      Instruction *Insn = *II;
      BasicBlock *BB = Insn->getParent();
      BasicBlock *NewHoistBB;
      Instruction *NewHoistPt;

      if (BB == HoistBB) { // Both are in the same Basic Block.
        NewHoistBB = HoistBB;
        NewHoistPt = firstInBB(Insn, HoistPt) ? Insn : HoistPt;
      } else {
        // If the hoisting point contains one of the instructions,
        // then hoist there, otherwise hoist before the terminator.
        NewHoistBB = DT->findNearestCommonDominator(HoistBB, BB);
        if (NewHoistBB == BB)
          NewHoistPt = Insn;
        else if (NewHoistBB == HoistBB)
          NewHoistPt = HoistPt;
        else
          NewHoistPt = NewHoistBB->getTerminator();
      }

      SmallPtrSet<const BasicBlock *, 2> WL;
      WL.insert(HoistBB);
      WL.insert(BB);

      if (K == InsKind::Scalar) {
        if (safeToHoistScalar(NewHoistBB, WL, NumBBsOnAllPaths)) {
          // Extend HoistPt to NewHoistPt.
          HoistPt = NewHoistPt;
          HoistBB = NewHoistBB;
          continue;
        }
      } else {
        // When NewHoistBB already contains an instruction to be hoisted, the
        // expression is needed on all paths.
        // Check that the hoisted expression is needed on all paths: it is
        // unsafe to hoist loads to a place where there may be a path not
        // loading from the same address: for instance there may be a branch on
        // which the address of the load may not be initialized.
        if ((HoistBB == NewHoistBB || BB == NewHoistBB ||
             hoistingFromAllPaths(NewHoistBB, WL)) &&
            // Also check that it is safe to move the load or store from
            // HoistPt to NewHoistPt, and from Insn to NewHoistPt.
            safeToHoistLdSt(NewHoistPt, HoistPt, UD, K, NumBBsOnAllPaths) &&
            safeToHoistLdSt(NewHoistPt, Insn, MSSA->getMemoryAccess(Insn), K,
                            NumBBsOnAllPaths)) {
          // Extend HoistPt to NewHoistPt.
          HoistPt = NewHoistPt;
          HoistBB = NewHoistBB;
          continue;
        }
      }

      // At this point it is not safe to extend the current hoisting to
      // NewHoistPt: save the hoisting list so far.
      if (std::distance(Start, II) > 1)
        HPL.push_back({HoistBB, SmallVecInsn(Start, II)});

      // Start over from BB.
      Start = II;
      if (K != InsKind::Scalar)
        UD = MSSA->getMemoryAccess(*Start);
      HoistPt = Insn;
      HoistBB = BB;
      NumBBsOnAllPaths = MaxNumberOfBBSInPath;
    }

    // Save the last partition.
    if (std::distance(Start, II) > 1)
      HPL.push_back({HoistBB, SmallVecInsn(Start, II)});
  }

  // Initialize HPL from Map: for each value number that maps to at least two
  // hoistable instructions, compute an insertion point and the corresponding
  // group of instructions.
  void computeInsertionPoints(const VNtoInsns &Map, HoistingPointList &HPL,
                              InsKind K) {
    for (const auto &Entry : Map) {
      if (MaxHoistedThreshold != -1 && ++HoistedCtr > MaxHoistedThreshold)
        return;

      const SmallVecInsn &V = Entry.second;
      if (V.size() < 2)
        continue;

      // Compute the insertion point and the list of expressions to be hoisted.
      SmallVecInsn InstructionsToHoist;
      for (auto I : V)
        if (!hasEH(I->getParent()))
          InstructionsToHoist.push_back(I);

      if (!InstructionsToHoist.empty())
        partitionCandidates(InstructionsToHoist, HPL, K);
    }
  }

  // Return true when all operands of Instr are available at insertion point
  // HoistPt. When limiting the number of hoisted expressions, one could hoist
  // a load without hoisting its access function. So before hoisting any
  // expression, make sure that all its operands are available at insert point.
  bool allOperandsAvailable(const Instruction *I,
                            const BasicBlock *HoistPt) const {
    for (const Use &Op : I->operands())
      if (const auto *Inst = dyn_cast<Instruction>(&Op))
        if (!DT->dominates(Inst->getParent(), HoistPt))
          return false;

    return true;
  }

  // Same as allOperandsAvailable with recursive check for GEP operands.
  bool allGepOperandsAvailable(const Instruction *I,
                               const BasicBlock *HoistPt) const {
    for (const Use &Op : I->operands())
      if (const auto *Inst = dyn_cast<Instruction>(&Op))
        if (!DT->dominates(Inst->getParent(), HoistPt)) {
          if (const GetElementPtrInst *GepOp =
                  dyn_cast<GetElementPtrInst>(Inst)) {
            if (!allGepOperandsAvailable(GepOp, HoistPt))
              return false;
            // Gep is available if all operands of GepOp are available.
          } else {
            // Gep is not available if it has operands other than GEPs that are
            // defined in blocks not dominating HoistPt.
            return false;
          }
        }
    return true;
  }

  // Make all operands of the GEP available.
  void makeGepsAvailable(Instruction *Repl, BasicBlock *HoistPt,
                         const SmallVecInsn &InstructionsToHoist,
                         Instruction *Gep) const {
    assert(allGepOperandsAvailable(Gep, HoistPt) &&
           "GEP operands not available");

    Instruction *ClonedGep = Gep->clone();
    for (unsigned i = 0, e = Gep->getNumOperands(); i != e; ++i)
      if (Instruction *Op = dyn_cast<Instruction>(Gep->getOperand(i))) {

        // Check whether the operand is already available.
        if (DT->dominates(Op->getParent(), HoistPt))
          continue;

        // As a GEP can refer to other GEPs, recursively make all the operands
        // of this GEP available at HoistPt.
        if (GetElementPtrInst *GepOp = dyn_cast<GetElementPtrInst>(Op))
          makeGepsAvailable(ClonedGep, HoistPt, InstructionsToHoist, GepOp);
      }

    // Copy Gep and replace its uses in Repl with ClonedGep.
    ClonedGep->insertBefore(HoistPt->getTerminator());

    // Conservatively discard any optimization hints, they may differ on the
    // other paths.
    ClonedGep->dropUnknownNonDebugMetadata();

    // If we have optimization hints which agree with each other along
    // different paths, preserve them.
    for (const Instruction *OtherInst : InstructionsToHoist) {
      const GetElementPtrInst *OtherGep;
      if (auto *OtherLd = dyn_cast<LoadInst>(OtherInst))
        OtherGep = cast<GetElementPtrInst>(OtherLd->getPointerOperand());
      else
        OtherGep = cast<GetElementPtrInst>(
            cast<StoreInst>(OtherInst)->getPointerOperand());
      ClonedGep->andIRFlags(OtherGep);
    }

    // Replace uses of Gep with ClonedGep in Repl.
    Repl->replaceUsesOfWith(Gep, ClonedGep);
  }

  // In the case Repl is a load or a store, make its GEP operand available at
  // HoistPt: GEPs are not hoisted by default, to avoid hoisting the address
  // computation without the associated load or store.
  bool makeGepOperandsAvailable(Instruction *Repl, BasicBlock *HoistPt,
                                const SmallVecInsn &InstructionsToHoist) const {
    // Check whether the GEP of a ld/st can be synthesized at HoistPt.
    GetElementPtrInst *Gep = nullptr;
    Instruction *Val = nullptr;
    if (auto *Ld = dyn_cast<LoadInst>(Repl)) {
      Gep = dyn_cast<GetElementPtrInst>(Ld->getPointerOperand());
    } else if (auto *St = dyn_cast<StoreInst>(Repl)) {
      Gep = dyn_cast<GetElementPtrInst>(St->getPointerOperand());
      Val = dyn_cast<Instruction>(St->getValueOperand());
      // Check that the stored value is available.
      if (Val) {
        if (isa<GetElementPtrInst>(Val)) {
          // Check whether we can compute the GEP at HoistPt.
          if (!allGepOperandsAvailable(Val, HoistPt))
            return false;
        } else if (!DT->dominates(Val->getParent(), HoistPt))
          return false;
      }
    }

    // Check whether we can compute the Gep at HoistPt.
    if (!Gep || !allGepOperandsAvailable(Gep, HoistPt))
      return false;

    makeGepsAvailable(Repl, HoistPt, InstructionsToHoist, Gep);

    if (Val && isa<GetElementPtrInst>(Val))
      makeGepsAvailable(Repl, HoistPt, InstructionsToHoist, Val);

    return true;
  }

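  // Hoist the groups collected in HPL: for each hoisting point pick (or move)
  // a single representative instruction, rewire its MemorySSA access, and
  // replace the remaining copies with the representative. Returns the number
  // of hoisted scalars and the number of hoisted loads, stores, and calls.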
  std::pair<unsigned, unsigned> hoist(HoistingPointList &HPL) {
    unsigned NI = 0, NL = 0, NS = 0, NC = 0, NR = 0;
    for (const HoistingPointInfo &HP : HPL) {
      // Find out whether we already have one of the instructions in HoistPt,
      // in which case we do not have to move it.
      BasicBlock *HoistPt = HP.first;
      const SmallVecInsn &InstructionsToHoist = HP.second;
      Instruction *Repl = nullptr;
      for (Instruction *I : InstructionsToHoist)
        if (I->getParent() == HoistPt)
          // If there are two instructions in HoistPt to be hoisted in place:
          // update Repl to be the first one, such that we can rename the uses
          // of the second based on the first.
          if (!Repl || firstInBB(I, Repl))
            Repl = I;

      // Keep track of whether we moved the instruction so we know whether we
      // should move the MemoryAccess.
      bool MoveAccess = true;
      if (Repl) {
        // Repl is already in HoistPt: it remains in place.
        assert(allOperandsAvailable(Repl, HoistPt) &&
               "instruction depends on operands that are not available");
        MoveAccess = false;
      } else {
        // When we do not find Repl in HoistPt, select the first in the list
        // and move it to HoistPt.
        Repl = InstructionsToHoist.front();

        // We can move Repl in HoistPt only when all operands are available.
        // The order in which hoistings are done may influence the availability
        // of operands.
        if (!allOperandsAvailable(Repl, HoistPt)) {

          // When HoistingGeps there is nothing more we can do to make the
          // operands available: just continue.
          if (HoistingGeps)
            continue;

          // When not HoistingGeps we need to copy the GEPs.
          if (!makeGepOperandsAvailable(Repl, HoistPt, InstructionsToHoist))
            continue;
        }

        // Move the instruction to the end of HoistPt.
        Instruction *Last = HoistPt->getTerminator();
        MD->removeInstruction(Repl);
        Repl->moveBefore(Last);

        DFSNumber[Repl] = DFSNumber[Last]++;
      }

      MemoryAccess *NewMemAcc = MSSA->getMemoryAccess(Repl);

      if (MoveAccess) {
        if (MemoryUseOrDef *OldMemAcc =
                dyn_cast_or_null<MemoryUseOrDef>(NewMemAcc)) {
          // The definition of this ld/st will not change: ld/st hoisting is
          // legal when the ld/st is not moved past its current definition.
          MemoryAccess *Def = OldMemAcc->getDefiningAccess();
          NewMemAcc =
              MSSA->createMemoryAccessInBB(Repl, Def, HoistPt, MemorySSA::End);
          OldMemAcc->replaceAllUsesWith(NewMemAcc);
          MSSA->removeMemoryAccess(OldMemAcc);
        }
      }

      if (isa<LoadInst>(Repl))
        ++NL;
      else if (isa<StoreInst>(Repl))
        ++NS;
      else if (isa<CallInst>(Repl))
        ++NC;
      else // Scalar
        ++NI;

      // Remove and rename all other instructions.
      for (Instruction *I : InstructionsToHoist)
        if (I != Repl) {
          ++NR;
          if (auto *ReplacementLoad = dyn_cast<LoadInst>(Repl)) {
            ReplacementLoad->setAlignment(
                std::min(ReplacementLoad->getAlignment(),
                         cast<LoadInst>(I)->getAlignment()));
            ++NumLoadsRemoved;
          } else if (auto *ReplacementStore = dyn_cast<StoreInst>(Repl)) {
            ReplacementStore->setAlignment(
                std::min(ReplacementStore->getAlignment(),
                         cast<StoreInst>(I)->getAlignment()));
            ++NumStoresRemoved;
          } else if (auto *ReplacementAlloca = dyn_cast<AllocaInst>(Repl)) {
            ReplacementAlloca->setAlignment(
                std::max(ReplacementAlloca->getAlignment(),
                         cast<AllocaInst>(I)->getAlignment()));
          } else if (isa<CallInst>(Repl)) {
            ++NumCallsRemoved;
          }

          if (NewMemAcc) {
            // Update the uses of the old MSSA access with NewMemAcc.
            MemoryAccess *OldMA = MSSA->getMemoryAccess(I);
            OldMA->replaceAllUsesWith(NewMemAcc);
            MSSA->removeMemoryAccess(OldMA);
          }

          Repl->andIRFlags(I);
          combineKnownMetadata(Repl, I);
          I->replaceAllUsesWith(Repl);
          // Also invalidate the Alias Analysis cache.
          MD->removeInstruction(I);
          I->eraseFromParent();
        }

      // Remove MemorySSA phi nodes with the same arguments.
      if (NewMemAcc) {
        SmallPtrSet<MemoryPhi *, 4> UsePhis;
        for (User *U : NewMemAcc->users())
          if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(U))
            UsePhis.insert(Phi);

        for (auto *Phi : UsePhis) {
          auto In = Phi->incoming_values();
          if (all_of(In, [&](Use &U) { return U == NewMemAcc; })) {
            Phi->replaceAllUsesWith(NewMemAcc);
            MSSA->removeMemoryAccess(Phi);
          }
        }
      }
    }

    NumHoisted += NL + NS + NC + NI;
    NumRemoved += NR;
    NumLoadsHoisted += NL;
    NumStoresHoisted += NS;
    NumCallsHoisted += NC;
    return {NI, NL + NC + NS};
  }

  // Hoist all expressions. Returns the number of scalars hoisted and the
  // number of non-scalars (loads, stores, calls) hoisted.
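  // Candidates are collected in a single DFS walk of the function, looking at
  // no more than MaxDepthInBB instructions per block; terminators are not
  // value numbered, and a call that may write to memory or is convergent stops
  // the collection in its block.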
  std::pair<unsigned, unsigned> hoistExpressions(Function &F) {
    InsnInfo II;
    LoadInfo LI;
    StoreInfo SI;
    CallInfo CI;
    for (BasicBlock *BB : depth_first(&F.getEntryBlock())) {
      int InstructionNb = 0;
      for (Instruction &I1 : *BB) {
        // Only hoist the first instructions in BB up to MaxDepthInBB. Hoisting
        // deeper may increase the register pressure and compilation time.
        if (MaxDepthInBB != -1 && InstructionNb++ >= MaxDepthInBB)
          break;

        // Do not value number terminator instructions.
        if (isa<TerminatorInst>(&I1))
          break;

        if (auto *Load = dyn_cast<LoadInst>(&I1))
          LI.insert(Load, VN);
        else if (auto *Store = dyn_cast<StoreInst>(&I1))
          SI.insert(Store, VN);
        else if (auto *Call = dyn_cast<CallInst>(&I1)) {
          if (auto *Intr = dyn_cast<IntrinsicInst>(Call)) {
            if (isa<DbgInfoIntrinsic>(Intr) ||
                Intr->getIntrinsicID() == Intrinsic::assume)
              continue;
          }
          if (Call->mayHaveSideEffects())
            break;

          if (Call->isConvergent())
            break;

          CI.insert(Call, VN);
        } else if (HoistingGeps || !isa<GetElementPtrInst>(&I1))
          // Do not hoist scalars past calls that may write to memory because
          // that could result in spills later. geps are handled separately.
          // TODO: We can relax this for targets like AArch64 as they have more
          // registers than X86.
          II.insert(&I1, VN);
      }
    }

    HoistingPointList HPL;
    computeInsertionPoints(II.getVNTable(), HPL, InsKind::Scalar);
    computeInsertionPoints(LI.getVNTable(), HPL, InsKind::Load);
    computeInsertionPoints(SI.getVNTable(), HPL, InsKind::Store);
    computeInsertionPoints(CI.getScalarVNTable(), HPL, InsKind::Scalar);
    computeInsertionPoints(CI.getLoadVNTable(), HPL, InsKind::Load);
    computeInsertionPoints(CI.getStoreVNTable(), HPL, InsKind::Store);
    return hoist(HPL);
  }
};

class GVNHoistLegacyPass : public FunctionPass {
public:
  static char ID;

  GVNHoistLegacyPass() : FunctionPass(ID) {
    initializeGVNHoistLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
    auto &MD = getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
    auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();

    GVNHoist G(&DT, &AA, &MD, &MSSA);
    return G.run(F);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<MemorySSAWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
  }
};
} // namespace
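
// New pass manager entry point: gather the analyses GVNHoist needs, run it,
// and report which analyses remain preserved.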
PreservedAnalyses GVNHoistPass::run(Function &F, FunctionAnalysisManager &AM) {
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  AliasAnalysis &AA = AM.getResult<AAManager>(F);
  MemoryDependenceResults &MD = AM.getResult<MemoryDependenceAnalysis>(F);
  MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
  GVNHoist G(&DT, &AA, &MD, &MSSA);
  if (!G.run(F))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<MemorySSAAnalysis>();
  return PA;
}

char GVNHoistLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(GVNHoistLegacyPass, "gvn-hoist",
                      "Early GVN Hoisting of Expressions", false, false)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(GVNHoistLegacyPass, "gvn-hoist",
                    "Early GVN Hoisting of Expressions", false, false)

FunctionPass *llvm::createGVNHoistPass() { return new GVNHoistLegacyPass(); }