//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions. It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//
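//
// For example (illustrative IR), in
//
//     %a = add i32 %x, %y
//     %b = add i32 %x, %y
//
// the two adds receive the same value number, so %b is replaced by %a and
// deleted. Dead load elimination similarly removes loads whose result is
// never used.
//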

#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/VNCoercion.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::gvn;
using namespace llvm::VNCoercion;
using namespace PatternMatch;

#define DEBUG_TYPE "gvn"

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumGVNSimpl,  "Number of instructions simplified");
STATISTIC(NumGVNEqProp, "Number of equalities propagated");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));

// Maximum allowed recursion depth.
static cl::opt<uint32_t>
MaxRecurseDepth("max-recurse-depth", cl::Hidden, cl::init(1000), cl::ZeroOrMore,
                cl::desc("Max recurse depth (default = 1000)"));

struct llvm::GVN::Expression {
  uint32_t opcode;
  Type *type = nullptr;
  bool commutative = false;
  SmallVector<uint32_t, 4> varargs;

  Expression(uint32_t o = ~2U) : opcode(o) {}

  bool operator==(const Expression &other) const {
    if (opcode != other.opcode)
      return false;
    if (opcode == ~0U || opcode == ~1U)
      return true;
    if (type != other.type)
      return false;
    if (varargs != other.varargs)
      return false;
    return true;
  }

  friend hash_code hash_value(const Expression &Value) {
    return hash_combine(
        Value.opcode, Value.type,
        hash_combine_range(Value.varargs.begin(), Value.varargs.end()));
  }
};

namespace llvm {

template <> struct DenseMapInfo<GVN::Expression> {
  static inline GVN::Expression getEmptyKey() { return ~0U; }
  static inline GVN::Expression getTombstoneKey() { return ~1U; }

  static unsigned getHashValue(const GVN::Expression &e) {
    using llvm::hash_value;

    return static_cast<unsigned>(hash_value(e));
  }

  static bool isEqual(const GVN::Expression &LHS, const GVN::Expression &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

/// Represents a particular available value that we know how to materialize.
/// Materialization of an AvailableValue never fails. An AvailableValue is
/// implicitly associated with a rematerialization point which is the
/// location of the instruction from which it was formed.
struct llvm::gvn::AvailableValue {
  enum ValType {
    SimpleVal, // A simple offsetted value that is accessed.
    LoadVal,   // A value produced by a load.
    MemIntrin, // A memory intrinsic which is loaded from.
    UndefVal   // An UndefValue representing a value from a dead block (which
               // is not yet physically removed from the CFG).
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 2, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValue get(Value *V, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getLoad(LoadInst *LI, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val.setPointer(LI);
    Res.Val.setInt(LoadVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getUndef() {
    AvailableValue Res;
    Res.Val.setPointer(nullptr);
    Res.Val.setInt(UndefVal);
    Res.Offset = 0;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; }
  bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; }
  bool isUndefValue() const { return Val.getInt() == UndefVal; }

  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  LoadInst *getCoercedLoadValue() const {
    assert(isCoercedLoadValue() && "Wrong accessor");
    return cast<LoadInst>(Val.getPointer());
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(isMemIntrinValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// Emit code at the specified insertion point to adjust the value defined
  /// here to the specified type. This handles various coercion cases.
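  /// (For example, when a wide "store i32 %v" covers a narrower load at some
  /// byte offset, the stored value is shifted and truncated to the load's
  /// type via VNCoercion's getStoreValueForLoad; illustrative only.)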
  Value *MaterializeAdjustedValue(LoadInst *LI, Instruction *InsertPt,
                                  GVN &gvn) const;
};

/// Represents an AvailableValue which can be rematerialized at the end of
/// the associated BasicBlock.
struct llvm::gvn::AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;

  /// AV - The actual available value.
  AvailableValue AV;

  static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.AV = std::move(AV);
    return Res;
  }

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    return get(BB, AvailableValue::get(V, Offset));
  }

  static AvailableValueInBlock getUndef(BasicBlock *BB) {
    return get(BB, AvailableValue::getUndef());
  }

  /// Emit code at the end of this block to adjust the value defined here to
  /// the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *LI, GVN &gvn) const {
    return AV.MaterializeAdjustedValue(LI, BB->getTerminator(), gvn);
  }
};

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

GVN::Expression GVN::ValueTable::createExpr(Instruction *I) {
  Expression e;
  e.type = I->getType();
  e.opcode = I->getOpcode();
  for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
       OI != OE; ++OI)
    e.varargs.push_back(lookupOrAdd(*OI));
  if (I->isCommutative()) {
    // Ensure that commutative instructions that only differ by a permutation
    // of their operands get the same value number by sorting the operand value
    // numbers. Since all commutative instructions have two operands it is more
    // efficient to sort by hand rather than using, say, std::sort.
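    // For example, "add i32 %a, %b" and "add i32 %b, %a" produce the same
    // Expression after this swap and therefore share one value number.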
    assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!");
    if (e.varargs[0] > e.varargs[1])
      std::swap(e.varargs[0], e.varargs[1]);
    e.commutative = true;
  }

  if (CmpInst *C = dyn_cast<CmpInst>(I)) {
    // Sort the operand value numbers so x<y and y>x get the same value number.
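    // For example, "icmp slt i32 %x, %y" becomes the same expression as
    // "icmp sgt i32 %y, %x" once the operands and predicate are swapped.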
    CmpInst::Predicate Predicate = C->getPredicate();
    if (e.varargs[0] > e.varargs[1]) {
      std::swap(e.varargs[0], e.varargs[1]);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }
    e.opcode = (C->getOpcode() << 8) | Predicate;
    e.commutative = true;
  } else if (InsertValueInst *E = dyn_cast<InsertValueInst>(I)) {
    for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
         II != IE; ++II)
      e.varargs.push_back(*II);
  }

  return e;
}

GVN::Expression GVN::ValueTable::createCmpExpr(unsigned Opcode,
                                               CmpInst::Predicate Predicate,
                                               Value *LHS, Value *RHS) {
  assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
         "Not a comparison!");
  Expression e;
  e.type = CmpInst::makeCmpResultType(LHS->getType());
  e.varargs.push_back(lookupOrAdd(LHS));
  e.varargs.push_back(lookupOrAdd(RHS));

  // Sort the operand value numbers so x<y and y>x get the same value number.
  if (e.varargs[0] > e.varargs[1]) {
    std::swap(e.varargs[0], e.varargs[1]);
    Predicate = CmpInst::getSwappedPredicate(Predicate);
  }
  e.opcode = (Opcode << 8) | Predicate;
  e.commutative = true;
  return e;
}

GVN::Expression GVN::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
  assert(EI && "Not an ExtractValueInst?");
  Expression e;
  e.type = EI->getType();
  e.opcode = 0;

  IntrinsicInst *I = dyn_cast<IntrinsicInst>(EI->getAggregateOperand());
  if (I != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
    // EI might be an extract from one of our recognised intrinsics. If it
    // is we'll synthesize a semantically equivalent expression instead of
    // an extract value expression.
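    // For example, the value extracted at index 0 of
    // "llvm.sadd.with.overflow.i32(%a, %b)" is numbered as "add i32 %a, %b",
    // letting it unify with an ordinary add of the same operands.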
    switch (I->getIntrinsicID()) {
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::uadd_with_overflow:
      e.opcode = Instruction::Add;
      break;
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::usub_with_overflow:
      e.opcode = Instruction::Sub;
      break;
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow:
      e.opcode = Instruction::Mul;
      break;
    default:
      break;
    }

    if (e.opcode != 0) {
      // Intrinsic recognized. Grab its args to finish building the expression.
      assert(I->getNumArgOperands() == 2 &&
             "Expect two args for recognised intrinsics.");
      e.varargs.push_back(lookupOrAdd(I->getArgOperand(0)));
      e.varargs.push_back(lookupOrAdd(I->getArgOperand(1)));
      return e;
    }
  }

  // Not a recognised intrinsic. Fall back to producing an extract value
  // expression.
  e.opcode = EI->getOpcode();
  for (Instruction::op_iterator OI = EI->op_begin(), OE = EI->op_end();
       OI != OE; ++OI)
    e.varargs.push_back(lookupOrAdd(*OI));

  for (ExtractValueInst::idx_iterator II = EI->idx_begin(), IE = EI->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);

  return e;
}

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

GVN::ValueTable::ValueTable() = default;
GVN::ValueTable::ValueTable(const ValueTable &) = default;
GVN::ValueTable::ValueTable(ValueTable &&) = default;
GVN::ValueTable::~ValueTable() = default;

/// add - Insert a value into the table with a specified value number.
void GVN::ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
  if (PHINode *PN = dyn_cast<PHINode>(V))
    NumberingPhi[num] = PN;
}
387 
388 uint32_t GVN::ValueTable::lookupOrAddCall(CallInst *C) {
389  if (AA->doesNotAccessMemory(C)) {
390  Expression exp = createExpr(C);
391  uint32_t e = assignExpNewValueNum(exp).first;
392  valueNumbering[C] = e;
393  return e;
394  } else if (AA->onlyReadsMemory(C)) {
395  Expression exp = createExpr(C);
396  auto ValNum = assignExpNewValueNum(exp);
397  if (ValNum.second) {
398  valueNumbering[C] = ValNum.first;
399  return ValNum.first;
400  }
401  if (!MD) {
402  uint32_t e = assignExpNewValueNum(exp).first;
403  valueNumbering[C] = e;
404  return e;
405  }
406 
407  MemDepResult local_dep = MD->getDependency(C);
408 
409  if (!local_dep.isDef() && !local_dep.isNonLocal()) {
410  valueNumbering[C] = nextValueNumber;
411  return nextValueNumber++;
412  }
413 
414  if (local_dep.isDef()) {
415  CallInst* local_cdep = cast<CallInst>(local_dep.getInst());
416 
417  if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
418  valueNumbering[C] = nextValueNumber;
419  return nextValueNumber++;
420  }
421 
422  for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
423  uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
424  uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
425  if (c_vn != cd_vn) {
426  valueNumbering[C] = nextValueNumber;
427  return nextValueNumber++;
428  }
429  }
430 
431  uint32_t v = lookupOrAdd(local_cdep);
432  valueNumbering[C] = v;
433  return v;
434  }
435 
436  // Non-local case.
438  MD->getNonLocalCallDependency(CallSite(C));
439  // FIXME: Move the checking logic to MemDep!
440  CallInst* cdep = nullptr;
441 
442  // Check to see if we have a single dominating call instruction that is
443  // identical to C.
444  for (unsigned i = 0, e = deps.size(); i != e; ++i) {
445  const NonLocalDepEntry *I = &deps[i];
446  if (I->getResult().isNonLocal())
447  continue;
448 
449  // We don't handle non-definitions. If we already have a call, reject
450  // instruction dependencies.
451  if (!I->getResult().isDef() || cdep != nullptr) {
452  cdep = nullptr;
453  break;
454  }
455 
456  CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
457  // FIXME: All duplicated with non-local case.
458  if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
459  cdep = NonLocalDepCall;
460  continue;
461  }
462 
463  cdep = nullptr;
464  break;
465  }
466 
467  if (!cdep) {
468  valueNumbering[C] = nextValueNumber;
469  return nextValueNumber++;
470  }
471 
472  if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
473  valueNumbering[C] = nextValueNumber;
474  return nextValueNumber++;
475  }
476  for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
477  uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
478  uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
479  if (c_vn != cd_vn) {
480  valueNumbering[C] = nextValueNumber;
481  return nextValueNumber++;
482  }
483  }
484 
485  uint32_t v = lookupOrAdd(cdep);
486  valueNumbering[C] = v;
487  return v;
488  } else {
489  valueNumbering[C] = nextValueNumber;
490  return nextValueNumber++;
491  }
492 }

/// Returns true if a value number exists for the specified value.
bool GVN::ValueTable::exists(Value *V) const {
  return valueNumbering.count(V) != 0;
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t GVN::ValueTable::lookupOrAdd(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction *I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookupOrAddCall(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::ExtractElement:
    case Instruction::InsertElement:
    case Instruction::ShuffleVector:
    case Instruction::InsertValue:
    case Instruction::GetElementPtr:
      exp = createExpr(I);
      break;
    case Instruction::ExtractValue:
      exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
      break;
    case Instruction::PHI:
      valueNumbering[V] = nextValueNumber;
      NumberingPhi[nextValueNumber] = cast<PHINode>(V);
      return nextValueNumber++;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t e = assignExpNewValueNum(exp).first;
  valueNumbering[V] = e;
  return e;
}

/// Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t GVN::ValueTable::lookup(Value *V, bool Verify) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  if (Verify) {
    assert(VI != valueNumbering.end() && "Value not numbered?");
    return VI->second;
  }
  return (VI != valueNumbering.end()) ? VI->second : 0;
}

/// Returns the value number of the given comparison,
/// assigning it a new number if it did not have one before. Useful when
/// we deduced the result of a comparison, but don't immediately have an
/// instruction realizing that comparison to hand.
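/// (For instance, propagateEquality can deduce that a comparison holds along
/// a dominated edge and wants its value number without materializing an
/// actual icmp/fcmp instruction.)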
uint32_t GVN::ValueTable::lookupOrAddCmp(unsigned Opcode,
                                         CmpInst::Predicate Predicate,
                                         Value *LHS, Value *RHS) {
  Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
  return assignExpNewValueNum(exp).first;
}

/// Remove all entries from the ValueTable.
void GVN::ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  NumberingPhi.clear();
  PhiTranslateTable.clear();
  nextValueNumber = 1;
  Expressions.clear();
  ExprIdx.clear();
  nextExprNumber = 0;
}

/// Remove a value from the value numbering.
void GVN::ValueTable::erase(Value *V) {
  uint32_t Num = valueNumbering.lookup(V);
  valueNumbering.erase(V);
  // If V is a PHINode, V <--> value number is a one-to-one mapping.
  if (isa<PHINode>(V))
    NumberingPhi.erase(Num);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void GVN::ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

PreservedAnalyses GVN::run(Function &F, FunctionAnalysisManager &AM) {
  // FIXME: The order of evaluation of these 'getResult' calls is very
  // significant! Re-ordering these variables will cause GVN when run alone to
  // be less effective! We should fix memdep and basic-aa to not exhibit this
  // behavior, but until then don't change the order here.
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &MemDep = AM.getResult<MemoryDependenceAnalysis>(F);
  auto *LI = AM.getCachedResult<LoopAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  bool Changed = runImpl(F, AC, DT, TLI, AA, &MemDep, LI, &ORE);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<GlobalsAA>();
  PA.preserve<TargetLibraryAnalysis>();
  return PA;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void GVN::dump(DenseMap<uint32_t, Value*>& d) const {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
         E = d.end(); I != E; ++I) {
    errs() << I->first << "\n";
    I->second->dump();
  }
  errs() << "}\n";
}
#endif

/// Return true if we can prove that the value
/// we're analyzing is fully available in the specified block. As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks. This
/// map is actually a four-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
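///
/// For example, for a loop "entry -> loop; loop -> loop, exit" with the value
/// available in 'entry', the query for 'loop' first marks 'loop' as
/// speculatively available (state 2); the recursive check of its predecessors
/// ('entry' and 'loop' itself) then succeeds, so the speculation stands.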
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                             DenseMap<BasicBlock*, char> &FullyAvailableBlocks,
                                         uint32_t RecurseDepth) {
  if (RecurseDepth > MaxRecurseDepth)
    return false;

  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
      FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either. Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks, RecurseDepth+1))
      goto SpeculationFailure;

  return true;

// If we get here, we found out that this is not, after
// all, a fully-available block. We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect. Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks. This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue; // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    BBWorklist.append(succ_begin(Entry), succ_end(Entry));
  } while (!BBWorklist.empty());

  return false;
}

/// Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI. This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                        SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     GVN &gvn) {
  // Check for the fully redundant, dominating load case. In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
                                               LI->getParent())) {
    assert(!ValuesPerBlock[0].AV.isUndefValue() &&
           "Dead BB dominate this block");
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI, gvn);
  }

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI->getType(), LI->getName());

  for (const AvailableValueInBlock &AV : ValuesPerBlock) {
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LI, gvn));
  }

  // Perform PHI construction.
  return SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());
}

Value *AvailableValue::MaterializeAdjustedValue(LoadInst *LI,
                                                Instruction *InsertPt,
                                                GVN &gvn) const {
  Value *Res;
  Type *LoadTy = LI->getType();
  const DataLayout &DL = LI->getModule()->getDataLayout();
  if (isSimpleValue()) {
    Res = getSimpleValue();
    if (Res->getType() != LoadTy) {
      Res = getStoreValueForLoad(Res, Offset, LoadTy, InsertPt, DL);

      DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
                   << *getSimpleValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
  } else if (isCoercedLoadValue()) {
    LoadInst *Load = getCoercedLoadValue();
    if (Load->getType() == LoadTy && Offset == 0) {
      Res = Load;
    } else {
      Res = getLoadValueForLoad(Load, Offset, LoadTy, InsertPt, DL);
      // We would like to use gvn.markInstructionForDeletion here, but we can't
      // because the load is already memoized into the leader map table that GVN
      // tracks. It is potentially possible to remove the load from the table,
      // but then all of the operations based on it would need to be rehashed.
      // Just leave the dead load around.
      gvn.getMemDep().removeInstruction(Load);
      DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset << "  "
                   << *getCoercedLoadValue() << '\n'
                   << *Res << '\n'
                   << "\n\n\n");
    }
  } else if (isMemIntrinValue()) {
    Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
                                 InsertPt, DL);
    DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                 << "  " << *getMemIntrinValue() << '\n'
                 << *Res << '\n' << "\n\n\n");
  } else {
    assert(isUndefValue() && "Should be UndefVal");
    DEBUG(dbgs() << "GVN COERCED NONLOCAL Undef:\n";);
    return UndefValue::get(LoadTy);
  }
  assert(Res && "failed to materialize?");
  return Res;
}

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// \brief Try to locate the three instructions involved in a missed
/// load-elimination case that is due to an intervening store.
static void reportMayClobberedLoad(LoadInst *LI, MemDepResult DepInfo,
                                   DominatorTree *DT,
                                   OptimizationRemarkEmitter *ORE) {
  using namespace ore;

  User *OtherAccess = nullptr;

  OptimizationRemarkMissed R(DEBUG_TYPE, "LoadClobbered", LI);
  R << "load of type " << NV("Type", LI->getType()) << " not eliminated"
    << setExtraArgs();

  for (auto *U : LI->getPointerOperand()->users())
    if (U != LI && (isa<LoadInst>(U) || isa<StoreInst>(U)) &&
        DT->dominates(cast<Instruction>(U), LI)) {
      // FIXME: for now give up if there are multiple memory accesses that
      // dominate the load. We need further analysis to decide which one
      // we're forwarding from.
      if (OtherAccess)
        OtherAccess = nullptr;
      else
        OtherAccess = U;
    }

  if (OtherAccess)
    R << " in favor of " << NV("OtherAccess", OtherAccess);

  R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst());

  ORE->emit(R);
}

bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
                                  Value *Address, AvailableValue &Res) {
  assert((DepInfo.isDef() || DepInfo.isClobber()) &&
         "expected a local dependence");
  assert(LI->isUnordered() && "rules below are incorrect for ordered access");

  const DataLayout &DL = LI->getModule()->getDataLayout();

  if (DepInfo.isClobber()) {
    // If the dependence is to a store that writes to a superset of the bits
    // read by the load, we can extract the bits we need for the load from the
    // stored value.
    if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
      // Can't forward from non-atomic to atomic without violating memory model.
      if (Address && LI->isAtomic() <= DepSI->isAtomic()) {
        int Offset =
            analyzeLoadFromClobberingStore(LI->getType(), Address, DepSI, DL);
        if (Offset != -1) {
          Res = AvailableValue::get(DepSI->getValueOperand(), Offset);
          return true;
        }
      }
    }

    // Check to see if we have something like this:
    //    load i32* P
    //    load i8* (P+1)
    // if we have this, replace the later with an extraction from the former.
    if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) {
      // If this is a clobber and L is the first instruction in its block, then
      // we have the first instruction in the entry block.
      // Can't forward from non-atomic to atomic without violating memory model.
      if (DepLI != LI && Address && LI->isAtomic() <= DepLI->isAtomic()) {
        int Offset =
            analyzeLoadFromClobberingLoad(LI->getType(), Address, DepLI, DL);

        if (Offset != -1) {
          Res = AvailableValue::getLoad(DepLI, Offset);
          return true;
        }
      }
    }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
      if (Address && !LI->isAtomic()) {
        int Offset = analyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                      DepMI, DL);
        if (Offset != -1) {
          Res = AvailableValue::getMI(DepMI, Offset);
          return true;
        }
      }
    }
    // Nothing known about this clobber, have to be conservative.
    DEBUG(
      // fast print dep, using operator<< on instruction is too slow.
      dbgs() << "GVN: load ";
      LI->printAsOperand(dbgs());
      Instruction *I = DepInfo.getInst();
      dbgs() << " is clobbered by " << *I << '\n';
    );
    if (ORE->allowExtraAnalysis(DEBUG_TYPE))
      reportMayClobberedLoad(LI, DepInfo, DT, ORE);

    return false;
  }
  assert(DepInfo.isDef() && "follows from above");

  Instruction *DepInst = DepInfo.getInst();

  // Loading the allocation -> undef.
  if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
      // Loading immediately after lifetime begin -> undef.
      isLifetimeStart(DepInst)) {
    Res = AvailableValue::get(UndefValue::get(LI->getType()));
    return true;
  }

  // Loading from calloc (which zero initializes memory) -> zero.
  if (isCallocLikeFn(DepInst, TLI)) {
    Res = AvailableValue::get(Constant::getNullValue(LI->getType()));
    return true;
  }

  if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
    // Reject loads and stores that are to the same address but are of
    // different types if we have to. If the stored value is larger or equal to
    // the loaded value, we can reuse it.
    if (S->getValueOperand()->getType() != LI->getType() &&
        !canCoerceMustAliasedValueToLoad(S->getValueOperand(),
                                         LI->getType(), DL))
      return false;

    // Can't forward from non-atomic to atomic without violating memory model.
    if (S->isAtomic() < LI->isAtomic())
      return false;

    Res = AvailableValue::get(S->getValueOperand());
    return true;
  }

  if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
    // If the types mismatch and we can't handle it, reject reuse of the load.
    // If the stored value is larger or equal to the loaded value, we can reuse
    // it.
    if (LD->getType() != LI->getType() &&
        !canCoerceMustAliasedValueToLoad(LD, LI->getType(), DL))
      return false;

    // Can't forward from non-atomic to atomic without violating memory model.
    if (LD->isAtomic() < LI->isAtomic())
      return false;

    Res = AvailableValue::getLoad(LD);
    return true;
  }

  // Unknown def - must be conservative.
  DEBUG(
    // fast print dep, using operator<< on instruction is too slow.
    dbgs() << "GVN: load ";
    LI->printAsOperand(dbgs());
    dbgs() << " has unknown def " << *DepInst << '\n';
  );
  return false;
}

void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
                                  AvailValInBlkVect &ValuesPerBlock,
                                  UnavailBlkVect &UnavailableBlocks) {
  // Filter out useless results (non-locals, etc). Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  unsigned NumDeps = Deps.size();
  for (unsigned i = 0, e = NumDeps; i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DeadBlocks.count(DepBB)) {
      // A dead dependent mem-op is disguised as a load evaluating the same
      // value as the load in question.
      ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
      continue;
    }

    if (!DepInfo.isDef() && !DepInfo.isClobber()) {
      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    // The address being loaded in this non-local block may not be the same as
    // the pointer operand of the load if PHI translation occurs. Make sure
    // to consider the right address.
    Value *Address = Deps[i].getAddress();

    AvailableValue AV;
    if (AnalyzeLoadAvailability(LI, DepInfo, Address, AV)) {
      // subtlety: because we know this was a non-local dependency, we know
      // it's safe to materialize anywhere between the instruction within
      // DepInfo and the end of its block.
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          std::move(AV)));
    } else {
      UnavailableBlocks.push_back(DepBB);
    }
  }

  assert(NumDeps == ValuesPerBlock.size() + UnavailableBlocks.size() &&
         "post condition violation");
}

bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
                         UnavailBlkVect &UnavailableBlocks) {
  // Okay, we have *some* definitions of the value. This means that the value
  // is available in some of our (transitive) predecessors. Let's think about
  // doing PRE of this load. This will involve inserting a new load into the
  // predecessor when it's not available. We could do this in general, but
  // prefer to not increase code size. As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).
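  //
  // For example (illustrative IR), given
  //
  //   then:
  //     store i32 0, i32* %p        ; value available on this path
  //     br label %merge
  //   merge:                        ; second predecessor has no value
  //     %v = load i32, i32* %p
  //
  // load PRE inserts "%v.pre = load i32, i32* %p" into the other
  // predecessor and replaces %v with a phi of 0 and %v.pre.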

  SmallPtrSet<BasicBlock *, 4> Blockers(UnavailableBlocks.begin(),
                                        UnavailableBlocks.end());

  // Let's find the first basic block with more than one predecessor. Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;
  bool IsSafeToSpeculativelyExecute = isSafeToSpeculativelyExecute(LI);

  // Check that there are no implicit control flow instructions above our load
  // in its block. If there is an instruction that doesn't always pass the
  // execution to the following instruction, then moving through it may become
  // invalid. For example:
  //
  // int arr[LEN];
  // int index = ???;
  // ...
  // guard(0 <= index && index < LEN);
  // use(arr[index]);
  //
  // It is illegal to move the array access to any point above the guard,
  // because if the index is out of bounds we should deoptimize rather than
  // access the array.
  // Check that there is no guard in this block above our instruction.
  if (!IsSafeToSpeculativelyExecute) {
    auto It = FirstImplicitControlFlowInsts.find(TmpBB);
    if (It != FirstImplicitControlFlowInsts.end()) {
      assert(It->second->getParent() == TmpBB &&
             "Implicit control flow map broken?");
      if (OI->dominates(It->second, LI))
        return false;
    }
  }
  while (TmpBB->getSinglePredecessor()) {
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // If any of these blocks has more than one successor (i.e. if the edge we
    // just traversed was critical), then there are other paths through this
    // block along which the load may not be anticipated. Hoisting the load
    // above this block would be adding the load to execution paths along
    // which it was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      return false;

    // Check that there is no implicit control flow in a block above.
    if (!IsSafeToSpeculativelyExecute &&
        FirstImplicitControlFlowInsts.count(TmpBB))
      return false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // Check to see how many predecessors have the loaded value fully
  // available.
  MapVector<BasicBlock *, Value *> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (const AvailableValueInBlock &AV : ValuesPerBlock)
    FullyAvailableBlocks[AV.BB] = true;
  for (BasicBlock *UnavailableBB : UnavailableBlocks)
    FullyAvailableBlocks[UnavailableBB] = false;

  SmallVector<BasicBlock *, 4> CriticalEdgePred;
  for (BasicBlock *Pred : predecessors(LoadBB)) {
    // If any predecessor block is an EH pad that does not allow non-PHI
    // instructions before the terminator, we can't PRE the load.
    if (Pred->getTerminator()->isEHPad()) {
      DEBUG(dbgs()
            << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
            << Pred->getName() << "': " << *LI << '\n');
      return false;
    }

    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks, 0)) {
      continue;
    }

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      if (LoadBB->isEHPad()) {
        DEBUG(dbgs()
              << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      CriticalEdgePred.push_back(Pred);
    } else {
      // Only add the predecessors that will not be split for now.
      PredLoads[Pred] = nullptr;
    }
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should already be eliminated!");

  // If this load is unavailable in multiple predecessors, reject it.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  if (NumUnavailablePreds != 1)
    return false;

  // Split critical edges, and update the unavailable predecessors accordingly.
  for (BasicBlock *OrigPred : CriticalEdgePred) {
    BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
    assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
    PredLoads[NewPred] = nullptr;
    DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
                 << LoadBB->getName() << '\n');
  }

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  const DataLayout &DL = LI->getModule()->getDataLayout();
  SmallVector<Instruction*, 8> NewInsts;
  for (auto &PredLoad : PredLoads) {
    BasicBlock *UnavailablePred = PredLoad.first;

    // Do PHI translation to get its value in the predecessor if necessary. The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.

    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
    PHITransAddr Address(LI->getPointerOperand(), DL, AC);
    Value *LoadPtr = nullptr;
    LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                *DT, NewInsts);

    // If we couldn't find or insert a computation of this phi translated value,
    // we fail PRE.
    if (!LoadPtr) {
      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
            << *LI->getPointerOperand() << "\n");
      CanDoPRE = false;
      break;
    }

    PredLoad.second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty()) {
      Instruction *I = NewInsts.pop_back_val();
      markInstructionForDeletion(I);
    }
    // HINT: Don't revert the edge-splitting as following transformation may
    // also need to split these critical edges.
    return !CriticalEdgePred.empty();
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors, do
  // it.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  // Assign value numbers to the new instructions.
  for (Instruction *I : NewInsts) {
    // Instructions that have been inserted in predecessor(s) to materialize
    // the load address do not retain their original debug locations. Doing
    // so could lead to confusing (but correct) source attributions.
    // FIXME: How do we retain source locations without causing poor debugging
    // behavior?
    I->setDebugLoc(DebugLoc());

    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map. However, in doing so, we risk getting into
    // ordering issues. If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookupOrAdd(I);
  }

  for (const auto &PredLoad : PredLoads) {
    BasicBlock *UnavailablePred = PredLoad.first;
    Value *LoadPtr = PredLoad.second;

    auto *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre",
                                 LI->isVolatile(), LI->getAlignment(),
                                 LI->getOrdering(), LI->getSyncScopeID(),
                                 UnavailablePred->getTerminator());
    NewLoad->setDebugLoc(LI->getDebugLoc());

    // Transfer the old load's AA tags to the new load.
    AAMDNodes Tags;
    LI->getAAMetadata(Tags);
    if (Tags)
      NewLoad->setAAMetadata(Tags);

    if (auto *MD = LI->getMetadata(LLVMContext::MD_invariant_load))
      NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
    if (auto *InvGroupMD = LI->getMetadata(LLVMContext::MD_invariant_group))
      NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
    if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range))
      NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);

    // We do not propagate the old load's debug location, because the new
    // load now lives in a different BB, and we want to avoid a jumpy line
    // table.
    // FIXME: How do we retain source locations without causing poor debugging
    // behavior?

    // Add the newly created load.
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (Instruction *I = dyn_cast<Instruction>(V))
    I->setDebugLoc(LI->getDebugLoc());
  if (V->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(V);
  markInstructionForDeletion(LI);
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "LoadPRE", LI)
           << "load eliminated by PRE";
  });
  ++NumPRELoad;
  return true;
}

static void reportLoadElim(LoadInst *LI, Value *AvailableValue,
                           OptimizationRemarkEmitter *ORE) {
  using namespace ore;

  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "LoadElim", LI)
           << "load of type " << NV("Type", LI->getType()) << " eliminated"
           << setExtraArgs() << " in favor of "
           << NV("InfavorOfValue", AvailableValue);
  });
}

/// Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI) {
  // Non-local speculations are not allowed under asan.
  if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeAddress))
    return false;

  // Step 1: Find the non-local dependencies of the load.
  LoadDepVect Deps;
  MD->getNonLocalPointerDependency(LI, Deps);

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about. Optimizing
  // it will be too expensive.
  unsigned NumDeps = Deps.size();
  if (NumDeps > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block. Reject this early.
  if (NumDeps == 1 &&
      !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      LI->printAsOperand(dbgs());
      dbgs() << " has unknown dependencies\n";
    );
    return false;
  }

  // If this load follows a GEP, see if we can PRE the indices before analyzing.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0))) {
    for (GetElementPtrInst::op_iterator OI = GEP->idx_begin(),
                                        OE = GEP->idx_end();
         OI != OE; ++OI)
      if (Instruction *I = dyn_cast<Instruction>(OI->get()))
        performScalarPRE(I);
  }

  // Step 2: Analyze the availability of the load.
  AvailValInBlkVect ValuesPerBlock;
  UnavailBlkVect UnavailableBlocks;
  AnalyzeLoadAvailability(LI, Deps, ValuesPerBlock, UnavailableBlocks);

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty())
    return false;

  // Step 3: Eliminate full redundancy.
  //
  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value. Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (Instruction *I = dyn_cast<Instruction>(V))
      // If instruction I has debug info, then we should not update it.
      // Also, if I has a null DebugLoc, then it is still potentially incorrect
      // to propagate LI's DebugLoc because LI may not post-dominate I.
      if (LI->getDebugLoc() && LI->getParent() == I->getParent())
        I->setDebugLoc(LI->getDebugLoc());
    if (V->getType()->isPtrOrPtrVectorTy())
      MD->invalidateCachedPointerInfo(V);
    markInstructionForDeletion(LI);
    ++NumGVNLoad;
    reportLoadElim(LI, V, ORE);
    return true;
  }

  // Step 4: Eliminate partial redundancy.
  if (!EnablePRE || !EnableLoadPRE)
    return false;

  return PerformLoadPRE(LI, ValuesPerBlock, UnavailableBlocks);
}

bool GVN::processAssumeIntrinsic(IntrinsicInst *IntrinsicI) {
  assert(IntrinsicI->getIntrinsicID() == Intrinsic::assume &&
         "This function can only be called with llvm.assume intrinsic");
  Value *V = IntrinsicI->getArgOperand(0);

  if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
    if (Cond->isZero()) {
      Type *Int8Ty = Type::getInt8Ty(V->getContext());
      // Insert a new store-to-null instruction before the assume to indicate
      // that this code is not reachable. FIXME: We could insert unreachable
      // instruction directly because we can modify the CFG.
      new StoreInst(UndefValue::get(Int8Ty),
                    Constant::getNullValue(Int8Ty->getPointerTo()),
                    IntrinsicI);
    }
    markInstructionForDeletion(IntrinsicI);
    return false;
  } else if (isa<Constant>(V)) {
    // If it's not false, and constant, it must evaluate to true. This means our
    // assume is assume(true), and thus, pointless, and we don't want to do
    // anything more here.
    return false;
  }

  Constant *True = ConstantInt::getTrue(V->getContext());
  bool Changed = false;

  for (BasicBlock *Successor : successors(IntrinsicI->getParent())) {
    BasicBlockEdge Edge(IntrinsicI->getParent(), Successor);

    // This property is only true in dominated successors, propagateEquality
    // will check dominance for us.
    Changed |= propagateEquality(V, True, Edge, false);
  }

  // We can replace assume value with true, which covers cases like this:
  // call void @llvm.assume(i1 %cmp)
  // br i1 %cmp, label %bb1, label %bb2 ; will change %cmp to true
  ReplaceWithConstMap[V] = True;

  // If one of *cmp *eq operand is const, adding it to map will cover this:
  // %cmp = fcmp oeq float 3.000000e+00, %0 ; const on lhs could happen
  // call void @llvm.assume(i1 %cmp)
  // ret float %0 ; will change it to ret float 3.000000e+00
  if (auto *CmpI = dyn_cast<CmpInst>(V)) {
    if (CmpI->getPredicate() == CmpInst::Predicate::ICMP_EQ ||
        CmpI->getPredicate() == CmpInst::Predicate::FCMP_OEQ ||
        (CmpI->getPredicate() == CmpInst::Predicate::FCMP_UEQ &&
         CmpI->getFastMathFlags().noNaNs())) {
      Value *CmpLHS = CmpI->getOperand(0);
      Value *CmpRHS = CmpI->getOperand(1);
      if (isa<Constant>(CmpLHS))
        std::swap(CmpLHS, CmpRHS);
      auto *RHSConst = dyn_cast<Constant>(CmpRHS);

      // If only one operand is constant.
      if (RHSConst != nullptr && !isa<Constant>(CmpLHS))
        ReplaceWithConstMap[CmpLHS] = RHSConst;
    }
  }
  return Changed;
}

static void patchReplacementInstruction(Instruction *I, Value *Repl) {
  auto *ReplInst = dyn_cast<Instruction>(Repl);
  if (!ReplInst)
    return;

  // Patch the replacement so that it is not more restrictive than the value
  // being replaced.
  // Note that if 'I' is a load being replaced by some operation,
  // for example, by an arithmetic operation, then andIRFlags()
  // would just erase all math flags from the original arithmetic
  // operation, which is clearly not wanted and not needed.
  if (!isa<LoadInst>(I))
    ReplInst->andIRFlags(I);

  // FIXME: If both the original and replacement value are part of the
  // same control-flow region (meaning that the execution of one
  // guarantees the execution of the other), then we can combine the
  // noalias scopes here and do better than the general conservative
  // answer used in combineMetadata().

  // In general, GVN unifies expressions over different control-flow
  // regions, and so we need a conservative combination of the noalias
  // scopes.
  static const unsigned KnownIDs[] = {
      LLVMContext::MD_tbaa,           LLVMContext::MD_alias_scope,
      LLVMContext::MD_noalias,        LLVMContext::MD_range,
      LLVMContext::MD_fpmath,         LLVMContext::MD_invariant_load,
      LLVMContext::MD_invariant_group};
  combineMetadata(ReplInst, I, KnownIDs);
}

static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
  patchReplacementInstruction(I, Repl);
  I->replaceAllUsesWith(Repl);
}

/// Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L) {
  if (!MD)
    return false;

  // This code hasn't been audited for ordered or volatile memory access.
  if (!L->isUnordered())
    return false;

  if (L->use_empty()) {
    markInstructionForDeletion(L);
    return true;
  }

  // ... to a pointer that has been loaded from before...
  MemDepResult Dep = MD->getDependency(L);

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L);

  // Only handle the local case below.
  if (!Dep.isDef() && !Dep.isClobber()) {
    // This might be a NonFuncLocal or an Unknown.
    DEBUG(
      // fast print dep, using operator<< on instruction is too slow.
      dbgs() << "GVN: load ";
      L->printAsOperand(dbgs());
      dbgs() << " has unknown dependence\n";
    );
    return false;
  }

  AvailableValue AV;
  if (AnalyzeLoadAvailability(L, Dep, L->getPointerOperand(), AV)) {
    Value *AvailableValue = AV.MaterializeAdjustedValue(L, L, *this);

    // Replace the load!
    patchAndReplaceAllUsesWith(L, AvailableValue);
    markInstructionForDeletion(L);
    ++NumGVNLoad;
    reportLoadElim(L, AvailableValue, ORE);
    // Tell MDA to re-examine the reused pointer since we might have more
    // information after forwarding it.
    if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy())
      MD->invalidateCachedPointerInfo(AvailableValue);
    return true;
  }

  return false;
}

/// Return a pair whose first field is the value number of \p Exp and whose
/// second field indicates whether that value number is newly created.
std::pair<uint32_t, bool>
GVN::ValueTable::assignExpNewValueNum(Expression &Exp) {
  uint32_t &e = expressionNumbering[Exp];
  bool CreateNewValNum = !e;
  if (CreateNewValNum) {
    Expressions.push_back(Exp);
    if (ExprIdx.size() < nextValueNumber + 1)
      ExprIdx.resize(nextValueNumber * 2);
    e = nextValueNumber;
    ExprIdx[nextValueNumber++] = nextExprNumber++;
  }
  return {e, CreateNewValNum};
}

/// Return whether all the values associated with the same \p num are
/// defined in \p BB.
bool GVN::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
                                     GVN &Gvn) {
  LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
  while (Vals && Vals->BB == BB)
    Vals = Vals->Next;
  return !Vals;
}

/// Wrap phiTranslateImpl to provide caching functionality.
uint32_t GVN::ValueTable::phiTranslate(const BasicBlock *Pred,
                                       const BasicBlock *PhiBlock,
                                       uint32_t Num, GVN &Gvn) {
  auto FindRes = PhiTranslateTable.find({Num, Pred});
  if (FindRes != PhiTranslateTable.end())
    return FindRes->second;
  uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
  PhiTranslateTable.insert({{Num, Pred}, NewNum});
  return NewNum;
}

/// Translate value number \p Num using phis, so that it has the values of
/// the phis in BB.
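/// (For example, given "%p = phi [%a, %Pred], ..." in PhiBlock, the number
/// of "add %x, %p" translates to the number of "add %x, %a" when entering
/// from Pred.)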
1574 uint32_t GVN::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
1575  const BasicBlock *PhiBlock,
1576  uint32_t Num, GVN &Gvn) {
1577  if (PHINode *PN = NumberingPhi[Num]) {
1578  for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
1579  if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
1580  if (uint32_t TransVal = lookup(PN->getIncomingValue(i), false))
1581  return TransVal;
1582  }
1583  return Num;
1584  }
1585 
1586  // If there is any value related with Num is defined in a BB other than
1587  // PhiBlock, it cannot depend on a phi in PhiBlock without going through
1588  // a backedge. We can do an early exit in that case to save compile time.
1589  if (!areAllValsInBB(Num, PhiBlock, Gvn))
1590  return Num;
1591 
1592  if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
1593  return Num;
1594  Expression Exp = Expressions[ExprIdx[Num]];
1595 
1596  for (unsigned i = 0; i < Exp.varargs.size(); i++) {
1597  // For InsertValue and ExtractValue, some varargs are index numbers
1598  // instead of value numbers. Those index numbers should not be
1599  // translated.
1600  if ((i > 1 && Exp.opcode == Instruction::InsertValue) ||
1601  (i > 0 && Exp.opcode == Instruction::ExtractValue))
1602  continue;
1603  Exp.varargs[i] = phiTranslate(Pred, PhiBlock, Exp.varargs[i], Gvn);
1604  }
1605 
1606  if (Exp.commutative) {
1607  assert(Exp.varargs.size() == 2 && "Unsupported commutative expression!");
1608  if (Exp.varargs[0] > Exp.varargs[1]) {
1609  std::swap(Exp.varargs[0], Exp.varargs[1]);
1610  uint32_t Opcode = Exp.opcode >> 8;
1611  if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
1612  Exp.opcode = (Opcode << 8) |
1614  static_cast<CmpInst::Predicate>(Exp.opcode & 255));
1615  }
1616  }
1617 
1618  if (uint32_t NewNum = expressionNumbering[Exp])
1619  return NewNum;
1620  return Num;
1621 }
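// Worked example (illustrative IR, not from this file): with
//   PhiBlock:
//     %p = phi i32 [ %a, %Pred ], [ %b, %Other ]
//     %x = add i32 %p, 1
// translating the value number of %x along the %Pred edge replaces the
// number of %p with the number of %a, yielding the number of
// "add i32 %a, 1", which can then be looked up in %Pred via findLeader.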
1622 
1623 /// Erase stale entries from the phiTranslate cache so that phiTranslate can
1624 /// be computed again.
1625 void GVN::ValueTable::eraseTranslateCacheEntry(uint32_t Num,
1626  const BasicBlock &CurrBlock) {
1627  for (const BasicBlock *Pred : predecessors(&CurrBlock)) {
1628  auto FindRes = PhiTranslateTable.find({Num, Pred});
1629  if (FindRes != PhiTranslateTable.end())
1630  PhiTranslateTable.erase(FindRes);
1631  }
1632 }
1633 
1634 // In order to find a leader for a given value number at a
1635 // specific basic block, we first obtain the list of all Values for that number,
1636 // and then scan the list to find one whose block dominates the block in
1637 // question. This is fast because dominator tree queries consist of only
1638 // a few comparisons of DFS numbers.
1639 Value *GVN::findLeader(const BasicBlock *BB, uint32_t num) {
1640  LeaderTableEntry Vals = LeaderTable[num];
1641  if (!Vals.Val) return nullptr;
1642 
1643  Value *Val = nullptr;
1644  if (DT->dominates(Vals.BB, BB)) {
1645  Val = Vals.Val;
1646  if (isa<Constant>(Val)) return Val;
1647  }
1648 
1649  LeaderTableEntry* Next = Vals.Next;
1650  while (Next) {
1651  if (DT->dominates(Next->BB, BB)) {
1652  if (isa<Constant>(Next->Val)) return Next->Val;
1653  if (!Val) Val = Next->Val;
1654  }
1655 
1656  Next = Next->Next;
1657  }
1658 
1659  return Val;
1660 }
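// For instance (editorial sketch): if "%a = add i32 %x, %y" in a block
// dominating BB shares value number num with "%b = add i32 %x, %y" in an
// unrelated block, findLeader(BB, num) returns %a; a dominating Constant
// entry, if present anywhere in the chain, is returned in preference to any
// instruction.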
1661 
1662 /// There is an edge from 'Src' to 'Dst'. Return
1663 /// true if every path from the entry block to 'Dst' passes via this edge. In
1664 /// particular 'Dst' must not be reachable via another edge from 'Src'.
1665 static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
1666  DominatorTree *DT) {
1667  // While in theory it is interesting to consider the case in which Dst has
1668  // more than one predecessor, because Dst might be part of a loop which is
1669  // only reachable from Src, in practice it is pointless since at the time
1670  // GVN runs all such loops have preheaders, which means that Dst will have
1671  // been changed to have only one predecessor, namely Src.
1672  const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
1673  assert((!Pred || Pred == E.getStart()) &&
1674  "No edge between these basic blocks!");
1675  return Pred != nullptr;
1676 }
1677 
1678 void GVN::assignBlockRPONumber(Function &F) {
1679  uint32_t NextBlockNumber = 1;
1680  ReversePostOrderTraversal<Function *> RPOT(&F);
1681  for (BasicBlock *BB : RPOT)
1682  BlockRPONumber[BB] = NextBlockNumber++;
1683 }
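// Editorial note: in a reverse post-order numbering, every non-back edge
// P -> B has BlockRPONumber[P] < BlockRPONumber[B], so performScalarPRE
// below can use BlockRPONumber[P] >= BlockRPONumber[B] as a cheap,
// conservative "P -> B may be a loop backedge" test.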
1684 
1685 // Tries to replace an instruction's operands with constants, using
1686 // information from ReplaceWithConstMap.
1687 bool GVN::replaceOperandsWithConsts(Instruction *Instr) const {
1688  bool Changed = false;
1689  for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) {
1690  Value *Operand = Instr->getOperand(OpNum);
1691  auto it = ReplaceWithConstMap.find(Operand);
1692  if (it != ReplaceWithConstMap.end()) {
1693  assert(!isa<Constant>(Operand) &&
1694  "Replacing constants with constants is invalid");
1695  DEBUG(dbgs() << "GVN replacing: " << *Operand << " with " << *it->second
1696  << " in instruction " << *Instr << '\n');
1697  Instr->setOperand(OpNum, it->second);
1698  Changed = true;
1699  }
1700  }
1701  return Changed;
1702 }
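// E.g. (illustrative IR): once processAssumeIntrinsic has recorded from
//   %c = icmp eq i32 %x, 7
//   call void @llvm.assume(i1 %c)
// that %x equals 7 in this block, a later "%y = add i32 %x, 1" in the same
// block is rewritten here to "%y = add i32 7, 1".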
1703 
1704 /// The given values are known to be equal in every block
1705 /// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with
1706 /// 'RHS' everywhere in the scope. Returns whether a change was made.
1707 /// If DominatesByEdge is false, then it means that we will propagate the RHS
1708 /// value starting from the end of Root.Start.
1709 bool GVN::propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root,
1710  bool DominatesByEdge) {
1711  SmallVector<std::pair<Value*, Value*>, 4> Worklist;
1712  Worklist.push_back(std::make_pair(LHS, RHS));
1713  bool Changed = false;
1714  // For speed, compute a conservative fast approximation to
1715  // DT->dominates(Root, Root.getEnd());
1716  const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);
1717 
1718  while (!Worklist.empty()) {
1719  std::pair<Value*, Value*> Item = Worklist.pop_back_val();
1720  LHS = Item.first; RHS = Item.second;
1721 
1722  if (LHS == RHS)
1723  continue;
1724  assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");
1725 
1726  // Don't try to propagate equalities between constants.
1727  if (isa<Constant>(LHS) && isa<Constant>(RHS))
1728  continue;
1729 
1730  // Prefer a constant on the right-hand side, or an Argument if no constants.
1731  if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
1732  std::swap(LHS, RHS);
1733  assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!");
1734 
1735  // If there is no obvious reason to prefer the left-hand side over the
1736  // right-hand side, ensure the longest lived term is on the right-hand side,
1737  // so the shortest lived term will be replaced by the longest lived.
1738  // This tends to expose more simplifications.
1739  uint32_t LVN = VN.lookupOrAdd(LHS);
1740  if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
1741  (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
1742  // Move the 'oldest' value to the right-hand side, using the value number
1743  // as a proxy for age.
1744  uint32_t RVN = VN.lookupOrAdd(RHS);
1745  if (LVN < RVN) {
1746  std::swap(LHS, RHS);
1747  LVN = RVN;
1748  }
1749  }
1750 
1751  // If value numbering later sees that an instruction in the scope is equal
1752  // to 'LHS' then ensure it will be turned into 'RHS'. In order to preserve
1753  // the invariant that instructions only occur in the leader table for their
1754  // own value number (this is used by removeFromLeaderTable), do not do this
1755  // if RHS is an instruction (if an instruction in the scope is morphed into
1756  // LHS then it will be turned into RHS by the next GVN iteration anyway, so
1757  // using the leader table is about compiling faster, not optimizing better).
1758  // The leader table only tracks basic blocks, not edges. Only add to it if we
1759  // have the simple case where the edge dominates the end.
1760  if (RootDominatesEnd && !isa<Instruction>(RHS))
1761  addToLeaderTable(LVN, RHS, Root.getEnd());
1762 
1763  // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope. As
1764  // LHS always has at least one use that is not dominated by Root, this will
1765  // never do anything if LHS has only one use.
1766  if (!LHS->hasOneUse()) {
1767  unsigned NumReplacements =
1768  DominatesByEdge
1769  ? replaceDominatedUsesWith(LHS, RHS, *DT, Root)
1770  : replaceDominatedUsesWith(LHS, RHS, *DT, Root.getStart());
1771 
1772  Changed |= NumReplacements > 0;
1773  NumGVNEqProp += NumReplacements;
1774  }
1775 
1776  // Now try to deduce additional equalities from this one. For example, if
1777  // the known equality was "(A != B)" == "false" then it follows that A and B
1778  // are equal in the scope. Only boolean equalities with an explicit true or
1779  // false RHS are currently supported.
1780  if (!RHS->getType()->isIntegerTy(1))
1781  // Not a boolean equality - bail out.
1782  continue;
1783  ConstantInt *CI = dyn_cast<ConstantInt>(RHS);
1784  if (!CI)
1785  // RHS neither 'true' nor 'false' - bail out.
1786  continue;
1787  // Whether RHS equals 'true'. Otherwise it equals 'false'.
1788  bool isKnownTrue = CI->isMinusOne();
1789  bool isKnownFalse = !isKnownTrue;
1790 
1791  // If "A && B" is known true then both A and B are known true. If "A || B"
1792  // is known false then both A and B are known false.
1793  Value *A, *B;
1794  if ((isKnownTrue && match(LHS, m_And(m_Value(A), m_Value(B)))) ||
1795  (isKnownFalse && match(LHS, m_Or(m_Value(A), m_Value(B))))) {
1796  Worklist.push_back(std::make_pair(A, RHS));
1797  Worklist.push_back(std::make_pair(B, RHS));
1798  continue;
1799  }
1800 
1801  // If we are propagating an equality like "(A == B)" == "true" then also
1802  // propagate the equality A == B. When propagating a comparison such as
1803  // "(A >= B)" == "true", replace all instances of "A < B" with "false".
1804  if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
1805  Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);
1806 
1807  // If "A == B" is known true, or "A != B" is known false, then replace
1808  // A with B everywhere in the scope.
1809  if ((isKnownTrue && Cmp->getPredicate() == CmpInst::ICMP_EQ) ||
1810  (isKnownFalse && Cmp->getPredicate() == CmpInst::ICMP_NE))
1811  Worklist.push_back(std::make_pair(Op0, Op1));
1812 
1813  // Handle the floating point versions of equality comparisons too.
1814  if ((isKnownTrue && Cmp->getPredicate() == CmpInst::FCMP_OEQ) ||
1815  (isKnownFalse && Cmp->getPredicate() == CmpInst::FCMP_UNE)) {
1816 
1817  // Floating point -0.0 and 0.0 compare equal, so we can only
1818  // propagate values if we know that we have a constant and that
1819  // its value is non-zero.
1820 
1821  // FIXME: We should do this optimization if 'no signed zeros' is
1822  // applicable via an instruction-level fast-math-flag or some other
1823  // indicator that relaxed FP semantics are being used.
1824 
1825  if (isa<ConstantFP>(Op1) && !cast<ConstantFP>(Op1)->isZero())
1826  Worklist.push_back(std::make_pair(Op0, Op1));
1827  }
1828 
1829  // If "A >= B" is known true, replace "A < B" with false everywhere.
1830  CmpInst::Predicate NotPred = Cmp->getInversePredicate();
1831  Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
1832  // Since we don't have the instruction "A < B" immediately to hand, work
1833  // out the value number that it would have and use that to find an
1834  // appropriate instruction (if any).
1835  uint32_t NextNum = VN.getNextUnusedValueNumber();
1836  uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1);
1837  // If the number we were assigned was brand new then there is no point in
1838  // looking for an instruction realizing it: there cannot be one!
1839  if (Num < NextNum) {
1840  Value *NotCmp = findLeader(Root.getEnd(), Num);
1841  if (NotCmp && isa<Instruction>(NotCmp)) {
1842  unsigned NumReplacements =
1843  DominatesByEdge
1844  ? replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root)
1845  : replaceDominatedUsesWith(NotCmp, NotVal, *DT,
1846  Root.getStart());
1847  Changed |= NumReplacements > 0;
1848  NumGVNEqProp += NumReplacements;
1849  }
1850  }
1851  // Ensure that any instruction in scope that gets the "A < B" value number
1852  // is replaced with false.
1853  // The leader table only tracks basic blocks, not edges. Only add to it if we
1854  // have the simple case where the edge dominates the end.
1855  if (RootDominatesEnd)
1856  addToLeaderTable(Num, NotVal, Root.getEnd());
1857 
1858  continue;
1859  }
1860  }
1861 
1862  return Changed;
1863 }
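// Worked example (illustrative IR): given
//   %c = icmp eq i32 %a, %b
//   br i1 %c, label %then, label %else
// processInstruction below propagates "%c == true" along the %then edge;
// propagateEquality then also queues "%a == %b", so dominated uses of the
// younger value are rewritten to the older (or constant) one, and any
// instruction in scope numbered like "icmp ne i32 %a, %b" is replaced by
// false.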
1864 
1865 /// When calculating availability, handle an instruction
1866 /// by inserting it into the appropriate sets
1867 bool GVN::processInstruction(Instruction *I) {
1868  // Ignore dbg info intrinsics.
1869  if (isa<DbgInfoIntrinsic>(I))
1870  return false;
1871 
1872  // If the instruction can be easily simplified then do so now in preference
1873  // to value numbering it. Value numbering often exposes redundancies, for
1874  // example if it determines that %y is equal to %x then the instruction
1875  // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
1876  const DataLayout &DL = I->getModule()->getDataLayout();
1877  if (Value *V = SimplifyInstruction(I, {DL, TLI, DT, AC})) {
1878  bool Changed = false;
1879  if (!I->use_empty()) {
1880  I->replaceAllUsesWith(V);
1881  Changed = true;
1882  }
1883  if (isInstructionTriviallyDead(I, TLI)) {
1884  markInstructionForDeletion(I);
1885  Changed = true;
1886  }
1887  if (Changed) {
1888  if (MD && V->getType()->isPtrOrPtrVectorTy())
1889  MD->invalidateCachedPointerInfo(V);
1890  ++NumGVNSimpl;
1891  return true;
1892  }
1893  }
1894 
1895  if (IntrinsicInst *IntrinsicI = dyn_cast<IntrinsicInst>(I))
1896  if (IntrinsicI->getIntrinsicID() == Intrinsic::assume)
1897  return processAssumeIntrinsic(IntrinsicI);
1898 
1899  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
1900  if (processLoad(LI))
1901  return true;
1902 
1903  unsigned Num = VN.lookupOrAdd(LI);
1904  addToLeaderTable(Num, LI, LI->getParent());
1905  return false;
1906  }
1907 
1908  // For conditional branches, we can perform simple conditional propagation on
1909  // the condition value itself.
1910  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
1911  if (!BI->isConditional())
1912  return false;
1913 
1914  if (isa<Constant>(BI->getCondition()))
1915  return processFoldableCondBr(BI);
1916 
1917  Value *BranchCond = BI->getCondition();
1918  BasicBlock *TrueSucc = BI->getSuccessor(0);
1919  BasicBlock *FalseSucc = BI->getSuccessor(1);
1920  // Avoid multiple edges early.
1921  if (TrueSucc == FalseSucc)
1922  return false;
1923 
1924  BasicBlock *Parent = BI->getParent();
1925  bool Changed = false;
1926 
1927  Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext());
1928  BasicBlockEdge TrueE(Parent, TrueSucc);
1929  Changed |= propagateEquality(BranchCond, TrueVal, TrueE, true);
1930 
1931  Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext());
1932  BasicBlockEdge FalseE(Parent, FalseSucc);
1933  Changed |= propagateEquality(BranchCond, FalseVal, FalseE, true);
1934 
1935  return Changed;
1936  }
1937 
1938  // For switches, propagate the case values into the case destinations.
1939  if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
1940  Value *SwitchCond = SI->getCondition();
1941  BasicBlock *Parent = SI->getParent();
1942  bool Changed = false;
1943 
1944  // Remember how many outgoing edges there are to every successor.
1945  SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
1946  for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i)
1947  ++SwitchEdges[SI->getSuccessor(i)];
1948 
1949  for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
1950  i != e; ++i) {
1951  BasicBlock *Dst = i->getCaseSuccessor();
1952  // If there is only a single edge, propagate the case value into it.
1953  if (SwitchEdges.lookup(Dst) == 1) {
1954  BasicBlockEdge E(Parent, Dst);
1955  Changed |= propagateEquality(SwitchCond, i->getCaseValue(), E, true);
1956  }
1957  }
1958  return Changed;
1959  }
1960 
1961  // Instructions with void type don't return a value, so there's
1962  // no point in trying to find redundancies in them.
1963  if (I->getType()->isVoidTy())
1964  return false;
1965 
1966  uint32_t NextNum = VN.getNextUnusedValueNumber();
1967  unsigned Num = VN.lookupOrAdd(I);
1968 
1969  // Allocations are always uniquely numbered, so we can save time and memory
1970  // by fast failing them.
1971  if (isa<AllocaInst>(I) || isa<TerminatorInst>(I) || isa<PHINode>(I)) {
1972  addToLeaderTable(Num, I, I->getParent());
1973  return false;
1974  }
1975 
1976  // If the number we were assigned was a brand new VN, then we don't
1977  // need to do a lookup to see if the number already exists
1978  // somewhere in the domtree: it can't!
1979  if (Num >= NextNum) {
1980  addToLeaderTable(Num, I, I->getParent());
1981  return false;
1982  }
1983 
1984  // Perform fast-path value-number based elimination of values inherited from
1985  // dominators.
1986  Value *Repl = findLeader(I->getParent(), Num);
1987  if (!Repl) {
1988  // Failure, just remember this instance for future use.
1989  addToLeaderTable(Num, I, I->getParent());
1990  return false;
1991  } else if (Repl == I) {
1992  // If I was the result of a shortcut PRE, it might already be in the table
1993  // and the best replacement for itself. Nothing to do.
1994  return false;
1995  }
1996 
1997  // Remove it!
1998  patchAndReplaceAllUsesWith(I, Repl);
1999  if (MD && Repl->getType()->isPtrOrPtrVectorTy())
2000  MD->invalidateCachedPointerInfo(Repl);
2001  markInstructionForDeletion(I);
2002  return true;
2003 }
2004 
2005 /// runImpl - This is the main transformation entry point for a function.
2006 bool GVN::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
2007  const TargetLibraryInfo &RunTLI, AAResults &RunAA,
2008  MemoryDependenceResults *RunMD, LoopInfo *LI,
2009  OptimizationRemarkEmitter *RunORE) {
2010  AC = &RunAC;
2011  DT = &RunDT;
2012  VN.setDomTree(DT);
2013  TLI = &RunTLI;
2014  VN.setAliasAnalysis(&RunAA);
2015  MD = RunMD;
2016  OrderedInstructions OrderedInstrs(DT);
2017  OI = &OrderedInstrs;
2018  VN.setMemDep(MD);
2019  ORE = RunORE;
2020 
2021  bool Changed = false;
2022  bool ShouldContinue = true;
2023 
2024  // Merge unconditional branches, allowing PRE to catch more
2025  // optimization opportunities.
2026  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
2027  BasicBlock *BB = &*FI++;
2028 
2029  bool removedBlock = MergeBlockIntoPredecessor(BB, DT, LI, MD);
2030  if (removedBlock)
2031  ++NumGVNBlocks;
2032 
2033  Changed |= removedBlock;
2034  }
2035 
2036  unsigned Iteration = 0;
2037  while (ShouldContinue) {
2038  DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
2039  ShouldContinue = iterateOnFunction(F);
2040  Changed |= ShouldContinue;
2041  ++Iteration;
2042  }
2043 
2044  if (EnablePRE) {
2045  // Fabricate val-num for dead-code in order to suppress assertion in
2046  // performPRE().
2047  assignValNumForDeadCode();
2048  assignBlockRPONumber(F);
2049  bool PREChanged = true;
2050  while (PREChanged) {
2051  PREChanged = performPRE(F);
2052  Changed |= PREChanged;
2053  }
2054  }
2055 
2056  // FIXME: Should perform GVN again after PRE does something. PRE can move
2057  // computations into blocks where they become fully redundant. Note that
2058  // we can't do this until PRE's critical edge splitting updates memdep.
2059  // Actually, when this happens, we should just fully integrate PRE into GVN.
2060 
2061  cleanupGlobalSets();
2062  // Do not cleanup DeadBlocks in cleanupGlobalSets() as it's called for each
2063  // iteration.
2064  DeadBlocks.clear();
2065 
2066  return Changed;
2067 }
2068 
2069 bool GVN::processBlock(BasicBlock *BB) {
2070  // FIXME: Kill off InstrsToErase by doing erasing eagerly in a helper function
2071  // (and incrementing BI before processing an instruction).
2072  assert(InstrsToErase.empty() &&
2073  "We expect InstrsToErase to be empty across iterations");
2074  if (DeadBlocks.count(BB))
2075  return false;
2076 
2077  // Clear the map before every BB because it can be used only for a single BB.
2078  ReplaceWithConstMap.clear();
2079  bool ChangedFunction = false;
2080 
2081  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
2082  BI != BE;) {
2083  if (!ReplaceWithConstMap.empty())
2084  ChangedFunction |= replaceOperandsWithConsts(&*BI);
2085  ChangedFunction |= processInstruction(&*BI);
2086 
2087  if (InstrsToErase.empty()) {
2088  ++BI;
2089  continue;
2090  }
2091 
2092  // If we need some instructions deleted, do it now.
2093  NumGVNInstr += InstrsToErase.size();
2094 
2095  // Avoid iterator invalidation.
2096  bool AtStart = BI == BB->begin();
2097  if (!AtStart)
2098  --BI;
2099 
2100  bool InvalidateImplicitCF = false;
2101  const Instruction *MaybeFirstICF = FirstImplicitControlFlowInsts.lookup(BB);
2102  for (auto *I : InstrsToErase) {
2103  assert(I->getParent() == BB && "Removing instruction from wrong block?");
2104  DEBUG(dbgs() << "GVN removed: " << *I << '\n');
2105  if (MD) MD->removeInstruction(I);
2106  DEBUG(verifyRemoved(I));
2107  if (MaybeFirstICF == I) {
2108  // We have erased the first ICF in the block. The map needs to be updated.
2109  InvalidateImplicitCF = true;
2110  // Do not keep dangling pointer on the erased instruction.
2111  MaybeFirstICF = nullptr;
2112  }
2113  I->eraseFromParent();
2114  }
2115 
2116  OI->invalidateBlock(BB);
2117  InstrsToErase.clear();
2118  if (InvalidateImplicitCF)
2119  fillImplicitControlFlowInfo(BB);
2120 
2121  if (AtStart)
2122  BI = BB->begin();
2123  else
2124  ++BI;
2125  }
2126 
2127  return ChangedFunction;
2128 }
2129 
2130 // Instantiate an expression in a predecessor that lacked it.
2131 bool GVN::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
2132  BasicBlock *Curr, unsigned int ValNo) {
2133  // Because we are going top-down through the block, all value numbers
2134  // will be available in the predecessor by the time we need them. Any
2135  // that weren't originally present will have been instantiated earlier
2136  // in this loop.
2137  bool success = true;
2138  for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) {
2139  Value *Op = Instr->getOperand(i);
2140  if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
2141  continue;
2142  // This could be a newly inserted instruction, in which case, we won't
2143  // find a value number, and should give up before we hurt ourselves.
2144  // FIXME: Rewrite the infrastructure to make it easier to value number
2145  // and process newly inserted instructions.
2146  if (!VN.exists(Op)) {
2147  success = false;
2148  break;
2149  }
2150  uint32_t TValNo =
2151  VN.phiTranslate(Pred, Curr, VN.lookup(Op), *this);
2152  if (Value *V = findLeader(Pred, TValNo)) {
2153  Instr->setOperand(i, V);
2154  } else {
2155  success = false;
2156  break;
2157  }
2158  }
2159 
2160  // Fail out if we encounter an operand that is not available in
2161  // the PRE predecessor. This is typically because of loads which
2162  // are not value numbered precisely.
2163  if (!success)
2164  return false;
2165 
2166  Instr->insertBefore(Pred->getTerminator());
2167  Instr->setName(Instr->getName() + ".pre");
2168  Instr->setDebugLoc(Instr->getDebugLoc());
2169 
2170  unsigned Num = VN.lookupOrAdd(Instr);
2171  VN.add(Instr, Num);
2172 
2173  // Update the availability map to include the new instruction.
2174  addToLeaderTable(Num, Instr, Pred);
2175  return true;
2176 }
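// Sketch of the operand rewrite above (editorial): when hoisting
//   %v = add i32 %p, 1
// into predecessor %Pred, where %p is a phi with incoming value %a from
// %Pred, the loop phi-translates each operand's value number and takes that
// number's leader in %Pred, rewriting the clone to "add i32 %a, 1".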
2177 
2178 bool GVN::performScalarPRE(Instruction *CurInst) {
2179  if (isa<AllocaInst>(CurInst) || isa<TerminatorInst>(CurInst) ||
2180  isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() ||
2181  CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
2182  isa<DbgInfoIntrinsic>(CurInst))
2183  return false;
2184 
2185  // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from
2186  // sinking the compare again, and it would force the code generator to
2187  // move the i1 from processor flags or predicate registers into a general
2188  // purpose register.
2189  if (isa<CmpInst>(CurInst))
2190  return false;
2191 
2192  // We don't currently value number ANY inline asm calls.
2193  if (CallInst *CallI = dyn_cast<CallInst>(CurInst))
2194  if (CallI->isInlineAsm())
2195  return false;
2196 
2197  uint32_t ValNo = VN.lookup(CurInst);
2198 
2199  // Look for the predecessors for PRE opportunities. We're
2200  // only trying to solve the basic diamond case, where
2201  // a value is computed in the successor and one predecessor,
2202  // but not the other. We also explicitly disallow cases
2203  // where the successor is its own predecessor, because they're
2204  // more complicated to get right.
2205  unsigned NumWith = 0;
2206  unsigned NumWithout = 0;
2207  BasicBlock *PREPred = nullptr;
2208  BasicBlock *CurrentBlock = CurInst->getParent();
2209 
2210  SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
2211  for (BasicBlock *P : predecessors(CurrentBlock)) {
2212  // We're not interested in PRE where a predecessor block is
2213  // not reachable.
2214  if (!DT->isReachableFromEntry(P)) {
2215  NumWithout = 2;
2216  break;
2217  }
2218  // It is not safe to do PRE when P->CurrentBlock is a loop backedge, and
2219  // when CurInst has an operand defined in CurrentBlock (so it may be
2220  // defined by a phi in the loop header).
2221  if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock] &&
2222  llvm::any_of(CurInst->operands(), [&](const Use &U) {
2223  if (auto *Inst = dyn_cast<Instruction>(U.get()))
2224  return Inst->getParent() == CurrentBlock;
2225  return false;
2226  })) {
2227  NumWithout = 2;
2228  break;
2229  }
2230 
2231  uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this);
2232  Value *predV = findLeader(P, TValNo);
2233  if (!predV) {
2234  predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
2235  PREPred = P;
2236  ++NumWithout;
2237  } else if (predV == CurInst) {
2238  /* CurInst dominates this predecessor. */
2239  NumWithout = 2;
2240  break;
2241  } else {
2242  predMap.push_back(std::make_pair(predV, P));
2243  ++NumWith;
2244  }
2245  }
2246 
2247  // Don't do PRE when it might increase code size, i.e. when
2248  // we would need to insert instructions in more than one pred.
2249  if (NumWithout > 1 || NumWith == 0)
2250  return false;
2251 
2252  // We may have a case where all predecessors have the instruction,
2253  // and we just need to insert a phi node. Otherwise, perform
2254  // insertion.
2255  Instruction *PREInstr = nullptr;
2256 
2257  if (NumWithout != 0) {
2258  // Don't do PRE across indirect branch.
2259  if (isa<IndirectBrInst>(PREPred->getTerminator()))
2260  return false;
2261 
2262  // We can't do PRE safely on a critical edge, so instead we schedule
2263  // the edge to be split and perform the PRE the next time we iterate
2264  // on the function.
2265  unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
2266  if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
2267  toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
2268  return false;
2269  }
2270  // We need to insert somewhere, so let's give it a shot
2271  PREInstr = CurInst->clone();
2272  if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
2273  // If we failed insertion, make sure we remove the instruction.
2274  DEBUG(verifyRemoved(PREInstr));
2275  PREInstr->deleteValue();
2276  return false;
2277  }
2278  }
2279 
2280  // Either we should have filled in the PRE instruction, or we should
2281  // not have needed insertions.
2282  assert(PREInstr != nullptr || NumWithout == 0);
2283 
2284  ++NumGVNPRE;
2285 
2286  // Create a PHI to make the value available in this block.
2287  PHINode *Phi =
2288  PHINode::Create(CurInst->getType(), predMap.size(),
2289  CurInst->getName() + ".pre-phi", &CurrentBlock->front());
2290  for (unsigned i = 0, e = predMap.size(); i != e; ++i) {
2291  if (Value *V = predMap[i].first) {
2292  // If we use an existing value in this phi, we have to patch the original
2293  // value because the phi will be used to replace a later value.
2294  patchReplacementInstruction(CurInst, V);
2295  Phi->addIncoming(V, predMap[i].second);
2296  } else
2297  Phi->addIncoming(PREInstr, PREPred);
2298  }
2299 
2300  VN.add(Phi, ValNo);
2301  // After creating a new PHI for ValNo, the phi translate result for ValNo will
2302  // be changed, so erase the related stale entries in the phi translate cache.
2303  VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock);
2304  addToLeaderTable(ValNo, Phi, CurrentBlock);
2305  Phi->setDebugLoc(CurInst->getDebugLoc());
2306  CurInst->replaceAllUsesWith(Phi);
2307  if (MD && Phi->getType()->isPtrOrPtrVectorTy())
2308  MD->invalidateCachedPointerInfo(Phi);
2309  VN.erase(CurInst);
2310  removeFromLeaderTable(ValNo, CurInst, CurrentBlock);
2311 
2312  DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
2313  if (MD)
2314  MD->removeInstruction(CurInst);
2315  DEBUG(verifyRemoved(CurInst));
2316  bool InvalidateImplicitCF =
2317  FirstImplicitControlFlowInsts.lookup(CurInst->getParent()) == CurInst;
2318  // FIXME: Intended to be markInstructionForDeletion(CurInst), but it causes
2319  // some assertion failures.
2320  OI->invalidateBlock(CurrentBlock);
2321  CurInst->eraseFromParent();
2322  if (InvalidateImplicitCF)
2323  fillImplicitControlFlowInfo(CurrentBlock);
2324  ++NumGVNInstr;
2325 
2326  return true;
2327 }
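// Illustrative diamond for the above (not from this file):
//   bb1:  %v1 = add i32 %x, %y
//         br label %join
//   bb2:  br label %join
//   join: %v2 = add i32 %x, %y
// The add is available in bb1 but not bb2 (NumWith == 1, NumWithout == 1),
// so a clone named %v2.pre is inserted at the end of bb2 and %v2 is replaced
// by "%v2.pre-phi = phi i32 [ %v1, %bb1 ], [ %v2.pre, %bb2 ]".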
2328 
2329 /// Perform a purely local form of PRE that looks for diamond
2330 /// control flow patterns and attempts to perform simple PRE at the join point.
2331 bool GVN::performPRE(Function &F) {
2332  bool Changed = false;
2333  for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) {
2334  // Nothing to PRE in the entry block.
2335  if (CurrentBlock == &F.getEntryBlock())
2336  continue;
2337 
2338  // Don't perform PRE on an EH pad.
2339  if (CurrentBlock->isEHPad())
2340  continue;
2341 
2342  for (BasicBlock::iterator BI = CurrentBlock->begin(),
2343  BE = CurrentBlock->end();
2344  BI != BE;) {
2345  Instruction *CurInst = &*BI++;
2346  Changed |= performScalarPRE(CurInst);
2347  }
2348  }
2349 
2350  if (splitCriticalEdges())
2351  Changed = true;
2352 
2353  return Changed;
2354 }
2355 
2356 /// Split the critical edge connecting the given two blocks, and return
2357 /// the block inserted on the critical edge.
2358 BasicBlock *GVN::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
2359  BasicBlock *BB =
2360  SplitCriticalEdge(Pred, Succ, CriticalEdgeSplittingOptions(DT));
2361  if (MD)
2362  MD->invalidateCachedPredecessors();
2363  return BB;
2364 }
2365 
2366 /// Split critical edges found during the previous
2367 /// iteration that may enable further optimization.
2368 bool GVN::splitCriticalEdges() {
2369  if (toSplit.empty())
2370  return false;
2371  do {
2372  std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val();
2373  SplitCriticalEdge(Edge.first, Edge.second,
2374  CriticalEdgeSplittingOptions(DT));
2375  } while (!toSplit.empty());
2376  if (MD) MD->invalidateCachedPredecessors();
2377  return true;
2378 }
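// Editorial note on why the splitting matters: if bb1 branches to both %join
// and %other, the edge bb1 -> join is critical, and a PRE copy placed at the
// end of bb1 would also execute on the bb1 -> other path. Splitting inserts
// a fresh block on the edge, giving the next iteration a safe insertion
// point.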
2379 
2380 /// Executes one iteration of GVN
2381 bool GVN::iterateOnFunction(Function &F) {
2382  cleanupGlobalSets();
2383 
2384  // Top-down walk of the function, in reverse post-order.
2385  bool Changed = false;
2386  // Needed for value numbering with phi construction to work.
2387  // RPOT walks the graph in its constructor and will not be invalidated during
2388  // processBlock.
2389  ReversePostOrderTraversal<Function *> RPOT(&F);
2390 
2391  for (BasicBlock *BB : RPOT)
2392  fillImplicitControlFlowInfo(BB);
2393  for (BasicBlock *BB : RPOT)
2394  Changed |= processBlock(BB);
2395 
2396  return Changed;
2397 }
2398 
2399 void GVN::cleanupGlobalSets() {
2400  VN.clear();
2401  LeaderTable.clear();
2402  BlockRPONumber.clear();
2403  TableAllocator.Reset();
2404  FirstImplicitControlFlowInsts.clear();
2405 }
2406 
2407 void
2408 GVN::fillImplicitControlFlowInfo(BasicBlock *BB) {
2409  // Make sure that all marked instructions are actually deleted by this point,
2410  // so that we don't need to care about omitting them.
2411  assert(InstrsToErase.empty() && "Filling before removed all marked insns?");
2412  auto MayNotTransferExecutionToSuccessor = [&](const Instruction *I) {
2413  // If a block's instruction doesn't always pass the control to its successor
2414  // instruction, mark the block as having implicit control flow. We use this
2415  // to avoid wrong assumptions of the sort "if A is executed and B post-dominates
2416  // A, then B is also executed". This is not true if there is an implicit
2417  // control flow instruction (e.g. a guard) between them.
2418  //
2419  // TODO: Currently, isGuaranteedToTransferExecutionToSuccessor returns false
2420  // for volatile stores and loads because they can trap. The discussion on
2421  // whether or not it is correct is still ongoing. We might want to get rid
2422  // of this logic in the future. Anyway, trapping instructions shouldn't
2423  // introduce implicit control flow, so we explicitly allow them here. This
2424  // must be removed once isGuaranteedToTransferExecutionToSuccessor is fixed.
2425  if (isGuaranteedToTransferExecutionToSuccessor(I))
2426  return false;
2427  if (isa<LoadInst>(I)) {
2428  assert(cast<LoadInst>(I)->isVolatile() &&
2429  "Non-volatile load should transfer execution to successor!");
2430  return false;
2431  }
2432  if (isa<StoreInst>(I)) {
2433  assert(cast<StoreInst>(I)->isVolatile() &&
2434  "Non-volatile store should transfer execution to successor!");
2435  return false;
2436  }
2437  return true;
2438  };
2439  FirstImplicitControlFlowInsts.erase(BB);
2440 
2441  for (auto &I : *BB)
2442  if (MayNotTransferExecutionToSuccessor(&I)) {
2443  FirstImplicitControlFlowInsts[BB] = &I;
2444  break;
2445  }
2446 }
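// For instance (illustrative IR): in
//   call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]
//   %l = load i32, i32* %p
// the guard may deoptimize instead of falling through, so it is recorded as
// the block's first implicit-control-flow instruction and %l must not be
// assumed to execute whenever the block is entered.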
2447 
2448 /// Verify that the specified instruction does not occur in our
2449 /// internal data structures.
2450 void GVN::verifyRemoved(const Instruction *Inst) const {
2451  VN.verifyRemoved(Inst);
2452 
2453  // Walk through the value number scope to make sure the instruction isn't
2454  // ferreted away in it.
2455  for (DenseMap<uint32_t, LeaderTableEntry>::const_iterator
2456  I = LeaderTable.begin(), E = LeaderTable.end(); I != E; ++I) {
2457  const LeaderTableEntry *Node = &I->second;
2458  assert(Node->Val != Inst && "Inst still in value numbering scope!");
2459 
2460  while (Node->Next) {
2461  Node = Node->Next;
2462  assert(Node->Val != Inst && "Inst still in value numbering scope!");
2463  }
2464  }
2465 }
2466 
2467 /// BB is declared dead, which implies other blocks become dead as well. This
2468 /// function adds all these blocks to "DeadBlocks". For the dead blocks'
2469 /// live successors, update their phi nodes by replacing the operands
2470 /// corresponding to dead blocks with UndefVal.
2471 void GVN::addDeadBlock(BasicBlock *BB) {
2472  SmallVector<BasicBlock *, 4> NewDead;
2473  SmallSetVector<BasicBlock *, 4> DF;
2474 
2475  NewDead.push_back(BB);
2476  while (!NewDead.empty()) {
2477  BasicBlock *D = NewDead.pop_back_val();
2478  if (DeadBlocks.count(D))
2479  continue;
2480 
2481  // All blocks dominated by D are dead.
2482  SmallVector<BasicBlock *, 8> Dom;
2483  DT->getDescendants(D, Dom);
2484  DeadBlocks.insert(Dom.begin(), Dom.end());
2485 
2486  // Figure out the dominance-frontier(D).
2487  for (BasicBlock *B : Dom) {
2488  for (BasicBlock *S : successors(B)) {
2489  if (DeadBlocks.count(S))
2490  continue;
2491 
2492  bool AllPredDead = true;
2493  for (BasicBlock *P : predecessors(S))
2494  if (!DeadBlocks.count(P)) {
2495  AllPredDead = false;
2496  break;
2497  }
2498 
2499  if (!AllPredDead) {
2500  // S could be proved dead later on. That is why we don't update phi
2501  // operands at this moment.
2502  DF.insert(S);
2503  } else {
2504  // While S is not dominated by D, it is dead by now. This could
2505  // happen if S already had a dead predecessor before D was declared
2506  // dead.
2507  NewDead.push_back(S);
2508  }
2509  }
2510  }
2511  }
2512 
2513  // For the dead blocks' live successors, update their phi nodes by replacing
2514  // the operands corresponding to dead blocks with UndefVal.
2515  for (SmallSetVector<BasicBlock *, 4>::iterator I = DF.begin(), E = DF.end();
2516  I != E; I++) {
2517  BasicBlock *B = *I;
2518  if (DeadBlocks.count(B))
2519  continue;
2520 
2521  SmallVector<BasicBlock *, 4> Preds(pred_begin(B), pred_end(B));
2522  for (BasicBlock *P : Preds) {
2523  if (!DeadBlocks.count(P))
2524  continue;
2525 
2526  if (isCriticalEdge(P->getTerminator(), GetSuccessorNumber(P, B))) {
2527  if (BasicBlock *S = splitCriticalEdges(P, B))
2528  DeadBlocks.insert(P = S);
2529  }
2530 
2531  for (BasicBlock::iterator II = B->begin(); isa<PHINode>(II); ++II) {
2532  PHINode &Phi = cast<PHINode>(*II);
2533  Phi.setIncomingValue(Phi.getBasicBlockIndex(P),
2534  UndefValue::get(Phi.getType()));
2535  }
2536  }
2537  }
2538 }
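// Example (illustrative IR): if "br i1 true, label %live, label %dead" makes
// %dead the dead root, every block dominated by %dead joins DeadBlocks, and
// a phi in a live successor such as
//   %p = phi i32 [ %a, %dead ], [ %b, %live ]
// becomes
//   %p = phi i32 [ undef, %dead ], [ %b, %live ]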
2539 
2540 // If the given branch is recognized as a foldable branch (i.e. a conditional
2541 // branch with a constant condition), it will perform the following analyses
2542 // and transformations.
2543 // 1) If the dead outgoing edge is a critical edge, split it. Let
2544 // R be the target of the dead outgoing edge.
2545 // 2) Identify the set of dead blocks implied by the branch's dead outgoing
2546 // edge. The result of this step will be {X| X is dominated by R}.
2547 // 3) Identify those blocks which have at least one dead predecessor. The
2548 // result of this step will be dominance-frontier(R).
2549 // 4) Update the PHIs in DF(R) by replacing the operands corresponding to
2550 // dead blocks with "UndefVal" in the hope that these PHIs will be
2551 // optimized away.
2552 // Return true iff *NEW* dead code is found.
2553 bool GVN::processFoldableCondBr(BranchInst *BI) {
2554  if (!BI || BI->isUnconditional())
2555  return false;
2556 
2557  // If a branch has two identical successors, we cannot declare either dead.
2558  if (BI->getSuccessor(0) == BI->getSuccessor(1))
2559  return false;
2560 
2561  ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
2562  if (!Cond)
2563  return false;
2564 
2565  BasicBlock *DeadRoot =
2566  Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0);
2567  if (DeadBlocks.count(DeadRoot))
2568  return false;
2569 
2570  if (!DeadRoot->getSinglePredecessor())
2571  DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);
2572 
2573  addDeadBlock(DeadRoot);
2574  return true;
2575 }
2576 
2577 // performPRE() will trigger an assert if it comes across an instruction
2578 // without an associated val-num. As a function normally has far more live
2579 // instructions than dead ones, it makes more sense just to "fabricate" a
2580 // val-number for the dead code than to check whether each instruction is dead.
2581 void GVN::assignValNumForDeadCode() {
2582  for (BasicBlock *BB : DeadBlocks) {
2583  for (Instruction &Inst : *BB) {
2584  unsigned ValNum = VN.lookupOrAdd(&Inst);
2585  addToLeaderTable(ValNum, &Inst, BB);
2586  }
2587  }
2588 }
2589 
2590 class llvm::gvn::GVNLegacyPass : public FunctionPass {
2591 public:
2592  static char ID; // Pass identification, replacement for typeid
2593 
2594  explicit GVNLegacyPass(bool NoLoads = false)
2595  : FunctionPass(ID), NoLoads(NoLoads) {
2596  initializeGVNLegacyPassPass(*PassRegistry::getPassRegistry());
2597  }
2598 
2599  bool runOnFunction(Function &F) override {
2600  if (skipFunction(F))
2601  return false;
2602 
2603  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
2604 
2605  return Impl.runImpl(
2606  F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
2607  getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
2608  getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
2609  getAnalysis<AAResultsWrapperPass>().getAAResults(),
2610  NoLoads ? nullptr
2611  : &getAnalysis<MemoryDependenceWrapperPass>().getMemDep(),
2612  LIWP ? &LIWP->getLoopInfo() : nullptr,
2613  &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE());
2614  }
2615 
2616  void getAnalysisUsage(AnalysisUsage &AU) const override {
2617  AU.addRequired<AssumptionCacheTracker>();
2618  AU.addRequired<DominatorTreeWrapperPass>();
2619  AU.addRequired<TargetLibraryInfoWrapperPass>();
2620  if (!NoLoads)
2621  AU.addRequired<MemoryDependenceWrapperPass>();
2622  AU.addRequired<AAResultsWrapperPass>();
2623 
2624  AU.addPreserved<DominatorTreeWrapperPass>();
2625  AU.addPreserved<GlobalsAAWrapperPass>();
2626  AU.addPreserved<TargetLibraryInfoWrapperPass>();
2627  AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2628  }
2629 
2630 private:
2631  bool NoLoads;
2632  GVN Impl;
2633 };
2634 
2635 char GVNLegacyPass::ID = 0;
2636 
2637 INITIALIZE_PASS_BEGIN(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
2638 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
2639 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
2640 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2641 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2642 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
2643 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
2644 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
2645 INITIALIZE_PASS_END(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
2646 
2647 // The public interface to this file...
2648 FunctionPass *llvm::createGVNPass(bool NoLoads) {
2649  return new GVNLegacyPass(NoLoads);
2650 }
Legacy wrapper pass to provide the GlobalsAAResult object.
static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV)
Definition: GVN.cpp:237
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
Definition: PatternMatch.h:574
uint64_t CallInst * C
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks &#39;this&#39; from the containing basic block and deletes it.
Definition: Instruction.cpp:69
FunctionPass * createGVNPass(bool NoLoads=false)
Create a legacy GVN pass.
Definition: GVN.cpp:2648
static cl::opt< bool > EnableLoadPRE("enable-load-pre", cl::init(true))
void eraseTranslateCacheEntry(uint32_t Num, const BasicBlock &CurrBlock)
Erase stale entry from phiTranslate cache so phiTranslate can be computed again.
Definition: GVN.cpp:1625
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:109
bool isUndefValue() const
Definition: GVN.cpp:205
static ConstantInt * getFalse(LLVMContext &Context)
Definition: Constants.cpp:523
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:72
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:843
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
raw_ostream & errs()
This returns a reference to a raw_ostream for standard error.
Helper class for SSA formation on a set of values defined in multiple blocks.
Definition: SSAUpdater.h:39
Diagnostic information for missed-optimization remarks.
Provides a lazy, caching interface for making common memory aliasing information queries, backed by LLVM&#39;s alias analysis passes.
int analyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, LoadInst *DepLI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the load at De...
Definition: VNCoercion.cpp:219
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
This instruction extracts a struct member or array element value from an aggregate value...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset=0)
Definition: GVN.cpp:178
size_type size() const
Definition: MapVector.h:57
unsigned Offset
Offset - The byte offset in Val that is interesting for the load query.
Definition: GVN.cpp:168
DiagnosticInfoOptimizationBase::Argument NV
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition: InstrTypes.h:1067
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:687
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
PointerTy getPointer() const
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds...
Definition: Compiler.h:449
bool isAtomic() const
Return true if this instruction has an AtomicOrdering of unordered or higher.
This is the interface for a simple mod/ref and alias analysis over globals.
void Initialize(Type *Ty, StringRef Name)
Reset this object to get ready for a new set of SSA updates with type &#39;Ty&#39;.
Definition: SSAUpdater.cpp:54
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:136
uint32_t lookupOrAddCmp(unsigned Opcode, CmpInst::Predicate Pred, Value *LHS, Value *RHS)
Returns the value number of the given comparison, assigning it a new number if it did not have one be...
Definition: GVN.cpp:586
iterator end()
Definition: Function.h:590
void AddAvailableValue(BasicBlock *BB, Value *V)
Indicate that a rewritten value is available in the specified block with the specified value...
Definition: SSAUpdater.cpp:67
bool operator==(const Expression &other) const
Definition: GVN.cpp:113
This class represents a function call, abstracting a target machine&#39;s calling convention.
bool isNonLocal() const
Tests if this MemDepResult represents a query that is transparent to the start of the block...
This file contains the declarations for metadata subclasses.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of .assume calls within a function.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:233
uint32_t phiTranslate(const BasicBlock *BB, const BasicBlock *PhiBlock, uint32_t Num, GVN &Gvn)
Wrap phiTranslateImpl to provide caching functionality.
Definition: GVN.cpp:1561
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:869
void deleteValue()
Delete a pointer to a generic Value.
Definition: Value.cpp:95
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:262
unsigned second
This class implements a map that also provides access to all stored values in a deterministic order...
Definition: MapVector.h:38
BasicBlock * getSuccessor(unsigned i) const
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
STATISTIC(NumFunctions, "Total number of functions")
A debug info location.
Definition: DebugLoc.h:34
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:238
F(f)
bool isCoercedLoadValue() const
Definition: GVN.cpp:203
An instruction for reading from memory.
Definition: Instructions.h:164
const BasicBlock * getEnd() const
Definition: Dominators.h:90
Hexagon Common GEP
Value * getCondition() const
This defines the Use class.
idx_iterator idx_end() const
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge)
Replace each use of &#39;From&#39; with &#39;To&#39; if that use is dominated by the given edge.
Definition: Local.cpp:1877
Use * op_iterator
Definition: User.h:209
iterator end()
Get an iterator to the end of the SetVector.
Definition: SetVector.h:93
Value * getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
If analyzeLoadFromClobberingMemInst returned an offset, this function can be used to actually perform...
Definition: VNCoercion.cpp:475
LLVMContext & getContext() const
Get the context in which this basic block lives.
Definition: BasicBlock.cpp:33
op_iterator op_begin()
Definition: User.h:214
gvn Early GVN Hoisting of Expressions
Definition: GVNHoist.cpp:1204
static Constant * getNullValue(Type *Ty)
Constructor to create a &#39;0&#39; constant of arbitrary type.
Definition: Constants.cpp:207
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:252
uint32_t lookup(Value *V, bool Verify=true) const
Returns the value number of the specified value.
Definition: GVN.cpp:573
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:191
void dump() const
Support for debugging, callable in GDB: V->dump()
Definition: AsmWriter.cpp:3641
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
AnalysisUsage & addRequired()
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:51
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:217
static cl::opt< uint32_t > MaxRecurseDepth("max-recurse-depth", cl::Hidden, cl::init(1000), cl::ZeroOrMore, cl::desc("Max recurse depth (default = 1000)"))
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
Definition: GVN.cpp:2616
static cl::opt< bool > EnablePRE("enable-pre", cl::init(true), cl::Hidden)
bool isDef() const
Tests if this MemDepResult represents a query that is an instruction definition dependency.
const DataLayout & getDataLayout() const
Get the data layout for the module&#39;s target platform.
Definition: Module.cpp:361
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass...
Definition: GVN.cpp:2599
Option class for critical edge splitting.
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
void clear()
Remove all entries from the ValueTable.
Definition: GVN.cpp:594
unsigned getNumArgOperands() const
Return the number of call arguments.
bool isClobber() const
Tests if this MemDepResult represents a query that is an instruction clobber dependency.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:56
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
Definition: Type.cpp:639
int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr, MemIntrinsic *DepMI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the memory int...
Definition: VNCoercion.cpp:251
static bool runImpl(CallGraphSCC &SCC, AARGetterT AARGetter)
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
MemoryDependenceResults & getMemDep() const
Definition: GVN.h:84
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:197
This file contains the simple types necessary to represent the attributes associated with functions a...
An analysis that produces MemoryDependenceResults for a function.
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:286
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:933
static const uint16_t * lookup(unsigned opcode, unsigned domain, ArrayRef< uint16_t[3]> Table)
bool isSimpleValue() const
Definition: GVN.cpp:202
Interval::succ_iterator succ_begin(Interval *I)
succ_begin/succ_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:103
Instruction * clone() const
Create a copy of &#39;this&#39; instruction that is identical in all ways except the following: ...
static void patchReplacementInstruction(Instruction *I, Value *Repl)
Definition: GVN.cpp:1445
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
ppc ctr loops PowerPC CTR Loops Verify
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:142
The core GVN pass object.
Definition: GVN.h:68
IntType getInt() const
bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy, const DataLayout &DL)
Return true if CoerceAvailableValueToLoadType would succeed if it was called.
Definition: VNCoercion.cpp:15
Expression(uint32_t o=~2U)
Definition: GVN.cpp:111
void andIRFlags(const Value *V)
Logical &#39;and&#39; of any supported wrapping, exact, and fast-math flags of V and this instruction...
#define DEBUG_TYPE
Definition: GVN.cpp:86
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition: SetVector.h:83
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:194
DiagnosticInfoOptimizationBase::setExtraArgs setExtraArgs
static AvailableValue getLoad(LoadInst *LI, unsigned Offset=0)
Definition: GVN.cpp:186
hash_code hash_value(const APFloat &Arg)
See friend declarations above.
Definition: APFloat.cpp:4428
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:125
LoadInst * getCoercedLoadValue() const
Definition: GVN.cpp:212
static GVN::Expression getEmptyKey()
Definition: GVN.cpp:135
An instruction for storing to memory.
Definition: Instructions.h:306
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition: Constants.h:209
void add(Value *V, uint32_t num)
add - Insert a value into the table with a specified value number.
Definition: GVN.cpp:382
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:430
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:292
iterator begin()
Definition: Function.h:588
static unsigned getHashValue(const GVN::Expression &e)
Definition: GVN.cpp:138
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Definition: Dominators.h:140
Value * getOperand(unsigned i) const
Definition: User.h:154
Interval::succ_iterator succ_end(Interval *I)
Definition: Interval.h:106
int analyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr, StoreInst *DepSI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the store at D...
Definition: VNCoercion.cpp:202
void initializeGVNLegacyPassPass(PassRegistry &)
bool isVoidTy() const
Return true if this is &#39;void&#39;.
Definition: Type.h:141
const BasicBlock & getEntryBlock() const
Definition: Function.h:572
an instruction for type-safe pointer arithmetic to access elements of arrays and structs ...
Definition: Instructions.h:837
void getAAMetadata(AAMDNodes &N, bool Merge=false) const
Fills the AAMDNodes structure with AA metadata from this instruction.
BasicBlock * SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions())
If this edge is a critical edge, insert a new node to split the critical edge.
#define P(N)
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:406
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
Value * GetValueInMiddleOfBlock(BasicBlock *BB)
Construct SSA form, materializing a value that is live in the middle of the specified block...
Definition: SSAUpdater.cpp:95
SmallVector< uint32_t, 4 > varargs
Definition: GVN.cpp:109
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:149
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:153
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:281
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Definition: BasicBlock.cpp:217
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction...
Definition: Instruction.cpp:75
LLVM Basic Block Representation.
Definition: BasicBlock.h:59
PointerIntPair - This class implements a pair of a pointer and small integer.
PHITransAddr - An address value which tracks and handles phi translation.
Definition: PHITransAddr.h:36
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:46
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
Definition: PatternMatch.h:580
Conditional or Unconditional Branch instruction.
This file provides the interface for LLVM&#39;s Global Value Numbering pass which eliminates fully redund...
static GVN::Expression getTombstoneKey()
Definition: GVN.cpp:136
static Value * ConstructSSAForLoadSet(LoadInst *LI, SmallVectorImpl< AvailableValueInBlock > &ValuesPerBlock, GVN &gvn)
Given a set of loads specified by ValuesPerBlock, construct SSA form, allowing us to eliminate LI...
Definition: GVN.cpp:744
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
Definition: Constant.h:42
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
Definition: SmallVector.h:116
static bool isEqual(const GVN::Expression &LHS, const GVN::Expression &RHS)
Definition: GVN.cpp:144
This file contains the declarations for the subclasses of Constant, which represent the different fla...
const Instruction & front() const
Definition: BasicBlock.h:264
A manager for alias analyses.
bool mayHaveSideEffects() const
Return true if the instruction may have side effects.
Definition: Instruction.h:535
Diagnostic information for applied optimization remarks.
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:113
unsigned getNumIndices() const
bool isUnordered() const
Definition: Instructions.h:264
Represent the analysis usage information of a pass.
op_iterator op_end()
Definition: User.h:216
bool any_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:820
Analysis pass providing a never-invalidated alias analysis result.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:853
PointerIntPair< Value *, 2, ValType > Val
V - The value that is live out of the block.
Definition: GVN.cpp:165
MemIntrinsic * getMemIntrinValue() const
Definition: GVN.cpp:217
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:285
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:116
op_range operands()
Definition: User.h:222
Value * getPointerOperand()
Definition: Instructions.h:270
Value * getLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
If analyzeLoadFromClobberingLoad returned an offset, this function can be used to actually perform th...
Definition: VNCoercion.cpp:363
GVNLegacyPass(bool NoLoads=false)
Definition: GVN.cpp:2594
static void reportLoadElim(LoadInst *LI, Value *AvailableValue, OptimizationRemarkEmitter *ORE)
Definition: GVN.cpp:1286
static UndefValue * get(Type *T)
Static factory methods - Return an &#39;undef&#39; object of the specified type.
Definition: Constants.cpp:1320
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:159
A wrapper analysis pass for the legacy pass manager that exposes a MemoryDepnedenceResults instance...
void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
Definition: AsmWriter.cpp:3573
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
A memory dependence query can return one of three different answers.
DominatorTree & getDominatorTree() const
Definition: GVN.h:82
unsigned first
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:51
static void reportMayClobberedLoad(LoadInst *LI, MemDepResult DepInfo, DominatorTree *DT, OptimizationRemarkEmitter *ORE)
Try to locate the three instructions involved in a missed load-elimination case that is due to an inte...
Definition: GVN.cpp:830
A function analysis which provides an AssumptionCache.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:224
Value * MaterializeAdjustedValue(LoadInst *LI, GVN &gvn) const
Emit code at the end of this block to adjust the value defined here to the specified type...
Definition: GVN.cpp:255
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:298
This is the common base class for memset/memcpy/memmove.
Iterator for intrusive lists based on ilist_node.
unsigned getNumOperands() const
Definition: User.h:176
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:418
This is the shared class of boolean and integer constants.
Definition: Constants.h:84
void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file. ...
iterator end()
Definition: BasicBlock.h:254
bool dominates(const Instruction *Def, const Use &U) const
Return true if Def dominates a use in User.
Definition: Dominators.cpp:239
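A minimal sketch of the dominance check guarding a rewrite; Def, U, and Repl are hypothetical names:

// Only rewrite the use if the definition dominates it; otherwise the
// replacement would not be available at the use site.
if (DT->dominates(Def, U))
  U.set(Repl);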
Module.h This file contains the declarations for the Module class.
Provides information about what library functions are available for the current target.
const MemDepResult & getResult() const
size_type count(const KeyT &Key) const
Definition: MapVector.h:139
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:48
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:642
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:385
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:560
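A minimal sketch, with Ty a hypothetical integer or integer-vector type:

// Scalar Ty yields a ConstantInt; vector Ty yields a splat of that value.
llvm::Constant *One = llvm::ConstantInt::get(Ty, 1);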
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
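A minimal sketch of the constructor in use; LoadTy, BB, and the AvailableVals map are hypothetical:

// Reserve one incoming slot per predecessor, insert at the block front,
// then wire up the incoming values.
llvm::PHINode *Phi = llvm::PHINode::Create(
    LoadTy, std::distance(llvm::pred_begin(BB), llvm::pred_end(BB)),
    "sketch.phi", &BB->front());
for (llvm::BasicBlock *Pred : llvm::predecessors(BB))
  Phi->addIncoming(AvailableVals.lookup(Pred), Pred);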
pred_range predecessors(BasicBlock *BB)
Definition: CFG.h:110
Value * MaterializeAdjustedValue(LoadInst *LI, Instruction *InsertPt, GVN &gvn) const
Emit code at the specified insertion point to adjust the value defined here to the specified type...
Definition: GVN.cpp:775
static ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:516
bool isCommutative() const
Return true if the instruction is commutative:
Definition: Instruction.h:451
void setOperand(unsigned i, Value *Val)
Definition: User.h:159
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:923
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr if the function does not...
Definition: Instruction.cpp:57
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition: Hashing.h:602
Represents an AvailableValue which can be rematerialized at the end of the associated BasicBlock...
Definition: GVN.cpp:230
iterator_range< user_iterator > users()
Definition: Value.h:401
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition: Hashing.h:480
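A minimal sketch of combining fixed fields with a variable-length sequence; the values here are illustrative:

#include "llvm/ADT/Hashing.h"
#include <cstdint>
#include <vector>
// Fold two scalars and a whole operand list into a single hash_code.
std::vector<uint32_t> Ops = {5, 7, 11};
llvm::hash_code H = llvm::hash_combine(
    0x2aU, 0x1fU, llvm::hash_combine_range(Ops.begin(), Ops.end()));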
std::vector< NonLocalDepEntry > NonLocalDepInfo
An opaque object representing a hash code.
Definition: Hashing.h:72
bool isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast=false)
Tests if a value is a call or invoke to a library function that allocates uninitialized memory (such ...
iterator insert(iterator I, T &&Elt)
Definition: SmallVector.h:482
void verifyRemoved(const Value *) const
verifyRemoved - Verify that the value is removed from all internal data structures.
Definition: GVN.cpp:616
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:398
void erase(Value *v)
Remove a value from the value numbering.
Definition: GVN.cpp:606
static bool isLifetimeStart(const Instruction *Inst)
Definition: GVN.cpp:822
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
Definition: Lint.cpp:538
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
Definition: SmallVector.h:120
unsigned GetSuccessorNumber(const BasicBlock *BB, const BasicBlock *Succ)
Search for the specified successor of basic block BB and return its position in the terminator instru...
Definition: CFG.cpp:72
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:284
bool MergeBlockIntoPredecessor(BasicBlock *BB, DominatorTree *DT=nullptr, LoopInfo *LI=nullptr, MemoryDependenceResults *MemDep=nullptr)
Attempts to merge a block into its predecessor, if possible.
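A minimal sketch of the call, assuming DT and MD are valid analysis pointers held by the caller:

// Fold BB into its predecessor when legal; passing the analyses keeps the
// dominator tree and memory-dependence results consistent.
bool Merged = llvm::MergeBlockIntoPredecessor(BB, DT, /*LoopInfo=*/nullptr, MD);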
unsigned getAlignment() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:226
Instruction * getInst() const
If this is a normal dependency, returns the instruction that is depended on.
void clear()
Definition: ilist.h:322
Value * getStoreValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
If analyzeLoadFromClobberingStore returned an offset, this function can be used to actually perform t...
Definition: VNCoercion.cpp:343
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:61
Value * getArgOperand(unsigned i) const
getArgOperand/setArgOperand - Return/set the i-th call argument.
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:220
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:108
bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast=false)
Tests if a value is a call or invoke to a library function that allocates zero-filled memory (such as...
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:245
bool mayReadFromMemory() const
Return true if this instruction may read memory.
static AvailableValue get(Value *V, unsigned Offset=0)
Definition: GVN.cpp:170
uint32_t opcode
Definition: GVN.cpp:106
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:706
bool exists(Value *V) const
Returns true if a value number exists for the specified value.
Definition: GVN.cpp:495
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:323
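A minimal sketch; V and Ptr are hypothetical:

// dyn_cast returns nullptr on a type mismatch, so it serves as both a
// test and a checked downcast in one step.
if (auto *LI = llvm::dyn_cast<llvm::LoadInst>(V))
  Ptr = LI->getPointerOperand(); // only reached when V is a load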
idx_iterator idx_begin() const
void preserve()
Mark an analysis as preserved.
Definition: PassManager.h:174
bool isUnconditional() const
friend hash_code hash_value(const Expression &Value)
Definition: GVN.cpp:125
uint32_t lookupOrAdd(Value *V)
lookupOrAdd - Returns the value number for the specified value, assigning it a new number if it did...
Definition: GVN.cpp:499
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:181
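A minimal sketch; V is a hypothetical Value*:

// lookup never inserts: a missing key yields a value-initialized result,
// i.e. 0 for an integer-valued numbering map.
llvm::DenseMap<llvm::Value *, uint32_t> Numbers;
uint32_t N = Numbers.lookup(V); // 0 when V has no entry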
Value * getSimpleValue() const
Definition: GVN.cpp:207
Analysis pass providing the TargetLibraryInfo.
iterator_range< df_iterator< T > > depth_first(const T &G)
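A minimal sketch, assuming F is a Function& and visit is a hypothetical callback:

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/IR/CFG.h"
// Walk every block reachable from the entry in depth-first order.
for (llvm::BasicBlock *BB : llvm::depth_first(&F.getEntryBlock()))
  visit(BB);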
Multiway switch.
unsigned getNumSuccessors() const
Return the number of successors that this terminator has.
const BasicBlock * getStart() const
Definition: Dominators.h:86
Represents a particular available value that we know how to materialize.
Definition: GVN.cpp:155
bool isSafeToSpeculativelyExecute(const Value *V, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr)
Return true if the instruction does not have any effects besides calculating the result and does not ...
static bool IsValueFullyAvailableInBlock(BasicBlock *BB, DenseMap< BasicBlock *, char > &FullyAvailableBlocks, uint32_t RecurseDepth)
Return true if we can prove that the value we're analyzing is fully available in the specified block...
Definition: GVN.cpp:671
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:856
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction has no side ef...
Definition: Local.cpp:324
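A minimal sketch of the usual cleanup pattern; I and TLI are hypothetical:

// Erase an instruction once it is unused and free of side effects.
if (llvm::isInstructionTriviallyDead(I, TLI))
  I->eraseFromParent();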
LLVM Value Representation.
Definition: Value.h:73
succ_range successors(BasicBlock *BB)
Definition: CFG.h:143
static AvailableValueInBlock getUndef(BasicBlock *BB)
Definition: GVN.cpp:249
void removeInstruction(Instruction *InstToRemove)
Removes an instruction from the dependence analysis, updating the dependence of instructions that pre...
OptimizationRemarkEmitter legacy analysis pass.
#define DEBUG(X)
Definition: Debug.h:118
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Run the pass over the function.
Definition: GVN.cpp:627
bool hasOneUse() const
Return true if there is exactly one user of this value.
Definition: Value.h:414
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:967
This is an entry in the NonLocalDepInfo cache.
A container for analyses that lazily runs them and caches their results.
BasicBlock * BB
BB - The basic block in question.
Definition: GVN.cpp:232
static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl)
Definition: GVN.cpp:1476
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:267
bool isMemIntrinValue() const
Definition: GVN.cpp:204
static bool isVolatile(Instruction *Inst)
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object...
const TerminatorInst * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:120
This header defines various interfaces for pass management in LLVM.
void setIncomingValue(unsigned i, Value *V)
AvailableValue AV
AV - The actual available value.
Definition: GVN.cpp:235
Value * SimplifyInstruction(Instruction *I, const SimplifyQuery &Q, OptimizationRemarkEmitter *ORE=nullptr)
See if we can compute a simplified version of this instruction.
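A minimal sketch, assuming SQ is a SimplifyQuery assembled elsewhere from the usual analyses:

// If a simpler equivalent exists, route all uses to it and drop I.
if (llvm::Value *V = llvm::SimplifyInstruction(I, SQ)) {
  I->replaceAllUsesWith(V);
  I->eraseFromParent();
}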
static IntegerType * getInt8Ty(LLVMContext &C)
Definition: Type.cpp:174
void combineMetadata(Instruction *K, const Instruction *J, ArrayRef< unsigned > KnownIDs)
Combine the metadata of two instructions so that K can replace J.
Definition: Local.cpp:1764
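A minimal sketch; K and J are hypothetical instructions where K is about to replace J:

// Intersect the metadata kinds that must stay valid on the survivor
// before K takes over J's uses.
unsigned KnownIDs[] = {llvm::LLVMContext::MD_tbaa,
                       llvm::LLVMContext::MD_alias_scope,
                       llvm::LLVMContext::MD_noalias,
                       llvm::LLVMContext::MD_range};
llvm::combineMetadata(K, J, KnownIDs);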
The optimization diagnostic interface.
bool use_empty() const
Definition: Value.h:328
static AvailableValue getUndef()
Definition: GVN.cpp:194
static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E, DominatorTree *DT)
There is an edge from 'Src' to 'Dst'.
Definition: GVN.cpp:1665
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:44
const BasicBlock * getParent() const
Definition: Instruction.h:66
This instruction inserts a struct field of array element value into an aggregate value.
bool HasValueForBlock(BasicBlock *BB) const
Return true if the SSAUpdater already has a value for the specified block.
Definition: SSAUpdater.cpp:63
bool isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum, bool AllowIdenticalEdges=false)
Return true if the specified edge is a critical edge.
Definition: CFG.cpp:88