//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions. It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>

using namespace llvm;
using namespace llvm::gvn;
using namespace llvm::VNCoercion;
using namespace PatternMatch;

#define DEBUG_TYPE "gvn"

STATISTIC(NumGVNInstr, "Number of instructions deleted");
STATISTIC(NumGVNLoad, "Number of loads deleted");
STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumGVNSimpl, "Number of instructions simplified");
STATISTIC(NumGVNEqProp, "Number of equalities propagated");
STATISTIC(NumPRELoad, "Number of loads PRE'd");
STATISTIC(NumPRELoopLoad, "Number of loop loads PRE'd");

STATISTIC(IsValueFullyAvailableInBlockNumSpeculationsMax,
          "Number of blocks speculated as available in "
          "IsValueFullyAvailableInBlock(), max");
STATISTIC(MaxBBSpeculationCutoffReachedTimes,
          "Number of times we reached gvn-max-block-speculations cut-off "
          "preventing further exploration");

static cl::opt<bool> GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden);
static cl::opt<bool> GVNEnableLoadPRE("enable-load-pre", cl::init(true));
static cl::opt<bool> GVNEnableLoadInLoopPRE("enable-load-in-loop-pre",
                                            cl::init(true));
static cl::opt<bool>
    GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre",
                                    cl::init(false));
static cl::opt<bool> GVNEnableMemDep("enable-gvn-memdep", cl::init(true));

static cl::opt<uint32_t> MaxNumDeps(
    "gvn-max-num-deps", cl::Hidden, cl::init(100),
    cl::desc("Max number of dependences to attempt Load PRE (default = 100)"));

// This is based on IsValueFullyAvailableInBlockNumSpeculationsMax stat.
static cl::opt<uint32_t> MaxBBSpeculations(
    "gvn-max-block-speculations", cl::Hidden, cl::init(600),
    cl::desc("Max number of blocks we're willing to speculate on (and recurse "
             "into) when deducing if a value is fully available or not in GVN "
             "(default = 600)"));

static cl::opt<uint32_t> MaxNumVisitedInsts(
    "gvn-max-num-visited-insts", cl::Hidden, cl::init(100),
    cl::desc("Max number of visited instructions when trying to find "
             "dominating value of select dependency (default = 100)"));
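// Illustration (hypothetical invocation, not part of this file): since the
// knobs above are ordinary cl::opts, they can be toggled when running the
// pass through the standard opt driver, e.g.:
//
//   opt -passes=gvn -enable-load-pre=false -gvn-max-block-speculations=300 \
//       -S input.ll -o output.ll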

struct llvm::GVNPass::Expression {
  uint32_t opcode;
  bool commutative = false;
  // The type is not necessarily the result type of the expression, it may be
  // any additional type needed to disambiguate the expression.
  Type *type = nullptr;
  SmallVector<uint32_t, 4> varargs;

  Expression(uint32_t o = ~2U) : opcode(o) {}

  bool operator==(const Expression &other) const {
    if (opcode != other.opcode)
      return false;
    if (opcode == ~0U || opcode == ~1U)
      return true;
    if (type != other.type)
      return false;
    if (varargs != other.varargs)
      return false;
    return true;
  }

  friend hash_code hash_value(const Expression &Value) {
    return hash_combine(
        Value.opcode, Value.type,
        hash_combine_range(Value.varargs.begin(), Value.varargs.end()));
  }
};

namespace llvm {

template <> struct DenseMapInfo<GVNPass::Expression> {
  static inline GVNPass::Expression getEmptyKey() { return ~0U; }
  static inline GVNPass::Expression getTombstoneKey() { return ~1U; }

  static unsigned getHashValue(const GVNPass::Expression &e) {
    using llvm::hash_value;

    return static_cast<unsigned>(hash_value(e));
  }

  static bool isEqual(const GVNPass::Expression &LHS,
                      const GVNPass::Expression &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

/// Represents a particular available value that we know how to materialize.
/// Materialization of an AvailableValue never fails. An AvailableValue is
/// implicitly associated with a rematerialization point which is the
/// location of the instruction from which it was formed.
struct llvm::gvn::AvailableValue {
  enum class ValType {
    SimpleVal, // A simple offsetted value that is accessed.
    LoadVal,   // A value produced by a load.
    MemIntrin, // A memory intrinsic which is loaded from.
    UndefVal,  // An UndefValue representing a value from a dead block (which
               // is not yet physically removed from the CFG).
    SelectVal, // A pointer select which is loaded from and for which the load
               // can be replaced by a value select.
  };

  /// Val - The value that is live out of the block.
  Value *Val;
  /// Kind of the live-out value.
  ValType Kind;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset = 0;
  /// V1, V2 - The dominating non-clobbered values of SelectVal.
  Value *V1 = nullptr, *V2 = nullptr;

  static AvailableValue get(Value *V, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val = V;
    Res.Kind = ValType::SimpleVal;
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val = MI;
    Res.Kind = ValType::MemIntrin;
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getLoad(LoadInst *Load, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val = Load;
    Res.Kind = ValType::LoadVal;
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getUndef() {
    AvailableValue Res;
    Res.Val = nullptr;
    Res.Kind = ValType::UndefVal;
    Res.Offset = 0;
    return Res;
  }

  static AvailableValue getSelect(SelectInst *Sel, Value *V1, Value *V2) {
    AvailableValue Res;
    Res.Val = Sel;
    Res.Kind = ValType::SelectVal;
    Res.Offset = 0;
    Res.V1 = V1;
    Res.V2 = V2;
    return Res;
  }

  bool isSimpleValue() const { return Kind == ValType::SimpleVal; }
  bool isCoercedLoadValue() const { return Kind == ValType::LoadVal; }
  bool isMemIntrinValue() const { return Kind == ValType::MemIntrin; }
  bool isUndefValue() const { return Kind == ValType::UndefVal; }
  bool isSelectValue() const { return Kind == ValType::SelectVal; }

  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val;
  }

  LoadInst *getCoercedLoadValue() const {
    assert(isCoercedLoadValue() && "Wrong accessor");
    return cast<LoadInst>(Val);
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(isMemIntrinValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val);
  }

  SelectInst *getSelectValue() const {
    assert(isSelectValue() && "Wrong accessor");
    return cast<SelectInst>(Val);
  }

  /// Emit code at the specified insertion point to adjust the value defined
  /// here to the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt,
                                  GVNPass &gvn) const;
};

/// Represents an AvailableValue which can be rematerialized at the end of
/// the associated BasicBlock.
struct llvm::gvn::AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB = nullptr;

  /// AV - The actual available value.
  AvailableValue AV;

  static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.AV = std::move(AV);
    return Res;
  }

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    return get(BB, AvailableValue::get(V, Offset));
  }

  static AvailableValueInBlock getUndef(BasicBlock *BB) {
    return get(BB, AvailableValue::getUndef());
  }

  static AvailableValueInBlock getSelect(BasicBlock *BB, SelectInst *Sel,
                                         Value *V1, Value *V2) {
    return get(BB, AvailableValue::getSelect(Sel, V1, V2));
  }

  /// Emit code at the end of this block to adjust the value defined here to
  /// the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *Load, GVNPass &gvn) const {
    return AV.MaterializeAdjustedValue(Load, BB->getTerminator(), gvn);
  }
};

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

GVNPass::Expression GVNPass::ValueTable::createExpr(Instruction *I) {
  Expression e;
  e.type = I->getType();
  e.opcode = I->getOpcode();
  if (const GCRelocateInst *GCR = dyn_cast<GCRelocateInst>(I)) {
    // gc.relocate is 'special' call: its second and third operands are
    // not real values, but indices into statepoint's argument list.
    // Use the referred-to values for purposes of identity.
    e.varargs.push_back(lookupOrAdd(GCR->getOperand(0)));
    e.varargs.push_back(lookupOrAdd(GCR->getBasePtr()));
    e.varargs.push_back(lookupOrAdd(GCR->getDerivedPtr()));
  } else {
    for (Use &Op : I->operands())
      e.varargs.push_back(lookupOrAdd(Op));
  }
  if (I->isCommutative()) {
    // Ensure that commutative instructions that only differ by a permutation
    // of their operands get the same value number by sorting the operand value
    // numbers. Since commutative operands are the 1st two operands it is more
    // efficient to sort by hand rather than using, say, std::sort.
    assert(I->getNumOperands() >= 2 && "Unsupported commutative instruction!");
    if (e.varargs[0] > e.varargs[1])
      std::swap(e.varargs[0], e.varargs[1]);
    e.commutative = true;
  }

  if (auto *C = dyn_cast<CmpInst>(I)) {
    // Sort the operand value numbers so x<y and y>x get the same value number.
    CmpInst::Predicate Predicate = C->getPredicate();
    if (e.varargs[0] > e.varargs[1]) {
      std::swap(e.varargs[0], e.varargs[1]);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }
    e.opcode = (C->getOpcode() << 8) | Predicate;
    e.commutative = true;
  } else if (auto *E = dyn_cast<InsertValueInst>(I)) {
    e.varargs.append(E->idx_begin(), E->idx_end());
  } else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {
    ArrayRef<int> ShuffleMask = SVI->getShuffleMask();
    e.varargs.append(ShuffleMask.begin(), ShuffleMask.end());
  }

  return e;
}
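// Illustration (hypothetical IR, not from this file): because operand value
// numbers are sorted for commutative instructions, the two adds below produce
// identical Expressions and therefore share one value number, letting GVN
// replace %b with %a:
//
//   %a = add i32 %x, %y
//   %b = add i32 %y, %x   ; same expression: add(vn(%x), vn(%y))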

GVNPass::Expression GVNPass::ValueTable::createCmpExpr(
    unsigned Opcode, CmpInst::Predicate Predicate, Value *LHS, Value *RHS) {
  assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
         "Not a comparison!");
  Expression e;
  e.type = CmpInst::makeCmpResultType(LHS->getType());
  e.varargs.push_back(lookupOrAdd(LHS));
  e.varargs.push_back(lookupOrAdd(RHS));

  // Sort the operand value numbers so x<y and y>x get the same value number.
  if (e.varargs[0] > e.varargs[1]) {
    std::swap(e.varargs[0], e.varargs[1]);
    Predicate = CmpInst::getSwappedPredicate(Predicate);
  }
  e.opcode = (Opcode << 8) | Predicate;
  e.commutative = true;
  return e;
}
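// Illustration (hypothetical IR, not from this file): with the predicate swap
// above, these two compares canonicalize to the same expression and get one
// value number:
//
//   %c1 = icmp slt i32 %x, %y
//   %c2 = icmp sgt i32 %y, %x   ; canonicalized to (slt, vn(%x), vn(%y))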

GVNPass::Expression
GVNPass::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
  assert(EI && "Not an ExtractValueInst?");
  Expression e;
  e.type = EI->getType();
  e.opcode = 0;

  WithOverflowInst *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
  if (WO != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
    // EI is an extract from one of our with.overflow intrinsics. Synthesize
    // a semantically equivalent expression instead of an extract value
    // expression.
    e.opcode = WO->getBinaryOp();
    e.varargs.push_back(lookupOrAdd(WO->getLHS()));
    e.varargs.push_back(lookupOrAdd(WO->getRHS()));
    return e;
  }

  // Not a recognised intrinsic. Fall back to producing an extract value
  // expression.
  e.opcode = EI->getOpcode();
  for (Use &Op : EI->operands())
    e.varargs.push_back(lookupOrAdd(Op));

  append_range(e.varargs, EI->indices());

  return e;
}
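// Illustration (hypothetical IR, not from this file): the with.overflow
// special case lets a plain add and the value half of an overflow intrinsic
// share a value number:
//
//   %wo = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %v  = extractvalue { i32, i1 } %wo, 0   ; numbered like "add %a, %b"
//   %s  = add i32 %a, %b                    ; redundant with %v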

GVNPass::Expression GVNPass::ValueTable::createGEPExpr(GetElementPtrInst *GEP) {
  Expression E;
  Type *PtrTy = GEP->getType()->getScalarType();
  const DataLayout &DL = GEP->getModule()->getDataLayout();
  unsigned BitWidth = DL.getIndexTypeSizeInBits(PtrTy);
  MapVector<Value *, APInt> VariableOffsets;
  APInt ConstantOffset(BitWidth, 0);
  if (PtrTy->isOpaquePointerTy() &&
      GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset)) {
    // For opaque pointers, convert into offset representation, to recognize
    // equivalent address calculations that use different type encoding.
    LLVMContext &Context = GEP->getContext();
    E.opcode = GEP->getOpcode();
    E.type = nullptr;
    E.varargs.push_back(lookupOrAdd(GEP->getPointerOperand()));
    for (const auto &Pair : VariableOffsets) {
      E.varargs.push_back(lookupOrAdd(Pair.first));
      E.varargs.push_back(lookupOrAdd(ConstantInt::get(Context, Pair.second)));
    }
    if (!ConstantOffset.isZero())
      E.varargs.push_back(
          lookupOrAdd(ConstantInt::get(Context, ConstantOffset)));
  } else {
    // If converting to offset representation fails (for typed pointers and
    // scalable vectors), fall back to type-based implementation:
    E.opcode = GEP->getOpcode();
    E.type = GEP->getSourceElementType();
    for (Use &Op : GEP->operands())
      E.varargs.push_back(lookupOrAdd(Op));
  }
  return E;
}
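// Illustration (hypothetical IR, not from this file): under opaque pointers,
// the offset representation makes GEPs that spell the same address with
// different element types compare equal. Assuming a 64-bit index type:
//
//   %p1 = getelementptr i32, ptr %base, i64 1   ; base + 4 bytes
//   %p2 = getelementptr i8,  ptr %base, i64 4   ; base + 4 bytes, same VN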

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

GVNPass::ValueTable::ValueTable() = default;
GVNPass::ValueTable::ValueTable(const ValueTable &) = default;
GVNPass::ValueTable::ValueTable(ValueTable &&) = default;
GVNPass::ValueTable::~ValueTable() = default;
GVNPass::ValueTable &
GVNPass::ValueTable::operator=(const GVNPass::ValueTable &Arg) = default;

/// add - Insert a value into the table with a specified value number.
void GVNPass::ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
  if (PHINode *PN = dyn_cast<PHINode>(V))
    NumberingPhi[num] = PN;
}

uint32_t GVNPass::ValueTable::lookupOrAddCall(CallInst *C) {
  if (AA->doesNotAccessMemory(C) &&
      // FIXME: Currently the calls which may access the thread id may
      // be considered as not accessing the memory. But this is
      // problematic for coroutines, since coroutines may resume in a
      // different thread. So we disable the optimization here for the
      // correctness. However, it may block many other correct
      // optimizations. Revert this one when we detect the memory
      // accessing kind more precisely.
      !C->getFunction()->isPresplitCoroutine()) {
    Expression exp = createExpr(C);
    uint32_t e = assignExpNewValueNum(exp).first;
    valueNumbering[C] = e;
    return e;
  } else if (MD && AA->onlyReadsMemory(C) &&
             // FIXME: Currently the calls which may access the thread id may
             // be considered as not accessing the memory. But this is
             // problematic for coroutines, since coroutines may resume in a
             // different thread. So we disable the optimization here for the
             // correctness. However, it may block many other correct
             // optimizations. Revert this one when we detect the memory
             // accessing kind more precisely.
             !C->getFunction()->isPresplitCoroutine()) {
    Expression exp = createExpr(C);
    auto ValNum = assignExpNewValueNum(exp);
    if (ValNum.second) {
      valueNumbering[C] = ValNum.first;
      return ValNum.first;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      // For masked load/store intrinsics, the local_dep may actually be
      // a normal load or store instruction.
      CallInst *local_cdep = dyn_cast<CallInst>(local_dep.getInst());

      if (!local_cdep || local_cdep->arg_size() != C->arg_size()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
        uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
        uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookupOrAdd(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceResults::NonLocalDepInfo &deps =
        MD->getNonLocalCallDependency(C);
    // FIXME: Move the checking logic to MemDep!
    CallInst* cdep = nullptr;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (const NonLocalDepEntry &I : deps) {
      if (I.getResult().isNonLocal())
        continue;

      // We don't handle non-definitions. If we already have a call, reject
      // instruction dependencies.
      if (!I.getResult().isDef() || cdep != nullptr) {
        cdep = nullptr;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I.getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I.getBB(), C->getParent())) {
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = nullptr;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->arg_size() != C->arg_size()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
      uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
      uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookupOrAdd(cdep);
    valueNumbering[C] = v;
    return v;
  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}
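// Illustration (hypothetical IR, not from this file): a read-only call is
// numbered equal to an identical dominating call when memdep reports no
// intervening clobber of the memory it reads:
//
//   %a = call i32 @len(ptr %s)   ; readonly
//   ...                          ; nothing writes memory read by @len
//   %b = call i32 @len(ptr %s)   ; same value number as %a; GVN deletes it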

/// Returns true if a value number exists for the specified value.
bool GVNPass::ValueTable::exists(Value *V) const {
  return valueNumbering.count(V) != 0;
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t GVNPass::ValueTable::lookupOrAdd(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  auto *I = dyn_cast<Instruction>(V);
  if (!I) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookupOrAddCall(cast<CallInst>(I));
    case Instruction::FNeg:
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::AddrSpaceCast:
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::Freeze:
    case Instruction::ExtractElement:
    case Instruction::InsertElement:
    case Instruction::ShuffleVector:
    case Instruction::InsertValue:
      exp = createExpr(I);
      break;
    case Instruction::GetElementPtr:
      exp = createGEPExpr(cast<GetElementPtrInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
      break;
    case Instruction::PHI:
      valueNumbering[V] = nextValueNumber;
      NumberingPhi[nextValueNumber] = cast<PHINode>(V);
      return nextValueNumber++;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t e = assignExpNewValueNum(exp).first;
  valueNumbering[V] = e;
  return e;
}

/// Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t GVNPass::ValueTable::lookup(Value *V, bool Verify) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  if (Verify) {
    assert(VI != valueNumbering.end() && "Value not numbered?");
    return VI->second;
  }
  return (VI != valueNumbering.end()) ? VI->second : 0;
}

/// Returns the value number of the given comparison,
/// assigning it a new number if it did not have one before. Useful when
/// we deduced the result of a comparison, but don't immediately have an
/// instruction realizing that comparison to hand.
uint32_t GVNPass::ValueTable::lookupOrAddCmp(unsigned Opcode,
                                             CmpInst::Predicate Predicate,
                                             Value *LHS, Value *RHS) {
  Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
  return assignExpNewValueNum(exp).first;
}
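// Illustration (hypothetical, not from this file): lookupOrAddCmp numbers a
// comparison that GVN has merely deduced. For instance, on the true edge of
// "br i1 %c, ..." where %c = icmp eq i32 %x, %y, the equality-propagation
// logic elsewhere in this pass can number the expression
// (icmp eq, vn(%x), vn(%y)) without any icmp instruction existing there.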

/// Remove all entries from the ValueTable.
void GVNPass::ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  NumberingPhi.clear();
  PhiTranslateTable.clear();
  nextValueNumber = 1;
  Expressions.clear();
  ExprIdx.clear();
  nextExprNumber = 0;
}

/// Remove a value from the value numbering.
void GVNPass::ValueTable::erase(Value *V) {
  uint32_t Num = valueNumbering.lookup(V);
  valueNumbering.erase(V);
  // If V is PHINode, V <--> value number is a one-to-one mapping.
  if (isa<PHINode>(V))
    NumberingPhi.erase(Num);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void GVNPass::ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

bool GVNPass::isPREEnabled() const {
  return Options.AllowPRE.value_or(GVNEnablePRE);
}

bool GVNPass::isLoadPREEnabled() const {
  return Options.AllowLoadPRE.value_or(GVNEnableLoadPRE);
}

bool GVNPass::isLoadInLoopPREEnabled() const {
  return Options.AllowLoadInLoopPRE.value_or(GVNEnableLoadInLoopPRE);
}

bool GVNPass::isLoadPRESplitBackedgeEnabled() const {
  return Options.AllowLoadPRESplitBackedge.value_or(
      GVNEnableSplitBackedgeInLoadPRE);
}

bool GVNPass::isMemDepEnabled() const {
  return Options.AllowMemDep.value_or(GVNEnableMemDep);
}

PreservedAnalyses GVNPass::run(Function &F, FunctionAnalysisManager &AM) {
  // FIXME: The order of evaluation of these 'getResult' calls is very
  // significant! Re-ordering these variables will cause GVN when run alone to
  // be less effective! We should fix memdep and basic-aa to not exhibit this
  // behavior, but until then don't change the order here.
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto *MemDep =
      isMemDepEnabled() ? &AM.getResult<MemoryDependenceAnalysis>(F) : nullptr;
  auto *LI = AM.getCachedResult<LoopAnalysis>(F);
  auto *MSSA = AM.getCachedResult<MemorySSAAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  bool Changed = runImpl(F, AC, DT, TLI, AA, MemDep, LI, &ORE,
                         MSSA ? &MSSA->getMSSA() : nullptr);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<TargetLibraryAnalysis>();
  if (MSSA)
    PA.preserve<MemorySSAAnalysis>();
  if (LI)
    PA.preserve<LoopAnalysis>();
  return PA;
}

void GVNPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<GVNPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << '<';
  if (Options.AllowPRE != std::nullopt)
    OS << (*Options.AllowPRE ? "" : "no-") << "pre;";
  if (Options.AllowLoadPRE != std::nullopt)
    OS << (*Options.AllowLoadPRE ? "" : "no-") << "load-pre;";
  if (Options.AllowLoadPRESplitBackedge != std::nullopt)
    OS << (*Options.AllowLoadPRESplitBackedge ? "" : "no-")
       << "split-backedge-load-pre;";
  if (Options.AllowMemDep != std::nullopt)
    OS << (*Options.AllowMemDep ? "" : "no-") << "memdep";
  OS << '>';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void GVNPass::dump(DenseMap<uint32_t, Value *> &d) const {
  errs() << "{\n";
  for (auto &I : d) {
    errs() << I.first << "\n";
    I.second->dump();
  }
  errs() << "}\n";
}
#endif

enum class AvailabilityState : char {
  /// We know the block *is not* fully available. This is a fixpoint.
  Unavailable = 0,
  /// We know the block *is* fully available. This is a fixpoint.
  Available = 1,
  /// We do not know whether the block is fully available or not,
  /// but we are currently speculating that it will be.
  /// If it would have turned out that the block was, in fact, not fully
  /// available, this would have been cleaned up into an Unavailable.
  SpeculativelyAvailable = 2,
};

/// Return true if we can prove that the value
/// we're analyzing is fully available in the specified block. As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks. This
/// map is actually a tri-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
static bool IsValueFullyAvailableInBlock(
    BasicBlock *BB,
    DenseMap<BasicBlock *, AvailabilityState> &FullyAvailableBlocks) {
  SmallVector<BasicBlock *, 32> Worklist;
  std::optional<BasicBlock *> UnavailableBB;

  // The number of times we didn't find an entry for a block in a map and
  // optimistically inserted an entry marking block as speculatively available.
  unsigned NumNewNewSpeculativelyAvailableBBs = 0;

#ifndef NDEBUG
  SmallSet<BasicBlock *, 32> NewSpeculativelyAvailableBBs;
  SmallVector<BasicBlock *, 32> AvailableBBs;
#endif

  Worklist.emplace_back(BB);
  while (!Worklist.empty()) {
    BasicBlock *CurrBB = Worklist.pop_back_val(); // LIFO - depth-first!
    // Optimistically assume that the block is Speculatively Available and
    // check to see if we already know about this block in one lookup.
    std::pair<DenseMap<BasicBlock *, AvailabilityState>::iterator, bool> IV =
        FullyAvailableBlocks.try_emplace(
            CurrBB, AvailabilityState::SpeculativelyAvailable);
    AvailabilityState &State = IV.first->second;

    // Did the entry already exist for this block?
    if (!IV.second) {
      if (State == AvailabilityState::Unavailable) {
        UnavailableBB = CurrBB;
        break; // Backpropagate unavailability info.
      }

#ifndef NDEBUG
      AvailableBBs.emplace_back(CurrBB);
#endif
      continue; // Don't recurse further, but continue processing worklist.
    }

    // No entry found for block.
    ++NumNewNewSpeculativelyAvailableBBs;
    bool OutOfBudget = NumNewNewSpeculativelyAvailableBBs > MaxBBSpeculations;

    // If we have exhausted our budget, mark this block as unavailable.
    // Also, if this block has no predecessors, the value isn't live-in here.
    if (OutOfBudget || pred_empty(CurrBB)) {
      MaxBBSpeculationCutoffReachedTimes += (int)OutOfBudget;
      State = AvailabilityState::Unavailable;
      UnavailableBB = CurrBB;
      break; // Backpropagate unavailability info.
    }

    // Tentatively consider this block as speculatively available.
#ifndef NDEBUG
    NewSpeculativelyAvailableBBs.insert(CurrBB);
#endif
    // And further recurse into block's predecessors, in depth-first order!
    Worklist.append(pred_begin(CurrBB), pred_end(CurrBB));
  }

#if LLVM_ENABLE_STATS
  IsValueFullyAvailableInBlockNumSpeculationsMax.updateMax(
      NumNewNewSpeculativelyAvailableBBs);
#endif

  // If the block isn't marked as fixpoint yet
  // (the Unavailable and Available states are fixpoints)
  auto MarkAsFixpointAndEnqueueSuccessors =
      [&](BasicBlock *BB, AvailabilityState FixpointState) {
        auto It = FullyAvailableBlocks.find(BB);
        if (It == FullyAvailableBlocks.end())
          return; // Never queried this block, leave as-is.
        switch (AvailabilityState &State = It->second) {
        case AvailabilityState::Unavailable:
        case AvailabilityState::Available:
          return; // Don't backpropagate further, continue processing worklist.
        case AvailabilityState::SpeculativelyAvailable: // Fix it!
          State = FixpointState;
#ifndef NDEBUG
          assert(NewSpeculativelyAvailableBBs.erase(BB) &&
                 "Found a speculatively available successor leftover?");
#endif
          // Queue successors for further processing.
          Worklist.append(succ_begin(BB), succ_end(BB));
          return;
        }
      };

  if (UnavailableBB) {
    // Okay, we have encountered an unavailable block.
    // Mark speculatively available blocks reachable from UnavailableBB as
    // unavailable as well. Paths are terminated when they reach blocks not in
    // FullyAvailableBlocks or they are not marked as speculatively available.
    Worklist.clear();
    Worklist.append(succ_begin(*UnavailableBB), succ_end(*UnavailableBB));
    while (!Worklist.empty())
      MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
                                         AvailabilityState::Unavailable);
  }

#ifndef NDEBUG
  Worklist.clear();
  for (BasicBlock *AvailableBB : AvailableBBs)
    Worklist.append(succ_begin(AvailableBB), succ_end(AvailableBB));
  while (!Worklist.empty())
    MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
                                       AvailabilityState::Available);

  assert(NewSpeculativelyAvailableBBs.empty() &&
         "Must have fixed all the new speculatively available blocks.");
#endif

  return !UnavailableBB;
}
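// Illustration (hypothetical CFG, not from this file): for a diamond where
// the value is already marked Available in both predecessors of the join
// block, the speculative walk terminates without finding an unavailable
// block and the query returns true:
//
//            entry
//            /   \
//     (Available) (Available)
//            \   /
//            merge   <- IsValueFullyAvailableInBlock(merge, ...) == true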

/// Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate Load. This returns the value
/// that should be used at Load's definition site.
static Value *
ConstructSSAForLoadSet(LoadInst *Load,
                       SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                       GVNPass &gvn) {
  // Check for the fully redundant, dominating load case. In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
                                               Load->getParent())) {
    assert(!ValuesPerBlock[0].AV.isUndefValue() &&
           "Dead BB dominates this block");
    return ValuesPerBlock[0].MaterializeAdjustedValue(Load, gvn);
  }

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode *, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(Load->getType(), Load->getName());

  for (const AvailableValueInBlock &AV : ValuesPerBlock) {
    BasicBlock *BB = AV.BB;

    if (AV.AV.isUndefValue())
      continue;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    // If the value is the load that we will be eliminating, and the block it's
    // available in is the block that the load is in, then don't add it as
    // SSAUpdater will resolve the value to the relevant phi which may let it
    // avoid phi construction entirely if there's actually only one value.
    if (BB == Load->getParent() &&
        ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == Load) ||
         (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == Load)))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(Load, gvn));
  }

  // Perform PHI construction.
  return SSAUpdate.GetValueInMiddleOfBlock(Load->getParent());
}
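// Illustration (hypothetical IR, not from this file): when the value reaches
// the load along two paths, SSAUpdater inserts the phi that replaces it:
//
//   then:                           else:
//     store i32 %a, ptr %p            store i32 %b, ptr %p
//     br label %merge                 br label %merge
//   merge:
//     ; %v = load i32, ptr %p  becomes:
//     %v = phi i32 [ %a, %then ], [ %b, %else ]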

Value *AvailableValue::MaterializeAdjustedValue(LoadInst *Load,
                                                Instruction *InsertPt,
                                                GVNPass &gvn) const {
  Value *Res;
  Type *LoadTy = Load->getType();
  const DataLayout &DL = Load->getModule()->getDataLayout();
  if (isSimpleValue()) {
    Res = getSimpleValue();
    if (Res->getType() != LoadTy) {
      Res = getStoreValueForLoad(Res, Offset, LoadTy, InsertPt, DL);

      LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset
                        << " " << *getSimpleValue() << '\n'
                        << *Res << '\n'
                        << "\n\n\n");
    }
  } else if (isCoercedLoadValue()) {
    LoadInst *CoercedLoad = getCoercedLoadValue();
    if (CoercedLoad->getType() == LoadTy && Offset == 0) {
      Res = CoercedLoad;
    } else {
      Res = getLoadValueForLoad(CoercedLoad, Offset, LoadTy, InsertPt, DL);
      // We would like to use gvn.markInstructionForDeletion here, but we can't
      // because the load is already memoized into the leader map table that
      // GVN tracks. It is potentially possible to remove the load from the
      // table, but then all of the operations based on it would need to be
      // rehashed. Just leave the dead load around.
      gvn.getMemDep().removeInstruction(CoercedLoad);
      LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset
                        << " " << *getCoercedLoadValue() << '\n'
                        << *Res << '\n'
                        << "\n\n\n");
    }
  } else if (isMemIntrinValue()) {
    Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
                                 InsertPt, DL);
    LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                      << " " << *getMemIntrinValue() << '\n'
                      << *Res << '\n'
                      << "\n\n\n");
  } else if (isSelectValue()) {
    // Introduce a new value select for a load from an eligible pointer select.
    SelectInst *Sel = getSelectValue();
    assert(V1 && V2 && "both value operands of the select must be present");
    Res = SelectInst::Create(Sel->getCondition(), V1, V2, "", Sel);
  } else {
    llvm_unreachable("Should not materialize value from dead block");
  }
  assert(Res && "failed to materialize?");
  return Res;
}
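// Illustration (hypothetical IR, not from this file): the SimpleVal coercion
// path covers cases like extracting a narrow load from a wider dominating
// store; on a little-endian target with Offset = 0 this materializes as a
// truncation of the stored value via getStoreValueForLoad:
//
//   store i32 %v, ptr %p
//   %b = load i8, ptr %p   ; replaced by a trunc of %v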

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// Assuming To can be reached from both From and Between, does Between lie on
/// every path from From to To?
static bool liesBetween(const Instruction *From, Instruction *Between,
                        const Instruction *To, DominatorTree *DT) {
  if (From->getParent() == Between->getParent())
    return DT->dominates(From, Between);
  SmallSet<BasicBlock *, 1> Exclusion;
  Exclusion.insert(Between->getParent());
  return !isPotentiallyReachable(From, To, &Exclusion, DT);
}

/// Try to locate the three instructions involved in a missed
/// load-elimination case that is due to an intervening store.
static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo,
                                   DominatorTree *DT,
                                   OptimizationRemarkEmitter *ORE) {
  using namespace ore;

  Instruction *OtherAccess = nullptr;

  OptimizationRemarkMissed R(DEBUG_TYPE, "LoadClobbered", Load);
  R << "load of type " << NV("Type", Load->getType()) << " not eliminated"
    << setExtraArgs();

  for (auto *U : Load->getPointerOperand()->users()) {
    if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U))) {
      auto *I = cast<Instruction>(U);
      if (I->getFunction() == Load->getFunction() && DT->dominates(I, Load)) {
        // Use the most immediately dominating value.
        if (OtherAccess) {
          if (DT->dominates(OtherAccess, I))
            OtherAccess = I;
          else
            assert(U == OtherAccess || DT->dominates(I, OtherAccess));
        } else
          OtherAccess = I;
      }
    }
  }

  if (!OtherAccess) {
    // There is no dominating use, check if we can find the closest
    // non-dominating use that lies between any other potentially available
    // use and Load.
    for (auto *U : Load->getPointerOperand()->users()) {
      if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U))) {
        auto *I = cast<Instruction>(U);
        if (I->getFunction() == Load->getFunction() &&
            isPotentiallyReachable(I, Load, nullptr, DT)) {
          if (OtherAccess) {
            if (liesBetween(OtherAccess, I, Load, DT)) {
              OtherAccess = I;
            } else if (!liesBetween(I, OtherAccess, Load, DT)) {
              // These uses are both partially available at Load were it not
              // for the clobber, but neither lies strictly after the other.
              OtherAccess = nullptr;
              break;
            } // else: keep current OtherAccess since it lies between U and
              // Load.
          } else {
            OtherAccess = I;
          }
        }
      }
    }
  }

  if (OtherAccess)
    R << " in favor of " << NV("OtherAccess", OtherAccess);

  R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst());

  ORE->emit(R);
}

// Find non-clobbered value for Loc memory location in extended basic block
// (chain of basic blocks with single predecessors) starting From instruction.
static Value *findDominatingValue(const MemoryLocation &Loc, Type *LoadTy,
                                  Instruction *From, AAResults *AA) {
  uint32_t NumVisitedInsts = 0;
  BasicBlock *FromBB = From->getParent();
  BatchAAResults BatchAA(*AA);
  for (BasicBlock *BB = FromBB; BB; BB = BB->getSinglePredecessor())
    for (auto I = BB == FromBB ? From->getReverseIterator() : BB->rbegin(),
              E = BB->rend();
         I != E; ++I) {
      // Stop the search if limit is reached.
      if (++NumVisitedInsts > MaxNumVisitedInsts)
        return nullptr;
      Instruction *Inst = &*I;
      if (isModSet(BatchAA.getModRefInfo(Inst, Loc)))
        return nullptr;
      if (auto *LI = dyn_cast<LoadInst>(Inst))
        if (LI->getPointerOperand() == Loc.Ptr && LI->getType() == LoadTy)
          return LI;
    }
  return nullptr;
}
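// Illustration (hypothetical IR, not from this file): the select-dependency
// machinery turns a load of a pointer select into a select of previously
// loaded values, provided findDominatingValue sees no intervening clobber:
//
//   %a = load i32, ptr %p
//   %b = load i32, ptr %q
//   %ptr = select i1 %c, ptr %p, ptr %q
//   ; %v = load i32, ptr %ptr  becomes:
//   %v = select i1 %c, i32 %a, i32 %b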

std::optional<AvailableValue>
GVNPass::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo,
                                 Value *Address) {
  assert(Load->isUnordered() && "rules below are incorrect for ordered access");
  assert(DepInfo.isLocal() && "expected a local dependence");

  Instruction *DepInst = DepInfo.getInst();

  const DataLayout &DL = Load->getModule()->getDataLayout();
  if (DepInfo.isClobber()) {
    // If the dependence is to a store that writes to a superset of the bits
    // read by the load, we can extract the bits we need for the load from the
    // stored value.
    if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
      // Can't forward from non-atomic to atomic without violating memory
      // model.
      if (Address && Load->isAtomic() <= DepSI->isAtomic()) {
        int Offset =
            analyzeLoadFromClobberingStore(Load->getType(), Address, DepSI, DL);
        if (Offset != -1)
          return AvailableValue::get(DepSI->getValueOperand(), Offset);
      }
    }

    // Check to see if we have something like this:
    //    load i32* P
    //    load i8* (P+1)
    // if we have this, replace the later with an extraction from the former.
    if (LoadInst *DepLoad = dyn_cast<LoadInst>(DepInst)) {
      // If this is a clobber and L is the first instruction in its block, then
      // we have the first instruction in the entry block.
      // Can't forward from non-atomic to atomic without violating memory
      // model.
      if (DepLoad != Load && Address &&
          Load->isAtomic() <= DepLoad->isAtomic()) {
        Type *LoadType = Load->getType();
        int Offset = -1;

        // If MD reported clobber, check it was nested.
        if (DepInfo.isClobber() &&
            canCoerceMustAliasedValueToLoad(DepLoad, LoadType, DL)) {
          const auto ClobberOff = MD->getClobberOffset(DepLoad);
          // GVN cannot handle a negative offset.
          Offset = (ClobberOff == std::nullopt || *ClobberOff < 0)
                       ? -1
                       : *ClobberOff;
        }
        if (Offset == -1)
          Offset =
              analyzeLoadFromClobberingLoad(LoadType, Address, DepLoad, DL);
        if (Offset != -1)
          return AvailableValue::getLoad(DepLoad, Offset);
      }
    }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
      if (Address && !Load->isAtomic()) {
        int Offset = analyzeLoadFromClobberingMemInst(Load->getType(), Address,
                                                      DepMI, DL);
        if (Offset != -1)
          return AvailableValue::getMI(DepMI, Offset);
      }
    }

    // Nothing known about this clobber, have to be conservative.
    LLVM_DEBUG(
        // fast print dep, using operator<< on instruction is too slow.
        dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
        dbgs() << " is clobbered by " << *DepInst << '\n';);
    if (ORE->allowExtraAnalysis(DEBUG_TYPE))
      reportMayClobberedLoad(Load, DepInfo, DT, ORE);

    return std::nullopt;
  }
  assert(DepInfo.isDef() && "follows from above");

  // Loading the alloca -> undef.
  // Loading immediately after lifetime begin -> undef.
  if (isa<AllocaInst>(DepInst) || isLifetimeStart(DepInst))
    return AvailableValue::get(UndefValue::get(Load->getType()));

  if (Constant *InitVal =
          getInitialValueOfAllocation(DepInst, TLI, Load->getType()))
    return AvailableValue::get(InitVal);

  if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
    // Reject loads and stores that are to the same address but are of
    // different types if we have to. If the stored value is convertible to
    // the loaded value, we can reuse it.
    if (!canCoerceMustAliasedValueToLoad(S->getValueOperand(), Load->getType(),
                                         DL))
      return std::nullopt;

    // Can't forward from non-atomic to atomic without violating memory model.
    if (S->isAtomic() < Load->isAtomic())
      return std::nullopt;

    return AvailableValue::get(S->getValueOperand());
  }

  if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
    // If the types mismatch and we can't handle it, reject reuse of the load.
    // If the stored value is larger or equal to the loaded value, we can reuse
    // it.
    if (!canCoerceMustAliasedValueToLoad(LD, Load->getType(), DL))
      return std::nullopt;

    // Can't forward from non-atomic to atomic without violating memory model.
    if (LD->isAtomic() < Load->isAtomic())
      return std::nullopt;

    return AvailableValue::getLoad(LD);
  }

  // Check if load with Addr dependent from select can be converted to select
  // between load values. There must be no instructions between the found
  // loads and DepInst that may clobber the loads.
  if (auto *Sel = dyn_cast<SelectInst>(DepInst)) {
    assert(Sel->getType() == Load->getPointerOperandType());
    auto Loc = MemoryLocation::get(Load);
    Value *V1 =
        findDominatingValue(Loc.getWithNewPtr(Sel->getTrueValue()),
                            Load->getType(), DepInst, getAliasAnalysis());
    if (!V1)
      return std::nullopt;
    Value *V2 =
        findDominatingValue(Loc.getWithNewPtr(Sel->getFalseValue()),
                            Load->getType(), DepInst, getAliasAnalysis());
    if (!V2)
      return std::nullopt;
    return AvailableValue::getSelect(Sel, V1, V2);
  }

  // Unknown def - must be conservative.
  LLVM_DEBUG(
      // fast print dep, using operator<< on instruction is too slow.
      dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
      dbgs() << " has unknown def " << *DepInst << '\n';);
  return std::nullopt;
}

void GVNPass::AnalyzeLoadAvailability(LoadInst *Load, LoadDepVect &Deps,
                                      AvailValInBlkVect &ValuesPerBlock,
                                      UnavailBlkVect &UnavailableBlocks) {
  // Filter out useless results (non-locals, etc). Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  for (const auto &Dep : Deps) {
    BasicBlock *DepBB = Dep.getBB();
    MemDepResult DepInfo = Dep.getResult();

    if (DeadBlocks.count(DepBB)) {
      // A dead dependent mem-op disguises as a load evaluating the same value
      // as the load in question.
      ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
      continue;
    }

    if (!DepInfo.isLocal()) {
      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    // The address being loaded in this non-local block may not be the same as
    // the pointer operand of the load if PHI translation occurs. Make sure
    // to consider the right address.
    if (auto AV = AnalyzeLoadAvailability(Load, DepInfo, Dep.getAddress())) {
      // subtlety: because we know this was a non-local dependency, we know
      // it's safe to materialize anywhere between the instruction within
      // DepInfo and the end of its block.
      ValuesPerBlock.push_back(
          AvailableValueInBlock::get(DepBB, std::move(*AV)));
    } else {
      UnavailableBlocks.push_back(DepBB);
    }
  }

  assert(Deps.size() == ValuesPerBlock.size() + UnavailableBlocks.size() &&
         "post condition violation");
}

void GVNPass::eliminatePartiallyRedundantLoad(
    LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
    MapVector<BasicBlock *, Value *> &AvailableLoads) {
  for (const auto &AvailableLoad : AvailableLoads) {
    BasicBlock *UnavailableBlock = AvailableLoad.first;
    Value *LoadPtr = AvailableLoad.second;

    auto *NewLoad =
        new LoadInst(Load->getType(), LoadPtr, Load->getName() + ".pre",
                     Load->isVolatile(), Load->getAlign(), Load->getOrdering(),
                     Load->getSyncScopeID(), UnavailableBlock->getTerminator());
    NewLoad->setDebugLoc(Load->getDebugLoc());
    if (MSSAU) {
      auto *MSSA = MSSAU->getMemorySSA();
      // Get the defining access of the original load or use the load if it is
      // a MemoryDef (e.g. because it is volatile). The inserted loads are
      // guaranteed to load from the same definition.
      auto *LoadAcc = MSSA->getMemoryAccess(Load);
      auto *DefiningAcc =
          isa<MemoryDef>(LoadAcc) ? LoadAcc : LoadAcc->getDefiningAccess();
      auto *NewAccess = MSSAU->createMemoryAccessInBB(
          NewLoad, DefiningAcc, NewLoad->getParent(),
          MemorySSA::BeforeTerminator);
      if (auto *NewDef = dyn_cast<MemoryDef>(NewAccess))
        MSSAU->insertDef(NewDef, /*RenameUses=*/true);
      else
        MSSAU->insertUse(cast<MemoryUse>(NewAccess), /*RenameUses=*/true);
    }

    // Transfer the old load's AA tags to the new load.
    AAMDNodes Tags = Load->getAAMetadata();
    if (Tags)
      NewLoad->setAAMetadata(Tags);

    if (auto *MD = Load->getMetadata(LLVMContext::MD_invariant_load))
      NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
    if (auto *InvGroupMD = Load->getMetadata(LLVMContext::MD_invariant_group))
      NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
    if (auto *RangeMD = Load->getMetadata(LLVMContext::MD_range))
      NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
    if (auto *AccessMD = Load->getMetadata(LLVMContext::MD_access_group))
      if (LI &&
          LI->getLoopFor(Load->getParent()) == LI->getLoopFor(UnavailableBlock))
        NewLoad->setMetadata(LLVMContext::MD_access_group, AccessMD);

    // We do not propagate the old load's debug location, because the new
    // load now lives in a different BB, and we want to avoid a jumpy line
    // table.
    // FIXME: How do we retain source locations without causing poor debugging
    // behavior?

    // Add the newly created load.
    ValuesPerBlock.push_back(
        AvailableValueInBlock::get(UnavailableBlock, NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
  Load->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(Load);
  if (Instruction *I = dyn_cast<Instruction>(V))
    I->setDebugLoc(Load->getDebugLoc());
  if (V->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(V);
  markInstructionForDeletion(Load);
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "LoadPRE", Load)
           << "load eliminated by PRE";
  });
}
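// Illustration (hypothetical IR, not from this file): with the value
// available along only one path, load PRE inserts a copy of the load in the
// other predecessor and the phi construction above merges the results:
//
//   a:  store i32 %x, ptr %p     ; value available here
//   b:  ...                      ; unavailable: "%v.pre = load i32, ptr %p"
//       ;                          is inserted at the end of this block
//   m:  %v = load i32, ptr %p
//       ; becomes: %v = phi i32 [ %x, %a ], [ %v.pre, %b ]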
1390
1391bool GVNPass::PerformLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1392 UnavailBlkVect &UnavailableBlocks) {
1393 // Okay, we have *some* definitions of the value. This means that the value
1394 // is available in some of our (transitive) predecessors. Lets think about
1395 // doing PRE of this load. This will involve inserting a new load into the
1396 // predecessor when it's not available. We could do this in general, but
1397 // prefer to not increase code size. As such, we only do this when we know
1398 // that we only have to insert *one* load (which means we're basically moving
1399 // the load, not inserting a new one).
1400
1401 SmallPtrSet<BasicBlock *, 4> Blockers(UnavailableBlocks.begin(),
1402 UnavailableBlocks.end());
1403
1404 // Let's find the first basic block with more than one predecessor. Walk
1405 // backwards through predecessors if needed.
1406 BasicBlock *LoadBB = Load->getParent();
1407 BasicBlock *TmpBB = LoadBB;
1408
1409 // Check that there is no implicit control flow instructions above our load in
1410 // its block. If there is an instruction that doesn't always pass the
1411 // execution to the following instruction, then moving through it may become
1412 // invalid. For example:
1413 //
1414 // int arr[LEN];
1415 // int index = ???;
1416 // ...
1417 // guard(0 <= index && index < LEN);
1418 // use(arr[index]);
1419 //
1420 // It is illegal to move the array access to any point above the guard,
1421 // because if the index is out of bounds we should deoptimize rather than
1422 // access the array.
1423 // Check that there is no guard in this block above our instruction.
1424 bool MustEnsureSafetyOfSpeculativeExecution =
1426
1427 while (TmpBB->getSinglePredecessor()) {
1428 TmpBB = TmpBB->getSinglePredecessor();
1429 if (TmpBB == LoadBB) // Infinite (unreachable) loop.
1430 return false;
1431 if (Blockers.count(TmpBB))
1432 return false;
1433
1434 // If any of these blocks has more than one successor (i.e. if the edge we
1435 // just traversed was critical), then there are other paths through this
1436 // block along which the load may not be anticipated. Hoisting the load
1437 // above this block would be adding the load to execution paths along
1438 // which it was not previously executed.
1439 if (TmpBB->getTerminator()->getNumSuccessors() != 1)
1440 return false;
1441
1442 // Check that there is no implicit control flow in a block above.
1443 MustEnsureSafetyOfSpeculativeExecution =
1444 MustEnsureSafetyOfSpeculativeExecution || ICF->hasICF(TmpBB);
1445 }
1446
1447 assert(TmpBB);
1448 LoadBB = TmpBB;
1449
1450 // Check to see how many predecessors have the loaded value fully
1451 // available.
1453 DenseMap<BasicBlock *, AvailabilityState> FullyAvailableBlocks;
1454 for (const AvailableValueInBlock &AV : ValuesPerBlock)
1455 FullyAvailableBlocks[AV.BB] = AvailabilityState::Available;
1456 for (BasicBlock *UnavailableBB : UnavailableBlocks)
1457 FullyAvailableBlocks[UnavailableBB] = AvailabilityState::Unavailable;
1458
1459 SmallVector<BasicBlock *, 4> CriticalEdgePred;
1460 for (BasicBlock *Pred : predecessors(LoadBB)) {
1461 // If any predecessor block is an EH pad that does not allow non-PHI
1462 // instructions before the terminator, we can't PRE the load.
1463 if (Pred->getTerminator()->isEHPad()) {
1464 LLVM_DEBUG(
1465 dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
1466 << Pred->getName() << "': " << *Load << '\n');
1467 return false;
1468 }
1469
1470 if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
1471 continue;
1472 }
1473
1474 if (Pred->getTerminator()->getNumSuccessors() != 1) {
1475 if (isa<IndirectBrInst>(Pred->getTerminator())) {
1476 LLVM_DEBUG(
1477 dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
1478 << Pred->getName() << "': " << *Load << '\n');
1479 return false;
1480 }
1481
1482 if (LoadBB->isEHPad()) {
1483 LLVM_DEBUG(
1484 dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
1485 << Pred->getName() << "': " << *Load << '\n');
1486 return false;
1487 }
1488
1489 // Do not split backedge as it will break the canonical loop form.
1491 if (DT->dominates(LoadBB, Pred)) {
1492 LLVM_DEBUG(
1493 dbgs()
1494 << "COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '"
1495 << Pred->getName() << "': " << *Load << '\n');
1496 return false;
1497 }
1498
1499 CriticalEdgePred.push_back(Pred);
1500 } else {
1501 // Only add the predecessors that will not be split for now.
1502 PredLoads[Pred] = nullptr;
1503 }
1504 }
1505
1506 // Decide whether PRE is profitable for this load.
1507 unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size();
1508 assert(NumUnavailablePreds != 0 &&
1509 "Fully available value should already be eliminated!");
1510 (void)NumUnavailablePreds;
1511
1512 // If this load is unavailable in multiple predecessors, reject it.
1513 // FIXME: If we could restructure the CFG, we could make a common pred with
1514 // all the preds that don't have an available Load and insert a new load into
1515 // that one block.
1516 if (NumUnavailablePreds != 1)
1517 return false;
1518
1519 // Now we know where we will insert load. We must ensure that it is safe
1520 // to speculatively execute the load at that points.
1521 if (MustEnsureSafetyOfSpeculativeExecution) {
1522 if (CriticalEdgePred.size())
1523 if (!isSafeToSpeculativelyExecute(Load, LoadBB->getFirstNonPHI(), AC, DT))
1524 return false;
1525 for (auto &PL : PredLoads)
1526 if (!isSafeToSpeculativelyExecute(Load, PL.first->getTerminator(), AC,
1527 DT))
1528 return false;
1529 }
1530
1531 // Split critical edges, and update the unavailable predecessors accordingly.
1532 for (BasicBlock *OrigPred : CriticalEdgePred) {
1533 BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
1534 assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
1535 PredLoads[NewPred] = nullptr;
1536 LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
1537 << LoadBB->getName() << '\n');
1538 }
1539
1540 // Check if the load can safely be moved to all the unavailable predecessors.
1541 bool CanDoPRE = true;
1542 const DataLayout &DL = Load->getModule()->getDataLayout();
1544 for (auto &PredLoad : PredLoads) {
1545 BasicBlock *UnavailablePred = PredLoad.first;
1546
1547 // Do PHI translation to get its value in the predecessor if necessary. The
1548 // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
1549 // We do the translation for each edge we skipped by going from Load's block
1550 // to LoadBB, otherwise we might miss pieces needing translation.
1551
1552 // If all preds have a single successor, then we know it is safe to insert
1553 // the load on the pred (?!?), so we can insert code to materialize the
1554 // pointer if it is not available.
1555 Value *LoadPtr = Load->getPointerOperand();
1556 BasicBlock *Cur = Load->getParent();
1557 while (Cur != LoadBB) {
1558 PHITransAddr Address(LoadPtr, DL, AC);
1559 LoadPtr = Address.translateWithInsertion(Cur, Cur->getSinglePredecessor(),
1560 *DT, NewInsts);
1561 if (!LoadPtr) {
1562 CanDoPRE = false;
1563 break;
1564 }
1565 Cur = Cur->getSinglePredecessor();
1566 }
1567
1568 if (LoadPtr) {
1569 PHITransAddr Address(LoadPtr, DL, AC);
1570 LoadPtr = Address.translateWithInsertion(LoadBB, UnavailablePred, *DT,
1571 NewInsts);
1572 }
1573 // If we couldn't find or insert a computation of this phi translated value,
1574 // we fail PRE.
1575 if (!LoadPtr) {
1576 LLVM_DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
1577 << *Load->getPointerOperand() << "\n");
1578 CanDoPRE = false;
1579 break;
1580 }
1581
1582 PredLoad.second = LoadPtr;
1583 }
1584
1585 if (!CanDoPRE) {
1586 while (!NewInsts.empty()) {
1587 // Erase instructions generated by the failed PHI translation before
1588 // trying to number them. PHI translation might insert instructions
1589 // in basic blocks other than the current one, and we delete them
1590 // directly, as markInstructionForDeletion only allows removing from the
1591 // current basic block.
1592 NewInsts.pop_back_val()->eraseFromParent();
1593 }
1594 // HINT: Don't revert the edge-splitting as following transformation may
1595 // also need to split these critical edges.
1596 return !CriticalEdgePred.empty();
1597 }
1598
1599 // Okay, we can eliminate this load by inserting a reload in the predecessor
1600 // and using PHI construction to get the value in the other predecessors, do
1601 // it.
1602 LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *Load << '\n');
1603 LLVM_DEBUG(if (!NewInsts.empty()) dbgs() << "INSERTED " << NewInsts.size()
1604 << " INSTS: " << *NewInsts.back()
1605 << '\n');
1606
1607 // Assign value numbers to the new instructions.
1608 for (Instruction *I : NewInsts) {
1609 // Instructions that have been inserted in predecessor(s) to materialize
1610 // the load address do not retain their original debug locations. Doing
1611 // so could lead to confusing (but correct) source attributions.
1612 I->updateLocationAfterHoist();
1613
1614 // FIXME: We really _ought_ to insert these value numbers into their
1615 // parent's availability map. However, in doing so, we risk getting into
1616 // ordering issues. If a block hasn't been processed yet, we would be
1617 // marking a value as AVAIL-IN, which isn't what we intend.
1618 VN.lookupOrAdd(I);
1619 }
1620
1621 eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, PredLoads);
1622 ++NumPRELoad;
1623 return true;
1624}
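// A minimal before/after sketch of the transformation above (illustrative
// only; the block and value names are invented):
//
//   avail:                             ; value already available here
//     store i32 42, ptr %p
//     br label %merge
//   unavail:                           ; load PRE inserts "%v.pre = load"
//     br label %merge                  ; here (after splitting any critical
//   merge:                             ; edge into %merge)
//     %v = load i32, ptr %p            ; becomes:
//                                      ;   %v = phi i32 [ 42, %avail ],
//                                      ;                [ %v.pre, %unavail ]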
1625
1626bool GVNPass::performLoopLoadPRE(LoadInst *Load,
1627 AvailValInBlkVect &ValuesPerBlock,
1628 UnavailBlkVect &UnavailableBlocks) {
1629 if (!LI)
1630 return false;
1631
1632 const Loop *L = LI->getLoopFor(Load->getParent());
1633 // TODO: Generalize to other loop blocks that dominate the latch.
1634 if (!L || L->getHeader() != Load->getParent())
1635 return false;
1636
1637 BasicBlock *Preheader = L->getLoopPreheader();
1638 BasicBlock *Latch = L->getLoopLatch();
1639 if (!Preheader || !Latch)
1640 return false;
1641
1642 Value *LoadPtr = Load->getPointerOperand();
1643 // Must be available in preheader.
1644 if (!L->isLoopInvariant(LoadPtr))
1645 return false;
1646
1647   // We plan to hoist the load to the preheader without introducing a new
1648   // fault. In order to do it, we need to prove that we cannot side-exit the
1649   // loop between first entering the loop header and executing the load.
1650 if (ICF->isDominatedByICFIFromSameBlock(Load))
1651 return false;
1652
1653 BasicBlock *LoopBlock = nullptr;
1654 for (auto *Blocker : UnavailableBlocks) {
1655 // Blockers from outside the loop are handled in preheader.
1656 if (!L->contains(Blocker))
1657 continue;
1658
1659     // Only allow one loop block. The loop header is executed at least as
1660     // often as any other loop block, and likely much more often. But with
1661     // multiple loop blocks, we would need extra information (such as block
1662     // frequency info) to understand whether it is profitable to PRE into
1663     // multiple loop blocks.
1664 if (LoopBlock)
1665 return false;
1666
1667     // Do not sink into inner loops. This may be unprofitable.
1668 if (L != LI->getLoopFor(Blocker))
1669 return false;
1670
1671 // Blocks that dominate the latch execute on every single iteration, maybe
1672 // except the last one. So PREing into these blocks doesn't make much sense
1673 // in most cases. But the blocks that do not necessarily execute on each
1674 // iteration are sometimes much colder than the header, and this is when
1675 // PRE is potentially profitable.
1676 if (DT->dominates(Blocker, Latch))
1677 return false;
1678
1679 // Make sure that the terminator itself doesn't clobber.
1680 if (Blocker->getTerminator()->mayWriteToMemory())
1681 return false;
1682
1683 LoopBlock = Blocker;
1684 }
1685
1686 if (!LoopBlock)
1687 return false;
1688
1689   // Make sure the memory at this pointer cannot be freed, so that we can
1690   // safely reload from it after the clobber.
1691 if (LoadPtr->canBeFreed())
1692 return false;
1693
1694 // TODO: Support critical edge splitting if blocker has more than 1 successor.
1695 MapVector<BasicBlock *, Value *> AvailableLoads;
1696 AvailableLoads[LoopBlock] = LoadPtr;
1697 AvailableLoads[Preheader] = LoadPtr;
1698
1699 LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOOP LOAD: " << *Load << '\n');
1700 eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, AvailableLoads);
1701 ++NumPRELoopLoad;
1702 return true;
1703}
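// An illustrative shape this routine handles (names invented): a load from a
// loop-invariant pointer in the header, clobbered only in a cold block that
// does not dominate the latch. The reload lands in the preheader and in the
// clobbering block, and the header load becomes a phi of the two:
//
//   preheader:                         ; "%v.pre = load i32, ptr %p" inserted
//   header:
//     %v = load i32, ptr %p            ; replaced by a phi
//     br i1 %c, label %cold, label %latch
//   cold:                              ; rare block that may clobber %p
//     call void @clobber(ptr %p)       ; reload inserted after the clobber
//     br label %latch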
1704
1705 static void reportLoadElim(LoadInst *Load, Value *AvailableValue,
1706                            OptimizationRemarkEmitter *ORE) {
1707   using namespace ore;
1708
1709 ORE->emit([&]() {
1710 return OptimizationRemark(DEBUG_TYPE, "LoadElim", Load)
1711 << "load of type " << NV("Type", Load->getType()) << " eliminated"
1712 << setExtraArgs() << " in favor of "
1713 << NV("InfavorOfValue", AvailableValue);
1714 });
1715}
1716
1717/// Attempt to eliminate a load whose dependencies are
1718/// non-local by performing PHI construction.
1719bool GVNPass::processNonLocalLoad(LoadInst *Load) {
1720   // Non-local speculations are not allowed under ASan or HWASan.
1721 if (Load->getParent()->getParent()->hasFnAttribute(
1722 Attribute::SanitizeAddress) ||
1723 Load->getParent()->getParent()->hasFnAttribute(
1724 Attribute::SanitizeHWAddress))
1725 return false;
1726
1727 // Step 1: Find the non-local dependencies of the load.
1728 LoadDepVect Deps;
1729 MD->getNonLocalPointerDependency(Load, Deps);
1730
1731   // If we had to process more than MaxNumDeps (default 100) dependencies
1732   // to find them, this load isn't worth worrying about. Optimizing it will
1733   // be too expensive.
1734 unsigned NumDeps = Deps.size();
1735 if (NumDeps > MaxNumDeps)
1736 return false;
1737
1738 // If we had a phi translation failure, we'll have a single entry which is a
1739 // clobber in the current block. Reject this early.
1740 if (NumDeps == 1 &&
1741 !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
1742 LLVM_DEBUG(dbgs() << "GVN: non-local load "; Load->printAsOperand(dbgs());
1743 dbgs() << " has unknown dependencies\n";);
1744 return false;
1745 }
1746
1747 bool Changed = false;
1748 // If this load follows a GEP, see if we can PRE the indices before analyzing.
1749 if (GetElementPtrInst *GEP =
1750 dyn_cast<GetElementPtrInst>(Load->getOperand(0))) {
1751 for (Use &U : GEP->indices())
1752 if (Instruction *I = dyn_cast<Instruction>(U.get()))
1753 Changed |= performScalarPRE(I);
1754 }
1755
1756 // Step 2: Analyze the availability of the load
1757 AvailValInBlkVect ValuesPerBlock;
1758 UnavailBlkVect UnavailableBlocks;
1759 AnalyzeLoadAvailability(Load, Deps, ValuesPerBlock, UnavailableBlocks);
1760
1761 // If we have no predecessors that produce a known value for this load, exit
1762 // early.
1763 if (ValuesPerBlock.empty())
1764 return Changed;
1765
1766   // Step 3: Eliminate full redundancy.
1767 //
1768 // If all of the instructions we depend on produce a known value for this
1769 // load, then it is fully redundant and we can use PHI insertion to compute
1770 // its value. Insert PHIs and remove the fully redundant value now.
1771 if (UnavailableBlocks.empty()) {
1772 LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *Load << '\n');
1773
1774 // Perform PHI construction.
1775 Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
1776 Load->replaceAllUsesWith(V);
1777
1778 if (isa<PHINode>(V))
1779 V->takeName(Load);
1780 if (Instruction *I = dyn_cast<Instruction>(V))
1781 // If instruction I has debug info, then we should not update it.
1782 // Also, if I has a null DebugLoc, then it is still potentially incorrect
1783 // to propagate Load's DebugLoc because Load may not post-dominate I.
1784 if (Load->getDebugLoc() && Load->getParent() == I->getParent())
1785 I->setDebugLoc(Load->getDebugLoc());
1786 if (V->getType()->isPtrOrPtrVectorTy())
1787       MD->invalidateCachedPointerInfo(V);
1788     markInstructionForDeletion(Load);
1789     ++NumGVNLoad;
1790 reportLoadElim(Load, V, ORE);
1791 return true;
1792 }
1793
1794 // Step 4: Eliminate partial redundancy.
1795 if (!isPREEnabled() || !isLoadPREEnabled())
1796 return Changed;
1797 if (!isLoadInLoopPREEnabled() && LI && LI->getLoopFor(Load->getParent()))
1798 return Changed;
1799
1800 if (performLoopLoadPRE(Load, ValuesPerBlock, UnavailableBlocks) ||
1801 PerformLoadPRE(Load, ValuesPerBlock, UnavailableBlocks))
1802 return true;
1803
1804 return Changed;
1805}
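// Illustrative fully-redundant case for step 3 (names invented): every
// predecessor already produces the value, so the load becomes a phi and no
// new load is inserted:
//
//   left:
//     store i32 %a, ptr %p
//     br label %merge
//   right:
//     %b = load i32, ptr %p
//     br label %merge
//   merge:
//     %v = load i32, ptr %p            ; becomes:
//                                      ;   %v = phi i32 [ %a, %left ],
//                                      ;                [ %b, %right ]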
1806
1807 static bool impliesEquivalanceIfTrue(CmpInst *Cmp) {
1808   if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_EQ)
1809 return true;
1810
1811 // Floating point comparisons can be equal, but not equivalent. Cases:
1812 // NaNs for unordered operators
1813 // +0.0 vs 0.0 for all operators
1814 if (Cmp->getPredicate() == CmpInst::Predicate::FCMP_OEQ ||
1815 (Cmp->getPredicate() == CmpInst::Predicate::FCMP_UEQ &&
1816 Cmp->getFastMathFlags().noNaNs())) {
1817 Value *LHS = Cmp->getOperand(0);
1818 Value *RHS = Cmp->getOperand(1);
1819 // If we can prove either side non-zero, then equality must imply
1820 // equivalence.
1821 // FIXME: We should do this optimization if 'no signed zeros' is
1822 // applicable via an instruction-level fast-math-flag or some other
1823 // indicator that relaxed FP semantics are being used.
1824 if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())
1825 return true;
1826 if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())
1827       return true;
1828 // TODO: Handle vector floating point constants
1829 }
1830 return false;
1831}
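// Worked example of why FCMP_OEQ alone is not enough: "fcmp oeq double 0.0,
// -0.0" evaluates to true, yet +0.0 and -0.0 are not equivalent -- e.g.
// "fdiv double 1.0, %x" yields +infinity for one and -infinity for the
// other -- so substituting one operand for the other would be wrong. Proving
// that either operand is a non-zero constant rules this case out, which is
// exactly what the isZero() checks above do.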
1832
1833 static bool impliesEquivalanceIfFalse(CmpInst *Cmp) {
1834   if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_NE)
1835 return true;
1836
1837   // Floating point comparisons can be equal, but not equivalent. Cases:
1838 // NaNs for unordered operators
1839 // +0.0 vs 0.0 for all operators
1840 if ((Cmp->getPredicate() == CmpInst::Predicate::FCMP_ONE &&
1841 Cmp->getFastMathFlags().noNaNs()) ||
1842 Cmp->getPredicate() == CmpInst::Predicate::FCMP_UNE) {
1843 Value *LHS = Cmp->getOperand(0);
1844 Value *RHS = Cmp->getOperand(1);
1845 // If we can prove either side non-zero, then equality must imply
1846 // equivalence.
1847 // FIXME: We should do this optimization if 'no signed zeros' is
1848 // applicable via an instruction-level fast-math-flag or some other
1849 // indicator that relaxed FP semantics are being used.
1850 if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())
1851 return true;
1852 if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())
1853       return true;
1854 // TODO: Handle vector floating point constants
1855 }
1856 return false;
1857}
1858
1859
1860static bool hasUsersIn(Value *V, BasicBlock *BB) {
1861 return llvm::any_of(V->users(), [BB](User *U) {
1862 auto *I = dyn_cast<Instruction>(U);
1863 return I && I->getParent() == BB;
1864 });
1865}
1866
1867bool GVNPass::processAssumeIntrinsic(AssumeInst *IntrinsicI) {
1868 Value *V = IntrinsicI->getArgOperand(0);
1869
1870 if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
1871 if (Cond->isZero()) {
1872 Type *Int8Ty = Type::getInt8Ty(V->getContext());
1873       // Insert a new store to null before the assume intrinsic to indicate
1874       // that this code is not reachable. FIXME: We could insert an unreachable
1875       // instruction directly because we can modify the CFG.
1876       auto *NewS = new StoreInst(PoisonValue::get(Int8Ty),
1877                                  Constant::getNullValue(Int8Ty->getPointerTo()),
1878                                  IntrinsicI);
1879 if (MSSAU) {
1880 const MemoryUseOrDef *FirstNonDom = nullptr;
1881 const auto *AL =
1882 MSSAU->getMemorySSA()->getBlockAccesses(IntrinsicI->getParent());
1883
1884 // If there are accesses in the current basic block, find the first one
1885 // that does not come before NewS. The new memory access is inserted
1886 // after the found access or before the terminator if no such access is
1887 // found.
1888 if (AL) {
1889 for (const auto &Acc : *AL) {
1890 if (auto *Current = dyn_cast<MemoryUseOrDef>(&Acc))
1891 if (!Current->getMemoryInst()->comesBefore(NewS)) {
1892 FirstNonDom = Current;
1893 break;
1894 }
1895 }
1896 }
1897
1898       // This added store is to null, so it will never be executed and we can
1899 // just use the LiveOnEntry def as defining access.
1900 auto *NewDef =
1901 FirstNonDom ? MSSAU->createMemoryAccessBefore(
1902 NewS, MSSAU->getMemorySSA()->getLiveOnEntryDef(),
1903 const_cast<MemoryUseOrDef *>(FirstNonDom))
1904 : MSSAU->createMemoryAccessInBB(
1905 NewS, MSSAU->getMemorySSA()->getLiveOnEntryDef(),
1906 NewS->getParent(), MemorySSA::BeforeTerminator);
1907
1908 MSSAU->insertDef(cast<MemoryDef>(NewDef), /*RenameUses=*/false);
1909 }
1910 }
1911 if (isAssumeWithEmptyBundle(*IntrinsicI))
1912 markInstructionForDeletion(IntrinsicI);
1913 return false;
1914 } else if (isa<Constant>(V)) {
1915     // If it's not false and is a constant, it must evaluate to true. That
1916     // makes our assume an assume(true), which is pointless, so there is
1917     // nothing more to do here.
1918 return false;
1919 }
1920
1921 Constant *True = ConstantInt::getTrue(V->getContext());
1922 bool Changed = false;
1923
1924 for (BasicBlock *Successor : successors(IntrinsicI->getParent())) {
1925 BasicBlockEdge Edge(IntrinsicI->getParent(), Successor);
1926
1927     // This property is only true in dominated successors; propagateEquality
1928     // will check dominance for us.
1929 Changed |= propagateEquality(V, True, Edge, false);
1930 }
1931
1932 // We can replace assume value with true, which covers cases like this:
1933 // call void @llvm.assume(i1 %cmp)
1934 // br i1 %cmp, label %bb1, label %bb2 ; will change %cmp to true
1935 ReplaceOperandsWithMap[V] = True;
1936
1937 // Similarly, after assume(!NotV) we know that NotV == false.
1938 Value *NotV;
1939 if (match(V, m_Not(m_Value(NotV))))
1940 ReplaceOperandsWithMap[NotV] = ConstantInt::getFalse(V->getContext());
1941
1942 // If we find an equality fact, canonicalize all dominated uses in this block
1943   // to one of the two values. We heuristically choose the "oldest" of the
1944 // two where age is determined by value number. (Note that propagateEquality
1945 // above handles the cross block case.)
1946 //
1947   // Key cases to cover are:
1948 // 1)
1949 // %cmp = fcmp oeq float 3.000000e+00, %0 ; const on lhs could happen
1950 // call void @llvm.assume(i1 %cmp)
1951 // ret float %0 ; will change it to ret float 3.000000e+00
1952 // 2)
1953 // %load = load float, float* %addr
1954 // %cmp = fcmp oeq float %load, %0
1955 // call void @llvm.assume(i1 %cmp)
1956 // ret float %load ; will change it to ret float %0
1957 if (auto *CmpI = dyn_cast<CmpInst>(V)) {
1958 if (impliesEquivalanceIfTrue(CmpI)) {
1959 Value *CmpLHS = CmpI->getOperand(0);
1960 Value *CmpRHS = CmpI->getOperand(1);
1961 // Heuristically pick the better replacement -- the choice of heuristic
1962 // isn't terribly important here, but the fact we canonicalize on some
1963 // replacement is for exposing other simplifications.
1964 // TODO: pull this out as a helper function and reuse w/existing
1965 // (slightly different) logic.
1966 if (isa<Constant>(CmpLHS) && !isa<Constant>(CmpRHS))
1967 std::swap(CmpLHS, CmpRHS);
1968 if (!isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))
1969 std::swap(CmpLHS, CmpRHS);
1970 if ((isa<Argument>(CmpLHS) && isa<Argument>(CmpRHS)) ||
1971 (isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))) {
1972 // Move the 'oldest' value to the right-hand side, using the value
1973 // number as a proxy for age.
1974 uint32_t LVN = VN.lookupOrAdd(CmpLHS);
1975 uint32_t RVN = VN.lookupOrAdd(CmpRHS);
1976 if (LVN < RVN)
1977 std::swap(CmpLHS, CmpRHS);
1978 }
1979
1980       // Handle the degenerate case where we either haven't pruned a dead path
1981       // or removed a trivial assume yet.
1982 if (isa<Constant>(CmpLHS) && isa<Constant>(CmpRHS))
1983 return Changed;
1984
1985 LLVM_DEBUG(dbgs() << "Replacing dominated uses of "
1986 << *CmpLHS << " with "
1987 << *CmpRHS << " in block "
1988 << IntrinsicI->getParent()->getName() << "\n");
1989
1990
1991 // Setup the replacement map - this handles uses within the same block
1992 if (hasUsersIn(CmpLHS, IntrinsicI->getParent()))
1993 ReplaceOperandsWithMap[CmpLHS] = CmpRHS;
1994
1995 // NOTE: The non-block local cases are handled by the call to
1996 // propagateEquality above; this block is just about handling the block
1997       // local cases. TODO: There's a bunch of logic in propagateEquality which
1998 // isn't duplicated for the block local case, can we share it somehow?
1999 }
2000 }
2001 return Changed;
2002}
2003
2004 static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
2005   patchReplacementInstruction(I, Repl);
2006   I->replaceAllUsesWith(Repl);
2007}
2008
2009/// Attempt to eliminate a load, first by eliminating it
2010/// locally, and then attempting non-local elimination if that fails.
2011bool GVNPass::processLoad(LoadInst *L) {
2012 if (!MD)
2013 return false;
2014
2015 // This code hasn't been audited for ordered or volatile memory access
2016 if (!L->isUnordered())
2017 return false;
2018
2019 if (L->use_empty()) {
2020     markInstructionForDeletion(L);
2021     return true;
2022 }
2023
2024 // ... to a pointer that has been loaded from before...
2025 MemDepResult Dep = MD->getDependency(L);
2026
2027 // If it is defined in another block, try harder.
2028 if (Dep.isNonLocal())
2029 return processNonLocalLoad(L);
2030
2031 // Only handle the local case below
2032 if (!Dep.isLocal()) {
2033 // This might be a NonFuncLocal or an Unknown
2034 LLVM_DEBUG(
2035 // fast print dep, using operator<< on instruction is too slow.
2036 dbgs() << "GVN: load "; L->printAsOperand(dbgs());
2037 dbgs() << " has unknown dependence\n";);
2038 return false;
2039 }
2040
2041 auto AV = AnalyzeLoadAvailability(L, Dep, L->getPointerOperand());
2042 if (!AV)
2043 return false;
2044
2045 Value *AvailableValue = AV->MaterializeAdjustedValue(L, L, *this);
2046
2047 // Replace the load!
2048   patchAndReplaceAllUsesWith(L, AvailableValue);
2049   markInstructionForDeletion(L);
2050   if (MSSAU)
2051 MSSAU->removeMemoryAccess(L);
2052 ++NumGVNLoad;
2053   reportLoadElim(L, AvailableValue, ORE);
2054   // Tell MDA to reexamine the reused pointer since we might have more
2055 // information after forwarding it.
2056 if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy())
2057     MD->invalidateCachedPointerInfo(AvailableValue);
2058   return true;
2059}
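// Illustrative local case (names invented): the dependency is a store in the
// same block, so the stored value is forwarded and the load is deleted:
//
//   store i32 %x, ptr %p
//   %v = load i32, ptr %p              ; uses of %v rewritten to %x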
2060
2061/// Return a pair the first field showing the value number of \p Exp and the
2062/// second field showing whether it is a value number newly created.
2063std::pair<uint32_t, bool>
2064GVNPass::ValueTable::assignExpNewValueNum(Expression &Exp) {
2065 uint32_t &e = expressionNumbering[Exp];
2066 bool CreateNewValNum = !e;
2067 if (CreateNewValNum) {
2068 Expressions.push_back(Exp);
2069 if (ExprIdx.size() < nextValueNumber + 1)
2070 ExprIdx.resize(nextValueNumber * 2);
2071 e = nextValueNumber;
2072 ExprIdx[nextValueNumber++] = nextExprNumber++;
2073 }
2074 return {e, CreateNewValNum};
2075}
2076
2077/// Return whether all the values related with the same \p num are
2078/// defined in \p BB.
2079bool GVNPass::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
2080 GVNPass &Gvn) {
2081 LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
2082 while (Vals && Vals->BB == BB)
2083 Vals = Vals->Next;
2084 return !Vals;
2085}
2086
2087/// Wrap phiTranslateImpl to provide caching functionality.
2088uint32_t GVNPass::ValueTable::phiTranslate(const BasicBlock *Pred,
2089 const BasicBlock *PhiBlock,
2090 uint32_t Num, GVNPass &Gvn) {
2091 auto FindRes = PhiTranslateTable.find({Num, Pred});
2092 if (FindRes != PhiTranslateTable.end())
2093 return FindRes->second;
2094 uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
2095 PhiTranslateTable.insert({{Num, Pred}, NewNum});
2096 return NewNum;
2097}
2098
2099 // Return true if the value numbers \p Num and \p NewNum have equal value.
2100// Return false if the result is unknown.
2101bool GVNPass::ValueTable::areCallValsEqual(uint32_t Num, uint32_t NewNum,
2102 const BasicBlock *Pred,
2103 const BasicBlock *PhiBlock,
2104 GVNPass &Gvn) {
2105 CallInst *Call = nullptr;
2106 LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
2107 while (Vals) {
2108 Call = dyn_cast<CallInst>(Vals->Val);
2109 if (Call && Call->getParent() == PhiBlock)
2110 break;
2111 Vals = Vals->Next;
2112 }
2113
2114 if (AA->doesNotAccessMemory(Call))
2115 return true;
2116
2117 if (!MD || !AA->onlyReadsMemory(Call))
2118 return false;
2119
2120 MemDepResult local_dep = MD->getDependency(Call);
2121 if (!local_dep.isNonLocal())
2122 return false;
2123
2124   const MemoryDependenceResults::NonLocalDepInfo &deps =
2125       MD->getNonLocalCallDependency(Call);
2126
2127 // Check to see if the Call has no function local clobber.
2128 for (const NonLocalDepEntry &D : deps) {
2129 if (D.getResult().isNonFuncLocal())
2130 return true;
2131 }
2132 return false;
2133}
2134
2135/// Translate value number \p Num using phis, so that it has the values of
2136/// the phis in BB.
2137uint32_t GVNPass::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
2138 const BasicBlock *PhiBlock,
2139 uint32_t Num, GVNPass &Gvn) {
2140 if (PHINode *PN = NumberingPhi[Num]) {
2141 for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
2142 if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
2143 if (uint32_t TransVal = lookup(PN->getIncomingValue(i), false))
2144 return TransVal;
2145 }
2146 return Num;
2147 }
2148
2149   // If any value related to Num is defined in a BB other than PhiBlock, it
2150   // cannot depend on a phi in PhiBlock without going through a backedge. We
2151   // can exit early in that case to save compile time.
2152 if (!areAllValsInBB(Num, PhiBlock, Gvn))
2153 return Num;
2154
2155 if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
2156 return Num;
2157 Expression Exp = Expressions[ExprIdx[Num]];
2158
2159 for (unsigned i = 0; i < Exp.varargs.size(); i++) {
2160 // For InsertValue and ExtractValue, some varargs are index numbers
2161 // instead of value numbers. Those index numbers should not be
2162 // translated.
2163 if ((i > 1 && Exp.opcode == Instruction::InsertValue) ||
2164 (i > 0 && Exp.opcode == Instruction::ExtractValue) ||
2165 (i > 1 && Exp.opcode == Instruction::ShuffleVector))
2166 continue;
2167 Exp.varargs[i] = phiTranslate(Pred, PhiBlock, Exp.varargs[i], Gvn);
2168 }
2169
2170 if (Exp.commutative) {
2171 assert(Exp.varargs.size() >= 2 && "Unsupported commutative instruction!");
2172 if (Exp.varargs[0] > Exp.varargs[1]) {
2173 std::swap(Exp.varargs[0], Exp.varargs[1]);
2174 uint32_t Opcode = Exp.opcode >> 8;
2175 if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
2176 Exp.opcode = (Opcode << 8) |
2177                    CmpInst::getSwappedPredicate(
2178                        static_cast<CmpInst::Predicate>(Exp.opcode & 255));
2179 }
2180 }
2181
2182 if (uint32_t NewNum = expressionNumbering[Exp]) {
2183 if (Exp.opcode == Instruction::Call && NewNum != Num)
2184 return areCallValsEqual(Num, NewNum, Pred, PhiBlock, Gvn) ? NewNum : Num;
2185 return NewNum;
2186 }
2187 return Num;
2188}
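// Illustrative phi translation (names invented): given
//
//   phiblock:
//     %phi = phi i32 [ %a, %pred ], ...
//     %use = add i32 %phi, 1
//
// translating the value number of %use across the edge %pred -> %phiblock
// rewrites the operand %phi to %a, yielding the value number of
// "add i32 %a, 1" -- i.e. the number a leader in %pred would have.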
2189
2190/// Erase stale entry from phiTranslate cache so phiTranslate can be computed
2191/// again.
2192void GVNPass::ValueTable::eraseTranslateCacheEntry(
2193 uint32_t Num, const BasicBlock &CurrBlock) {
2194 for (const BasicBlock *Pred : predecessors(&CurrBlock))
2195 PhiTranslateTable.erase({Num, Pred});
2196}
2197
2198// In order to find a leader for a given value number at a
2199// specific basic block, we first obtain the list of all Values for that number,
2200// and then scan the list to find one whose block dominates the block in
2201// question. This is fast because dominator tree queries consist of only
2202// a few comparisons of DFS numbers.
2203Value *GVNPass::findLeader(const BasicBlock *BB, uint32_t num) {
2204 LeaderTableEntry Vals = LeaderTable[num];
2205 if (!Vals.Val) return nullptr;
2206
2207 Value *Val = nullptr;
2208 if (DT->dominates(Vals.BB, BB)) {
2209 Val = Vals.Val;
2210 if (isa<Constant>(Val)) return Val;
2211 }
2212
2213 LeaderTableEntry* Next = Vals.Next;
2214 while (Next) {
2215 if (DT->dominates(Next->BB, BB)) {
2216 if (isa<Constant>(Next->Val)) return Next->Val;
2217 if (!Val) Val = Next->Val;
2218 }
2219
2220 Next = Next->Next;
2221 }
2222
2223 return Val;
2224}
2225
2226/// There is an edge from 'Src' to 'Dst'. Return
2227/// true if every path from the entry block to 'Dst' passes via this edge. In
2228/// particular 'Dst' must not be reachable via another edge from 'Src'.
2229 static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
2230                                        DominatorTree *DT) {
2231 // While in theory it is interesting to consider the case in which Dst has
2232 // more than one predecessor, because Dst might be part of a loop which is
2233 // only reachable from Src, in practice it is pointless since at the time
2234 // GVN runs all such loops have preheaders, which means that Dst will have
2235 // been changed to have only one predecessor, namely Src.
2236 const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
2237 assert((!Pred || Pred == E.getStart()) &&
2238 "No edge between these basic blocks!");
2239 return Pred != nullptr;
2240}
2241
2242void GVNPass::assignBlockRPONumber(Function &F) {
2243 BlockRPONumber.clear();
2244 uint32_t NextBlockNumber = 1;
2245   ReversePostOrderTraversal<Function *> RPOT(&F);
2246   for (BasicBlock *BB : RPOT)
2247 BlockRPONumber[BB] = NextBlockNumber++;
2248 InvalidBlockRPONumbers = false;
2249}
2250
2251bool GVNPass::replaceOperandsForInBlockEquality(Instruction *Instr) const {
2252 bool Changed = false;
2253 for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) {
2254 Value *Operand = Instr->getOperand(OpNum);
2255 auto it = ReplaceOperandsWithMap.find(Operand);
2256 if (it != ReplaceOperandsWithMap.end()) {
2257 LLVM_DEBUG(dbgs() << "GVN replacing: " << *Operand << " with "
2258 << *it->second << " in instruction " << *Instr << '\n');
2259 Instr->setOperand(OpNum, it->second);
2260 Changed = true;
2261 }
2262 }
2263 return Changed;
2264}
2265
2266/// The given values are known to be equal in every block
2267/// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with
2268/// 'RHS' everywhere in the scope. Returns whether a change was made.
2269/// If DominatesByEdge is false, then it means that we will propagate the RHS
2270/// value starting from the end of Root.Start.
2271bool GVNPass::propagateEquality(Value *LHS, Value *RHS,
2272 const BasicBlockEdge &Root,
2273 bool DominatesByEdge) {
2274   SmallVector<std::pair<Value*, Value*>, 4> Worklist;
2275   Worklist.push_back(std::make_pair(LHS, RHS));
2276 bool Changed = false;
2277 // For speed, compute a conservative fast approximation to
2278 // DT->dominates(Root, Root.getEnd());
2279 const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);
2280
2281 while (!Worklist.empty()) {
2282 std::pair<Value*, Value*> Item = Worklist.pop_back_val();
2283 LHS = Item.first; RHS = Item.second;
2284
2285 if (LHS == RHS)
2286 continue;
2287 assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");
2288
2289 // Don't try to propagate equalities between constants.
2290 if (isa<Constant>(LHS) && isa<Constant>(RHS))
2291 continue;
2292
2293 // Prefer a constant on the right-hand side, or an Argument if no constants.
2294 if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
2295 std::swap(LHS, RHS);
2296 assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!");
2297
2298 // If there is no obvious reason to prefer the left-hand side over the
2299 // right-hand side, ensure the longest lived term is on the right-hand side,
2300 // so the shortest lived term will be replaced by the longest lived.
2301 // This tends to expose more simplifications.
2302 uint32_t LVN = VN.lookupOrAdd(LHS);
2303 if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
2304 (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
2305 // Move the 'oldest' value to the right-hand side, using the value number
2306 // as a proxy for age.
2307 uint32_t RVN = VN.lookupOrAdd(RHS);
2308 if (LVN < RVN) {
2309 std::swap(LHS, RHS);
2310 LVN = RVN;
2311 }
2312 }
2313
2314 // If value numbering later sees that an instruction in the scope is equal
2315 // to 'LHS' then ensure it will be turned into 'RHS'. In order to preserve
2316 // the invariant that instructions only occur in the leader table for their
2317 // own value number (this is used by removeFromLeaderTable), do not do this
2318 // if RHS is an instruction (if an instruction in the scope is morphed into
2319 // LHS then it will be turned into RHS by the next GVN iteration anyway, so
2320 // using the leader table is about compiling faster, not optimizing better).
2321 // The leader table only tracks basic blocks, not edges. Only add to if we
2322 // have the simple case where the edge dominates the end.
2323 if (RootDominatesEnd && !isa<Instruction>(RHS))
2324 addToLeaderTable(LVN, RHS, Root.getEnd());
2325
2326 // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope. As
2327 // LHS always has at least one use that is not dominated by Root, this will
2328 // never do anything if LHS has only one use.
2329 if (!LHS->hasOneUse()) {
2330 unsigned NumReplacements =
2331 DominatesByEdge
2332 ? replaceDominatedUsesWith(LHS, RHS, *DT, Root)
2333 : replaceDominatedUsesWith(LHS, RHS, *DT, Root.getStart());
2334
2335 Changed |= NumReplacements > 0;
2336 NumGVNEqProp += NumReplacements;
2337 // Cached information for anything that uses LHS will be invalid.
2338 if (MD)
2339         MD->invalidateCachedPointerInfo(LHS);
2340     }
2341
2342 // Now try to deduce additional equalities from this one. For example, if
2343 // the known equality was "(A != B)" == "false" then it follows that A and B
2344 // are equal in the scope. Only boolean equalities with an explicit true or
2345 // false RHS are currently supported.
2346 if (!RHS->getType()->isIntegerTy(1))
2347 // Not a boolean equality - bail out.
2348 continue;
2349 ConstantInt *CI = dyn_cast<ConstantInt>(RHS);
2350 if (!CI)
2351 // RHS neither 'true' nor 'false' - bail out.
2352 continue;
2353 // Whether RHS equals 'true'. Otherwise it equals 'false'.
2354 bool isKnownTrue = CI->isMinusOne();
2355 bool isKnownFalse = !isKnownTrue;
2356
2357 // If "A && B" is known true then both A and B are known true. If "A || B"
2358 // is known false then both A and B are known false.
2359 Value *A, *B;
2360 if ((isKnownTrue && match(LHS, m_LogicalAnd(m_Value(A), m_Value(B)))) ||
2361 (isKnownFalse && match(LHS, m_LogicalOr(m_Value(A), m_Value(B))))) {
2362 Worklist.push_back(std::make_pair(A, RHS));
2363 Worklist.push_back(std::make_pair(B, RHS));
2364 continue;
2365 }
2366
2367 // If we are propagating an equality like "(A == B)" == "true" then also
2368 // propagate the equality A == B. When propagating a comparison such as
2369 // "(A >= B)" == "true", replace all instances of "A < B" with "false".
2370 if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
2371 Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);
2372
2373 // If "A == B" is known true, or "A != B" is known false, then replace
2374 // A with B everywhere in the scope. For floating point operations, we
2375       // have to be careful since equality does not always imply equivalence.
2376 if ((isKnownTrue && impliesEquivalanceIfTrue(Cmp)) ||
2377 (isKnownFalse && impliesEquivalanceIfFalse(Cmp)))
2378 Worklist.push_back(std::make_pair(Op0, Op1));
2379
2380 // If "A >= B" is known true, replace "A < B" with false everywhere.
2381 CmpInst::Predicate NotPred = Cmp->getInversePredicate();
2382 Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
2383 // Since we don't have the instruction "A < B" immediately to hand, work
2384 // out the value number that it would have and use that to find an
2385 // appropriate instruction (if any).
2386 uint32_t NextNum = VN.getNextUnusedValueNumber();
2387 uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1);
2388 // If the number we were assigned was brand new then there is no point in
2389 // looking for an instruction realizing it: there cannot be one!
2390 if (Num < NextNum) {
2391 Value *NotCmp = findLeader(Root.getEnd(), Num);
2392 if (NotCmp && isa<Instruction>(NotCmp)) {
2393 unsigned NumReplacements =
2394 DominatesByEdge
2395 ? replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root)
2396 : replaceDominatedUsesWith(NotCmp, NotVal, *DT,
2397 Root.getStart());
2398 Changed |= NumReplacements > 0;
2399 NumGVNEqProp += NumReplacements;
2400 // Cached information for anything that uses NotCmp will be invalid.
2401 if (MD)
2402 MD->invalidateCachedPointerInfo(NotCmp);
2403 }
2404 }
2405 // Ensure that any instruction in scope that gets the "A < B" value number
2406 // is replaced with false.
2407 // The leader table only tracks basic blocks, not edges. Only add to if we
2408 // have the simple case where the edge dominates the end.
2409 if (RootDominatesEnd)
2410 addToLeaderTable(Num, NotVal, Root.getEnd());
2411
2412 continue;
2413 }
2414 }
2415
2416 return Changed;
2417}
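// Illustrative propagation (names invented): given
//
//   %cmp = icmp eq i32 %x, 42
//   br i1 %cmp, label %then, label %else
//
// every use of %x dominated by the %then edge is rewritten to 42, and any
// instruction in that scope carrying the value number of
// "icmp ne i32 %x, 42" is replaced with false.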
2418
2419/// When calculating availability, handle an instruction
2420/// by inserting it into the appropriate sets
2421bool GVNPass::processInstruction(Instruction *I) {
2422 // Ignore dbg info intrinsics.
2423 if (isa<DbgInfoIntrinsic>(I))
2424 return false;
2425
2426 // If the instruction can be easily simplified then do so now in preference
2427 // to value numbering it. Value numbering often exposes redundancies, for
2428 // example if it determines that %y is equal to %x then the instruction
2429 // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
2430 const DataLayout &DL = I->getModule()->getDataLayout();
2431 if (Value *V = simplifyInstruction(I, {DL, TLI, DT, AC})) {
2432 bool Changed = false;
2433 if (!I->use_empty()) {
2434 // Simplification can cause a special instruction to become not special.
2435 // For example, devirtualization to a willreturn function.
2436 ICF->removeUsersOf(I);
2437 I->replaceAllUsesWith(V);
2438 Changed = true;
2439 }
2440 if (isInstructionTriviallyDead(I, TLI)) {
2441       markInstructionForDeletion(I);
2442       Changed = true;
2443 }
2444 if (Changed) {
2445 if (MD && V->getType()->isPtrOrPtrVectorTy())
2446         MD->invalidateCachedPointerInfo(V);
2447       ++NumGVNSimpl;
2448 return true;
2449 }
2450 }
2451
2452 if (auto *Assume = dyn_cast<AssumeInst>(I))
2453 return processAssumeIntrinsic(Assume);
2454
2455 if (LoadInst *Load = dyn_cast<LoadInst>(I)) {
2456 if (processLoad(Load))
2457 return true;
2458
2459 unsigned Num = VN.lookupOrAdd(Load);
2460 addToLeaderTable(Num, Load, Load->getParent());
2461 return false;
2462 }
2463
2464 // For conditional branches, we can perform simple conditional propagation on
2465 // the condition value itself.
2466 if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
2467 if (!BI->isConditional())
2468 return false;
2469
2470 if (isa<Constant>(BI->getCondition()))
2471 return processFoldableCondBr(BI);
2472
2473 Value *BranchCond = BI->getCondition();
2474 BasicBlock *TrueSucc = BI->getSuccessor(0);
2475 BasicBlock *FalseSucc = BI->getSuccessor(1);
2476 // Avoid multiple edges early.
2477 if (TrueSucc == FalseSucc)
2478 return false;
2479
2480 BasicBlock *Parent = BI->getParent();
2481 bool Changed = false;
2482
2483     Constant *TrueVal = ConstantInt::getTrue(TrueSucc->getContext());
2484     BasicBlockEdge TrueE(Parent, TrueSucc);
2485 Changed |= propagateEquality(BranchCond, TrueVal, TrueE, true);
2486
2487     Constant *FalseVal = ConstantInt::getFalse(FalseSucc->getContext());
2488     BasicBlockEdge FalseE(Parent, FalseSucc);
2489 Changed |= propagateEquality(BranchCond, FalseVal, FalseE, true);
2490
2491 return Changed;
2492 }
2493
2494 // For switches, propagate the case values into the case destinations.
2495 if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
2496 Value *SwitchCond = SI->getCondition();
2497 BasicBlock *Parent = SI->getParent();
2498 bool Changed = false;
2499
2500 // Remember how many outgoing edges there are to every successor.
2501     SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
2502     for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i)
2503 ++SwitchEdges[SI->getSuccessor(i)];
2504
2505 for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
2506 i != e; ++i) {
2507 BasicBlock *Dst = i->getCaseSuccessor();
2508 // If there is only a single edge, propagate the case value into it.
2509 if (SwitchEdges.lookup(Dst) == 1) {
2510 BasicBlockEdge E(Parent, Dst);
2511 Changed |= propagateEquality(SwitchCond, i->getCaseValue(), E, true);
2512 }
2513 }
2514 return Changed;
2515 }
2516
2517 // Instructions with void type don't return a value, so there's
2518 // no point in trying to find redundancies in them.
2519 if (I->getType()->isVoidTy())
2520 return false;
2521
2522 uint32_t NextNum = VN.getNextUnusedValueNumber();
2523 unsigned Num = VN.lookupOrAdd(I);
2524
2525 // Allocations are always uniquely numbered, so we can save time and memory
2526 // by fast failing them.
2527 if (isa<AllocaInst>(I) || I->isTerminator() || isa<PHINode>(I)) {
2528 addToLeaderTable(Num, I, I->getParent());
2529 return false;
2530 }
2531
2532 // If the number we were assigned was a brand new VN, then we don't
2533 // need to do a lookup to see if the number already exists
2534 // somewhere in the domtree: it can't!
2535 if (Num >= NextNum) {
2536 addToLeaderTable(Num, I, I->getParent());
2537 return false;
2538 }
2539
2540 // Perform fast-path value-number based elimination of values inherited from
2541 // dominators.
2542 Value *Repl = findLeader(I->getParent(), Num);
2543 if (!Repl) {
2544 // Failure, just remember this instance for future use.
2545 addToLeaderTable(Num, I, I->getParent());
2546 return false;
2547 } else if (Repl == I) {
2548 // If I was the result of a shortcut PRE, it might already be in the table
2549 // and the best replacement for itself. Nothing to do.
2550 return false;
2551 }
2552
2553 // Remove it!
2554   patchAndReplaceAllUsesWith(I, Repl);
2555   if (MD && Repl->getType()->isPtrOrPtrVectorTy())
2556     MD->invalidateCachedPointerInfo(Repl);
2557   markInstructionForDeletion(I);
2558   return true;
2559}
2560
2561 /// This is the main transformation entry point for a function.
2562bool GVNPass::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
2563 const TargetLibraryInfo &RunTLI, AAResults &RunAA,
2564                       MemoryDependenceResults *RunMD, LoopInfo *LI,
2565                       OptimizationRemarkEmitter *RunORE, MemorySSA *MSSA) {
2566 AC = &RunAC;
2567 DT = &RunDT;
2568 VN.setDomTree(DT);
2569 TLI = &RunTLI;
2570 VN.setAliasAnalysis(&RunAA);
2571 MD = RunMD;
2572 ImplicitControlFlowTracking ImplicitCFT;
2573 ICF = &ImplicitCFT;
2574 this->LI = LI;
2575 VN.setMemDep(MD);
2576 ORE = RunORE;
2577 InvalidBlockRPONumbers = true;
2578 MemorySSAUpdater Updater(MSSA);
2579 MSSAU = MSSA ? &Updater : nullptr;
2580
2581 bool Changed = false;
2582 bool ShouldContinue = true;
2583
2584   DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
2585   // Merge unconditional branches, allowing PRE to catch more
2586   // optimization opportunities.
2587   for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
2588     bool removedBlock = MergeBlockIntoPredecessor(&BB, &DTU, LI, MSSAU, MD);
2589 if (removedBlock)
2590 ++NumGVNBlocks;
2591
2592 Changed |= removedBlock;
2593 }
2594
2595 unsigned Iteration = 0;
2596 while (ShouldContinue) {
2597 LLVM_DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
2598 (void) Iteration;
2599 ShouldContinue = iterateOnFunction(F);
2600 Changed |= ShouldContinue;
2601 ++Iteration;
2602 }
2603
2604 if (isPREEnabled()) {
2605 // Fabricate val-num for dead-code in order to suppress assertion in
2606 // performPRE().
2607 assignValNumForDeadCode();
2608 bool PREChanged = true;
2609 while (PREChanged) {
2610 PREChanged = performPRE(F);
2611 Changed |= PREChanged;
2612 }
2613 }
2614
2615 // FIXME: Should perform GVN again after PRE does something. PRE can move
2616 // computations into blocks where they become fully redundant. Note that
2617 // we can't do this until PRE's critical edge splitting updates memdep.
2618 // Actually, when this happens, we should just fully integrate PRE into GVN.
2619
2620 cleanupGlobalSets();
2621 // Do not cleanup DeadBlocks in cleanupGlobalSets() as it's called for each
2622 // iteration.
2623 DeadBlocks.clear();
2624
2625 if (MSSA && VerifyMemorySSA)
2626 MSSA->verifyMemorySSA();
2627
2628 return Changed;
2629}
2630
2631bool GVNPass::processBlock(BasicBlock *BB) {
2632 // FIXME: Kill off InstrsToErase by doing erasing eagerly in a helper function
2633 // (and incrementing BI before processing an instruction).
2634 assert(InstrsToErase.empty() &&
2635 "We expect InstrsToErase to be empty across iterations");
2636 if (DeadBlocks.count(BB))
2637 return false;
2638
2639   // Clear the map before every BB because it is valid only within a single BB.
2640 ReplaceOperandsWithMap.clear();
2641 bool ChangedFunction = false;
2642
2643 // Since we may not have visited the input blocks of the phis, we can't
2644 // use our normal hash approach for phis. Instead, simply look for
2645 // obvious duplicates. The first pass of GVN will tend to create
2646 // identical phis, and the second or later passes can eliminate them.
2647 ChangedFunction |= EliminateDuplicatePHINodes(BB);
2648
2649 for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
2650 BI != BE;) {
2651 if (!ReplaceOperandsWithMap.empty())
2652 ChangedFunction |= replaceOperandsForInBlockEquality(&*BI);
2653 ChangedFunction |= processInstruction(&*BI);
2654
2655 if (InstrsToErase.empty()) {
2656 ++BI;
2657 continue;
2658 }
2659
2660 // If we need some instructions deleted, do it now.
2661 NumGVNInstr += InstrsToErase.size();
2662
2663 // Avoid iterator invalidation.
2664 bool AtStart = BI == BB->begin();
2665 if (!AtStart)
2666 --BI;
2667
2668 for (auto *I : InstrsToErase) {
2669 assert(I->getParent() == BB && "Removing instruction from wrong block?");
2670 LLVM_DEBUG(dbgs() << "GVN removed: " << *I << '\n');
2671 salvageKnowledge(I, AC);
2672       salvageDebugInfo(*I);
2673       if (MD) MD->removeInstruction(I);
2674 if (MSSAU)
2675 MSSAU->removeMemoryAccess(I);
2676 LLVM_DEBUG(verifyRemoved(I));
2677 ICF->removeInstruction(I);
2678 I->eraseFromParent();
2679 }
2680 InstrsToErase.clear();
2681
2682 if (AtStart)
2683 BI = BB->begin();
2684 else
2685 ++BI;
2686 }
2687
2688 return ChangedFunction;
2689}
2690
2691// Instantiate an expression in a predecessor that lacked it.
2692bool GVNPass::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
2693 BasicBlock *Curr, unsigned int ValNo) {
2694 // Because we are going top-down through the block, all value numbers
2695 // will be available in the predecessor by the time we need them. Any
2696 // that weren't originally present will have been instantiated earlier
2697 // in this loop.
2698 bool success = true;
2699 for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) {
2700 Value *Op = Instr->getOperand(i);
2701 if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
2702 continue;
2703 // This could be a newly inserted instruction, in which case, we won't
2704 // find a value number, and should give up before we hurt ourselves.
2705     // FIXME: Rewrite the infrastructure to make it easier to value number
2706     // and process newly inserted instructions.
2707 if (!VN.exists(Op)) {
2708 success = false;
2709 break;
2710 }
2711 uint32_t TValNo =
2712 VN.phiTranslate(Pred, Curr, VN.lookup(Op), *this);
2713 if (Value *V = findLeader(Pred, TValNo)) {
2714 Instr->setOperand(i, V);
2715 } else {
2716 success = false;
2717 break;
2718 }
2719 }
2720
2721 // Fail out if we encounter an operand that is not available in
2722 // the PRE predecessor. This is typically because of loads which
2723 // are not value numbered precisely.
2724 if (!success)
2725 return false;
2726
2727 Instr->insertBefore(Pred->getTerminator());
2728 Instr->setName(Instr->getName() + ".pre");
2729 Instr->setDebugLoc(Instr->getDebugLoc());
2730
2731 ICF->insertInstructionTo(Instr, Pred);
2732
2733 unsigned Num = VN.lookupOrAdd(Instr);
2734 VN.add(Instr, Num);
2735
2736 // Update the availability map to include the new instruction.
2737 addToLeaderTable(Num, Instr, Pred);
2738 return true;
2739}
2740
2741bool GVNPass::performScalarPRE(Instruction *CurInst) {
2742 if (isa<AllocaInst>(CurInst) || CurInst->isTerminator() ||
2743 isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() ||
2744 CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
2745 isa<DbgInfoIntrinsic>(CurInst))
2746 return false;
2747
2748 // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from
2749 // sinking the compare again, and it would force the code generator to
2750 // move the i1 from processor flags or predicate registers into a general
2751 // purpose register.
2752 if (isa<CmpInst>(CurInst))
2753 return false;
2754
2755 // Don't do PRE on GEPs. The inserted PHI would prevent CodeGenPrepare from
2756 // sinking the addressing mode computation back to its uses. Extending the
2757 // GEP's live range increases the register pressure, and therefore it can
2758 // introduce unnecessary spills.
2759 //
2760 // This doesn't prevent Load PRE. PHI translation will make the GEP available
2761 // to the load by moving it to the predecessor block if necessary.
2762 if (isa<GetElementPtrInst>(CurInst))
2763 return false;
2764
2765 if (auto *CallB = dyn_cast<CallBase>(CurInst)) {
2766 // We don't currently value number ANY inline asm calls.
2767 if (CallB->isInlineAsm())
2768 return false;
2769 // Don't do PRE on convergent calls.
2770 if (CallB->isConvergent())
2771 return false;
2772 }
2773
2774 uint32_t ValNo = VN.lookup(CurInst);
2775
2776 // Look for the predecessors for PRE opportunities. We're
2777 // only trying to solve the basic diamond case, where
2778 // a value is computed in the successor and one predecessor,
2779 // but not the other. We also explicitly disallow cases
2780 // where the successor is its own predecessor, because they're
2781 // more complicated to get right.
2782 unsigned NumWith = 0;
2783 unsigned NumWithout = 0;
2784 BasicBlock *PREPred = nullptr;
2785 BasicBlock *CurrentBlock = CurInst->getParent();
2786
2787 // Update the RPO numbers for this function.
2788 if (InvalidBlockRPONumbers)
2789 assignBlockRPONumber(*CurrentBlock->getParent());
2790
2791   SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
2792   for (BasicBlock *P : predecessors(CurrentBlock)) {
2793     // We're not interested in PRE where blocks have predecessors that are
2794     // not reachable.
2795 if (!DT->isReachableFromEntry(P)) {
2796 NumWithout = 2;
2797 break;
2798 }
2799 // It is not safe to do PRE when P->CurrentBlock is a loop backedge.
2800 assert(BlockRPONumber.count(P) && BlockRPONumber.count(CurrentBlock) &&
2801 "Invalid BlockRPONumber map.");
2802 if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock]) {
2803 NumWithout = 2;
2804 break;
2805 }
2806
2807 uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this);
2808 Value *predV = findLeader(P, TValNo);
2809 if (!predV) {
2810 predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
2811 PREPred = P;
2812 ++NumWithout;
2813 } else if (predV == CurInst) {
2814 /* CurInst dominates this predecessor. */
2815 NumWithout = 2;
2816 break;
2817 } else {
2818 predMap.push_back(std::make_pair(predV, P));
2819 ++NumWith;
2820 }
2821 }
2822
2823 // Don't do PRE when it might increase code size, i.e. when
2824 // we would need to insert instructions in more than one pred.
2825 if (NumWithout > 1 || NumWith == 0)
2826 return false;
2827
2828 // We may have a case where all predecessors have the instruction,
2829 // and we just need to insert a phi node. Otherwise, perform
2830 // insertion.
2831 Instruction *PREInstr = nullptr;
2832
2833 if (NumWithout != 0) {
2834 if (!isSafeToSpeculativelyExecute(CurInst)) {
2835 // It is only valid to insert a new instruction if the current instruction
2836 // is always executed. An instruction with implicit control flow could
2837 // prevent us from doing it. If we cannot speculate the execution, then
2838 // PRE should be prohibited.
2839 if (ICF->isDominatedByICFIFromSameBlock(CurInst))
2840 return false;
2841 }
2842
2843 // Don't do PRE across indirect branch.
2844 if (isa<IndirectBrInst>(PREPred->getTerminator()))
2845 return false;
2846
2847 // We can't do PRE safely on a critical edge, so instead we schedule
2848 // the edge to be split and perform the PRE the next time we iterate
2849 // on the function.
2850 unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
2851 if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
2852 toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
2853 return false;
2854 }
2855 // We need to insert somewhere, so let's give it a shot
2856 PREInstr = CurInst->clone();
2857 if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
2858 // If we failed insertion, make sure we remove the instruction.
2859 LLVM_DEBUG(verifyRemoved(PREInstr));
2860 PREInstr->deleteValue();
2861 return false;
2862 }
2863 }
2864
2865 // Either we should have filled in the PRE instruction, or we should
2866 // not have needed insertions.
2867 assert(PREInstr != nullptr || NumWithout == 0);
2868
2869 ++NumGVNPRE;
2870
2871 // Create a PHI to make the value available in this block.
2872 PHINode *Phi =
2873 PHINode::Create(CurInst->getType(), predMap.size(),
2874 CurInst->getName() + ".pre-phi", &CurrentBlock->front());
2875 for (unsigned i = 0, e = predMap.size(); i != e; ++i) {
2876 if (Value *V = predMap[i].first) {
2877 // If we use an existing value in this phi, we have to patch the original
2878 // value because the phi will be used to replace a later value.
2879 patchReplacementInstruction(CurInst, V);
2880 Phi->addIncoming(V, predMap[i].second);
2881 } else
2882 Phi->addIncoming(PREInstr, PREPred);
2883 }
2884
2885 VN.add(Phi, ValNo);
2886 // After creating a new PHI for ValNo, the phi translate result for ValNo will
2887 // be changed, so erase the related stale entries in phi translate cache.
2888 VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock);
2889 addToLeaderTable(ValNo, Phi, CurrentBlock);
2890 Phi->setDebugLoc(CurInst->getDebugLoc());
2891 CurInst->replaceAllUsesWith(Phi);
2892 if (MD && Phi->getType()->isPtrOrPtrVectorTy())
2893     MD->invalidateCachedPointerInfo(Phi);
2894   VN.erase(CurInst);
2895 removeFromLeaderTable(ValNo, CurInst, CurrentBlock);
2896
2897 LLVM_DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
2898 if (MD)
2899 MD->removeInstruction(CurInst);
2900 if (MSSAU)
2901 MSSAU->removeMemoryAccess(CurInst);
2902 LLVM_DEBUG(verifyRemoved(CurInst));
2903 // FIXME: Intended to be markInstructionForDeletion(CurInst), but it causes
2904 // some assertion failures.
2905 ICF->removeInstruction(CurInst);
2906 CurInst->eraseFromParent();
2907 ++NumGVNInstr;
2908
2909 return true;
2910}
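// Illustrative diamond (names invented): the expression is available in one
// predecessor of the join but not the other, so a copy is inserted and the
// join gets a phi:
//
//   left:
//     %add = add i32 %a, %b
//     br label %join
//   right:                             ; "%add.pre = add i32 %a, %b" inserted
//     br label %join
//   join:
//     %v = add i32 %a, %b              ; becomes:
//                                      ;   %v.pre-phi = phi i32
//                                      ;     [ %add, %left ], [ %add.pre, %right ]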
2911
2912/// Perform a purely local form of PRE that looks for diamond
2913/// control flow patterns and attempts to perform simple PRE at the join point.
2914bool GVNPass::performPRE(Function &F) {
2915 bool Changed = false;
2916 for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) {
2917 // Nothing to PRE in the entry block.
2918 if (CurrentBlock == &F.getEntryBlock())
2919 continue;
2920
2921 // Don't perform PRE on an EH pad.
2922 if (CurrentBlock->isEHPad())
2923 continue;
2924
2925 for (BasicBlock::iterator BI = CurrentBlock->begin(),
2926 BE = CurrentBlock->end();
2927 BI != BE;) {
2928 Instruction *CurInst = &*BI++;
2929 Changed |= performScalarPRE(CurInst);
2930 }
2931 }
2932
2933 if (splitCriticalEdges())
2934 Changed = true;
2935
2936 return Changed;
2937}
2938
2939/// Split the critical edge connecting the given two blocks, and return
2940/// the block inserted to the critical edge.
2941BasicBlock *GVNPass::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
2942 // GVN does not require loop-simplify, do not try to preserve it if it is not
2943 // possible.
2944   BasicBlock *BB = SplitCriticalEdge(
2945       Pred, Succ,
2946 CriticalEdgeSplittingOptions(DT, LI, MSSAU).unsetPreserveLoopSimplify());
2947 if (BB) {
2948 if (MD)
2949       MD->invalidateCachedPredecessors();
2950     InvalidBlockRPONumbers = true;
2951 }
2952 return BB;
2953}
2954
2955/// Split critical edges found during the previous
2956/// iteration that may enable further optimization.
2957bool GVNPass::splitCriticalEdges() {
2958 if (toSplit.empty())
2959 return false;
2960
2961 bool Changed = false;
2962 do {
2963 std::pair<Instruction *, unsigned> Edge = toSplit.pop_back_val();
2964 Changed |= SplitCriticalEdge(Edge.first, Edge.second,
2965 CriticalEdgeSplittingOptions(DT, LI, MSSAU)) !=
2966 nullptr;
2967 } while (!toSplit.empty());
2968 if (Changed) {
2969 if (MD)
2970       MD->invalidateCachedPredecessors();
2971     InvalidBlockRPONumbers = true;
2972 }
2973 return Changed;
2974}
2975
2976/// Executes one iteration of GVN
2977bool GVNPass::iterateOnFunction(Function &F) {
2978 cleanupGlobalSets();
2979
2980 // Top-down walk of the dominator tree
2981 bool Changed = false;
2982 // Needed for value numbering with phi construction to work.
2983 // RPOT walks the graph in its constructor and will not be invalidated during
2984 // processBlock.
2985   ReversePostOrderTraversal<Function *> RPOT(&F);
2986
2987   for (BasicBlock *BB : RPOT)
2988 Changed |= processBlock(BB);
2989
2990 return Changed;
2991}
2992
2993void GVNPass::cleanupGlobalSets() {
2994 VN.clear();
2995 LeaderTable.clear();
2996 BlockRPONumber.clear();
2997 TableAllocator.Reset();
2998 ICF->clear();
2999 InvalidBlockRPONumbers = true;
3000}
3001
3002/// Verify that the specified instruction does not occur in our
3003/// internal data structures.
3004void GVNPass::verifyRemoved(const Instruction *Inst) const {
3005 VN.verifyRemoved(Inst);
3006
3007 // Walk through the value number scope to make sure the instruction isn't
3008 // ferreted away in it.
3009 for (const auto &I : LeaderTable) {
3010 const LeaderTableEntry *Node = &I.second;
3011 assert(Node->Val != Inst && "Inst still in value numbering scope!");
3012
3013 while (Node->Next) {
3014 Node = Node->Next;
3015 assert(Node->Val != Inst && "Inst still in value numbering scope!");
3016 }
3017 }
3018}
3019
3020 /// BB is declared dead, which implies that other blocks become dead as well.
3021 /// This function adds all these blocks to "DeadBlocks". For the dead blocks'
3022 /// live successors, update their phi nodes by replacing the operands
3023 /// corresponding to dead blocks with UndefVal.
3024void GVNPass::addDeadBlock(BasicBlock *BB) {
3025   SmallVector<BasicBlock *, 4> NewDead;
3026   SmallSetVector<BasicBlock *, 4> DF;
3027
3028 NewDead.push_back(BB);
3029 while (!NewDead.empty()) {
3030 BasicBlock *D = NewDead.pop_back_val();
3031 if (DeadBlocks.count(D))
3032 continue;
3033
3034 // All blocks dominated by D are dead.
3035     SmallVector<BasicBlock *, 8> Dom;
3036     DT->getDescendants(D, Dom);
3037 DeadBlocks.insert(Dom.begin(), Dom.end());
3038
3039 // Figure out the dominance-frontier(D).
3040 for (BasicBlock *B : Dom) {
3041 for (BasicBlock *S : successors(B)) {
3042 if (DeadBlocks.count(S))
3043 continue;
3044
3045 bool AllPredDead = true;
3046 for (BasicBlock *P : predecessors(S))
3047 if (!DeadBlocks.count(P)) {
3048 AllPredDead = false;
3049 break;
3050 }
3051
3052 if (!AllPredDead) {
3053 // S could be proved dead later on. That is why we don't update phi
3054 // operands at this moment.
3055 DF.insert(S);
3056 } else {
3057           // While S is not dominated by D, it is dead by now. This could
3058           // happen if S already had a dead predecessor before D was declared
3059           // dead.
3060 NewDead.push_back(S);
3061 }
3062 }
3063 }
3064 }
3065
3066 // For the dead blocks' live successors, update their phi nodes by replacing
3067 // the operands corresponding to dead blocks with UndefVal.
3068 for (BasicBlock *B : DF) {
3069 if (DeadBlocks.count(B))
3070 continue;
3071
3072 // First, split the critical edges. This might also create additional blocks
3073 // to preserve LoopSimplify form and adjust edges accordingly.
3074     SmallVector<BasicBlock *, 4> Preds(predecessors(B));
3075     for (BasicBlock *P : Preds) {
3076 if (!DeadBlocks.count(P))
3077 continue;
3078
3079       if (llvm::is_contained(successors(P), B) &&
3080           isCriticalEdge(P->getTerminator(), B)) {
3081 if (BasicBlock *S = splitCriticalEdges(P, B))
3082 DeadBlocks.insert(P = S);
3083 }
3084 }
3085
3086 // Now poison the incoming values from the dead predecessors.
3087 for (BasicBlock *P : predecessors(B)) {
3088 if (!DeadBlocks.count(P))
3089 continue;
3090 for (PHINode &Phi : B->phis()) {
3091         Phi.setIncomingValueForBlock(P, PoisonValue::get(Phi.getType()));
3092         if (MD)
3093           MD->invalidateCachedPointerInfo(&Phi);
3094       }
3095 }
3096 }
3097}
3098
3099 // If the given branch is recognized as a foldable branch (i.e. a conditional
3100 // branch with a constant condition), it performs the following analyses and
3101 // transformation.
3102 //  1) If the dead outgoing edge is a critical edge, split it. Let R be the
3103 //     target of the dead outgoing edge.
3104 //  2) Identify the set of dead blocks implied by the branch's dead outgoing
3105 //     edge. The result of this step will be {X | X is dominated by R}.
3106 //  3) Identify those blocks which have at least one dead predecessor. The
3107 //     result of this step will be dominance-frontier(R).
3108 //  4) Update the PHIs in DF(R) by replacing the operands corresponding to
3109 //     dead blocks with "UndefVal" in the hope that these PHIs will be optimized away.
3110 //
3111 // Return true iff *NEW* dead code is found.
3112bool GVNPass::processFoldableCondBr(BranchInst *BI) {
3113 if (!BI || BI->isUnconditional())
3114 return false;
3115
3116 // If a branch has two identical successors, we cannot declare either dead.
3117 if (BI->getSuccessor(0) == BI->getSuccessor(1))
3118 return false;
3119
3120 ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
3121 if (!Cond)
3122 return false;
3123
3124 BasicBlock *DeadRoot =
3125 Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0);
3126 if (DeadBlocks.count(DeadRoot))
3127 return false;
3128
3129 if (!DeadRoot->getSinglePredecessor())
3130 DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);
3131
3132 addDeadBlock(DeadRoot);
3133 return true;
3134}
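// Illustrative case (names invented): once the condition folds to a constant
// in
//
//   br i1 true, label %live, label %dead
//
// the %dead target and everything it dominates are added to DeadBlocks, and
// phis on the dominance frontier of %dead get their incoming values from the
// dead region replaced with poison.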
3135
3136 // performPRE() will trigger an assert if it comes across an instruction
3137 // without an associated val-num. As a function normally has far more live
3138 // instructions than dead ones, it makes more sense just to "fabricate" a
3139 // val-number for the dead code than to check whether each instruction is dead.
3140void GVNPass::assignValNumForDeadCode() {
3141 for (BasicBlock *BB : DeadBlocks) {
3142 for (Instruction &Inst : *BB) {
3143 unsigned ValNum = VN.lookupOrAdd(&Inst);
3144 addToLeaderTable(ValNum, &Inst, BB);
3145 }
3146 }
3147}
3148
3149 class llvm::gvn::GVNLegacyPass : public FunctionPass {
3150 public:
3151 static char ID; // Pass identification, replacement for typeid
3152
3153 explicit GVNLegacyPass(bool NoMemDepAnalysis = !GVNEnableMemDep)
3154 : FunctionPass(ID), Impl(GVNOptions().setMemDep(!NoMemDepAnalysis)) {
3155     initializeGVNLegacyPassPass(*PassRegistry::getPassRegistry());
3156   }
3157
3158 bool runOnFunction(Function &F) override {
3159 if (skipFunction(F))
3160 return false;
3161
3162 auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
3163
3164 auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>();
3165 return Impl.runImpl(
3166 F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
3167 getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
3168 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
3169 getAnalysis<AAResultsWrapperPass>().getAAResults(),
3170 Impl.isMemDepEnabled()
3171 ? &getAnalysis<MemoryDependenceWrapperPass>().getMemDep()
3172 : nullptr,
3173 LIWP ? &LIWP->getLoopInfo() : nullptr,
3174 &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(),
3175 MSSAWP ? &MSSAWP->getMSSA() : nullptr);
3176 }
3177
3178 void getAnalysisUsage(AnalysisUsage &AU) const override {
3179     AU.addRequired<AssumptionCacheTracker>();
3180     AU.addRequired<DominatorTreeWrapperPass>();
3181     AU.addRequired<TargetLibraryInfoWrapperPass>();
3182     AU.addRequired<LoopInfoWrapperPass>();
3183     if (Impl.isMemDepEnabled())
3184       AU.addRequired<MemoryDependenceWrapperPass>();
3185     AU.addRequired<AAResultsWrapperPass>();
3186     AU.addPreserved<DominatorTreeWrapperPass>();
3187     AU.addPreserved<GlobalsAAWrapperPass>();
3188     AU.addPreserved<TargetLibraryInfoWrapperPass>();
3189     AU.addPreserved<LoopInfoWrapperPass>();
3190     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
3191     AU.addPreserved<MemorySSAWrapperPass>();
3192   }
3193
3194private:
3195 GVNPass Impl;
3196};
3197
3198char GVNLegacyPass::ID = 0;
3199
3200INITIALIZE_PASS_BEGIN(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
3201 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
3202 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
3203 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
3204 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
3205 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
3206 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
3207 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
3208 INITIALIZE_PASS_END(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
3209
3210// The public interface to this file...
3211FunctionPass *llvm::createGVNPass(bool NoMemDepAnalysis) {
3212 return new GVNLegacyPass(NoMemDepAnalysis);
3213}
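// Usage sketch (hypothetical driver code, not part of GVN.cpp), assuming a
// Module M is already available:
//   llvm::legacy::FunctionPassManager FPM(&M);
//   FPM.add(llvm::createGVNPass(/*NoMemDepAnalysis=*/false));
//   FPM.doInitialization();
//   for (llvm::Function &F : M)
//     FPM.run(F);
//   FPM.doFinalization();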
This file contains the simple types necessary to represent the attributes associated with functions and their calling conventions.
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
Definition: Compiler.h:492
This file contains the declarations for the subclasses of Constant, which represent the different flavors of constant values and compile-time constant expressions.
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool hasUsersIn(Value *V, BasicBlock *BB)
Definition: GVN.cpp:1860
static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo, DominatorTree *DT, OptimizationRemarkEmitter *ORE)
Try to locate the three instructions involved in a missed load-elimination case that is due to an intervening store.
Definition: GVN.cpp:1051
static void reportLoadElim(LoadInst *Load, Value *AvailableValue, OptimizationRemarkEmitter *ORE)
Definition: GVN.cpp:1705
static cl::opt< bool > GVNEnableMemDep("enable-gvn-memdep", cl::init(true))
static cl::opt< bool > GVNEnableLoadInLoopPRE("enable-load-in-loop-pre", cl::init(true))
static cl::opt< uint32_t > MaxNumDeps("gvn-max-num-deps", cl::Hidden, cl::init(100), cl::desc("Max number of dependences to attempt Load PRE (default = 100)"))
static Value * ConstructSSAForLoadSet(LoadInst *Load, SmallVectorImpl< AvailableValueInBlock > &ValuesPerBlock, GVNPass &gvn)
Given a set of loads specified by ValuesPerBlock, construct SSA form, allowing us to eliminate Load.
Definition: GVN.cpp:937
static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E, DominatorTree *DT)
There is an edge from 'Src' to 'Dst'.
Definition: GVN.cpp:2229
static bool impliesEquivalanceIfFalse(CmpInst *Cmp)
Definition: GVN.cpp:1833
static bool IsValueFullyAvailableInBlock(BasicBlock *BB, DenseMap< BasicBlock *, AvailabilityState > &FullyAvailableBlocks)
Return true if we can prove that the value we're analyzing is fully available in the specified block.
Definition: GVN.cpp:819
static Value * findDominatingValue(const MemoryLocation &Loc, Type *LoadTy, Instruction *From, AAResults *AA)
Definition: GVN.cpp:1113
static bool isLifetimeStart(const Instruction *Inst)
Definition: GVN.cpp:1032
static cl::opt< bool > GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre", cl::init(false))
static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl)
Definition: GVN.cpp:2004
static bool impliesEquivalanceIfTrue(CmpInst *Cmp)
Definition: GVN.cpp:1807
AvailabilityState
Definition: GVN.cpp:799
@ Unavailable
We know the block is not fully available. This is a fixpoint.
@ Available
We know the block is fully available. This is a fixpoint.
@ SpeculativelyAvailable
We do not know whether the block is fully available or not, but we are currently speculating that it will be.
static cl::opt< bool > GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden)
static cl::opt< uint32_t > MaxNumVisitedInsts("gvn-max-num-visited-insts", cl::Hidden, cl::init(100), cl::desc("Max number of visited instructions when trying to find " "dominating value of select dependency (default = 100)"))
static cl::opt< uint32_t > MaxBBSpeculations("gvn-max-block-speculations", cl::Hidden, cl::init(600), cl::desc("Max number of blocks we're willing to speculate on (and recurse " "into) when deducing if a value is fully available or not in GVN " "(default = 600)"))
static bool liesBetween(const Instruction *From, Instruction *Between, const Instruction *To, DominatorTree *DT)
Assuming To can be reached from both From and Between, does Between lie on every path from From to To?
Definition: GVN.cpp:1040
static cl::opt< bool > GVNEnableLoadPRE("enable-load-pre", cl::init(true))
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redundant instructions.
#define DEBUG_TYPE
This is the interface for a simple mod/ref and alias analysis over globals.
This file implements a map that provides insertion order iteration.
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
This file contains the declarations for metadata subclasses.
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:55
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:59
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:52
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
This defines the Use class.
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Class for arbitrary precision integers.
Definition: APInt.h:75
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:620
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:793
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:774
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
Definition: ArrayRef.h:41
iterator end() const
Definition: ArrayRef.h:152
iterator begin() const
Definition: ArrayRef.h:151
This represents the llvm.assume intrinsic.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
const BasicBlock * getEnd() const
Definition: Dominators.h:116
LLVM Basic Block Representation.
Definition: BasicBlock.h:56
iterator end()
Definition: BasicBlock.h:316
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:314
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:208
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Definition: BasicBlock.cpp:284
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:112
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:87
LLVMContext & getContext() const
Get the context in which this basic block lives.
Definition: BasicBlock.cpp:35
bool isEHPad() const
Return true if this basic block is an exception handling block.
Definition: BasicBlock.h:512
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well formed.
Definition: BasicBlock.h:127
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR changes in between queries.
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
Value * getRHS() const
Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
Value * getLHS() const
Conditional or Unconditional Branch instruction.
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
void Reset()
Deallocate all but the current slab and reset the current pointer to the beginning of it, freeing all memory allocated so far.
Definition: Allocator.h:123
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1353
unsigned arg_size() const
Definition: InstrTypes.h:1351
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:708
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition: InstrTypes.h:1054
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:718
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:721
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:726
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:729
@ ICMP_EQ
equal
Definition: InstrTypes.h:739
@ ICMP_NE
not equal
Definition: InstrTypes.h:740
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:734
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:859
This is the shared class of boolean and integer constants.
Definition: Constants.h:78
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition: Constants.h:205
static ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:833
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:888
static ConstantInt * getFalse(LLVMContext &Context)
Definition: Constants.cpp:840
This is an important base class in LLVM.
Definition: Constant.h:41
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:356
A parsed version of the target data layout string and methods for querying it.
Definition: DataLayout.h:110
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exists.
Definition: DenseMap.h:202
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&... Args)
Definition: DenseMap.h:235
iterator end()
Definition: DenseMap.h:84
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
void getDescendants(NodeT *R, SmallVectorImpl< NodeT * > &Result) const
Get all nodes dominated by R, including R itself.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:314
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:166
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:321
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
This instruction extracts a struct member or array element value from an aggregate value.
unsigned getNumIndices() const
iterator_range< idx_iterator > indices() const
idx_iterator idx_begin() const
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:308
Represents calls to the gc.relocate intrinsic.
This class holds the mapping between values and value numbers.
Definition: GVN.h:151
uint32_t lookupOrAddCmp(unsigned Opcode, CmpInst::Predicate Pred, Value *LHS, Value *RHS)
Returns the value number of the given comparison, assigning it a new number if it did not have one before.
Definition: GVN.cpp:680
uint32_t getNextUnusedValueNumber()
Definition: GVN.h:213
uint32_t lookupOrAdd(Value *V)
lookup_or_add - Returns the value number for the specified value, assigning it a new number if it did not have one before.
Definition: GVN.cpp:588
uint32_t lookup(Value *V, bool Verify=true) const
Returns the value number of the specified value.
Definition: GVN.cpp:667
void setAliasAnalysis(AAResults *A)
Definition: GVN.h:209
void clear()
Remove all entries from the ValueTable.
Definition: GVN.cpp:688
bool exists(Value *V) const
Returns true if a value number exists for the specified value.
Definition: GVN.cpp:582
uint32_t phiTranslate(const BasicBlock *BB, const BasicBlock *PhiBlock, uint32_t Num, GVNPass &Gvn)
Wrap phiTranslateImpl to provide caching functionality.
Definition: GVN.cpp:2088
void setMemDep(MemoryDependenceResults *M)
Definition: GVN.h:211
void erase(Value *v)
Remove a value from the value numbering.
Definition: GVN.cpp:700
void add(Value *V, uint32_t num)
add - Insert a value into the table with a specified value number.
Definition: GVN.cpp:457
void setDomTree(DominatorTree *D)
Definition: GVN.h:212
void eraseTranslateCacheEntry(uint32_t Num, const BasicBlock &CurrBlock)
Erase stale entry from phiTranslate cache so phiTranslate can be computed again.
Definition: GVN.cpp:2192
void verifyRemoved(const Value *) const
verifyRemoved - Verify that the value is removed from all internal data structures.
Definition: GVN.cpp:710
The core GVN pass object.
Definition: GVN.h:117
friend class gvn::GVNLegacyPass
Definition: GVN.h:218
bool isPREEnabled() const
Definition: GVN.cpp:721
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Run the pass over the function.
Definition: GVN.cpp:742
AAResults * getAliasAnalysis() const
Definition: GVN.h:139
bool isLoadPREEnabled() const
Definition: GVN.cpp:725
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Definition: GVN.cpp:770
DominatorTree & getDominatorTree() const
Definition: GVN.h:138
bool isLoadInLoopPREEnabled() const
Definition: GVN.cpp:729
bool isLoadPRESplitBackedgeEnabled() const
Definition: GVN.cpp:733
void markInstructionForDeletion(Instruction *I)
This removes the specified instruction from our various maps and marks it for deletion.
Definition: GVN.h:133
bool isMemDepEnabled() const
Definition: GVN.cpp:738
MemoryDependenceResults & getMemDep() const
Definition: GVN.h:140
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:940
Legacy wrapper pass to provide the GlobalsAAResult object.
This class allows one to keep track of instructions with implicit control flow.
bool isDominatedByICFIFromSameBlock(const Instruction *Insn)
Returns true if the first ICFI of Insn's block exists and dominates Insn.
bool hasICF(const BasicBlock *BB)
Returns true if at least one instruction from the given basic block has implicit control flow.
void clear()
Invalidates all information from this tracking.
void removeUsersOf(const Instruction *Inst)
Notifies this tracking that we are going to replace all uses of Inst.
void insertInstructionTo(const Instruction *Inst, const BasicBlock *BB)
Notifies this tracking that we are going to insert a new instruction Inst to the basic block BB.
void removeInstruction(const Instruction *Inst)
Notifies this tracking that we are going to remove the instruction Inst. It makes all necessary updates to internal caches to keep them consistent.
Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following: the instruction has no parent, and the instruction has no name.
unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
Definition: Instruction.cpp:88
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:358
const BasicBlock * getParent() const
Definition: Instruction.h:90
bool mayHaveSideEffects() const LLVM_READONLY
Return true if the instruction may have side effects.
bool isTerminator() const
Definition: Instruction.h:171
bool mayReadFromMemory() const LLVM_READONLY
Return true if this instruction may read memory.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:168
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:82
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:355
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:177
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:1268
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
Definition: LoopInfo.h:992
The legacy pass manager's analysis pass to compute loop information.
Definition: LoopInfo.h:1293
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:547
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:37
size_type size() const
Definition: MapVector.h:61
A memory dependence query can return one of three different answers.
bool isClobber() const
Tests if this MemDepResult represents a query that is an instruction clobber dependency.
bool isNonLocal() const
Tests if this MemDepResult represents a query that is transparent to the start of the block, but does not satisfy the query.
bool isDef() const
Tests if this MemDepResult represents a query that is an instruction definition dependency.
bool isLocal() const
Tests if this MemDepResult represents a valid local query (Clobber/Def).
Instruction * getInst() const
If this is a normal dependency, returns the instruction that is depended on.
This is the common base class for memset/memcpy/memmove.
An analysis that produces MemoryDependenceResults for a function.
Provides a lazy, caching interface for making common memory aliasing information queries, backed by LLVM's alias analysis passes.
std::vector< NonLocalDepEntry > NonLocalDepInfo
void invalidateCachedPredecessors()
Clears the PredIteratorCache info.
void invalidateCachedPointerInfo(Value *Ptr)
Invalidates cached information about the specified pointer, because it may be too conservative in memdep.
std::optional< int32_t > getClobberOffset(LoadInst *DepInst) const
Return the clobber offset to dependent instruction.
void removeInstruction(Instruction *InstToRemove)
Removes an instruction from the dependence analysis, updating the dependence of instructions that previously depended on it.
MemDepResult getDependency(Instruction *QueryInst)
Returns the instruction on which a memory operation depends.
const NonLocalDepInfo & getNonLocalCallDependency(CallBase *QueryCall)
Perform a full dependency query for the specified call, returning the set of blocks that the value is potentially live across.
void getNonLocalPointerDependency(Instruction *QueryInst, SmallVectorImpl< NonLocalDepResult > &Result)
Perform a full dependency query for an access to the QueryInst's specified memory location, returning the set of instructions that either define or clobber the value.
A wrapper analysis pass for the legacy pass manager that exposes a MemoryDependenceResults instance.
Representation for a specific memory location.
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
const Value * Ptr
The address of the start of the location.
An analysis that produces MemorySSA for a function.
Definition: MemorySSA.h:936
MemorySSA * getMemorySSA() const
Get handle on MemorySSA.
MemoryUseOrDef * createMemoryAccessBefore(Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt)
Create a MemoryAccess in MemorySSA before or after an existing MemoryAccess.
void insertDef(MemoryDef *Def, bool RenameUses=false)
Insert a definition into the MemorySSA IR.
MemoryAccess * createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition, const BasicBlock *BB, MemorySSA::InsertionPlace Point)
Create a MemoryAccess in MemorySSA at a specified point in a block, with a specified clobbering definition.
void insertUse(MemoryUse *Use, bool RenameUses=false)
void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
Legacy analysis pass which computes MemorySSA.
Definition: MemorySSA.h:986
Encapsulates MemorySSA, including all data associated with memory accesses.
Definition: MemorySSA.h:700
const AccessList * getBlockAccesses(const BasicBlock *BB) const
Return the list of MemoryAccess's for a given basic block.
Definition: MemorySSA.h:757
void verifyMemorySSA(VerificationLevel=VerificationLevel::Fast) const
Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right places).
Definition: MemorySSA.cpp:1862
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.
Definition: MemorySSA.h:717
MemoryAccess * getLiveOnEntryDef() const
Definition: MemorySSA.h:741
Class that has the common methods + fields of memory uses/defs.
Definition: MemorySSA.h:252
MemoryAccess * getDefiningAccess() const
Get the access that produces the memory state used by this Use.
Definition: MemorySSA.h:262
This is an entry in the NonLocalDepInfo cache.
OptimizationRemarkEmitter legacy analysis pass.
The optimization diagnostic interface.
bool allowExtraAnalysis(StringRef PassName) const
Whether we allow for extra compile-time budget to perform more analysis to produce fewer false positives.
void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
void setIncomingValueForBlock(const BasicBlock *BB, Value *V)
Set every incoming value(s) for block BB to V.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will have (use 0 if you really have no idea).
PHITransAddr - An address value which tracks and handles phi translation.
Definition: PHITransAddr.h:35
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at application startup.
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Definition: Constants.cpp:1758
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:152
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:158
void preserve()
Mark an analysis as preserved.
Definition: PassManager.h:173
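Taken together, a function transform typically either claims it preserved everything (when it made no changes) or names the analyses it kept intact. A minimal sketch, assuming a Changed flag and that the dominator tree was kept up to date:
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  return Changed ? PA : PreservedAnalyses::all();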
Helper class for SSA formation on a set of values defined in multiple blocks.
Definition: SSAUpdater.h:39
void Initialize(Type *Ty, StringRef Name)
Reset this object to get ready for a new set of SSA updates with type 'Ty'.
Definition: SSAUpdater.cpp:53
Value * GetValueInMiddleOfBlock(BasicBlock *BB)
Construct SSA form, materializing a value that is live in the middle of the specified block.
Definition: SSAUpdater.cpp:98
bool HasValueForBlock(BasicBlock *BB) const
Return true if the SSAUpdater already has a value for the specified block.
Definition: SSAUpdater.cpp:62
void AddAvailableValue(BasicBlock *BB, Value *V)
Indicate that a rewritten value is available in the specified block with the specified value.
Definition: SSAUpdater.cpp:70
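The usual SSAUpdater rewrite sequence strings these calls together. A minimal sketch, where Ty, BB1, BB2, V1, V2, and MidBB are placeholders for client-provided values:
  SSAUpdater Updater;
  Updater.Initialize(Ty, "merged");
  Updater.AddAvailableValue(BB1, V1);
  Updater.AddAvailableValue(BB2, V2);
  Value *M = Updater.GetValueInMiddleOfBlock(MidBB); // inserts PHIs as needed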
This class represents the LLVM 'select' instruction.
const Value * getCondition() const
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", Instruction *InsertBefore=nullptr, Instruction *MDFrom=nullptr)
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:450
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:301
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
bool empty() const
Definition: SmallSet.h:157
bool erase(const T &V)
Definition: SmallSet.h:205
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:177
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:941
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:687
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
An instruction for storing to memory.
Definition: Instructions.h:301
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
char front() const
front - Get the first character in the string.
Definition: StringRef.h:140
Multiway switch.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:264
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:231
bool isOpaquePointerTy() const
True if this is an instance of an opaque PointerType.
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:140
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1739
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
op_range operands()
Definition: User.h:242
void setOperand(unsigned i, Value *Val)
Definition: User.h:174
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:375
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:532
bool canBeFreed() const
Return true if the memory object referred to by V can be freed in the scope for which the SSA value defining the allocation is live.
Definition: Value.cpp:781
void deleteValue()
Delete a pointer to a generic Value.
Definition: Value.cpp:109
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:308
Represents an op.with.overflow intrinsic.
An efficient, type-erasing, non-owning reference to a callable.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do their job.
Definition: GVN.cpp:3178
GVNLegacyPass(bool NoMemDepAnalysis=!GVNEnableMemDep)
Definition: GVN.cpp:3153
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
Definition: GVN.cpp:3158
An opaque object representing a hash code.
Definition: Hashing.h:74
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:76
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
int analyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr, StoreInst *DepSI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the store at DepSI.
Definition: VNCoercion.cpp:211
Value * getLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
If analyzeLoadFromClobberingLoad returned an offset, this function can be used to actually perform the extraction of the value from the load.
Definition: VNCoercion.cpp:463
Value * getStoreValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
If analyzeLoadFromClobberingStore returned an offset, this function can be used to actually perform t...
Definition: VNCoercion.cpp:445
Value * getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
If analyzeLoadFromClobberingMemInst returned an offset, this function can be used to actually perform...
Definition: VNCoercion.cpp:520
int analyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, LoadInst *DepLI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the load at De...
Definition: VNCoercion.cpp:317
int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr, MemIntrinsic *DepMI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the memory int...
Definition: VNCoercion.cpp:352
bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy, const DataLayout &DL)
Return true if CoerceAvailableValueToLoadType would succeed if it was called.
Definition: VNCoercion.cpp:18
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:445
Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value, return said value in the requested type.
unsigned GetSuccessorNumber(const BasicBlock *BB, const BasicBlock *Succ)
Search for the specified successor of basic block BB and return its position in the terminator instruction's list of successors.
Definition: CFG.cpp:79
void append_range(Container &C, Range &&R)
Wrapper function to append a range to a container.
Definition: STLExtras.h:2092
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition: STLExtras.h:732
void initializeGVNLegacyPassPass(PassRegistry &)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1789
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition: Local.cpp:398
bool isModSet(const ModRefInfo MRI)
Definition: ModRef.h:48
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void patchReplacementInstruction(Instruction *I, Value *Repl)
Patch the replacement so that it is not more restrictive than the value being replaced.
Definition: Local.cpp:2803
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge)
Replace each use of 'From' with 'To' if that use is dominated by the given edge.
Definition: Local.cpp:2869
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
bool VerifyMemorySSA
Enables verification of MemorySSA.
Definition: MemorySSA.cpp:89
bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and, if the resulting llvm.assume is valid, inserts it before I.
bool MergeBlockIntoPredecessor(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, MemoryDependenceResults *MemDep=nullptr, bool PredecessorWithTwoSuccessors=false, DominatorTree *DT=nullptr)
Attempts to merge a block into its predecessor, if possible.
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q, OptimizationRemarkEmitter *ORE=nullptr)
See if we can compute a simplified version of this instruction.
BasicBlock * SplitCriticalEdge(Instruction *TI, unsigned SuccNum, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions(), const Twine &BBName="")
If this edge is a critical edge, insert a new node to split the critical edge.
FunctionPass * createGVNPass(bool NoMemDepAnalysis=false)
Create a legacy GVN pass.
Definition: GVN.cpp:3211
bool isCriticalEdge(const Instruction *TI, unsigned SuccNum, bool AllowIdenticalEdges=false)
Return true if the specified edge is a critical edge.
Definition: CFG.cpp:95
bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if the instruction does not have any effects besides calculating the result and does not have undefined behavior.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1939
bool pred_empty(const BasicBlock *BB)
Definition: CFG.h:118
iterator_range< df_iterator< T > > depth_first(const T &G)
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition: Hashing.h:613
bool EliminateDuplicatePHINodes(BasicBlock *BB)
Check for and eliminate duplicate PHI nodes in this block.
Definition: Local.cpp:1374
bool isAssumeWithEmptyBundle(AssumeInst &Assume)
Return true iff the operand bundles of the provided llvm.assume don't contain any valuable information.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition: Hashing.h:491
bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in ExclusionSet, returning true if uncertain.
Definition: CFG.cpp:231
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
Definition: Metadata.h:651
Option class for critical edge splitting.
static GVNPass::Expression getTombstoneKey()
Definition: GVN.cpp:163
static bool isEqual(const GVNPass::Expression &LHS, const GVNPass::Expression &RHS)
Definition: GVN.cpp:171
static unsigned getHashValue(const GVNPass::Expression &e)
Definition: GVN.cpp:165
static GVNPass::Expression getEmptyKey()
Definition: GVN.cpp:162
An information struct used to provide DenseMap with the various necessary components for a given valu...
Definition: DenseMapInfo.h:51
A set of parameters to control various transforms performed by GVN pass.
Definition: GVN.h:74
std::optional< bool > AllowLoadPRESplitBackedge
Definition: GVN.h:78
std::optional< bool > AllowPRE
Definition: GVN.h:75
std::optional< bool > AllowLoadInLoopPRE
Definition: GVN.h:77
std::optional< bool > AllowMemDep
Definition: GVN.h:79
std::optional< bool > AllowLoadPRE
Definition: GVN.h:76
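These fields are normally populated through the chained setters on GVNOptions; setMemDep appears in the legacy pass constructor above, while setPRE and setLoadPRE are assumed from GVN.h. A minimal new-pass-manager sketch:
  FunctionPassManager FPM;
  FPM.addPass(GVNPass(GVNOptions().setPRE(true).setLoadPRE(true).setMemDep(true)));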
SmallVector< uint32_t, 4 > varargs
Definition: GVN.cpp:136
bool operator==(const Expression &other) const
Definition: GVN.cpp:140
friend hash_code hash_value(const Expression &Value)
Definition: GVN.cpp:152
Expression(uint32_t o=~2U)
Definition: GVN.cpp:138
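For reference, a key of this shape is hashed by combining its scalar fields with the vararg range via the hashing helpers listed above; the opcode and type field names in this sketch are assumptions:
  hash_code H = hash_combine(E.opcode, E.type,
                             hash_combine_range(E.varargs.begin(), E.varargs.end()));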
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:371
Represents an AvailableValue which can be rematerialized at the end of the associated BasicBlock.
Definition: GVN.cpp:280
static AvailableValueInBlock get(BasicBlock *BB, Value *V, unsigned Offset=0)
Definition: GVN.cpp:294
static AvailableValueInBlock getUndef(BasicBlock *BB)
Definition: GVN.cpp:299
static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV)
Definition: GVN.cpp:287
Value * MaterializeAdjustedValue(LoadInst *Load, GVNPass &gvn) const
Emit code at the end of this block to adjust the value defined here to the specified type.
Definition: GVN.cpp:310
AvailableValue AV
AV - The actual available value.
Definition: GVN.cpp:285
static AvailableValueInBlock getSelect(BasicBlock *BB, SelectInst *Sel, Value *V1, Value *V2)
Definition: GVN.cpp:303
BasicBlock * BB
BB - The basic block in question.
Definition: GVN.cpp:282
Represents a particular available value that we know how to materialize.
Definition: GVN.cpp:183
unsigned Offset
Offset - The byte offset in Val that is interesting for the load query.
Definition: GVN.cpp:200
bool isSimpleValue() const
Definition: GVN.cpp:246
static AvailableValue getSelect(SelectInst *Sel, Value *V1, Value *V2)
Definition: GVN.cpp:236
bool isCoercedLoadValue() const
Definition: GVN.cpp:247
static AvailableValue get(Value *V, unsigned Offset=0)
Definition: GVN.cpp:204
ValType Kind
Kind of the live-out value.
Definition: GVN.cpp:197
LoadInst * getCoercedLoadValue() const
Definition: GVN.cpp:257
static AvailableValue getLoad(LoadInst *Load, unsigned Offset=0)
Definition: GVN.cpp:220
static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset=0)
Definition: GVN.cpp:212
bool isUndefValue() const
Definition: GVN.cpp:249
bool isSelectValue() const
Definition: GVN.cpp:250
Value * Val
Val - The value that is live out of the block.
Definition: GVN.cpp:195
Value * V1
V1, V2 - The dominating non-clobbered values of SelectVal.
Definition: GVN.cpp:202
Value * MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt, GVNPass &gvn) const
Emit code at the specified insertion point to adjust the value defined here to the specified type.
Definition: GVN.cpp:980
static AvailableValue getUndef()
Definition: GVN.cpp:228
SelectInst * getSelectValue() const
Definition: GVN.cpp:267
Value * getSimpleValue() const
Definition: GVN.cpp:252
bool isMemIntrinValue() const
Definition: GVN.cpp:248
MemIntrinsic * getMemIntrinValue() const
Definition: GVN.cpp:262