using namespace PatternMatch;

#define DEBUG_TYPE "gvn"

STATISTIC(NumGVNInstr, "Number of instructions deleted");
STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
STATISTIC(NumGVNSimpl, "Number of instructions simplified");
STATISTIC(NumGVNEqProp, "Number of equalities propagated");
STATISTIC(NumPRELoopLoad, "Number of loop loads PRE'd");
STATISTIC(NumPRELoadMoved2CEPred,
          "Number of loads moved to predecessor of a critical edge in PRE");

STATISTIC(IsValueFullyAvailableInBlockNumSpeculationsMax,
          "Number of blocks speculated as available in "
          "IsValueFullyAvailableInBlock(), max");
STATISTIC(MaxBBSpeculationCutoffReachedTimes,
          "Number of times we reached gvn-max-block-speculations cut-off "
          "preventing further exploration");
static cl::opt<uint32_t> MaxNumDeps(
    "gvn-max-num-deps", cl::Hidden, cl::init(100),
    cl::desc("Max number of dependences to attempt Load PRE (default = 100)"));

static cl::opt<uint32_t> MaxBBSpeculations(
    "gvn-max-block-speculations", cl::Hidden, cl::init(600),
    cl::desc("Max number of blocks we're willing to speculate on (and recurse "
             "into) when deducing if a value is fully available or not in GVN "
             "(default = 600)"));

static cl::opt<uint32_t> MaxNumVisitedInsts(
    "gvn-max-num-visited-insts", cl::Hidden, cl::init(100),
    cl::desc("Max number of visited instructions when trying to find "
             "dominating value of select dependency (default = 100)"));

static cl::opt<uint32_t> MaxNumInsnsPerBlock(
    "gvn-max-num-insns", cl::Hidden, cl::init(100),
    cl::desc("Max number of instructions to scan in each basic block in GVN "
             "(default = 100)"));
return cast<LoadInst>(Val);

return cast<MemIntrinsic>(Val);

return cast<SelectInst>(Val);

Res.AV = std::move(AV);
e.type = I->getType();
e.opcode = I->getOpcode();

e.varargs.push_back(lookupOrAdd(GCR->getOperand(0)));
e.varargs.push_back(lookupOrAdd(GCR->getBasePtr()));
e.varargs.push_back(lookupOrAdd(GCR->getDerivedPtr()));

for (Use &Op : I->operands())

if (I->isCommutative()) {

  assert(I->getNumOperands() >= 2 && "Unsupported commutative instruction!");
  if (e.varargs[0] > e.varargs[1])

  e.commutative = true;

if (auto *C = dyn_cast<CmpInst>(I)) {

  if (e.varargs[0] > e.varargs[1]) {

  e.opcode = (C->getOpcode() << 8) | Predicate;
  e.commutative = true;
} else if (auto *E = dyn_cast<InsertValueInst>(I)) {
  e.varargs.append(E->idx_begin(), E->idx_end());
} else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {

  e.varargs.append(ShuffleMask.begin(), ShuffleMask.end());
assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
       "Not a comparison!");

e.varargs.push_back(lookupOrAdd(LHS));
e.varargs.push_back(lookupOrAdd(RHS));

if (e.varargs[0] > e.varargs[1]) {

e.opcode = (Opcode << 8) | Predicate;
e.commutative = true;
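// Editorial note: both createExpr and createCmpExpr canonicalize commutative
// operations by sorting the two operand value numbers, and comparisons fold
// the (possibly swapped) predicate into the opcode as (Opcode << 8) |
// Predicate. For example (illustrative IR), `icmp sgt i32 %y, %x` is numbered
// as if it were `icmp slt i32 %x, %y`, so both spellings share a value number.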
assert(EI && "Not an ExtractValueInst?");

e.varargs.push_back(lookupOrAdd(WO->getLHS()));
e.varargs.push_back(lookupOrAdd(WO->getRHS()));

e.varargs.push_back(lookupOrAdd(Op));
Type *PtrTy = GEP->getType()->getScalarType();

unsigned BitWidth = DL.getIndexTypeSizeInBits(PtrTy);

if (GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset)) {

  E.opcode = GEP->getOpcode();

  E.varargs.push_back(lookupOrAdd(GEP->getPointerOperand()));
  for (const auto &Pair : VariableOffsets) {
    E.varargs.push_back(lookupOrAdd(Pair.first));

  if (!ConstantOffset.isZero())

  E.opcode = GEP->getOpcode();
  E.type = GEP->getSourceElementType();

  E.varargs.push_back(lookupOrAdd(Op));
GVNPass::ValueTable::ValueTable() = default;
GVNPass::ValueTable::ValueTable(const ValueTable &) = default;
GVNPass::ValueTable::ValueTable(ValueTable &&) = default;
GVNPass::ValueTable::~ValueTable() = default;

valueNumbering.insert(std::make_pair(V, num));
if (PHINode *PN = dyn_cast<PHINode>(V))
  NumberingPhi[num] = PN;
if (C->getFunction()->isPresplitCoroutine()) {
  valueNumbering[C] = nextValueNumber;
  return nextValueNumber++;

if (C->isConvergent()) {
  valueNumbering[C] = nextValueNumber;
  return nextValueNumber++;

if (AA->doesNotAccessMemory(C)) {

  uint32_t e = assignExpNewValueNum(exp).first;
  valueNumbering[C] = e;

if (MD && AA->onlyReadsMemory(C)) {

  auto ValNum = assignExpNewValueNum(exp);

  valueNumbering[C] = ValNum.first;

    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;

  if (local_dep.isDef()) {

    if (!local_cdep || local_cdep->arg_size() != C->arg_size()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;

    for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
      uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));

        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;

    valueNumbering[C] = v;

    if (I.getResult().isNonLocal())

    if (!I.getResult().isDef() || cdep != nullptr) {

    CallInst *NonLocalDepCall = dyn_cast<CallInst>(I.getResult().getInst());

    cdep = NonLocalDepCall;

    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;

    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;

  for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
    uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));

      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;

  valueNumbering[C] = v;

valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
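// Editorial note: lookupOrAddCall numbers calls conservatively. Calls that do
// not access memory hash like any other expression; read-only calls are only
// unified with a prior identical call when MemoryDependenceResults can show no
// intervening write; convergent calls, pre-split coroutines, and everything
// else always receive a fresh value number.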
bool GVNPass::ValueTable::exists(Value *V) const {
  return valueNumbering.count(V) != 0;
}
if (VI != valueNumbering.end())

auto *I = dyn_cast<Instruction>(V);

  valueNumbering[V] = nextValueNumber;
  return nextValueNumber++;

switch (I->getOpcode()) {
case Instruction::Call:
  return lookupOrAddCall(cast<CallInst>(I));
case Instruction::FNeg:
case Instruction::Add:
case Instruction::FAdd:
case Instruction::Sub:
case Instruction::FSub:
case Instruction::Mul:
case Instruction::FMul:
case Instruction::UDiv:
case Instruction::SDiv:
case Instruction::FDiv:
case Instruction::URem:
case Instruction::SRem:
case Instruction::FRem:
case Instruction::Shl:
case Instruction::LShr:
case Instruction::AShr:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
case Instruction::ICmp:
case Instruction::FCmp:
case Instruction::Trunc:
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::UIToFP:
case Instruction::SIToFP:
case Instruction::FPTrunc:
case Instruction::FPExt:
case Instruction::PtrToInt:
case Instruction::IntToPtr:
case Instruction::AddrSpaceCast:
case Instruction::BitCast:
case Instruction::Select:
case Instruction::Freeze:
case Instruction::ExtractElement:
case Instruction::InsertElement:
case Instruction::ShuffleVector:
case Instruction::InsertValue:

case Instruction::GetElementPtr:
  exp = createGEPExpr(cast<GetElementPtrInst>(I));

case Instruction::ExtractValue:
  exp = createExtractvalueExpr(cast<ExtractValueInst>(I));

case Instruction::PHI:
  valueNumbering[V] = nextValueNumber;
  NumberingPhi[nextValueNumber] = cast<PHINode>(V);
  return nextValueNumber++;

  valueNumbering[V] = nextValueNumber;
  return nextValueNumber++;
uint32_t e = assignExpNewValueNum(exp).first;
valueNumbering[V] = e;

assert(VI != valueNumbering.end() && "Value not numbered?");

return (VI != valueNumbering.end()) ? VI->second : 0;

uint32_t GVNPass::ValueTable::lookupOrAddCmp(unsigned Opcode,

  return assignExpNewValueNum(exp).first;
void GVNPass::ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  NumberingPhi.clear();
  PhiTranslateTable.clear();

void GVNPass::ValueTable::erase(Value *V) {
  uint32_t Num = valueNumbering.lookup(V);
  valueNumbering.erase(V);

  NumberingPhi.erase(Num);

void GVNPass::ValueTable::verifyRemoved(const Value *V) const {
  assert(!valueNumbering.contains(V) &&
         "Inst still occurs in value numbering map!");
bool Changed = runImpl(F, AC, DT, TLI, AA, MemDep, LI, &ORE,
                       MSSA ? &MSSA->getMSSA() : nullptr);

static_cast<PassInfoMixin<GVNPass> *>(this)->printPipeline(
    OS, MapClassName2PassName);

if (Options.AllowPRE != std::nullopt)
  OS << (*Options.AllowPRE ? "" : "no-") << "pre;";

  << "split-backedge-load-pre;";

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

errs() << I.first << "\n";
std::optional<BasicBlock *> UnavailableBB;

unsigned NumNewNewSpeculativelyAvailableBBs = 0;

while (!Worklist.empty()) {

  std::pair<DenseMap<BasicBlock *, AvailabilityState>::iterator, bool> IV =
      FullyAvailableBlocks.try_emplace(
          CurrBB, AvailabilityState::SpeculativelyAvailable);

  if (State == AvailabilityState::Unavailable) {
    UnavailableBB = CurrBB;

  ++NumNewNewSpeculativelyAvailableBBs;

  MaxBBSpeculationCutoffReachedTimes += (int)OutOfBudget;

    State = AvailabilityState::Unavailable;
    UnavailableBB = CurrBB;

  NewSpeculativelyAvailableBBs.insert(CurrBB);

IsValueFullyAvailableInBlockNumSpeculationsMax.updateMax(
    NumNewNewSpeculativelyAvailableBBs);

auto MarkAsFixpointAndEnqueueSuccessors =

      auto It = FullyAvailableBlocks.find(BB);
      if (It == FullyAvailableBlocks.end())

      case AvailabilityState::Unavailable:
      case AvailabilityState::Available:

      case AvailabilityState::SpeculativelyAvailable:
        State = FixpointState;

               "Found a speculatively available successor leftover?");

  while (!Worklist.empty())
    MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
                                       AvailabilityState::Unavailable);

  while (!Worklist.empty())
    MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
                                       AvailabilityState::Available);

       "Must have fixed all the new speculatively available blocks.");

return !UnavailableBB;
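// Editorial note: the AvailabilityState values form a tiny lattice
// (Unavailable and Available are fixpoints; SpeculativelyAvailable is the
// optimistic "unknown" state). The main worklist loop speculates, and the two
// MarkAsFixpointAndEnqueueSuccessors loops above then resolve every remaining
// speculative block, so no SpeculativelyAvailable entry survives this call.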
if ((V.AV.isSimpleValue() && V.AV.getSimpleValue() == OldValue) ||
    (V.AV.isCoercedLoadValue() && V.AV.getCoercedLoadValue() == OldValue))

if (ValuesPerBlock.size() == 1 &&
    gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
                                             Load->getParent())) {
  assert(!ValuesPerBlock[0].AV.isUndefValue() &&
         "Dead BB dominates this block");
  return ValuesPerBlock[0].MaterializeAdjustedValue(Load, gvn);

SSAUpdate.Initialize(Load->getType(), Load->getName());

if (AV.AV.isUndefValue())

if (BB == Load->getParent() &&
    ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == Load) ||
     (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == Load)))
Type *LoadTy = Load->getType();
const DataLayout &DL = Load->getModule()->getDataLayout();
if (isSimpleValue()) {
  Res = getSimpleValue();
  if (Res->getType() != LoadTy) {

      << " " << *getSimpleValue() << '\n'

} else if (isCoercedLoadValue()) {
  LoadInst *CoercedLoad = getCoercedLoadValue();

    if (!CoercedLoad->hasMetadata(LLVMContext::MD_noundef))

          {LLVMContext::MD_dereferenceable,
           LLVMContext::MD_dereferenceable_or_null,
           LLVMContext::MD_invariant_load, LLVMContext::MD_invariant_group});

      << " " << *getCoercedLoadValue() << '\n'

} else if (isMemIntrinValue()) {

      << " " << *getMemIntrinValue() << '\n'

} else if (isSelectValue()) {

  assert(V1 && V2 && "both value operands of the select must be present");

assert(Res && "failed to materialize?");
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
  return II->getIntrinsicID() == Intrinsic::lifetime_start;
using namespace ore;

R << "load of type " << NV("Type", Load->getType()) << " not eliminated"

for (auto *U : Load->getPointerOperand()->users()) {
  if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U))) {
    auto *I = cast<Instruction>(U);
    if (I->getFunction() == Load->getFunction() && DT->dominates(I, Load)) {

for (auto *U : Load->getPointerOperand()->users()) {
  if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U))) {
    auto *I = cast<Instruction>(U);
    if (I->getFunction() == Load->getFunction() &&

  OtherAccess = nullptr;

  R << " in favor of " << NV("OtherAccess", OtherAccess);

  R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst());
for (auto *Inst = BB == FromBB ? From : BB->getTerminator();
     Inst != nullptr; Inst = Inst->getPrevNonDebugInstruction()) {

  if (auto *LI = dyn_cast<LoadInst>(Inst))
    if (LI->getPointerOperand() == Loc.Ptr && LI->getType() == LoadTy)
std::optional<AvailableValue>

assert(Load->isUnordered() && "rules below are incorrect for ordered access");

if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {

  if (Address && Load->isAtomic() <= DepSI->isAtomic()) {

if (LoadInst *DepLoad = dyn_cast<LoadInst>(DepInst)) {

  if (DepLoad != Load && Address &&
      Load->isAtomic() <= DepLoad->isAtomic()) {

    Offset = (ClobberOff == std::nullopt || *ClobberOff < 0)

if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {

  dbgs() << " is clobbered by " << *DepInst << '\n';);

return std::nullopt;

if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {

    return std::nullopt;

  if (S->isAtomic() < Load->isAtomic())
    return std::nullopt;

if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {

    return std::nullopt;

  if (LD->isAtomic() < Load->isAtomic())
    return std::nullopt;

if (auto *Sel = dyn_cast<SelectInst>(DepInst)) {
  assert(Sel->getType() == Load->getPointerOperandType());

    return std::nullopt;

    return std::nullopt;

  dbgs() << " has unknown def " << *DepInst << '\n';);
return std::nullopt;
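// Editorial note: the two halves above mirror the two MemDep answers. For a
// clobbering store/load/mem-intrinsic, the analyzeLoadFromClobbering* helpers
// compute a byte offset at which the loaded bits still live inside the
// clobbering access; for a defining instruction the dependence must match
// directly, including atomicity (hence the isAtomic() comparisons).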
void GVNPass::AnalyzeLoadAvailability(LoadInst *Load, LoadDepVect &Deps,
                                      AvailValInBlkVect &ValuesPerBlock,
                                      UnavailBlkVect &UnavailableBlocks) {

  for (const auto &Dep : Deps) {

    if (DeadBlocks.count(DepBB)) {

      UnavailableBlocks.push_back(DepBB);

    if (auto AV = AnalyzeLoadAvailability(Load, DepInfo, Dep.getAddress())) {

      ValuesPerBlock.push_back(

      UnavailableBlocks.push_back(DepBB);

  assert(Deps.size() == ValuesPerBlock.size() + UnavailableBlocks.size() &&
         "post condition violation");
if (Term->getNumSuccessors() != 2 || Term->isSpecialTerminator())

auto *SuccBB = Term->getSuccessor(0);
if (SuccBB == LoadBB)
  SuccBB = Term->getSuccessor(1);
if (!SuccBB->getSinglePredecessor())

  if (Inst.isDebugOrPseudoInst())

  if (--NumInsts == 0)

  if (!Inst.isIdenticalTo(Load))

  return cast<LoadInst>(&Inst);
void GVNPass::eliminatePartiallyRedundantLoad(
    LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,

  for (const auto &AvailableLoad : AvailableLoads) {
    BasicBlock *UnavailableBlock = AvailableLoad.first;
    Value *LoadPtr = AvailableLoad.second;

        Load->isVolatile(), Load->getAlign(), Load->getOrdering(),

    NewLoad->setDebugLoc(Load->getDebugLoc());

      if (auto *NewDef = dyn_cast<MemoryDef>(NewAccess))

        MSSAU->insertUse(cast<MemoryUse>(NewAccess), true);

    NewLoad->setAAMetadata(Tags);

    if (auto *MD = Load->getMetadata(LLVMContext::MD_invariant_load))
      NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
    if (auto *InvGroupMD = Load->getMetadata(LLVMContext::MD_invariant_group))
      NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
    if (auto *RangeMD = Load->getMetadata(LLVMContext::MD_range))
      NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
    if (auto *AccessMD = Load->getMetadata(LLVMContext::MD_access_group))

      NewLoad->setMetadata(LLVMContext::MD_access_group, AccessMD);

    ValuesPerBlock.push_back(

    if (CriticalEdgePredAndLoad) {
      auto I = CriticalEdgePredAndLoad->find(UnavailableBlock);
      if (I != CriticalEdgePredAndLoad->end()) {
        ++NumPRELoadMoved2CEPred;

        removeFromLeaderTable(ValNo, OldLoad, OldLoad->getParent());

        removeInstruction(OldLoad);

  Load->replaceAllUsesWith(V);
  if (isa<PHINode>(V))

    I->setDebugLoc(Load->getDebugLoc());
  if (V->getType()->isPtrOrPtrVectorTy())

         << "load eliminated by PRE";
bool GVNPass::PerformLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
                             UnavailBlkVect &UnavailableBlocks) {

                                  UnavailableBlocks.end());

  bool MustEnsureSafetyOfSpeculativeExecution =

    if (TmpBB == LoadBB)

    if (Blockers.count(TmpBB))

    MustEnsureSafetyOfSpeculativeExecution =
        MustEnsureSafetyOfSpeculativeExecution || ICF->hasICF(TmpBB);

    FullyAvailableBlocks[AV.BB] = AvailabilityState::Available;
  for (BasicBlock *UnavailableBB : UnavailableBlocks)
    FullyAvailableBlocks[UnavailableBB] = AvailabilityState::Unavailable;

      dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
             << Pred->getName() << "': " << *Load << '\n');

        dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
               << Pred->getName() << "': " << *Load << '\n');

        dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
               << Pred->getName() << "': " << *Load << '\n');

            << "COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '"
            << Pred->getName() << "': " << *Load << '\n');

      if (LoadInst *LI = findLoadToHoistIntoPred(Pred, LoadBB, Load))
        CriticalEdgePredAndLoad[Pred] = LI;

      PredLoads[Pred] = nullptr;

  unsigned NumInsertPreds = PredLoads.size() + CriticalEdgePredSplit.size();
  unsigned NumUnavailablePreds = NumInsertPreds +
                                 CriticalEdgePredAndLoad.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should already be eliminated!");
  (void)NumUnavailablePreds;

  if (NumInsertPreds > 1)

  if (MustEnsureSafetyOfSpeculativeExecution) {
    if (CriticalEdgePredSplit.size())

    for (auto &PL : PredLoads)

    for (auto &CEP : CriticalEdgePredAndLoad)

  for (BasicBlock *OrigPred : CriticalEdgePredSplit) {
    BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
    assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
    PredLoads[NewPred] = nullptr;
    LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
                      << LoadBB->getName() << '\n');

  for (auto &CEP : CriticalEdgePredAndLoad)
    PredLoads[CEP.first] = nullptr;

  bool CanDoPRE = true;

  for (auto &PredLoad : PredLoads) {
    BasicBlock *UnavailablePred = PredLoad.first;

    Value *LoadPtr = Load->getPointerOperand();

    while (Cur != LoadBB) {

    LoadPtr = Address.translateWithInsertion(LoadBB, UnavailablePred, *DT,

                        << *Load->getPointerOperand() << "\n");

    PredLoad.second = LoadPtr;

    while (!NewInsts.empty()) {

    return !CriticalEdgePredSplit.empty();

  LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *Load << '\n');

                    << " INSTS: " << *NewInsts.back()

    I->updateLocationAfterHoist();

  eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, PredLoads,
                                  &CriticalEdgePredAndLoad);
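// Editorial note (illustrative sketch, not from this file): load PRE makes a
// partially redundant load fully redundant by inserting a copy in the
// predecessor where the value is unavailable, then phi-ing the results:
//
//   ; before                         ; after PerformLoadPRE
//   then:                            then:
//     %v1 = load i32, ptr %p           %v1 = load i32, ptr %p
//     br label %merge                  br label %merge
//   else:                            else:
//     br label %merge                  %v1.pre = load i32, ptr %p
//   merge:                             br label %merge
//     %v2 = load i32, ptr %p         merge:
//                                      %v2 = phi i32 [ %v1, %then ],
//                                                    [ %v1.pre, %else ]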
bool GVNPass::performLoopLoadPRE(LoadInst *Load,
                                 AvailValInBlkVect &ValuesPerBlock,
                                 UnavailBlkVect &UnavailableBlocks) {

  if (!L || L->getHeader() != Load->getParent())

  if (!Preheader || !Latch)

  Value *LoadPtr = Load->getPointerOperand();

  if (!L->isLoopInvariant(LoadPtr))

  for (auto *Blocker : UnavailableBlocks) {

    if (!L->contains(Blocker))

    if (Blocker->getTerminator()->mayWriteToMemory())

    LoopBlock = Blocker;

  AvailableLoads[LoopBlock] = LoadPtr;
  AvailableLoads[Preheader] = LoadPtr;

  LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOOP LOAD: " << *Load << '\n');
  eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, AvailableLoads,
using namespace ore;

    << "load of type " << NV("Type", Load->getType()) << " eliminated"
    << setExtraArgs() << " in favor of "
bool GVNPass::processNonLocalLoad(LoadInst *Load) {

  if (Load->getParent()->getParent()->hasFnAttribute(
          Attribute::SanitizeAddress) ||
      Load->getParent()->getParent()->hasFnAttribute(
          Attribute::SanitizeHWAddress))

  unsigned NumDeps = Deps.size();

      !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {

               dbgs() << " has unknown dependencies\n";);

  bool Changed = false;

          dyn_cast<GetElementPtrInst>(Load->getOperand(0))) {
    for (Use &U : GEP->indices())

        Changed |= performScalarPRE(I);

  AvailValInBlkVect ValuesPerBlock;
  UnavailBlkVect UnavailableBlocks;
  AnalyzeLoadAvailability(Load, Deps, ValuesPerBlock, UnavailableBlocks);

  if (ValuesPerBlock.empty())

  if (UnavailableBlocks.empty()) {
    LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *Load << '\n');

    Load->replaceAllUsesWith(V);

    if (isa<PHINode>(V))

      if (Load->getDebugLoc() && Load->getParent() == I->getParent())
        I->setDebugLoc(Load->getDebugLoc());
    if (V->getType()->isPtrOrPtrVectorTy())

  if (performLoopLoadPRE(Load, ValuesPerBlock, UnavailableBlocks) ||
      PerformLoadPRE(Load, ValuesPerBlock, UnavailableBlocks))
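// Editorial note: processNonLocalLoad first asks MemoryDependenceResults where
// the loaded value is defined. If it is available in every predecessor, the
// load is deleted outright (ConstructSSAForLoadSet builds the phi web);
// otherwise the loop-PRE and general load-PRE routines above try to make it
// fully redundant by inserting loads on the unavailable paths.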
      Cmp->getFastMathFlags().noNaNs())) {

  if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())

  if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())

      Cmp->getFastMathFlags().noNaNs()) ||

  if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())

  if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())

auto *I = dyn_cast<Instruction>(U);
return I && I->getParent() == BB;
bool GVNPass::processAssumeIntrinsic(AssumeInst *IntrinsicI) {

  if (Cond->isZero()) {

        for (const auto &Acc : *AL) {
          if (auto *Current = dyn_cast<MemoryUseOrDef>(&Acc))
            if (!Current->getMemoryInst()->comesBefore(NewS)) {
              FirstNonDom = Current;

        MSSAU->insertDef(cast<MemoryDef>(NewDef), false);

  if (isa<Constant>(V)) {

  bool Changed = false;

    Changed |= propagateEquality(V, True, Edge, false);

    ReplaceOperandsWithMap[V] = True;

  if (auto *CmpI = dyn_cast<CmpInst>(V)) {

    Value *CmpLHS = CmpI->getOperand(0);
    Value *CmpRHS = CmpI->getOperand(1);

    if (isa<Constant>(CmpLHS) && !isa<Constant>(CmpRHS))

    if (!isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))

    if ((isa<Argument>(CmpLHS) && isa<Argument>(CmpRHS)) ||
        (isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))) {

      if (isa<Constant>(CmpLHS) && isa<Constant>(CmpRHS))

                        << *CmpLHS << " with "
                        << *CmpRHS << " in block "

      ReplaceOperandsWithMap[CmpLHS] = CmpRHS;
I->replaceAllUsesWith(Repl);

bool GVNPass::processLoad(LoadInst *L) {

  if (!L->isUnordered())

  if (L->use_empty()) {

    return processNonLocalLoad(L);

    dbgs() << "GVN: load "; L->printAsOperand(dbgs());
    dbgs() << " has unknown dependence\n";);

  auto AV = AnalyzeLoadAvailability(L, Dep, L->getPointerOperand());
std::pair<uint32_t, bool>
GVNPass::ValueTable::assignExpNewValueNum(Expression &Exp) {

  bool CreateNewValNum = !e;
  if (CreateNewValNum) {
    Expressions.push_back(Exp);
    if (ExprIdx.size() < nextValueNumber + 1)
      ExprIdx.resize(nextValueNumber * 2);
    e = nextValueNumber;
    ExprIdx[nextValueNumber++] = nextExprNumber++;

  return {e, CreateNewValNum};
LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
while (Vals && Vals->BB == BB)

auto FindRes = PhiTranslateTable.find({Num, Pred});
if (FindRes != PhiTranslateTable.end())
  return FindRes->second;
uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
PhiTranslateTable.insert({{Num, Pred}, NewNum});

LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];

  Call = dyn_cast<CallInst>(Vals->Val);
  if (Call && Call->getParent() == PhiBlock)

if (AA->doesNotAccessMemory(Call))

if (!MD || !AA->onlyReadsMemory(Call))

  if (D.getResult().isNonFuncLocal())

if (PHINode *PN = NumberingPhi[Num]) {
  for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
    if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
if (!areAllValsInBB(Num, PhiBlock, Gvn))

if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)

for (unsigned i = 0; i < Exp.varargs.size(); i++) {

  if ((i > 1 && Exp.opcode == Instruction::InsertValue) ||
      (i > 0 && Exp.opcode == Instruction::ExtractValue) ||
      (i > 1 && Exp.opcode == Instruction::ShuffleVector))

  Exp.varargs[i] = phiTranslate(Pred, PhiBlock, Exp.varargs[i], Gvn);

if (Exp.commutative) {
  assert(Exp.varargs.size() >= 2 && "Unsupported commutative instruction!");
  if (Exp.varargs[0] > Exp.varargs[1]) {

    if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
      Exp.opcode = (Opcode << 8) |

if (uint32_t NewNum = expressionNumbering[Exp]) {
  if (Exp.opcode == Instruction::Call && NewNum != Num)
    return areCallValsEqual(Num, NewNum, Pred, PhiBlock, Gvn) ? NewNum : Num;
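// Editorial note: phi translation converts a value number that is valid in
// PhiBlock into the number the same expression would have in the predecessor
// Pred, by replacing each phi operand with its incoming value for Pred. This
// is what lets PRE recognize an expression as already available in a
// predecessor even when its operands flow through phi nodes.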
void GVNPass::ValueTable::eraseTranslateCacheEntry(

  PhiTranslateTable.erase({Num, Pred});

LeaderTableEntry Vals = LeaderTable[num];
if (!Vals.Val)
  return nullptr;

Value *Val = nullptr;

  if (isa<Constant>(Val))
    return Val;

LeaderTableEntry *Next = Vals.Next;

  if (isa<Constant>(Next->Val))
    return Next->Val;
  if (!Val) Val = Next->Val;
const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
assert((!Pred || Pred == E.getStart()) &&
       "No edge between these basic blocks!");
return Pred != nullptr;

void GVNPass::assignBlockRPONumber(Function &F) {
  BlockRPONumber.clear();

    BlockRPONumber[BB] = NextBlockNumber++;
  InvalidBlockRPONumbers = false;
bool GVNPass::replaceOperandsForInBlockEquality(Instruction *Instr) const {
  bool Changed = false;
  for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) {

    auto it = ReplaceOperandsWithMap.find(Operand);
    if (it != ReplaceOperandsWithMap.end()) {

                 << *it->second << " in instruction " << *Instr << '\n');
      Instr->setOperand(OpNum, it->second);
bool GVNPass::propagateEquality(Value *LHS, Value *RHS,

                                bool DominatesByEdge) {

  Worklist.push_back(std::make_pair(LHS, RHS));
  bool Changed = false;

  while (!Worklist.empty()) {
    std::pair<Value *, Value *> Item = Worklist.pop_back_val();
    LHS = Item.first;
    RHS = Item.second;

    if (isa<Constant>(LHS) && isa<Constant>(RHS))

    if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))

    assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) &&
           "Unexpected value!");

    if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
        (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {

    if (RootDominatesEnd && !isa<Instruction>(RHS))
      addToLeaderTable(LVN, RHS, Root.getEnd());

      unsigned NumReplacements =

      Changed |= NumReplacements > 0;
      NumGVNEqProp += NumReplacements;

    bool isKnownFalse = !isKnownTrue;

    if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
      Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);

        Worklist.push_back(std::make_pair(Op0, Op1));

      if (Num < NextNum) {

        if (NotCmp && isa<Instruction>(NotCmp)) {
          unsigned NumReplacements =

          Changed |= NumReplacements > 0;
          NumGVNEqProp += NumReplacements;

      if (RootDominatesEnd)
        addToLeaderTable(Num, NotVal, Root.getEnd());
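// Editorial note: propagateEquality records that LHS == RHS holds on every
// path through Root. For example (illustrative IR), given
//   %c = icmp eq i32 %x, 42
//   br i1 %c, label %then, label %else
// uses of %x dominated by the %then edge can be rewritten to 42, and the
// negated fact (%c == false) is propagated along the %else edge.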
if (isa<DbgInfoIntrinsic>(I))

bool Changed = false;
if (!I->use_empty()) {

  I->replaceAllUsesWith(V);

  if (MD && V->getType()->isPtrOrPtrVectorTy())

if (auto *Assume = dyn_cast<AssumeInst>(I))
  return processAssumeIntrinsic(Assume);

if (LoadInst *Load = dyn_cast<LoadInst>(I)) {
  if (processLoad(Load))

    addToLeaderTable(Num, Load, Load->getParent());

  if (!BI->isConditional())

  if (isa<Constant>(BI->getCondition()))
    return processFoldableCondBr(BI);

  Value *BranchCond = BI->getCondition();

  if (TrueSucc == FalseSucc)

  bool Changed = false;

    Changed |= propagateEquality(BranchCond, TrueVal, TrueE, true);

    Changed |= propagateEquality(BranchCond, FalseVal, FalseE, true);

  Value *SwitchCond = SI->getCondition();

  bool Changed = false;

  for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i)
    ++SwitchEdges[SI->getSuccessor(i)];

    if (SwitchEdges.lookup(Dst) == 1) {

      Changed |= propagateEquality(SwitchCond, i->getCaseValue(), E, true);

if (I->getType()->isVoidTy())

if (isa<AllocaInst>(I) || I->isTerminator() || isa<PHINode>(I)) {
  addToLeaderTable(Num, I, I->getParent());

if (Num >= NextNum) {
  addToLeaderTable(Num, I, I->getParent());

Value *Repl = findLeader(I->getParent(), Num);

  addToLeaderTable(Num, I, I->getParent());
InvalidBlockRPONumbers = true;

MSSAU = MSSA ? &Updater : nullptr;

bool Changed = false;
bool ShouldContinue = true;

  Changed |= removedBlock;

unsigned Iteration = 0;
while (ShouldContinue) {

  ShouldContinue = iterateOnFunction(F);
  Changed |= ShouldContinue;

  assignValNumForDeadCode();
  bool PREChanged = true;
  while (PREChanged) {
    PREChanged = performPRE(F);
    Changed |= PREChanged;

cleanupGlobalSets();
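// Editorial note: runImpl is an iterate-to-fixpoint driver: value numbering
// and simplification repeat until a full sweep over the function changes
// nothing, and only then does PRE run (also to fixpoint), since each PRE
// insertion can expose new fully redundant expressions.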
2757 "We expect InstrsToErase to be empty across iterations");
2758 if (DeadBlocks.count(BB))
2762 ReplaceOperandsWithMap.clear();
2763 bool ChangedFunction =
false;
2771 for (
PHINode *PN : PHINodesToRemove) {
2773 removeInstruction(PN);
2778 if (!ReplaceOperandsWithMap.empty())
2779 ChangedFunction |= replaceOperandsForInBlockEquality(&*BI);
2780 ChangedFunction |= processInstruction(&*BI);
2782 if (InstrsToErase.
empty()) {
2788 NumGVNInstr += InstrsToErase.
size();
2791 bool AtStart = BI == BB->
begin();
2795 for (
auto *
I : InstrsToErase) {
2796 assert(
I->getParent() == BB &&
"Removing instruction from wrong block?");
2800 removeInstruction(
I);
2802 InstrsToErase.clear();
2810 return ChangedFunction;
for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) {

  if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))

  if (Value *V = findLeader(Pred, TValNo)) {
    Instr->setOperand(i, V);

addToLeaderTable(Num, Instr, Pred);
bool GVNPass::performScalarPRE(Instruction *CurInst) {
  if (isa<AllocaInst>(CurInst) || CurInst->isTerminator() ||

      isa<DbgInfoIntrinsic>(CurInst))

  if (isa<CmpInst>(CurInst))

  if (isa<GetElementPtrInst>(CurInst))

  if (auto *CallB = dyn_cast<CallBase>(CurInst)) {

    if (CallB->isInlineAsm())

  unsigned NumWith = 0;
  unsigned NumWithout = 0;

  if (InvalidBlockRPONumbers)
    assignBlockRPONumber(*CurrentBlock->getParent());

    assert(BlockRPONumber.count(P) && BlockRPONumber.count(CurrentBlock) &&
           "Invalid BlockRPONumber map.");
    if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock]) {

    Value *predV = findLeader(P, TValNo);

    } else if (predV == CurInst) {

  if (NumWithout > 1 || NumWith == 0)

  if (NumWithout != 0) {

    toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));

    PREInstr = CurInst->clone();
    if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {

      verifyRemoved(PREInstr);

  assert(PREInstr != nullptr || NumWithout == 0);

                              CurInst->getName() + ".pre-phi");
  Phi->insertBefore(CurrentBlock->begin());
  for (unsigned i = 0, e = predMap.size(); i != e; ++i) {
    if (Value *V = predMap[i].first) {

      Phi->addIncoming(V, predMap[i].second);

      Phi->addIncoming(PREInstr, PREPred);

  addToLeaderTable(ValNo, Phi, CurrentBlock);

  if (MD && Phi->getType()->isPtrOrPtrVectorTy())

  removeFromLeaderTable(ValNo, CurInst, CurrentBlock);

  removeInstruction(CurInst);
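// Editorial note (illustrative): scalar PRE has the same shape as load PRE.
// If, say, `%a + %b` is computed in only one of two predecessors of this
// block, a clone is inserted into the other predecessor and the copies are
// merged by the "<name>.pre-phi" phi created above, making the original
// computation in this block redundant.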
bool Changed = false;

  if (CurrentBlock == &F.getEntryBlock())

  if (CurrentBlock->isEHPad())

       BE = CurrentBlock->end();

    Changed |= performScalarPRE(CurInst);

if (splitCriticalEdges())

InvalidBlockRPONumbers = true;

bool GVNPass::splitCriticalEdges() {
  if (toSplit.empty())

  bool Changed = false;

    std::pair<Instruction *, unsigned> Edge = toSplit.pop_back_val();

  } while (!toSplit.empty());

  InvalidBlockRPONumbers = true;
bool GVNPass::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  bool Changed = false;

    Changed |= processBlock(BB);

void GVNPass::cleanupGlobalSets() {

  LeaderTable.clear();
  BlockRPONumber.clear();
  TableAllocator.Reset();

  InvalidBlockRPONumbers = true;

  I->eraseFromParent();

void GVNPass::verifyRemoved(const Instruction *Inst) const {

  for (const auto &I : LeaderTable) {
    const LeaderTableEntry *Node = &I.second;
    assert(Node->Val != Inst && "Inst still in value numbering scope!");

    while (Node->Next) {

      assert(Node->Val != Inst && "Inst still in value numbering scope!");
while (!NewDead.empty()) {

  if (DeadBlocks.count(D))

  DeadBlocks.insert(Dom.begin(), Dom.end());

    if (DeadBlocks.count(S))

    bool AllPredDead = true;

      if (!DeadBlocks.count(P)) {
        AllPredDead = false;

  if (DeadBlocks.count(B))

    if (!DeadBlocks.count(P))

      DeadBlocks.insert(P = S);

    if (!DeadBlocks.count(P))

bool GVNPass::processFoldableCondBr(BranchInst *BI) {

  if (DeadBlocks.count(DeadRoot))

    DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);

  addDeadBlock(DeadRoot);

void GVNPass::assignValNumForDeadCode() {

    addToLeaderTable(ValNum, &Inst, BB);
if (skipFunction(F))

auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();

auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>();
return Impl.runImpl(
    F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
    getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
    getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
    getAnalysis<AAResultsWrapperPass>().getAAResults(),
    Impl.isMemDepEnabled()
        ? &getAnalysis<MemoryDependenceWrapperPass>().getMemDep()
        : nullptr,
    LIWP ? &LIWP->getLoopInfo() : nullptr,
    &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(),
    MSSAWP ? &MSSAWP->getMSSA() : nullptr);

if (Impl.isMemDepEnabled())
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the simple types necessary to represent the attributes associated with functions a...
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static RegisterPass< DebugifyFunctionPass > DF("debugify-function", "Attach debug info to a function")
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool hasUsersIn(Value *V, BasicBlock *BB)
static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo, DominatorTree *DT, OptimizationRemarkEmitter *ORE)
Try to locate the three instruction involved in a missed load-elimination case that is due to an inte...
static void reportLoadElim(LoadInst *Load, Value *AvailableValue, OptimizationRemarkEmitter *ORE)
static cl::opt< uint32_t > MaxNumInsnsPerBlock("gvn-max-num-insns", cl::Hidden, cl::init(100), cl::desc("Max number of instructions to scan in each basic block in GVN " "(default = 100)"))
static cl::opt< bool > GVNEnableMemDep("enable-gvn-memdep", cl::init(true))
static cl::opt< bool > GVNEnableLoadInLoopPRE("enable-load-in-loop-pre", cl::init(true))
static cl::opt< uint32_t > MaxNumDeps("gvn-max-num-deps", cl::Hidden, cl::init(100), cl::desc("Max number of dependences to attempt Load PRE (default = 100)"))
static Value * ConstructSSAForLoadSet(LoadInst *Load, SmallVectorImpl< AvailableValueInBlock > &ValuesPerBlock, GVNPass &gvn)
Given a set of loads specified by ValuesPerBlock, construct SSA form, allowing us to eliminate Load.
static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E, DominatorTree *DT)
There is an edge from 'Src' to 'Dst'.
static bool impliesEquivalanceIfFalse(CmpInst *Cmp)
static bool IsValueFullyAvailableInBlock(BasicBlock *BB, DenseMap< BasicBlock *, AvailabilityState > &FullyAvailableBlocks)
Return true if we can prove that the value we're analyzing is fully available in the specified block.
static Value * findDominatingValue(const MemoryLocation &Loc, Type *LoadTy, Instruction *From, AAResults *AA)
static bool isLifetimeStart(const Instruction *Inst)
static cl::opt< bool > GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre", cl::init(false))
static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl)
static void replaceValuesPerBlockEntry(SmallVectorImpl< AvailableValueInBlock > &ValuesPerBlock, Value *OldValue, Value *NewValue)
If the specified OldValue exists in ValuesPerBlock, replace its value with NewValue.
static bool impliesEquivalanceIfTrue(CmpInst *Cmp)
@ Unavailable
We know the block is not fully available. This is a fixpoint.
@ Available
We know the block is fully available. This is a fixpoint.
@ SpeculativelyAvailable
We do not know whether the block is fully available or not, but we are currently speculating that it ...
static cl::opt< bool > GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden)
static cl::opt< uint32_t > MaxNumVisitedInsts("gvn-max-num-visited-insts", cl::Hidden, cl::init(100), cl::desc("Max number of visited instructions when trying to find " "dominating value of select dependency (default = 100)"))
static cl::opt< uint32_t > MaxBBSpeculations("gvn-max-block-speculations", cl::Hidden, cl::init(600), cl::desc("Max number of blocks we're willing to speculate on (and recurse " "into) when deducing if a value is fully available or not in GVN " "(default = 600)"))
static bool liesBetween(const Instruction *From, Instruction *Between, const Instruction *To, DominatorTree *DT)
Assuming To can be reached from both From and Between, does Between lie on every path from From to To...
static cl::opt< bool > GVNEnableLoadPRE("enable-load-pre", cl::init(true))
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
This is the interface for a simple mod/ref and alias analysis over globals.
static bool lookup(const GsymReader &GR, DataExtractor &Data, uint64_t &Offset, uint64_t BaseAddr, uint64_t Addr, SourceLocations &SrcLocs, llvm::Error &Err)
A Lookup helper functions.
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
This file implements a map that provides insertion order iteration.
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
Module.h This file contains the declarations for the Module class.
ppc ctr loops PowerPC CTR Loops Verify
This header defines various interfaces for pass management in LLVM.
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
This defines the Use class.
static const uint32_t IV[8]
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Class for arbitrary precision integers.
A container for analyses that lazily runs them and caches their results.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
This represents the llvm.assume intrinsic.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
const BasicBlock * getEnd() const
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
LLVMContext & getContext() const
Get the context in which this basic block lives.
bool isEHPad() const
Return true if this basic block is an exception handling block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
Conditional or Unconditional Branch instruction.
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
void Reset()
Deallocate all but the current slab and reset the current pointer to the beginning of it,...
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
This is the shared class of boolean and integer constants.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
static ConstantInt * getTrue(LLVMContext &Context)
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
static ConstantInt * getFalse(LLVMContext &Context)
This is an important base class in LLVM.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&... Args)
Analysis pass which computes a DominatorTree.
void getDescendants(NodeT *R, SmallVectorImpl< NodeT * > &Result) const
Get all nodes dominated by R, including R itself.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Class representing an expression and its matching format.
FunctionPass class - This class is used to implement most global optimizations.
Represents calls to the gc.relocate intrinsic.
This class holds the mapping between values and value numbers.
uint32_t lookupOrAddCmp(unsigned Opcode, CmpInst::Predicate Pred, Value *LHS, Value *RHS)
Returns the value number of the given comparison, assigning it a new number if it did not have one be...
uint32_t getNextUnusedValueNumber()
uint32_t lookupOrAdd(Value *V)
lookup_or_add - Returns the value number for the specified value, assigning it a new number if it did...
uint32_t lookup(Value *V, bool Verify=true) const
Returns the value number of the specified value.
void setAliasAnalysis(AAResults *A)
void clear()
Remove all entries from the ValueTable.
bool exists(Value *V) const
Returns true if a value number exists for the specified value.
uint32_t phiTranslate(const BasicBlock *BB, const BasicBlock *PhiBlock, uint32_t Num, GVNPass &Gvn)
Wrap phiTranslateImpl to provide caching functionality.
void setMemDep(MemoryDependenceResults *M)
void erase(Value *v)
Remove a value from the value numbering.
void add(Value *V, uint32_t num)
add - Insert a value into the table with a specified value number.
void setDomTree(DominatorTree *D)
void eraseTranslateCacheEntry(uint32_t Num, const BasicBlock &CurrBlock)
Erase stale entry from phiTranslate cache so phiTranslate can be computed again.
void verifyRemoved(const Value *) const
verifyRemoved - Verify that the value is removed from all internal data structures.
The core GVN pass object.
friend class gvn::GVNLegacyPass
bool isPREEnabled() const
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Run the pass over the function.
AAResults * getAliasAnalysis() const
bool isLoadPREEnabled() const
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
DominatorTree & getDominatorTree() const
bool isLoadInLoopPREEnabled() const
bool isLoadPRESplitBackedgeEnabled() const
void markInstructionForDeletion(Instruction *I)
This removes the specified instruction from our various maps and marks it for deletion.
bool isMemDepEnabled() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Legacy wrapper pass to provide the GlobalsAAResult object.
This class allows to keep track on instructions with implicit control flow.
bool isDominatedByICFIFromSameBlock(const Instruction *Insn)
Returns true if the first ICFI of Insn's block exists and dominates Insn.
bool hasICF(const BasicBlock *BB)
Returns true if at least one instruction from the given basic block has implicit control flow.
void clear()
Invalidates all information from this tracking.
void removeUsersOf(const Instruction *Inst)
Notifies this tracking that we are going to replace all uses of Inst.
void insertInstructionTo(const Instruction *Inst, const BasicBlock *BB)
Notifies this tracking that we are going to insert a new instruction Inst to the basic block BB.
void removeInstruction(const Instruction *Inst)
Notifies this tracking that we are going to remove the instruction Inst It makes all necessary update...
Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following:
unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
const BasicBlock * getParent() const
bool mayHaveSideEffects() const LLVM_READONLY
Return true if the instruction may have side effects.
bool isTerminator() const
bool mayReadFromMemory() const LLVM_READONLY
Return true if this instruction may read memory.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
void dropUnknownNonDebugMetadata(ArrayRef< unsigned > KnownIDs)
Drop all unknown metadata except for debug locations.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Represents a single loop in the control flow graph.
This class implements a map that also provides access to all stored values in a deterministic order.
iterator find(const KeyT &Key)
A memory dependence query can return one of three different answers.
bool isClobber() const
Tests if this MemDepResult represents a query that is an instruction clobber dependency.
bool isNonLocal() const
Tests if this MemDepResult represents a query that is transparent to the start of the block,...
bool isDef() const
Tests if this MemDepResult represents a query that is an instruction definition dependency.
bool isLocal() const
Tests if this MemDepResult represents a valid local query (Clobber/Def).
Instruction * getInst() const
If this is a normal dependency, returns the instruction that is depended on.
This is the common base class for memset/memcpy/memmove.
An analysis that produces MemoryDependenceResults for a function.
Provides a lazy, caching interface for making common memory aliasing information queries,...
std::vector< NonLocalDepEntry > NonLocalDepInfo
void invalidateCachedPredecessors()
Clears the PredIteratorCache info.
void invalidateCachedPointerInfo(Value *Ptr)
Invalidates cached information about the specified pointer, because it may be too conservative in mem...
std::optional< int32_t > getClobberOffset(LoadInst *DepInst) const
Return the clobber offset to dependent instruction.
void removeInstruction(Instruction *InstToRemove)
Removes an instruction from the dependence analysis, updating the dependence of instructions that pre...
MemDepResult getDependency(Instruction *QueryInst)
Returns the instruction on which a memory operation depends.
const NonLocalDepInfo & getNonLocalCallDependency(CallBase *QueryCall)
Perform a full dependency query for the specified call, returning the set of blocks that the value is...
void getNonLocalPointerDependency(Instruction *QueryInst, SmallVectorImpl< NonLocalDepResult > &Result)
Perform a full dependency query for an access to the QueryInst's specified memory location,...
A wrapper analysis pass for the legacy pass manager that exposes a MemoryDepnedenceResults instance.
Representation for a specific memory location.
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
const Value * Ptr
The address of the start of the location.
An analysis that produces MemorySSA for a function.
MemorySSA * getMemorySSA() const
Get handle on MemorySSA.
MemoryUseOrDef * createMemoryAccessBefore(Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt)
Create a MemoryAccess in MemorySSA before an existing MemoryAccess.
void insertDef(MemoryDef *Def, bool RenameUses=false)
Insert a definition into the MemorySSA IR.
MemoryAccess * createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition, const BasicBlock *BB, MemorySSA::InsertionPlace Point)
Create a MemoryAccess in MemorySSA at a specified point in a block.
void insertUse(MemoryUse *Use, bool RenameUses=false)
void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
Legacy analysis pass which computes MemorySSA.
Encapsulates MemorySSA, including all data associated with memory accesses.
const AccessList * getBlockAccesses(const BasicBlock *BB) const
Return the list of MemoryAccess's for a given basic block.
void verifyMemorySSA(VerificationLevel=VerificationLevel::Fast) const
Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right ...
Class that has the common methods + fields of memory uses/defs.
This is an entry in the NonLocalDepInfo cache.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
PHITransAddr - An address value which tracks and handles phi translation.
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
void preserve()
Mark an analysis as preserved.
Helper class for SSA formation on a set of values defined in multiple blocks.
void Initialize(Type *Ty, StringRef Name)
Reset this object to get ready for a new set of SSA updates with type 'Ty'.
Value * GetValueInMiddleOfBlock(BasicBlock *BB)
Construct SSA form, materializing a value that is live in the middle of the specified block.
bool HasValueForBlock(BasicBlock *BB) const
Return true if the SSAUpdater already has a value for the specified block.
void AddAvailableValue(BasicBlock *BB, Value *V)
Indicate that a rewritten value is available in the specified block with the specified value.
This class represents the LLVM 'select' instruction.
const Value * getCondition() const
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", Instruction *InsertBefore=nullptr, Instruction *MDFrom=nullptr)
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isVoidTy() const
Return true if this is 'void'.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
bool canBeFreed() const
Return true if the memory object referred to by V can by freed in the scope for which the SSA value d...
void deleteValue()
Delete a pointer to a generic Value.
StringRef getName() const
Return a constant reference to the value's name.
Represents an op.with.overflow intrinsic.
An efficient, type-erasing, non-owning reference to a callable.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
GVNLegacyPass(bool NoMemDepAnalysis=!GVNEnableMemDep)
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
An opaque object representing a hash code.
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
GVNPass::ValueTable
This class holds the mapping between values and value numbers, used to determine the expression-wise equivalence of two values.
@ C
The default llvm calling convention, compatible with C.
Predicate
Predicate - This enumeration lists the possible predicates for CmpInst subclasses.
bool match(Val *V, const Pattern &P)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
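The matchers compose into declarative shape tests over the IR; GVN uses m_LogicalAnd/m_LogicalOr when walking branch conditions to propagate equalities. A sketch with a hypothetical predicate:

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;

// Recognize `!(A || B)`, where || may be the select-based logical form,
// binding the two operands into A and B on success.
bool isNotOfLogicalOr(Value *V, Value *&A, Value *&B) {
  return match(V, m_Not(m_LogicalOr(m_Value(A), m_Value(B))));
}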
Value * getValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
If analyzeLoadFromClobberingStore/Load returned an offset, this function can be used to actually perform the extraction of the value from SrcVal.
int analyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr, StoreInst *DepSI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the store at DepSI.
Value * getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
If analyzeLoadFromClobberingMemInst returned an offset, this function can be used to actually perform the extraction of the value from the memory intrinsic.
int analyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, LoadInst *DepLI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the load at DepLI.
int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr, MemIntrinsic *DepMI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the memory intrinsic at DepMI.
bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy, const DataLayout &DL)
Return true if CoerceAvailableValueToLoadType would succeed if it was called.
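The analyze*/get* pairs above split the work in two: the analyze step returns a byte offset (or -1 on failure) without touching the IR, and the get step emits the coercion code. A sketch of the store-forwarding case under those signatures; tryForwardStore is an illustrative name and the functions live in the llvm::VNCoercion namespace:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/VNCoercion.h"
using namespace llvm;
using namespace llvm::VNCoercion;

// If an earlier store clobbers the load's memory, try to forward the stored
// bits instead of reloading.
Value *tryForwardStore(LoadInst *Load, StoreInst *DepSI,
                       const DataLayout &DL) {
  int Offset = analyzeLoadFromClobberingStore(
      Load->getType(), Load->getPointerOperand(), DepSI, DL);
  if (Offset < 0)
    return nullptr; // Stored bits cannot be coerced to the load's type.
  return getValueForLoad(DepSI->getValueOperand(), (unsigned)Offset,
                         Load->getType(), Load, DL);
}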
initializer< Ty > init(const Ty &Val)
A private "module" namespace for types and utilities used by GVN.
NodeAddr< InstrNode * > Instr
NodeAddr< PhiNode * > Phi
This is an optimization pass for GlobalISel generic memory operations.
Interval::succ_iterator succ_end(Interval *I)
hash_code hash_value(const FixedPointSemantics &Val)
Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
unsigned GetSuccessorNumber(const BasicBlock *BB, const BasicBlock *Succ)
Search for the specified successor of basic block BB and return its position in the terminator instruction's list of successors.
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing the effect of MI in a DIExpression.
auto successors(const MachineBasicBlock *BB)
Interval::succ_iterator succ_begin(Interval *I)
succ_begin/succ_end - define methods so that Intervals may be used just like BasicBlocks can with the succ_* functions and iterators.
void append_range(Container &C, Range &&R)
Wrapper function to append a range to a container.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
bool isAssumeWithEmptyBundle(const AssumeInst &Assume)
Return true iff the operand bundles of the provided llvm.assume do not contain any valuable information.
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
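GVN applies this before value numbering an instruction: if the instruction folds to an existing value, its uses can be rerouted immediately instead of numbering a redundant computation. A minimal sketch; foldIfSimplifiable is a hypothetical name:

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Route uses of I to its simplified form, if one exists. The now-dead
// instruction is left for the caller to erase.
bool foldIfSimplifiable(Instruction *I, const SimplifyQuery &Q) {
  if (Value *V = simplifyInstruction(I, Q)) {
    I->replaceAllUsesWith(V);
    return true;
  }
  return false;
}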
void initializeGVNLegacyPassPass(PassRegistry &)
Interval::pred_iterator pred_end(Interval *I)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
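Combined with make_early_inc_range from above, this enables a safe erase-while-iterating sweep. A sketch assuming one pass suffices; reaching a fixed point would need iteration, since erasing an instruction can newly kill its operands:

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Function.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

// One dead-code sweep over F; returns the number of instructions erased.
unsigned sweepDeadInstructions(Function &F, const TargetLibraryInfo *TLI) {
  unsigned NumErased = 0;
  for (BasicBlock &BB : F)
    for (Instruction &I : make_early_inc_range(BB))
      if (isInstructionTriviallyDead(&I, TLI)) {
        I.eraseFromParent();
        ++NumErased;
      }
  return NumErased;
}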
bool isModSet(const ModRefInfo MRI)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the pred_* functions and iterators.
void patchReplacementInstruction(Instruction *I, Value *Repl)
Patch the replacement so that it is not more restrictive than the value being replaced.
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge)
Replace each use of 'From' with 'To' if that use is dominated by the given edge.
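This is the workhorse of GVN's equality propagation: after a conditional branch on x == c, every use of x dominated by the true edge may be rewritten to c. A sketch; the helper name and the assumption that Br is a conditional branch are illustrative:

#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

// Rewrite uses of X to C wherever the branch's true edge dominates the use.
unsigned propagateEquality(Value *X, Value *C, BranchInst *Br,
                           DominatorTree &DT) {
  BasicBlockEdge TrueEdge(Br->getParent(), Br->getSuccessor(0));
  return replaceDominatedUsesWith(X, C, DT, TrueEdge);
}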
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Global
Append to llvm.global_dtors.
void combineMetadataForCSE(Instruction *K, const Instruction *J, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
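Metadata is the subtle part of CSE-style replacement: the surviving instruction may only keep claims (nonnull, range, TBAA, and so on) that both copies could honestly make. A sketch of the replacement sequence, assuming patchReplacementInstruction drives the metadata intersection via combineMetadataForCSE; the helper name is hypothetical:

#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

// Keep Repl, drop the duplicate I: weaken Repl's flags/metadata first, then
// reroute uses and erase the duplicate.
void replaceWithCSEPartner(Instruction *I, Instruction *Repl) {
  patchReplacementInstruction(I, Repl);
  I->replaceAllUsesWith(Repl);
  I->eraseFromParent();
}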
bool VerifyMemorySSA
Enables verification of MemorySSA.
bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and, if the resulting llvm.assume is valid, inserts it before I.
bool MergeBlockIntoPredecessor(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, MemoryDependenceResults *MemDep=nullptr, bool PredecessorWithTwoSuccessors=false, DominatorTree *DT=nullptr)
Attempts to merge a block into its predecessor, if possible.
BasicBlock * SplitCriticalEdge(Instruction *TI, unsigned SuccNum, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions(), const Twine &BBName="")
If this edge is a critical edge, insert a new node to split the critical edge.
FunctionPass * createGVNPass(bool NoMemDepAnalysis=false)
Create a legacy GVN pass.
bool isCriticalEdge(const Instruction *TI, unsigned SuccNum, bool AllowIdenticalEdges=false)
Return true if the specified edge is a critical edge.
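Load PRE can only insert a load into a predecessor if the edge into the load's block is not critical; otherwise the edge must be split first so the inserted load does not execute on unrelated paths. A sketch combining the helpers above; splitIfCritical is an illustrative name:

#include "llvm/Analysis/CFG.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;

// Split the Pred -> Succ edge if it is critical; returns the new block, or
// nullptr if the edge was not critical (or could not be split).
BasicBlock *splitIfCritical(BasicBlock *Pred, BasicBlock *Succ,
                            const CriticalEdgeSplittingOptions &Opts) {
  Instruction *TI = Pred->getTerminator();
  unsigned SuccNum = GetSuccessorNumber(Pred, Succ);
  if (!isCriticalEdge(TI, SuccNum))
    return nullptr;
  return SplitCriticalEdge(TI, SuccNum, Opts);
}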
bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if the instruction does not have any effects besides calculating the result and does not have undefined behavior.
constexpr unsigned BitWidth
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
bool pred_empty(const BasicBlock *BB)
iterator_range< df_iterator< T > > depth_first(const T &G)
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
bool EliminateDuplicatePHINodes(BasicBlock *BB)
Check for and eliminate duplicate PHI nodes in this block.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
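These two functions are the building blocks behind the DenseMapInfo hash for GVN's Expression: fixed fields mix via hash_combine, the variable-length operand list via hash_combine_range. A small sketch with an illustrative signature:

#include "llvm/ADT/Hashing.h"
#include <cstdint>
#include <vector>
using namespace llvm;

// Hash an expression-like key: scalar fields first, then the operand list,
// so structurally equal keys always hash equally.
hash_code hashExpr(unsigned Opcode, const std::vector<uint32_t> &Ops) {
  return hash_combine(Opcode, hash_combine_range(Ops.begin(), Ops.end()));
}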
bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in ExclusionSet.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
Option class for critical edge splitting.
static GVNPass::Expression getTombstoneKey()
static bool isEqual(const GVNPass::Expression &LHS, const GVNPass::Expression &RHS)
static unsigned getHashValue(const GVNPass::Expression &e)
static GVNPass::Expression getEmptyKey()
An information struct used to provide DenseMap with the various necessary components for a given value type T.
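Any type used as a DenseMap key needs exactly these four members, supplied by specializing DenseMapInfo, as the Expression specialization above does. A minimal sketch for a hypothetical two-field key; the reserved empty/tombstone keys must never compare equal to real data:

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"

struct PairKey {
  unsigned A, B;
  bool operator==(const PairKey &O) const { return A == O.A && B == O.B; }
};

namespace llvm {
template <> struct DenseMapInfo<PairKey> {
  static PairKey getEmptyKey() { return {~0U, 0}; }         // Reserved marker.
  static PairKey getTombstoneKey() { return {~0U - 1, 0}; } // Erased slots.
  static unsigned getHashValue(const PairKey &K) {
    return static_cast<unsigned>(hash_combine(K.A, K.B));
  }
  static bool isEqual(const PairKey &L, const PairKey &R) { return L == R; }
};
} // namespace llvm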
A set of parameters to control various transforms performed by GVN pass.
std::optional< bool > AllowLoadPRESplitBackedge
std::optional< bool > AllowPRE
std::optional< bool > AllowLoadInLoopPRE
std::optional< bool > AllowMemDep
std::optional< bool > AllowLoadPRE
SmallVector< uint32_t, 4 > varargs
bool operator==(const Expression &other) const
friend hash_code hash_value(const Expression &Value)
Expression(uint32_t o=~2U)
A CRTP mix-in to automatically provide informational APIs needed for passes.
Represents an AvailableValue which can be rematerialized at the end of the associated BasicBlock.
static AvailableValueInBlock get(BasicBlock *BB, Value *V, unsigned Offset=0)
static AvailableValueInBlock getUndef(BasicBlock *BB)
static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV)
Value * MaterializeAdjustedValue(LoadInst *Load, GVNPass &gvn) const
Emit code at the end of this block to adjust the value defined here to the specified type.
AvailableValue AV
AV - The actual available value.
static AvailableValueInBlock getSelect(BasicBlock *BB, SelectInst *Sel, Value *V1, Value *V2)
BasicBlock * BB
BB - The basic block in question.
Represents a particular available value that we know how to materialize.
unsigned Offset
Offset - The byte offset in Val that is interesting for the load query.
bool isSimpleValue() const
static AvailableValue getSelect(SelectInst *Sel, Value *V1, Value *V2)
bool isCoercedLoadValue() const
static AvailableValue get(Value *V, unsigned Offset=0)
ValType Kind
Kind of the live-out value.
LoadInst * getCoercedLoadValue() const
static AvailableValue getLoad(LoadInst *Load, unsigned Offset=0)
static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset=0)
bool isUndefValue() const
bool isSelectValue() const
Value * Val
Val - The value that is live out of the block.
Value * V1
V1, V2 - The dominating non-clobbered values of SelectVal.
Value * MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt, GVNPass &gvn) const
Emit code at the specified insertion point to adjust the value defined here to the specified type.
static AvailableValue getUndef()
SelectInst * getSelectValue() const
Value * getSimpleValue() const
bool isMemIntrinValue() const
MemIntrinsic * getMemIntrinValue() const
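These two structs are private to GVN, so the following is an illustration of the internal flow rather than client code: each load dependency is described as an AvailableValue, tagged with its block, and MaterializeAdjustedValue later emits whatever coercion code the (Kind, Offset) pair requires.

// Illustrative, not a public API: record that DepSI's stored value is live
// out of BB at byte offset Offset for the load being processed.
AvailableValueInBlock fromStore(BasicBlock *BB, StoreInst *DepSI,
                                unsigned Offset) {
  return AvailableValueInBlock::get(
      BB, AvailableValue::get(DepSI->getValueOperand(), Offset));
}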