// Debug type tag used by LLVM_DEBUG/STATISTIC output for this pass.
#define DEBUG_TYPE "gvn"
90STATISTIC(NumGVNInstr,
"Number of instructions deleted");
92STATISTIC(NumGVNPRE,
"Number of instructions PRE'd");
94STATISTIC(NumGVNSimpl,
"Number of instructions simplified");
95STATISTIC(NumGVNEqProp,
"Number of equalities propagated");
97STATISTIC(NumPRELoopLoad,
"Number of loop loads PRE'd");
99 "Number of loads moved to predecessor of a critical edge in PRE");
101STATISTIC(IsValueFullyAvailableInBlockNumSpeculationsMax,
102 "Number of blocks speculated as available in "
103 "IsValueFullyAvailableInBlock(), max");
105 "Number of times we we reached gvn-max-block-speculations cut-off "
106 "preventing further exploration");
121 cl::desc(
"Max number of dependences to attempt Load PRE (default = 100)"));
126 cl::desc(
"Max number of blocks we're willing to speculate on (and recurse "
127 "into) when deducing if a value is fully available or not in GVN "
132 cl::desc(
"Max number of visited instructions when trying to find "
133 "dominating value of select dependency (default = 100)"));
137 cl::desc(
"Max number of instructions to scan in each basic block in GVN "
161 if ((!
Attrs.isEmpty() || !
Other.Attrs.isEmpty()) &&
162 !
Attrs.intersectWith(
Ty->getContext(),
Other.Attrs).has_value())
299 Res.
AV = std::move(
AV);
320 return AV.MaterializeAdjustedValue(Load,
BB->getTerminator());
331 E.Opcode =
I->getOpcode();
336 E.VarArgs.push_back(
lookupOrAdd(GCR->getOperand(0)));
337 E.VarArgs.push_back(
lookupOrAdd(GCR->getBasePtr()));
338 E.VarArgs.push_back(
lookupOrAdd(GCR->getDerivedPtr()));
340 for (
Use &
Op :
I->operands())
343 if (
I->isCommutative()) {
348 assert(
I->getNumOperands() >= 2 &&
"Unsupported commutative instruction!");
349 if (
E.VarArgs[0] >
E.VarArgs[1])
351 E.Commutative =
true;
357 if (
E.VarArgs[0] >
E.VarArgs[1]) {
362 E.Commutative =
true;
364 E.VarArgs.append(IVI->idx_begin(), IVI->idx_end());
366 ArrayRef<int> ShuffleMask = SVI->getShuffleMask();
367 E.VarArgs.append(ShuffleMask.
begin(), ShuffleMask.
end());
369 E.Attrs = CB->getAttributes();
375GVNPass::Expression GVNPass::ValueTable::createCmpExpr(
377 assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
378 "Not a comparison!");
381 E.VarArgs.push_back(lookupOrAdd(
LHS));
382 E.VarArgs.push_back(lookupOrAdd(
RHS));
385 if (
E.VarArgs[0] >
E.VarArgs[1]) {
389 E.Opcode = (Opcode << 8) | Predicate;
390 E.Commutative =
true;
395GVNPass::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
396 assert(EI &&
"Not an ExtractValueInst?");
407 E.VarArgs.push_back(lookupOrAdd(WO->
getLHS()));
408 E.VarArgs.push_back(lookupOrAdd(WO->
getRHS()));
416 E.VarArgs.push_back(lookupOrAdd(
Op));
423GVNPass::Expression GVNPass::ValueTable::createGEPExpr(GetElementPtrInst *
GEP) {
425 Type *PtrTy =
GEP->getType()->getScalarType();
426 const DataLayout &
DL =
GEP->getDataLayout();
427 unsigned BitWidth =
DL.getIndexTypeSizeInBits(PtrTy);
428 SmallMapVector<Value *, APInt, 4> VariableOffsets;
430 if (
GEP->collectOffset(
DL,
BitWidth, VariableOffsets, ConstantOffset)) {
434 E.Opcode =
GEP->getOpcode();
436 E.VarArgs.push_back(lookupOrAdd(
GEP->getPointerOperand()));
437 for (
const auto &[V, Scale] : VariableOffsets) {
438 E.VarArgs.push_back(lookupOrAdd(V));
439 E.VarArgs.push_back(lookupOrAdd(ConstantInt::get(
Context, Scale)));
441 if (!ConstantOffset.isZero())
443 lookupOrAdd(ConstantInt::get(
Context, ConstantOffset)));
447 E.Opcode =
GEP->getOpcode();
448 E.Ty =
GEP->getSourceElementType();
449 for (Use &
Op :
GEP->operands())
450 E.VarArgs.push_back(lookupOrAdd(
Op));
459GVNPass::ValueTable::ValueTable() =
default;
460GVNPass::ValueTable::ValueTable(
const ValueTable &) =
default;
461GVNPass::ValueTable::ValueTable(
ValueTable &&) =
default;
462GVNPass::ValueTable::~ValueTable() =
default;
468 ValueNumbering.
insert(std::make_pair(V, Num));
470 NumberingPhi[Num] = PN;
480 assert(MSSA &&
"addMemoryStateToExp should not be called without MemorySSA");
481 assert(MSSA->getMemoryAccess(
I) &&
"Instruction does not access memory");
482 MemoryAccess *MA = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(
I);
483 Exp.VarArgs.push_back(lookupOrAdd(MA));
494 if (
C->getFunction()->isPresplitCoroutine()) {
495 ValueNumbering[
C] = NextValueNumber;
496 return NextValueNumber++;
502 if (
C->isConvergent()) {
503 ValueNumbering[
C] = NextValueNumber;
504 return NextValueNumber++;
507 if (AA->doesNotAccessMemory(
C)) {
509 uint32_t
E = assignExpNewValueNum(Exp).first;
510 ValueNumbering[
C] =
E;
514 if (MD && AA->onlyReadsMemory(
C)) {
516 auto [
E, IsValNumNew] = assignExpNewValueNum(Exp);
518 ValueNumbering[
C] =
E;
522 MemDepResult LocalDep = MD->getDependency(
C);
525 ValueNumbering[
C] = NextValueNumber;
526 return NextValueNumber++;
529 if (LocalDep.
isDef()) {
534 if (!LocalDepCall || LocalDepCall->
arg_size() !=
C->arg_size()) {
535 ValueNumbering[
C] = NextValueNumber;
536 return NextValueNumber++;
539 for (
unsigned I = 0,
E =
C->arg_size();
I <
E; ++
I) {
540 uint32_t CVN = lookupOrAdd(
C->getArgOperand(
I));
541 uint32_t LocalDepCallVN = lookupOrAdd(LocalDepCall->
getArgOperand(
I));
542 if (CVN != LocalDepCallVN) {
543 ValueNumbering[
C] = NextValueNumber;
544 return NextValueNumber++;
548 uint32_t
V = lookupOrAdd(LocalDepCall);
549 ValueNumbering[
C] =
V;
555 MD->getNonLocalCallDependency(
C);
557 CallInst *CDep =
nullptr;
561 for (
const NonLocalDepEntry &
I : Deps) {
562 if (
I.getResult().isNonLocal())
567 if (!
I.getResult().isDef() || CDep !=
nullptr) {
574 if (NonLocalDepCall && DT->properlyDominates(
I.getBB(),
C->getParent())) {
575 CDep = NonLocalDepCall;
584 ValueNumbering[
C] = NextValueNumber;
585 return NextValueNumber++;
589 ValueNumbering[
C] = NextValueNumber;
590 return NextValueNumber++;
592 for (
unsigned I = 0,
E =
C->arg_size();
I <
E; ++
I) {
593 uint32_t CVN = lookupOrAdd(
C->getArgOperand(
I));
596 ValueNumbering[
C] = NextValueNumber;
597 return NextValueNumber++;
601 uint32_t
V = lookupOrAdd(CDep);
602 ValueNumbering[
C] =
V;
606 if (MSSA && IsMSSAEnabled && AA->onlyReadsMemory(
C)) {
608 addMemoryStateToExp(
C, Exp);
609 auto [
V,
_] = assignExpNewValueNum(Exp);
610 ValueNumbering[
C] =
V;
614 ValueNumbering[
C] = NextValueNumber;
615 return NextValueNumber++;
619uint32_t GVNPass::ValueTable::computeLoadStoreVN(Instruction *
I) {
620 if (!MSSA || !IsMSSAEnabled) {
621 ValueNumbering[
I] = NextValueNumber;
622 return NextValueNumber++;
626 Exp.Ty =
I->getType();
627 Exp.Opcode =
I->getOpcode();
628 for (Use &
Op :
I->operands())
629 Exp.VarArgs.push_back(lookupOrAdd(
Op));
630 addMemoryStateToExp(
I, Exp);
632 auto [
V,
_] = assignExpNewValueNum(Exp);
633 ValueNumbering[
I] =
V;
638bool GVNPass::ValueTable::exists(
Value *V)
const {
639 return ValueNumbering.contains(V);
652 if (VI != ValueNumbering.end())
657 ValueNumbering[V] = NextValueNumber;
660 return NextValueNumber++;
664 switch (
I->getOpcode()) {
665 case Instruction::Call:
667 case Instruction::FNeg:
668 case Instruction::Add:
669 case Instruction::FAdd:
670 case Instruction::Sub:
671 case Instruction::FSub:
672 case Instruction::Mul:
673 case Instruction::FMul:
674 case Instruction::UDiv:
675 case Instruction::SDiv:
676 case Instruction::FDiv:
677 case Instruction::URem:
678 case Instruction::SRem:
679 case Instruction::FRem:
680 case Instruction::Shl:
681 case Instruction::LShr:
682 case Instruction::AShr:
683 case Instruction::And:
684 case Instruction::Or:
685 case Instruction::Xor:
686 case Instruction::ICmp:
687 case Instruction::FCmp:
688 case Instruction::Trunc:
689 case Instruction::ZExt:
690 case Instruction::SExt:
691 case Instruction::FPToUI:
692 case Instruction::FPToSI:
693 case Instruction::UIToFP:
694 case Instruction::SIToFP:
695 case Instruction::FPTrunc:
696 case Instruction::FPExt:
697 case Instruction::PtrToInt:
698 case Instruction::PtrToAddr:
699 case Instruction::IntToPtr:
700 case Instruction::AddrSpaceCast:
701 case Instruction::BitCast:
702 case Instruction::Select:
703 case Instruction::Freeze:
704 case Instruction::ExtractElement:
705 case Instruction::InsertElement:
706 case Instruction::ShuffleVector:
707 case Instruction::InsertValue:
710 case Instruction::GetElementPtr:
713 case Instruction::ExtractValue:
716 case Instruction::PHI:
717 ValueNumbering[V] = NextValueNumber;
719 return NextValueNumber++;
720 case Instruction::Load:
721 case Instruction::Store:
722 return computeLoadStoreVN(
I);
724 ValueNumbering[V] = NextValueNumber;
725 return NextValueNumber++;
728 uint32_t E = assignExpNewValueNum(Exp).first;
729 ValueNumbering[V] = E;
738 assert(VI != ValueNumbering.end() &&
"Value not numbered?");
741 return (VI != ValueNumbering.end()) ? VI->second : 0;
748uint32_t GVNPass::ValueTable::lookupOrAddCmp(
unsigned Opcode,
751 Expression Exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
752 return assignExpNewValueNum(Exp).first;
757 ValueNumbering.clear();
758 ExpressionNumbering.clear();
759 NumberingPhi.clear();
761 PhiTranslateTable.clear();
770 uint32_t Num = ValueNumbering.lookup(V);
771 ValueNumbering.erase(V);
774 NumberingPhi.erase(Num);
776 NumberingBB.erase(Num);
781void GVNPass::ValueTable::verifyRemoved(
const Value *V)
const {
782 assert(!ValueNumbering.contains(V) &&
783 "Inst still occurs in value numbering map!");
792 LeaderListNode &Curr = NumToLeaders[
N];
793 if (!Curr.Entry.Val) {
799 LeaderListNode *
Node = TableAllocator.Allocate<LeaderListNode>();
802 Node->Next = Curr.Next;
810 LeaderListNode *Prev =
nullptr;
811 LeaderListNode *Curr = &NumToLeaders[
N];
813 while (Curr && (Curr->Entry.Val !=
I || Curr->Entry.BB != BB)) {
822 Prev->Next = Curr->Next;
825 Curr->Entry.Val =
nullptr;
826 Curr->Entry.BB =
nullptr;
828 LeaderListNode *
Next = Curr->Next;
829 Curr->Entry.Val =
Next->Entry.Val;
830 Curr->Entry.BB =
Next->Entry.BB;
831 Curr->Next =
Next->Next;
836void GVNPass::LeaderMap::verifyRemoved(
const Value *V)
const {
839 for (
const auto &
I : NumToLeaders) {
841 assert(
I.second.Entry.Val != V &&
"Inst still in value numbering scope!");
843 std::none_of(leader_iterator(&
I.second), leader_iterator(
nullptr),
844 [=](
const LeaderTableEntry &
E) {
return E.Val ==
V; }) &&
845 "Inst still in value numbering scope!");
866 return Options.AllowLoadPRESplitBackedge.value_or(
893 "On-demand computation of MemSSA implies that MemDep is disabled!");
897 bool Changed = runImpl(
F, AC, DT, TLI,
AA, MemDep, LI, &ORE,
898 MSSA ? &MSSA->getMSSA() :
nullptr);
913 OS, MapClassName2PassName);
916 if (Options.AllowPRE != std::nullopt)
917 OS << (*Options.AllowPRE ?
"" :
"no-") <<
"pre;";
918 if (Options.AllowLoadPRE != std::nullopt)
919 OS << (*Options.AllowLoadPRE ?
"" :
"no-") <<
"load-pre;";
920 if (Options.AllowLoadPRESplitBackedge != std::nullopt)
921 OS << (*Options.AllowLoadPRESplitBackedge ?
"" :
"no-")
922 <<
"split-backedge-load-pre;";
923 if (Options.AllowMemDep != std::nullopt)
924 OS << (*Options.AllowMemDep ?
"" :
"no-") <<
"memdep;";
925 if (Options.AllowMemorySSA != std::nullopt)
926 OS << (*Options.AllowMemorySSA ?
"" :
"no-") <<
"memoryssa";
933 removeInstruction(
I);
936#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
939 for (
const auto &[Num, Exp] : Map) {
940 errs() << Num <<
"\n";
971 std::optional<BasicBlock *> UnavailableBB;
975 unsigned NumNewNewSpeculativelyAvailableBBs = 0;
983 while (!Worklist.
empty()) {
987 std::pair<DenseMap<BasicBlock *, AvailabilityState>::iterator,
bool>
IV =
995 UnavailableBB = CurrBB;
1006 ++NumNewNewSpeculativelyAvailableBBs;
1012 MaxBBSpeculationCutoffReachedTimes += (int)OutOfBudget;
1014 UnavailableBB = CurrBB;
1020 NewSpeculativelyAvailableBBs.
insert(CurrBB);
1026#if LLVM_ENABLE_STATS
1027 IsValueFullyAvailableInBlockNumSpeculationsMax.updateMax(
1028 NumNewNewSpeculativelyAvailableBBs);
1033 auto MarkAsFixpointAndEnqueueSuccessors =
1035 auto It = FullyAvailableBlocks.
find(BB);
1036 if (It == FullyAvailableBlocks.
end())
1043 State = FixpointState;
1046 "Found a speculatively available successor leftover?");
1054 if (UnavailableBB) {
1061 while (!Worklist.
empty())
1062 MarkAsFixpointAndEnqueueSuccessors(Worklist.
pop_back_val(),
1070 while (!Worklist.
empty())
1071 MarkAsFixpointAndEnqueueSuccessors(Worklist.
pop_back_val(),
1075 "Must have fixed all the new speculatively available blocks.");
1078 return !UnavailableBB;
1087 if (V.AV.Val == OldValue)
1088 V.AV.Val = NewValue;
1089 if (V.AV.isSelectValue()) {
1090 if (V.AV.V1 == OldValue)
1092 if (V.AV.V2 == OldValue)
1107 if (ValuesPerBlock.
size() == 1 &&
1109 Load->getParent())) {
1110 assert(!ValuesPerBlock[0].AV.isUndefValue() &&
1111 "Dead BB dominate this block");
1112 return ValuesPerBlock[0].MaterializeAdjustedValue(Load);
1118 SSAUpdate.
Initialize(Load->getType(), Load->getName());
1123 if (AV.AV.isUndefValue())
1133 if (BB == Load->getParent() &&
1134 ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == Load) ||
1135 (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == Load)))
1148 Type *LoadTy = Load->getType();
1152 if (Res->
getType() != LoadTy) {
1167 Load->getFunction());
1177 if (!CoercedLoad->
hasMetadata(LLVMContext::MD_noundef))
1179 {LLVMContext::MD_dereferenceable,
1180 LLVMContext::MD_dereferenceable_or_null,
1181 LLVMContext::MD_invariant_load, LLVMContext::MD_invariant_group});
1197 assert(
V1 &&
V2 &&
"both value operands of the select must be present");
1206 assert(Res &&
"failed to materialize?");
1212 return II->getIntrinsicID() == Intrinsic::lifetime_start;
1229 Value *PtrOp = Load->getPointerOperand();
1235 for (
auto *U : PtrOp->
users()) {
1238 if (
I->getFunction() == Load->getFunction() && DT->
dominates(
I, Load)) {
1256 for (
auto *U : PtrOp->
users()) {
1259 if (
I->getFunction() == Load->getFunction() &&
1267 OtherAccess =
nullptr;
1286 using namespace ore;
1289 R <<
"load of type " << NV(
"Type", Load->getType()) <<
" not eliminated"
1294 R <<
" in favor of " << NV(
"OtherAccess", OtherAccess);
1296 R <<
" because it is clobbered by " << NV(
"ClobberedBy", DepInfo.
getInst());
1310 for (
auto *Inst = BB == FromBB ? From : BB->
getTerminator();
1318 if (
SI->isSimple() &&
SI->getPointerOperand() ==
Loc.Ptr &&
1319 SI->getValueOperand()->getType() == LoadTy)
1320 return SI->getValueOperand();
1324 if (LI->getPointerOperand() ==
Loc.Ptr && LI->getType() == LoadTy)
1330std::optional<AvailableValue>
1331GVNPass::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo,
1333 assert(
Load->isUnordered() &&
"rules below are incorrect for ordered access");
1338 const DataLayout &
DL =
Load->getDataLayout();
1345 if (
Address &&
Load->isAtomic() <= DepSI->isAtomic()) {
1361 if (DepLoad != Load &&
Address &&
1362 Load->isAtomic() <= DepLoad->isAtomic()) {
1369 DepLoad->getFunction())) {
1370 const auto ClobberOff = MD->getClobberOffset(DepLoad);
1372 Offset = (ClobberOff == std::nullopt || *ClobberOff < 0)
1399 dbgs() <<
" is clobbered by " << *DepInst <<
'\n';);
1403 return std::nullopt;
1412 if (Constant *InitVal =
1422 return std::nullopt;
1425 if (S->isAtomic() <
Load->isAtomic())
1426 return std::nullopt;
1437 return std::nullopt;
1440 if (
LD->isAtomic() <
Load->isAtomic())
1441 return std::nullopt;
1450 assert(Sel->getType() ==
Load->getPointerOperandType());
1456 return std::nullopt;
1461 return std::nullopt;
1469 dbgs() <<
" has unknown def " << *DepInst <<
'\n';);
1470 return std::nullopt;
1473void GVNPass::AnalyzeLoadAvailability(LoadInst *Load, LoadDepVect &Deps,
1474 AvailValInBlkVect &ValuesPerBlock,
1475 UnavailBlkVect &UnavailableBlocks) {
1480 for (
const auto &Dep : Deps) {
1482 MemDepResult DepInfo = Dep.getResult();
1484 if (DeadBlocks.count(DepBB)) {
1492 UnavailableBlocks.push_back(DepBB);
1499 if (
auto AV = AnalyzeLoadAvailability(Load, DepInfo, Dep.getAddress())) {
1503 ValuesPerBlock.push_back(
1506 UnavailableBlocks.push_back(DepBB);
1510 assert(Deps.size() == ValuesPerBlock.size() + UnavailableBlocks.size() &&
1511 "post condition violation");
1533LoadInst *GVNPass::findLoadToHoistIntoPred(BasicBlock *Pred, BasicBlock *LoadBB,
1537 if (
Term->getNumSuccessors() != 2 ||
Term->isSpecialTerminator())
1539 auto *SuccBB =
Term->getSuccessor(0);
1540 if (SuccBB == LoadBB)
1541 SuccBB =
Term->getSuccessor(1);
1542 if (!SuccBB->getSinglePredecessor())
1546 for (Instruction &Inst : *SuccBB) {
1547 if (Inst.isDebugOrPseudoInst())
1549 if (--NumInsts == 0)
1552 if (!Inst.isIdenticalTo(Load))
1555 MemDepResult Dep = MD->getDependency(&Inst);
1560 if (Dep.
isNonLocal() && !ICF->isDominatedByICFIFromSameBlock(&Inst))
1571void GVNPass::eliminatePartiallyRedundantLoad(
1572 LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1573 MapVector<BasicBlock *, Value *> &AvailableLoads,
1574 MapVector<BasicBlock *, LoadInst *> *CriticalEdgePredAndLoad) {
1575 for (
const auto &AvailableLoad : AvailableLoads) {
1576 BasicBlock *UnavailableBlock = AvailableLoad.first;
1577 Value *LoadPtr = AvailableLoad.second;
1579 auto *NewLoad =
new LoadInst(
1580 Load->getType(), LoadPtr,
Load->getName() +
".pre",
Load->isVolatile(),
1581 Load->getAlign(),
Load->getOrdering(),
Load->getSyncScopeID(),
1583 NewLoad->setDebugLoc(
Load->getDebugLoc());
1585 auto *NewAccess = MSSAU->createMemoryAccessInBB(
1588 MSSAU->insertDef(NewDef,
true);
1594 AAMDNodes Tags =
Load->getAAMetadata();
1596 NewLoad->setAAMetadata(Tags);
1598 if (
auto *MD =
Load->getMetadata(LLVMContext::MD_invariant_load))
1599 NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
1600 if (
auto *InvGroupMD =
Load->getMetadata(LLVMContext::MD_invariant_group))
1601 NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
1602 if (
auto *RangeMD =
Load->getMetadata(LLVMContext::MD_range))
1603 NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
1604 if (
auto *NoFPClassMD =
Load->getMetadata(LLVMContext::MD_nofpclass))
1605 NewLoad->setMetadata(LLVMContext::MD_nofpclass, NoFPClassMD);
1607 if (
auto *AccessMD =
Load->getMetadata(LLVMContext::MD_access_group))
1608 if (LI->getLoopFor(
Load->getParent()) == LI->getLoopFor(UnavailableBlock))
1609 NewLoad->setMetadata(LLVMContext::MD_access_group, AccessMD);
1618 ValuesPerBlock.push_back(
1620 MD->invalidateCachedPointerInfo(LoadPtr);
1625 if (CriticalEdgePredAndLoad) {
1626 auto It = CriticalEdgePredAndLoad->
find(UnavailableBlock);
1627 if (It != CriticalEdgePredAndLoad->
end()) {
1628 ++NumPRELoadMoved2CEPred;
1629 ICF->insertInstructionTo(NewLoad, UnavailableBlock);
1630 LoadInst *OldLoad = It->second;
1634 if (uint32_t ValNo = VN.lookup(OldLoad,
false))
1635 LeaderTable.erase(ValNo, OldLoad, OldLoad->
getParent());
1636 removeInstruction(OldLoad);
1644 ICF->removeUsersOf(Load);
1645 Load->replaceAllUsesWith(V);
1649 I->setDebugLoc(
Load->getDebugLoc());
1650 if (
V->getType()->isPtrOrPtrVectorTy())
1651 MD->invalidateCachedPointerInfo(V);
1653 return OptimizationRemark(
DEBUG_TYPE,
"LoadPRE", Load)
1654 <<
"load eliminated by PRE";
1659bool GVNPass::PerformLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1660 UnavailBlkVect &UnavailableBlocks) {
1669 SmallPtrSet<BasicBlock *, 4> Blockers(
llvm::from_range, UnavailableBlocks);
1691 bool MustEnsureSafetyOfSpeculativeExecution =
1692 ICF->isDominatedByICFIFromSameBlock(Load);
1696 if (TmpBB == LoadBB)
1698 if (Blockers.count(TmpBB))
1710 MustEnsureSafetyOfSpeculativeExecution =
1711 MustEnsureSafetyOfSpeculativeExecution || ICF->hasICF(TmpBB);
1719 MapVector<BasicBlock *, Value *> PredLoads;
1720 DenseMap<BasicBlock *, AvailabilityState> FullyAvailableBlocks;
1721 for (
const AvailableValueInBlock &AV : ValuesPerBlock)
1723 for (BasicBlock *UnavailableBB : UnavailableBlocks)
1731 MapVector<BasicBlock *, LoadInst *> CriticalEdgePredAndLoad;
1737 dbgs() <<
"COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
1738 << Pred->
getName() <<
"': " << *Load <<
'\n');
1749 dbgs() <<
"COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
1750 << Pred->
getName() <<
"': " << *Load <<
'\n');
1756 dbgs() <<
"COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
1757 << Pred->
getName() <<
"': " << *Load <<
'\n');
1763 if (DT->dominates(LoadBB, Pred)) {
1766 <<
"COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '"
1767 << Pred->
getName() <<
"': " << *Load <<
'\n');
1771 if (LoadInst *LI = findLoadToHoistIntoPred(Pred, LoadBB, Load))
1772 CriticalEdgePredAndLoad[Pred] = LI;
1777 PredLoads[Pred] =
nullptr;
1782 unsigned NumInsertPreds = PredLoads.
size() + CriticalEdgePredSplit.
size();
1783 unsigned NumUnavailablePreds = NumInsertPreds +
1784 CriticalEdgePredAndLoad.
size();
1785 assert(NumUnavailablePreds != 0 &&
1786 "Fully available value should already be eliminated!");
1787 (void)NumUnavailablePreds;
1793 if (NumInsertPreds > 1)
1798 if (MustEnsureSafetyOfSpeculativeExecution) {
1799 if (CriticalEdgePredSplit.
size())
1803 for (
auto &PL : PredLoads)
1807 for (
auto &CEP : CriticalEdgePredAndLoad)
1814 for (BasicBlock *OrigPred : CriticalEdgePredSplit) {
1815 BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
1816 assert(!PredLoads.count(OrigPred) &&
"Split edges shouldn't be in map!");
1817 PredLoads[NewPred] =
nullptr;
1818 LLVM_DEBUG(
dbgs() <<
"Split critical edge " << OrigPred->getName() <<
"->"
1819 << LoadBB->
getName() <<
'\n');
1822 for (
auto &CEP : CriticalEdgePredAndLoad)
1823 PredLoads[CEP.first] =
nullptr;
1826 bool CanDoPRE =
true;
1827 const DataLayout &
DL =
Load->getDataLayout();
1828 SmallVector<Instruction*, 8> NewInsts;
1829 for (
auto &PredLoad : PredLoads) {
1830 BasicBlock *UnavailablePred = PredLoad.first;
1840 Value *LoadPtr =
Load->getPointerOperand();
1842 while (Cur != LoadBB) {
1855 LoadPtr =
Address.translateWithInsertion(LoadBB, UnavailablePred, *DT,
1862 << *
Load->getPointerOperand() <<
"\n");
1867 PredLoad.second = LoadPtr;
1871 while (!NewInsts.
empty()) {
1881 return !CriticalEdgePredSplit.empty();
1887 LLVM_DEBUG(
dbgs() <<
"GVN REMOVING PRE LOAD: " << *Load <<
'\n');
1889 <<
" INSTS: " << *NewInsts.
back()
1893 for (Instruction *
I : NewInsts) {
1897 I->updateLocationAfterHoist();
1906 eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, PredLoads,
1907 &CriticalEdgePredAndLoad);
1912bool GVNPass::performLoopLoadPRE(LoadInst *Load,
1913 AvailValInBlkVect &ValuesPerBlock,
1914 UnavailBlkVect &UnavailableBlocks) {
1915 const Loop *
L = LI->getLoopFor(
Load->getParent());
1917 if (!L ||
L->getHeader() !=
Load->getParent())
1922 if (!Preheader || !Latch)
1925 Value *LoadPtr =
Load->getPointerOperand();
1927 if (!
L->isLoopInvariant(LoadPtr))
1933 if (ICF->isDominatedByICFIFromSameBlock(Load))
1937 for (
auto *Blocker : UnavailableBlocks) {
1939 if (!
L->contains(Blocker))
1951 if (L != LI->getLoopFor(Blocker))
1959 if (DT->dominates(Blocker, Latch))
1963 if (Blocker->getTerminator()->mayWriteToMemory())
1966 LoopBlock = Blocker;
1978 MapVector<BasicBlock *, Value *> AvailableLoads;
1979 AvailableLoads[LoopBlock] = LoadPtr;
1980 AvailableLoads[Preheader] = LoadPtr;
1982 LLVM_DEBUG(
dbgs() <<
"GVN REMOVING PRE LOOP LOAD: " << *Load <<
'\n');
1983 eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, AvailableLoads,
1991 using namespace ore;
1995 <<
"load of type " << NV(
"Type", Load->getType()) <<
" eliminated"
1996 << setExtraArgs() <<
" in favor of "
2003bool GVNPass::processNonLocalLoad(LoadInst *Load) {
2005 if (
Load->getParent()->getParent()->hasFnAttribute(
2006 Attribute::SanitizeAddress) ||
2007 Load->getParent()->getParent()->hasFnAttribute(
2008 Attribute::SanitizeHWAddress))
2013 MD->getNonLocalPointerDependency(Load, Deps);
2018 unsigned NumDeps = Deps.size();
2025 !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
2027 dbgs() <<
" has unknown dependencies\n";);
2033 if (GetElementPtrInst *
GEP =
2035 for (Use &U :
GEP->indices())
2041 AvailValInBlkVect ValuesPerBlock;
2042 UnavailBlkVect UnavailableBlocks;
2043 AnalyzeLoadAvailability(Load, Deps, ValuesPerBlock, UnavailableBlocks);
2047 if (ValuesPerBlock.empty())
2055 if (UnavailableBlocks.empty()) {
2056 LLVM_DEBUG(
dbgs() <<
"GVN REMOVING NONLOCAL LOAD: " << *Load <<
'\n');
2061 ICF->removeUsersOf(Load);
2062 Load->replaceAllUsesWith(V);
2070 if (
Load->getDebugLoc() &&
Load->getParent() ==
I->getParent())
2071 I->setDebugLoc(
Load->getDebugLoc());
2072 if (
V->getType()->isPtrOrPtrVectorTy())
2073 MD->invalidateCachedPointerInfo(V);
2086 if (performLoopLoadPRE(Load, ValuesPerBlock, UnavailableBlocks) ||
2087 PerformLoadPRE(Load, ValuesPerBlock, UnavailableBlocks))
2093bool GVNPass::processAssumeIntrinsic(AssumeInst *IntrinsicI) {
2097 if (
Cond->isZero()) {
2107 const MemoryUseOrDef *FirstNonDom =
nullptr;
2109 MSSAU->getMemorySSA()->getBlockAccesses(IntrinsicI->
getParent());
2116 for (
const auto &Acc : *AL) {
2118 if (!Current->getMemoryInst()->comesBefore(NewS)) {
2119 FirstNonDom = Current;
2126 FirstNonDom ? MSSAU->createMemoryAccessBefore(
2128 const_cast<MemoryUseOrDef *
>(FirstNonDom))
2129 : MSSAU->createMemoryAccessInBB(
2151 return propagateEquality(V, True, IntrinsicI);
2156 I->replaceAllUsesWith(Repl);
2161bool GVNPass::processLoad(LoadInst *L) {
2166 if (!
L->isUnordered())
2169 if (
L->getType()->isTokenLikeTy())
2172 if (
L->use_empty()) {
2178 MemDepResult Dep = MD->getDependency(L);
2182 return processNonLocalLoad(L);
2189 dbgs() <<
"GVN: load ";
L->printAsOperand(
dbgs());
2190 dbgs() <<
" has unknown dependence\n";);
2194 auto AV = AnalyzeLoadAvailability(L, Dep,
L->getPointerOperand());
2198 Value *AvailableValue = AV->MaterializeAdjustedValue(L, L);
2201 ICF->removeUsersOf(L);
2202 L->replaceAllUsesWith(AvailableValue);
2204 MSSAU->removeMemoryAccess(L);
2211 MD->invalidateCachedPointerInfo(AvailableValue);
2217bool GVNPass::processMaskedLoad(IntrinsicInst *
I) {
2220 MemDepResult Dep = MD->getDependency(
I);
2226 Value *Passthrough =
I->getOperand(2);
2230 StoreVal->
getType() !=
I->getType())
2237 ICF->removeUsersOf(
I);
2238 I->replaceAllUsesWith(OpToForward);
2246std::pair<uint32_t, bool>
2247GVNPass::ValueTable::assignExpNewValueNum(
Expression &Exp) {
2248 uint32_t &
E = ExpressionNumbering[
Exp];
2249 bool CreateNewValNum = !
E;
2250 if (CreateNewValNum) {
2251 Expressions.push_back(Exp);
2252 if (ExprIdx.size() < NextValueNumber + 1)
2253 ExprIdx.resize(NextValueNumber * 2);
2254 E = NextValueNumber;
2255 ExprIdx[NextValueNumber++] = NextExprNumber++;
2257 return {
E, CreateNewValNum};
2262bool GVNPass::ValueTable::areAllValsInBB(uint32_t Num,
const BasicBlock *BB,
2265 GVN.LeaderTable.getLeaders(Num),
2273 auto FindRes = PhiTranslateTable.find({Num, Pred});
2274 if (FindRes != PhiTranslateTable.end())
2275 return FindRes->second;
2276 uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, GVN);
2277 PhiTranslateTable.insert({{Num, Pred}, NewNum});
2288 auto Leaders = GVN.LeaderTable.getLeaders(Num);
2289 for (
const auto &Entry : Leaders) {
2291 if (
Call &&
Call->getParent() == PhiBlock)
2295 if (
AA->doesNotAccessMemory(
Call))
2298 if (!MD || !
AA->onlyReadsMemory(
Call))
2310 if (
D.getResult().isNonFuncLocal())
2318uint32_t GVNPass::ValueTable::phiTranslateImpl(
const BasicBlock *Pred,
2319 const BasicBlock *PhiBlock,
2323 if (PHINode *PN = NumberingPhi[Num]) {
2324 if (PN->getParent() != PhiBlock)
2326 for (
unsigned I = 0;
I != PN->getNumIncomingValues(); ++
I) {
2327 if (PN->getIncomingBlock(
I) != Pred)
2329 if (uint32_t TransVal =
lookup(PN->getIncomingValue(
I),
false))
2335 if (BasicBlock *BB = NumberingBB[Num]) {
2336 assert(MSSA &&
"NumberingBB is non-empty only when using MemorySSA");
2342 MemoryPhi *MPhi = MSSA->getMemoryAccess(BB);
2348 return lookupOrAdd(PredPhi->getBlock());
2349 if (MSSA->isLiveOnEntryDef(MA))
2354 "CFG/MemorySSA mismatch: predecessor not found among incoming blocks");
2360 if (!areAllValsInBB(Num, PhiBlock, GVN))
2363 if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
2367 for (
unsigned I = 0;
I <
Exp.VarArgs.size();
I++) {
2371 if ((
I > 1 &&
Exp.Opcode == Instruction::InsertValue) ||
2372 (
I > 0 &&
Exp.Opcode == Instruction::ExtractValue) ||
2373 (
I > 1 &&
Exp.Opcode == Instruction::ShuffleVector))
2375 Exp.VarArgs[
I] = phiTranslate(Pred, PhiBlock,
Exp.VarArgs[
I], GVN);
2378 if (
Exp.Commutative) {
2379 assert(
Exp.VarArgs.size() >= 2 &&
"Unsupported commutative instruction!");
2380 if (
Exp.VarArgs[0] >
Exp.VarArgs[1]) {
2382 uint32_t Opcode =
Exp.Opcode >> 8;
2383 if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
2384 Exp.Opcode = (Opcode << 8) |
2390 if (uint32_t NewNum = ExpressionNumbering[Exp]) {
2391 if (
Exp.Opcode == Instruction::Call && NewNum != Num)
2392 return areCallValsEqual(Num, NewNum, Pred, PhiBlock, GVN) ? NewNum : Num;
2400void GVNPass::ValueTable::eraseTranslateCacheEntry(
2403 PhiTranslateTable.erase({Num, Pred});
2412 auto Leaders = LeaderTable.getLeaders(Num);
2413 if (Leaders.empty())
2416 Value *Val =
nullptr;
2417 for (
const auto &Entry : Leaders) {
2418 if (DT->dominates(Entry.BB, BB)) {
2438 const BasicBlock *Pred =
E.getEnd()->getSinglePredecessor();
2439 assert((!Pred || Pred ==
E.getStart()) &&
2440 "No edge between these basic blocks!");
2441 return Pred !=
nullptr;
2444void GVNPass::assignBlockRPONumber(Function &
F) {
2445 BlockRPONumber.clear();
2446 uint32_t NextBlockNumber = 1;
2447 ReversePostOrderTraversal<Function *> RPOT(&
F);
2448 for (BasicBlock *BB : RPOT)
2449 BlockRPONumber[BB] = NextBlockNumber++;
2450 InvalidBlockRPONumbers =
false;
2458bool GVNPass::propagateEquality(
2460 const std::variant<BasicBlockEdge, Instruction *> &Root) {
2465 if (
const BasicBlockEdge *
Edge = std::get_if<BasicBlockEdge>(&Root)) {
2472 for (
const auto *Node : DT->getNode(
I->getParent())->children())
2476 while (!Worklist.
empty()) {
2477 std::pair<Value*, Value*> Item = Worklist.
pop_back_val();
2478 LHS = Item.first;
RHS = Item.second;
2492 const DataLayout &
DL =
2501 uint32_t LVN = VN.lookupOrAdd(
LHS);
2506 uint32_t RVN = VN.lookupOrAdd(
RHS);
2523 for (
const BasicBlock *BB : DominatedBlocks)
2524 LeaderTable.insert(LVN,
RHS, BB);
2531 auto CanReplacePointersCallBack = [&
DL](
const Use &
U,
const Value *To) {
2534 unsigned NumReplacements;
2535 if (
const BasicBlockEdge *
Edge = std::get_if<BasicBlockEdge>(&Root))
2537 LHS,
RHS, *DT, *
Edge, CanReplacePointersCallBack);
2540 LHS,
RHS, *DT, std::get<Instruction *>(Root),
2541 CanReplacePointersCallBack);
2543 if (NumReplacements > 0) {
2545 NumGVNEqProp += NumReplacements;
2548 MD->invalidateCachedPointerInfo(
LHS);
2565 bool IsKnownFalse = !IsKnownTrue;
2581 Value *Op0 =
Cmp->getOperand(0), *Op1 =
Cmp->getOperand(1);
2586 if (
Cmp->isEquivalence(IsKnownFalse))
2587 Worklist.
push_back(std::make_pair(Op0, Op1));
2591 Constant *NotVal = ConstantInt::get(
Cmp->getType(), IsKnownFalse);
2595 uint32_t NextNum = VN.getNextUnusedValueNumber();
2596 uint32_t Num = VN.lookupOrAddCmp(
Cmp->getOpcode(), NotPred, Op0, Op1);
2599 if (Num < NextNum) {
2600 for (
const auto &Entry : LeaderTable.getLeaders(Num)) {
2605 if (
const BasicBlockEdge *
Edge = std::get_if<BasicBlockEdge>(&Root)) {
2606 if (!DT->dominates(
Entry.BB,
Edge->getStart()) &&
2607 !DT->dominates(
Edge->getEnd(),
Entry.BB))
2610 auto *InstBB = std::get<Instruction *>(Root)->getParent();
2611 if (!DT->dominates(
Entry.BB, InstBB) &&
2612 !DT->dominates(InstBB,
Entry.BB))
2618 unsigned NumReplacements;
2619 if (
const BasicBlockEdge *
Edge = std::get_if<BasicBlockEdge>(&Root))
2624 NotCmp, NotVal, *DT, std::get<Instruction *>(Root));
2625 Changed |= NumReplacements > 0;
2626 NumGVNEqProp += NumReplacements;
2629 MD->invalidateCachedPointerInfo(NotCmp);
2637 for (
const BasicBlock *BB : DominatedBlocks)
2638 LeaderTable.insert(Num, NotVal, BB);
2647 Worklist.
emplace_back(
A, ConstantInt::get(
A->getType(), IsKnownTrue));
2652 Worklist.
emplace_back(
A, ConstantInt::get(
A->getType(), !IsKnownTrue));
2662bool GVNPass::processInstruction(Instruction *
I) {
2667 const DataLayout &
DL =
I->getDataLayout();
2670 if (!
I->use_empty()) {
2673 ICF->removeUsersOf(
I);
2674 I->replaceAllUsesWith(V);
2682 if (MD &&
V->getType()->isPtrOrPtrVectorTy())
2683 MD->invalidateCachedPointerInfo(V);
2690 return processAssumeIntrinsic(Assume);
2693 if (processLoad(Load))
2696 unsigned Num = VN.lookupOrAdd(Load);
2697 LeaderTable.insert(Num, Load,
Load->getParent());
2708 if (!BI->isConditional())
2712 return processFoldableCondBr(BI);
2714 Value *BranchCond = BI->getCondition();
2718 if (TrueSucc == FalseSucc)
2725 BasicBlockEdge TrueE(Parent, TrueSucc);
2726 Changed |= propagateEquality(BranchCond, TrueVal, TrueE);
2729 BasicBlockEdge FalseE(Parent, FalseSucc);
2730 Changed |= propagateEquality(BranchCond, FalseVal, FalseE);
2737 Value *SwitchCond =
SI->getCondition();
2742 SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
2744 ++SwitchEdges[Succ];
2746 for (
const auto &Case :
SI->cases()) {
2749 if (SwitchEdges.
lookup(Dst) == 1) {
2750 BasicBlockEdge
E(Parent, Dst);
2751 Changed |= propagateEquality(SwitchCond, Case.getCaseValue(),
E);
2759 if (
I->getType()->isVoidTy())
2762 uint32_t NextNum = VN.getNextUnusedValueNumber();
2763 unsigned Num = VN.lookupOrAdd(
I);
2768 LeaderTable.insert(Num,
I,
I->getParent());
2775 if (Num >= NextNum) {
2776 LeaderTable.insert(Num,
I,
I->getParent());
2782 Value *Repl = findLeader(
I->getParent(), Num);
2785 LeaderTable.insert(Num,
I,
I->getParent());
2798 MD->invalidateCachedPointerInfo(Repl);
2804bool GVNPass::runImpl(Function &
F, AssumptionCache &RunAC, DominatorTree &RunDT,
2805 const TargetLibraryInfo &RunTLI, AAResults &RunAA,
2806 MemoryDependenceResults *RunMD, LoopInfo &LI,
2807 OptimizationRemarkEmitter *RunORE,
MemorySSA *MSSA) {
2812 VN.setAliasAnalysis(&RunAA);
2814 ImplicitControlFlowTracking ImplicitCFT;
2818 VN.setMemorySSA(MSSA);
2820 InvalidBlockRPONumbers =
true;
2821 MemorySSAUpdater Updater(MSSA);
2822 MSSAU = MSSA ? &Updater :
nullptr;
2825 bool ShouldContinue =
true;
2827 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
2839 unsigned Iteration = 0;
2840 while (ShouldContinue) {
2843 ShouldContinue = iterateOnFunction(
F);
2851 assignValNumForDeadCode();
2852 bool PREChanged =
true;
2853 while (PREChanged) {
2854 PREChanged = performPRE(
F);
2864 cleanupGlobalSets();
2875bool GVNPass::processBlock(BasicBlock *BB) {
2876 if (DeadBlocks.count(BB))
2879 bool ChangedFunction =
false;
2885 SmallPtrSet<PHINode *, 8> PHINodesToRemove;
2887 for (PHINode *PN : PHINodesToRemove) {
2888 removeInstruction(PN);
2891 ChangedFunction |= processInstruction(&Inst);
2892 return ChangedFunction;
2896bool GVNPass::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
2897 BasicBlock *Curr,
unsigned int ValNo) {
2903 for (
unsigned I = 0,
E =
Instr->getNumOperands();
I !=
E; ++
I) {
2911 if (!VN.exists(
Op)) {
2916 VN.phiTranslate(Pred, Curr, VN.lookup(
Op), *
this);
2917 if (
Value *V = findLeader(Pred, TValNo)) {
2935 ICF->insertInstructionTo(Instr, Pred);
2937 unsigned Num = VN.lookupOrAdd(Instr);
2941 LeaderTable.insert(Num, Instr, Pred);
2945bool GVNPass::performScalarPRE(Instruction *CurInst) {
2971 if (CallB->isInlineAsm())
2978 if (
II->getIntrinsicID() == Intrinsic::protected_field_ptr)
2981 uint32_t ValNo = VN.lookup(CurInst);
2989 unsigned NumWith = 0;
2990 unsigned NumWithout = 0;
2995 if (InvalidBlockRPONumbers)
2996 assignBlockRPONumber(*CurrentBlock->
getParent());
3002 if (!DT->isReachableFromEntry(
P)) {
3007 assert(BlockRPONumber.count(
P) && BlockRPONumber.count(CurrentBlock) &&
3008 "Invalid BlockRPONumber map.");
3009 if (BlockRPONumber[
P] >= BlockRPONumber[CurrentBlock]) {
3014 uint32_t TValNo = VN.phiTranslate(
P, CurrentBlock, ValNo, *
this);
3015 Value *PredV = findLeader(
P, TValNo);
3020 }
else if (PredV == CurInst) {
3032 if (NumWithout > 1 || NumWith == 0)
3040 if (NumWithout != 0) {
3046 if (ICF->isDominatedByICFIFromSameBlock(CurInst))
3059 ToSplit.push_back(std::make_pair(PREPred->
getTerminator(), SuccNum));
3063 PREInstr = CurInst->
clone();
3064 if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
3067 verifyRemoved(PREInstr);
3076 assert(PREInstr !=
nullptr || NumWithout == 0);
3082 CurInst->
getName() +
".pre-phi");
3083 Phi->insertBefore(CurrentBlock->begin());
3084 for (
auto &[V, BB] : PredMap) {
3089 Phi->addIncoming(V, BB);
3091 Phi->addIncoming(PREInstr, PREPred);
3097 VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock);
3098 LeaderTable.insert(ValNo, Phi, CurrentBlock);
3101 if (MD &&
Phi->getType()->isPtrOrPtrVectorTy())
3102 MD->invalidateCachedPointerInfo(Phi);
3103 LeaderTable.erase(ValNo, CurInst, CurrentBlock);
3106 removeInstruction(CurInst);
3113bool GVNPass::performPRE(Function &
F) {
3115 for (BasicBlock *CurrentBlock :
depth_first(&
F.getEntryBlock())) {
3117 if (CurrentBlock == &
F.getEntryBlock())
3121 if (CurrentBlock->isEHPad())
3125 BE = CurrentBlock->end();
3128 Changed |= performScalarPRE(CurInst);
3132 if (splitCriticalEdges())
3140BasicBlock *GVNPass::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
3145 CriticalEdgeSplittingOptions(DT, LI, MSSAU).unsetPreserveLoopSimplify());
3148 MD->invalidateCachedPredecessors();
3149 InvalidBlockRPONumbers =
true;
3156bool GVNPass::splitCriticalEdges() {
3157 if (ToSplit.empty())
3162 std::pair<Instruction *, unsigned>
Edge = ToSplit.pop_back_val();
3164 CriticalEdgeSplittingOptions(DT, LI, MSSAU)) !=
3166 }
while (!ToSplit.empty());
3169 MD->invalidateCachedPredecessors();
3170 InvalidBlockRPONumbers =
true;
3176bool GVNPass::iterateOnFunction(Function &
F) {
3177 cleanupGlobalSets();
3184 ReversePostOrderTraversal<Function *> RPOT(&
F);
3186 for (BasicBlock *BB : RPOT)
3192void GVNPass::cleanupGlobalSets() {
3194 LeaderTable.clear();
3195 BlockRPONumber.clear();
3197 InvalidBlockRPONumbers =
true;
3200void GVNPass::removeInstruction(Instruction *
I) {
3202 if (MD) MD->removeInstruction(
I);
3204 MSSAU->removeMemoryAccess(
I);
3208 ICF->removeInstruction(
I);
3209 I->eraseFromParent();
3215void GVNPass::verifyRemoved(
const Instruction *Inst)
const {
3216 VN.verifyRemoved(Inst);
3217 LeaderTable.verifyRemoved(Inst);
3224void GVNPass::addDeadBlock(BasicBlock *BB) {
3226 SmallSetVector<BasicBlock *, 4>
DF;
3229 while (!NewDead.
empty()) {
3231 if (DeadBlocks.count(
D))
3235 SmallVector<BasicBlock *, 8> Dom;
3236 DT->getDescendants(
D, Dom);
3237 DeadBlocks.insert_range(Dom);
3240 for (BasicBlock *
B : Dom) {
3242 if (DeadBlocks.count(S))
3245 bool AllPredDead =
true;
3247 if (!DeadBlocks.count(
P)) {
3248 AllPredDead =
false;
3268 for (BasicBlock *
B :
DF) {
3269 if (DeadBlocks.count(
B))
3275 for (BasicBlock *
P : Preds) {
3276 if (!DeadBlocks.count(
P))
3281 if (BasicBlock *S = splitCriticalEdges(
P,
B))
3282 DeadBlocks.insert(
P = S);
3288 if (!DeadBlocks.count(
P))
3290 for (PHINode &Phi :
B->phis()) {
3293 MD->invalidateCachedPointerInfo(&Phi);
3312bool GVNPass::processFoldableCondBr(BranchInst *BI) {
3326 if (DeadBlocks.count(DeadRoot))
3330 DeadRoot = splitCriticalEdges(BI->
getParent(), DeadRoot);
3332 addDeadBlock(DeadRoot);
3340void GVNPass::assignValNumForDeadCode() {
3341 for (BasicBlock *BB : DeadBlocks) {
3342 for (Instruction &Inst : *BB) {
3343 unsigned ValNum = VN.lookupOrAdd(&Inst);
3344 LeaderTable.insert(ValNum, &Inst, BB);
3356 .setMemDep(MemDepAnalysis)
3357 .setMemorySSA(MemSSAAnalysis)) {
3366 if (Impl.isMemorySSAEnabled() && !MSSAWP)
3369 return Impl.runImpl(
3374 Impl.isMemDepEnabled()
3379 MSSAWP ? &MSSAWP->getMSSA() :
nullptr);
3387 if (Impl.isMemDepEnabled())
3396 if (Impl.isMemorySSAEnabled())
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static RegisterPass< DebugifyFunctionPass > DF("debugify-function", "Attach debug info to a function")
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
early cse Early CSE w MemorySSA
static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo, const DominatorTree *DT, OptimizationRemarkEmitter *ORE)
Try to locate the three instruction involved in a missed load-elimination case that is due to an inte...
static void reportLoadElim(LoadInst *Load, Value *AvailableValue, OptimizationRemarkEmitter *ORE)
static cl::opt< uint32_t > MaxNumInsnsPerBlock("gvn-max-num-insns", cl::Hidden, cl::init(100), cl::desc("Max number of instructions to scan in each basic block in GVN " "(default = 100)"))
static cl::opt< bool > GVNEnableMemDep("enable-gvn-memdep", cl::init(true))
static cl::opt< bool > GVNEnableLoadInLoopPRE("enable-load-in-loop-pre", cl::init(true))
static const Instruction * findMayClobberedPtrAccess(LoadInst *Load, const DominatorTree *DT)
static cl::opt< uint32_t > MaxNumDeps("gvn-max-num-deps", cl::Hidden, cl::init(100), cl::desc("Max number of dependences to attempt Load PRE (default = 100)"))
static cl::opt< bool > GVNEnableMemorySSA("enable-gvn-memoryssa", cl::init(false))
static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E, DominatorTree *DT)
There is an edge from 'Src' to 'Dst'.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB, DenseMap< BasicBlock *, AvailabilityState > &FullyAvailableBlocks)
Return true if we can prove that the value we're analyzing is fully available in the specified block.
static Value * findDominatingValue(const MemoryLocation &Loc, Type *LoadTy, Instruction *From, AAResults *AA)
static bool liesBetween(const Instruction *From, Instruction *Between, const Instruction *To, const DominatorTree *DT)
Assuming To can be reached from both From and Between, does Between lie on every path from From to To...
static bool isLifetimeStart(const Instruction *Inst)
static cl::opt< bool > GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre", cl::init(false))
static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl)
static void replaceValuesPerBlockEntry(SmallVectorImpl< AvailableValueInBlock > &ValuesPerBlock, Value *OldValue, Value *NewValue)
If the specified OldValue exists in ValuesPerBlock, replace its value with NewValue.
static Value * ConstructSSAForLoadSet(LoadInst *Load, SmallVectorImpl< AvailableValueInBlock > &ValuesPerBlock, GVNPass &GVN)
Given a set of loads specified by ValuesPerBlock, construct SSA form, allowing us to eliminate Load.
@ Unavailable
We know the block is not fully available. This is a fixpoint.
@ Available
We know the block is fully available. This is a fixpoint.
@ SpeculativelyAvailable
We do not know whether the block is fully available or not, but we are currently speculating that it ...
static cl::opt< bool > GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden)
static cl::opt< uint32_t > MaxNumVisitedInsts("gvn-max-num-visited-insts", cl::Hidden, cl::init(100), cl::desc("Max number of visited instructions when trying to find " "dominating value of select dependency (default = 100)"))
static cl::opt< uint32_t > MaxBBSpeculations("gvn-max-block-speculations", cl::Hidden, cl::init(600), cl::desc("Max number of blocks we're willing to speculate on (and recurse " "into) when deducing if a value is fully available or not in GVN " "(default = 600)"))
static cl::opt< bool > GVNEnableLoadPRE("enable-load-pre", cl::init(true))
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
This is the interface for a simple mod/ref and alias analysis over globals.
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
static bool lookup(const GsymReader &GR, DataExtractor &Data, uint64_t &Offset, uint64_t BaseAddr, uint64_t Addr, SourceLocations &SrcLocs, llvm::Error &Err)
A Lookup helper functions.
This file implements a map that provides insertion order iteration.
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
uint64_t IntrinsicInst * II
ppc ctr loops PowerPC CTR Loops Verify
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > & Cond
std::pair< BasicBlock *, BasicBlock * > Edge
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static const uint32_t IV[8]
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
InstListType::iterator iterator
Instruction iterators...
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
bool isEHPad() const
Return true if this basic block is an exception handling block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
Analysis pass which computes a DominatorTree.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Class representing an expression and its matching format.
FunctionPass class - This class is used to implement most global optimizations.
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
const BasicBlock & getEntryBlock() const
Represents calls to the gc.relocate intrinsic.
This class holds the mapping between values and value numbers.
LLVM_ABI uint32_t lookupOrAdd(MemoryAccess *MA)
The core GVN pass object.
friend class gvn::GVNLegacyPass
LLVM_ABI bool isPREEnabled() const
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Run the pass over the function.
LLVM_ABI void salvageAndRemoveInstruction(Instruction *I)
This removes the specified instruction from our various maps and marks it for deletion.
AAResults * getAliasAnalysis() const
LLVM_ABI bool isLoadPREEnabled() const
GVNPass(GVNOptions Options={})
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI bool isMemorySSAEnabled() const
DominatorTree & getDominatorTree() const
LLVM_ABI bool isLoadInLoopPREEnabled() const
LLVM_ABI bool isLoadPRESplitBackedgeEnabled() const
LLVM_ABI bool isMemDepEnabled() const
Legacy wrapper pass to provide the GlobalsAAResult object.
LLVM_ABI Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following:
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
LLVM_ABI bool mayHaveSideEffects() const LLVM_READONLY
Return true if the instruction may have side effects.
bool isTerminator() const
LLVM_ABI bool mayReadFromMemory() const LLVM_READONLY
Return true if this instruction may read memory.
LLVM_ABI void dropUnknownNonDebugMetadata(ArrayRef< unsigned > KnownIDs={})
Drop all unknown metadata except for debug locations.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
A wrapper class for inspecting calls to intrinsic functions.
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
The legacy pass manager's analysis pass to compute loop information.
iterator find(const KeyT &Key)
A memory dependence query can return one of three different answers.
bool isClobber() const
Tests if this MemDepResult represents a query that is an instruction clobber dependency.
bool isNonLocal() const
Tests if this MemDepResult represents a query that is transparent to the start of the block,...
bool isDef() const
Tests if this MemDepResult represents a query that is an instruction definition dependency.
bool isLocal() const
Tests if this MemDepResult represents a valid local query (Clobber/Def).
Instruction * getInst() const
If this is a normal dependency, returns the instruction that is depended on.
This is the common base class for memset/memcpy/memmove.
BasicBlock * getBlock() const
An analysis that produces MemoryDependenceResults for a function.
std::vector< NonLocalDepEntry > NonLocalDepInfo
MemDepResult getDependency(Instruction *QueryInst)
Returns the instruction on which a memory operation depends.
const NonLocalDepInfo & getNonLocalCallDependency(CallBase *QueryCall)
Perform a full dependency query for the specified call, returning the set of blocks that the value is...
A wrapper analysis pass for the legacy pass manager that exposes a MemoryDepnedenceResults instance.
Representation for a specific memory location.
static LLVM_ABI MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
BasicBlock * getIncomingBlock(unsigned I) const
Return incoming basic block number i.
MemoryAccess * getIncomingValue(unsigned I) const
Return incoming value number x.
An analysis that produces MemorySSA for a function.
Legacy analysis pass which computes MemorySSA.
LLVM_ABI void verifyMemorySSA(VerificationLevel=VerificationLevel::Fast) const
Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right ...
This is an entry in the NonLocalDepInfo cache.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Helper class for SSA formation on a set of values defined in multiple blocks.
void Initialize(Type *Ty, StringRef Name)
Reset this object to get ready for a new set of SSA updates with type 'Ty'.
Value * GetValueInMiddleOfBlock(BasicBlock *BB)
Construct SSA form, materializing a value that is live in the middle of the specified block.
bool HasValueForBlock(BasicBlock *BB) const
Return true if the SSAUpdater already has a value for the specified block.
void AddAvailableValue(BasicBlock *BB, Value *V)
Indicate that a rewritten value is available in the specified block with the specified value.
This class represents the LLVM 'select' instruction.
const Value * getCondition() const
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
SmallVector & operator=(const SmallVector &RHS)
StringRef - Represent a constant reference to a string, i.e.
Analysis pass providing the TargetLibraryInfo.
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.s.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isVoidTy() const
Return true if this is 'void'.
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
bool hasUseList() const
Check if this Value has a use-list.
LLVM_ABI bool canBeFreed() const
Return true if the memory object referred to by V can by freed in the scope for which the SSA value d...
LLVM_ABI void deleteValue()
Delete a pointer to a generic Value.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
An efficient, type-erasing, non-owning reference to a callable.
GVNLegacyPass(bool MemDepAnalysis=GVNEnableMemDep, bool MemSSAAnalysis=GVNEnableMemorySSA)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
An opaque object representing a hash code.
const ParentTy * getParent() const
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_MaskedStore(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
Matches MaskedStore Intrinsic.
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
NoWrapTrunc_match< OpTy, TruncInst::NoUnsignedWrap > m_NUWTrunc(const OpTy &Op)
Matches trunc nuw.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
int analyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr, StoreInst *DepSI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the store at D...
Value * getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
If analyzeLoadFromClobberingMemInst returned an offset, this function can be used to actually perform...
int analyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, LoadInst *DepLI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the load at De...
Value * getValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy, Instruction *InsertPt, Function *F)
If analyzeLoadFromClobberingStore/Load returned an offset, this function can be used to actually perf...
int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr, MemIntrinsic *DepMI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the memory int...
bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy, Function *F)
Return true if CoerceAvailableValueToLoadType would succeed if it was called.
initializer< Ty > init(const Ty &Val)
A private "module" namespace for types and utilities used by GVN.
Add a small namespace to avoid name clashes with the classes used in the streaming interface.
NodeAddr< InstrNode * > Instr
NodeAddr< PhiNode * > Phi
NodeAddr< UseNode * > Use
NodeAddr< NodeBase * > Node
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
FunctionAddr VTableAddr Value
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
hash_code hash_value(const FixedPointSemantics &Val)
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
LLVM_ABI unsigned replaceDominatedUsesWithIf(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge, function_ref< bool(const Use &U, const Value *To)> ShouldReplace)
Replace each use of 'From' with 'To' if that use is dominated by the given edge and the callback Shou...
LLVM_ABI unsigned GetSuccessorNumber(const BasicBlock *BB, const BasicBlock *Succ)
Search for the specified successor of basic block BB and return its position in the terminator instru...
auto pred_end(const MachineBasicBlock *BB)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
auto successors(const MachineBasicBlock *BB)
constexpr from_range_t from_range
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
LLVM_ABI bool isAssumeWithEmptyBundle(const AssumeInst &Assume)
Return true iff the operand bundles of the provided llvm.assume doesn't contain any valuable informat...
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
LLVM_ABI bool canReplacePointersInUseIfEqual(const Use &U, const Value *To, const DataLayout &DL)
LLVM_ABI bool canReplacePointersIfEqual(const Value *From, const Value *To, const DataLayout &DL)
Returns true if a pointer value From can be replaced with another pointer value \To if they are deeme...
bool isModSet(const ModRefInfo MRI)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void patchReplacementInstruction(Instruction *I, Value *Repl)
Patch the replacement so that it is not more restrictive than the value being replaced.
LLVM_ABI void initializeGVNLegacyPassPass(PassRegistry &)
LLVM_ABI unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge)
Replace each use of 'From' with 'To' if that use is dominated by the given edge.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
@ Success
The lock was released successfully.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
RNSuccIterator< NodeRef, BlockT, RegionT > succ_begin(NodeRef Node)
LLVM_ABI void combineMetadataForCSE(Instruction *K, const Instruction *J, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
LLVM_ABI bool VerifyMemorySSA
Enables verification of MemorySSA.
RNSuccIterator< NodeRef, BlockT, RegionT > succ_end(NodeRef Node)
LLVM_ABI bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert it before I.
LLVM_ABI bool MergeBlockIntoPredecessor(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, MemoryDependenceResults *MemDep=nullptr, bool PredecessorWithTwoSuccessors=false, DominatorTree *DT=nullptr)
Attempts to merge a block into its predecessor, if possible.
LLVM_ABI FunctionPass * createGVNPass()
Create a legacy GVN pass.
FunctionAddr VTableAddr Next
DWARFExpression::Operation Op
LLVM_ABI BasicBlock * SplitCriticalEdge(Instruction *TI, unsigned SuccNum, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions(), const Twine &BBName="")
If this edge is a critical edge, insert a new node to split the critical edge.
LLVM_ABI bool isCriticalEdge(const Instruction *TI, unsigned SuccNum, bool AllowIdenticalEdges=false)
Return true if the specified edge is a critical edge.
constexpr unsigned BitWidth
auto pred_begin(const MachineBasicBlock *BB)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
bool pred_empty(const BasicBlock *BB)
iterator_range< df_iterator< T > > depth_first(const T &G)
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
LLVM_ABI bool EliminateDuplicatePHINodes(BasicBlock *BB)
Check for and eliminate duplicate PHI nodes in this block.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
LLVM_ABI bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in Ex...
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static GVNPass::Expression getTombstoneKey()
static bool isEqual(const GVNPass::Expression &LHS, const GVNPass::Expression &RHS)
static GVNPass::Expression getEmptyKey()
static unsigned getHashValue(const GVNPass::Expression &E)
An information struct used to provide DenseMap with the various necessary components for a given valu...
A set of parameters to control various transforms performed by GVN pass.
bool operator==(const Expression &Other) const
friend hash_code hash_value(const Expression &Value)
SmallVector< uint32_t, 4 > VarArgs
Expression(uint32_t Op=~2U)
A CRTP mix-in to automatically provide informational APIs needed for passes.
Represents an AvailableValue which can be rematerialized at the end of the associated BasicBlock.
static AvailableValueInBlock get(BasicBlock *BB, Value *V, unsigned Offset=0)
static AvailableValueInBlock getUndef(BasicBlock *BB)
static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV)
AvailableValue AV
AV - The actual available value.
static AvailableValueInBlock getSelect(BasicBlock *BB, SelectInst *Sel, Value *V1, Value *V2)
BasicBlock * BB
BB - The basic block in question.
Value * MaterializeAdjustedValue(LoadInst *Load) const
Emit code at the end of this block to adjust the value defined here to the specified type.
Represents a particular available value that we know how to materialize.
unsigned Offset
Offset - The byte offset in Val that is interesting for the load query.
bool isSimpleValue() const
static AvailableValue getSelect(SelectInst *Sel, Value *V1, Value *V2)
bool isCoercedLoadValue() const
static AvailableValue get(Value *V, unsigned Offset=0)
ValType Kind
Kind of the live-out value.
LoadInst * getCoercedLoadValue() const
static AvailableValue getLoad(LoadInst *Load, unsigned Offset=0)
static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset=0)
bool isUndefValue() const
bool isSelectValue() const
Value * Val
Val - The value that is live out of the block.
Value * V1
V1, V2 - The dominating non-clobbered values of SelectVal.
static AvailableValue getUndef()
SelectInst * getSelectValue() const
Value * getSimpleValue() const
bool isMemIntrinValue() const
MemIntrinsic * getMemIntrinValue() const
Value * MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt) const
Emit code at the specified insertion point to adjust the value defined here to the specified type.