88#define DEBUG_TYPE "gvn"
90STATISTIC(NumGVNInstr,
"Number of instructions deleted");
92STATISTIC(NumGVNPRE,
"Number of instructions PRE'd");
94STATISTIC(NumGVNSimpl,
"Number of instructions simplified");
95STATISTIC(NumGVNEqProp,
"Number of equalities propagated");
97STATISTIC(NumPRELoopLoad,
"Number of loop loads PRE'd");
99 "Number of loads moved to predecessor of a critical edge in PRE");
101STATISTIC(IsValueFullyAvailableInBlockNumSpeculationsMax,
102 "Number of blocks speculated as available in "
103 "IsValueFullyAvailableInBlock(), max");
105 "Number of times we we reached gvn-max-block-speculations cut-off "
106 "preventing further exploration");
121 cl::desc(
"Max number of dependences to attempt Load PRE (default = 100)"));
126 cl::desc(
"Max number of blocks we're willing to speculate on (and recurse "
127 "into) when deducing if a value is fully available or not in GVN "
132 cl::desc(
"Max number of visited instructions when trying to find "
133 "dominating value of select dependency (default = 100)"));
137 cl::desc(
"Max number of instructions to scan in each basic block in GVN "
161 if ((!
Attrs.isEmpty() || !
Other.Attrs.isEmpty()) &&
162 !
Attrs.intersectWith(
Ty->getContext(),
Other.Attrs).has_value())
303 Res.
AV = std::move(
AV);
324 return AV.MaterializeAdjustedValue(Load,
BB->getTerminator());
335 E.Opcode =
I->getOpcode();
340 E.VarArgs.push_back(
lookupOrAdd(GCR->getOperand(0)));
341 E.VarArgs.push_back(
lookupOrAdd(GCR->getBasePtr()));
342 E.VarArgs.push_back(
lookupOrAdd(GCR->getDerivedPtr()));
344 for (
Use &
Op :
I->operands())
347 if (
I->isCommutative()) {
352 assert(
I->getNumOperands() >= 2 &&
"Unsupported commutative instruction!");
353 if (
E.VarArgs[0] >
E.VarArgs[1])
355 E.Commutative =
true;
361 if (
E.VarArgs[0] >
E.VarArgs[1]) {
366 E.Commutative =
true;
368 E.VarArgs.append(IVI->idx_begin(), IVI->idx_end());
370 ArrayRef<int> ShuffleMask = SVI->getShuffleMask();
371 E.VarArgs.append(ShuffleMask.
begin(), ShuffleMask.
end());
373 E.Attrs = CB->getAttributes();
379GVNPass::Expression GVNPass::ValueTable::createCmpExpr(
381 assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
382 "Not a comparison!");
385 E.VarArgs.push_back(lookupOrAdd(
LHS));
386 E.VarArgs.push_back(lookupOrAdd(
RHS));
389 if (
E.VarArgs[0] >
E.VarArgs[1]) {
393 E.Opcode = (Opcode << 8) | Predicate;
394 E.Commutative =
true;
399GVNPass::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
400 assert(EI &&
"Not an ExtractValueInst?");
411 E.VarArgs.push_back(lookupOrAdd(WO->
getLHS()));
412 E.VarArgs.push_back(lookupOrAdd(WO->
getRHS()));
420 E.VarArgs.push_back(lookupOrAdd(
Op));
427GVNPass::Expression GVNPass::ValueTable::createGEPExpr(GetElementPtrInst *
GEP) {
429 Type *PtrTy =
GEP->getType()->getScalarType();
430 const DataLayout &
DL =
GEP->getDataLayout();
431 unsigned BitWidth =
DL.getIndexTypeSizeInBits(PtrTy);
432 SmallMapVector<Value *, APInt, 4> VariableOffsets;
434 if (
GEP->collectOffset(
DL,
BitWidth, VariableOffsets, ConstantOffset)) {
438 E.Opcode =
GEP->getOpcode();
440 E.VarArgs.push_back(lookupOrAdd(
GEP->getPointerOperand()));
441 for (
const auto &[V, Scale] : VariableOffsets) {
442 E.VarArgs.push_back(lookupOrAdd(V));
443 E.VarArgs.push_back(lookupOrAdd(ConstantInt::get(
Context, Scale)));
445 if (!ConstantOffset.isZero())
447 lookupOrAdd(ConstantInt::get(
Context, ConstantOffset)));
451 E.Opcode =
GEP->getOpcode();
452 E.Ty =
GEP->getSourceElementType();
453 for (Use &
Op :
GEP->operands())
454 E.VarArgs.push_back(lookupOrAdd(
Op));
463GVNPass::ValueTable::ValueTable() =
default;
464GVNPass::ValueTable::ValueTable(
const ValueTable &) =
default;
465GVNPass::ValueTable::ValueTable(
ValueTable &&) =
default;
466GVNPass::ValueTable::~ValueTable() =
default;
472 ValueNumbering.
insert(std::make_pair(V, Num));
474 NumberingPhi[Num] = PN;
484 assert(MSSA &&
"addMemoryStateToExp should not be called without MemorySSA");
485 assert(MSSA->getMemoryAccess(
I) &&
"Instruction does not access memory");
486 MemoryAccess *MA = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(
I);
487 Exp.VarArgs.push_back(lookupOrAdd(MA));
498 if (
C->getFunction()->isPresplitCoroutine()) {
499 ValueNumbering[
C] = NextValueNumber;
500 return NextValueNumber++;
506 if (
C->isConvergent()) {
507 ValueNumbering[
C] = NextValueNumber;
508 return NextValueNumber++;
511 if (AA->doesNotAccessMemory(
C)) {
513 uint32_t
E = assignExpNewValueNum(Exp).first;
514 ValueNumbering[
C] =
E;
518 if (MD && AA->onlyReadsMemory(
C)) {
520 auto [
E, IsValNumNew] = assignExpNewValueNum(Exp);
522 ValueNumbering[
C] =
E;
526 MemDepResult LocalDep = MD->getDependency(
C);
529 ValueNumbering[
C] = NextValueNumber;
530 return NextValueNumber++;
533 if (LocalDep.
isDef()) {
538 if (!LocalDepCall || LocalDepCall->
arg_size() !=
C->arg_size()) {
539 ValueNumbering[
C] = NextValueNumber;
540 return NextValueNumber++;
543 for (
unsigned I = 0,
E =
C->arg_size();
I <
E; ++
I) {
544 uint32_t CVN = lookupOrAdd(
C->getArgOperand(
I));
545 uint32_t LocalDepCallVN = lookupOrAdd(LocalDepCall->
getArgOperand(
I));
546 if (CVN != LocalDepCallVN) {
547 ValueNumbering[
C] = NextValueNumber;
548 return NextValueNumber++;
552 uint32_t
V = lookupOrAdd(LocalDepCall);
553 ValueNumbering[
C] =
V;
559 MD->getNonLocalCallDependency(
C);
561 CallInst *CDep =
nullptr;
565 for (
const NonLocalDepEntry &
I : Deps) {
566 if (
I.getResult().isNonLocal())
571 if (!
I.getResult().isDef() || CDep !=
nullptr) {
578 if (NonLocalDepCall && DT->properlyDominates(
I.getBB(),
C->getParent())) {
579 CDep = NonLocalDepCall;
588 ValueNumbering[
C] = NextValueNumber;
589 return NextValueNumber++;
593 ValueNumbering[
C] = NextValueNumber;
594 return NextValueNumber++;
596 for (
unsigned I = 0,
E =
C->arg_size();
I <
E; ++
I) {
597 uint32_t CVN = lookupOrAdd(
C->getArgOperand(
I));
600 ValueNumbering[
C] = NextValueNumber;
601 return NextValueNumber++;
605 uint32_t
V = lookupOrAdd(CDep);
606 ValueNumbering[
C] =
V;
610 if (MSSA && IsMSSAEnabled && AA->onlyReadsMemory(
C)) {
612 addMemoryStateToExp(
C, Exp);
613 auto [
V,
_] = assignExpNewValueNum(Exp);
614 ValueNumbering[
C] =
V;
618 ValueNumbering[
C] = NextValueNumber;
619 return NextValueNumber++;
623uint32_t GVNPass::ValueTable::computeLoadStoreVN(Instruction *
I) {
624 if (!MSSA || !IsMSSAEnabled) {
625 ValueNumbering[
I] = NextValueNumber;
626 return NextValueNumber++;
630 Exp.Ty =
I->getType();
631 Exp.Opcode =
I->getOpcode();
632 for (Use &
Op :
I->operands())
633 Exp.VarArgs.push_back(lookupOrAdd(
Op));
634 addMemoryStateToExp(
I, Exp);
636 auto [
V,
_] = assignExpNewValueNum(Exp);
637 ValueNumbering[
I] =
V;
642bool GVNPass::ValueTable::exists(
Value *V)
const {
643 return ValueNumbering.contains(V);
656 if (VI != ValueNumbering.end())
661 ValueNumbering[V] = NextValueNumber;
664 return NextValueNumber++;
668 switch (
I->getOpcode()) {
669 case Instruction::Call:
671 case Instruction::FNeg:
672 case Instruction::Add:
673 case Instruction::FAdd:
674 case Instruction::Sub:
675 case Instruction::FSub:
676 case Instruction::Mul:
677 case Instruction::FMul:
678 case Instruction::UDiv:
679 case Instruction::SDiv:
680 case Instruction::FDiv:
681 case Instruction::URem:
682 case Instruction::SRem:
683 case Instruction::FRem:
684 case Instruction::Shl:
685 case Instruction::LShr:
686 case Instruction::AShr:
687 case Instruction::And:
688 case Instruction::Or:
689 case Instruction::Xor:
690 case Instruction::ICmp:
691 case Instruction::FCmp:
692 case Instruction::Trunc:
693 case Instruction::ZExt:
694 case Instruction::SExt:
695 case Instruction::FPToUI:
696 case Instruction::FPToSI:
697 case Instruction::UIToFP:
698 case Instruction::SIToFP:
699 case Instruction::FPTrunc:
700 case Instruction::FPExt:
701 case Instruction::PtrToInt:
702 case Instruction::IntToPtr:
703 case Instruction::AddrSpaceCast:
704 case Instruction::BitCast:
705 case Instruction::Select:
706 case Instruction::Freeze:
707 case Instruction::ExtractElement:
708 case Instruction::InsertElement:
709 case Instruction::ShuffleVector:
710 case Instruction::InsertValue:
713 case Instruction::GetElementPtr:
716 case Instruction::ExtractValue:
719 case Instruction::PHI:
720 ValueNumbering[V] = NextValueNumber;
722 return NextValueNumber++;
723 case Instruction::Load:
724 case Instruction::Store:
725 return computeLoadStoreVN(
I);
727 ValueNumbering[V] = NextValueNumber;
728 return NextValueNumber++;
731 uint32_t E = assignExpNewValueNum(Exp).first;
732 ValueNumbering[V] = E;
741 assert(VI != ValueNumbering.end() &&
"Value not numbered?");
744 return (VI != ValueNumbering.end()) ? VI->second : 0;
751uint32_t GVNPass::ValueTable::lookupOrAddCmp(
unsigned Opcode,
754 Expression Exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
755 return assignExpNewValueNum(Exp).first;
760 ValueNumbering.clear();
761 ExpressionNumbering.clear();
762 NumberingPhi.clear();
764 PhiTranslateTable.clear();
773 uint32_t Num = ValueNumbering.lookup(V);
774 ValueNumbering.erase(V);
777 NumberingPhi.erase(Num);
779 NumberingBB.erase(Num);
784void GVNPass::ValueTable::verifyRemoved(
const Value *V)
const {
785 assert(!ValueNumbering.contains(V) &&
786 "Inst still occurs in value numbering map!");
795 LeaderListNode &Curr = NumToLeaders[
N];
796 if (!Curr.Entry.Val) {
802 LeaderListNode *
Node = TableAllocator.Allocate<LeaderListNode>();
805 Node->Next = Curr.Next;
813 LeaderListNode *Prev =
nullptr;
814 LeaderListNode *Curr = &NumToLeaders[
N];
816 while (Curr && (Curr->Entry.Val !=
I || Curr->Entry.BB != BB)) {
825 Prev->Next = Curr->Next;
828 Curr->Entry.Val =
nullptr;
829 Curr->Entry.BB =
nullptr;
831 LeaderListNode *
Next = Curr->Next;
832 Curr->Entry.Val =
Next->Entry.Val;
833 Curr->Entry.BB =
Next->Entry.BB;
834 Curr->Next =
Next->Next;
839void GVNPass::LeaderMap::verifyRemoved(
const Value *V)
const {
842 for (
const auto &
I : NumToLeaders) {
844 assert(
I.second.Entry.Val != V &&
"Inst still in value numbering scope!");
846 std::none_of(leader_iterator(&
I.second), leader_iterator(
nullptr),
847 [=](
const LeaderTableEntry &
E) {
return E.Val ==
V; }) &&
848 "Inst still in value numbering scope!");
869 return Options.AllowLoadPRESplitBackedge.value_or(
896 "On-demand computation of MemSSA implies that MemDep is disabled!");
900 bool Changed = runImpl(
F, AC, DT, TLI,
AA, MemDep, LI, &ORE,
901 MSSA ? &MSSA->getMSSA() :
nullptr);
916 OS, MapClassName2PassName);
919 if (Options.AllowPRE != std::nullopt)
920 OS << (*Options.AllowPRE ?
"" :
"no-") <<
"pre;";
921 if (Options.AllowLoadPRE != std::nullopt)
922 OS << (*Options.AllowLoadPRE ?
"" :
"no-") <<
"load-pre;";
923 if (Options.AllowLoadPRESplitBackedge != std::nullopt)
924 OS << (*Options.AllowLoadPRESplitBackedge ?
"" :
"no-")
925 <<
"split-backedge-load-pre;";
926 if (Options.AllowMemDep != std::nullopt)
927 OS << (*Options.AllowMemDep ?
"" :
"no-") <<
"memdep;";
928 if (Options.AllowMemorySSA != std::nullopt)
929 OS << (*Options.AllowMemorySSA ?
"" :
"no-") <<
"memoryssa";
936 removeInstruction(
I);
939#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
942 for (
const auto &[Num, Exp] : Map) {
943 errs() << Num <<
"\n";
974 std::optional<BasicBlock *> UnavailableBB;
978 unsigned NumNewNewSpeculativelyAvailableBBs = 0;
986 while (!Worklist.
empty()) {
990 std::pair<DenseMap<BasicBlock *, AvailabilityState>::iterator,
bool>
IV =
998 UnavailableBB = CurrBB;
1009 ++NumNewNewSpeculativelyAvailableBBs;
1015 MaxBBSpeculationCutoffReachedTimes += (int)OutOfBudget;
1017 UnavailableBB = CurrBB;
1023 NewSpeculativelyAvailableBBs.
insert(CurrBB);
1029#if LLVM_ENABLE_STATS
1030 IsValueFullyAvailableInBlockNumSpeculationsMax.updateMax(
1031 NumNewNewSpeculativelyAvailableBBs);
1036 auto MarkAsFixpointAndEnqueueSuccessors =
1038 auto It = FullyAvailableBlocks.
find(BB);
1039 if (It == FullyAvailableBlocks.
end())
1046 State = FixpointState;
1049 "Found a speculatively available successor leftover?");
1057 if (UnavailableBB) {
1064 while (!Worklist.
empty())
1065 MarkAsFixpointAndEnqueueSuccessors(Worklist.
pop_back_val(),
1073 while (!Worklist.
empty())
1074 MarkAsFixpointAndEnqueueSuccessors(Worklist.
pop_back_val(),
1078 "Must have fixed all the new speculatively available blocks.");
1081 return !UnavailableBB;
1090 if (V.AV.Val == OldValue)
1091 V.AV.Val = NewValue;
1092 if (V.AV.isSelectValue()) {
1093 if (V.AV.V1 == OldValue)
1095 if (V.AV.V2 == OldValue)
1110 if (ValuesPerBlock.
size() == 1 &&
1112 Load->getParent())) {
1113 assert(!ValuesPerBlock[0].AV.isUndefValue() &&
1114 "Dead BB dominate this block");
1115 return ValuesPerBlock[0].MaterializeAdjustedValue(Load);
1121 SSAUpdate.
Initialize(Load->getType(), Load->getName());
1126 if (AV.AV.isUndefValue())
1136 if (BB == Load->getParent() &&
1137 ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == Load) ||
1138 (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == Load)))
1151 Type *LoadTy = Load->getType();
1155 if (Res->
getType() != LoadTy) {
1170 Load->getFunction());
1180 if (!CoercedLoad->
hasMetadata(LLVMContext::MD_noundef))
1182 {LLVMContext::MD_dereferenceable,
1183 LLVMContext::MD_dereferenceable_or_null,
1184 LLVMContext::MD_invariant_load, LLVMContext::MD_invariant_group});
1200 assert(
V1 &&
V2 &&
"both value operands of the select must be present");
1209 assert(Res &&
"failed to materialize?");
1215 return II->getIntrinsicID() == Intrinsic::lifetime_start;
1232 Value *PtrOp = Load->getPointerOperand();
1238 for (
auto *U : PtrOp->
users()) {
1241 if (
I->getFunction() == Load->getFunction() && DT->
dominates(
I, Load)) {
1259 for (
auto *U : PtrOp->
users()) {
1262 if (
I->getFunction() == Load->getFunction() &&
1270 OtherAccess =
nullptr;
1289 using namespace ore;
1292 R <<
"load of type " << NV(
"Type", Load->getType()) <<
" not eliminated"
1297 R <<
" in favor of " << NV(
"OtherAccess", OtherAccess);
1299 R <<
" because it is clobbered by " << NV(
"ClobberedBy", DepInfo.
getInst());
1312 for (
auto *Inst = BB == FromBB ? From : BB->
getTerminator();
1320 if (LI->getPointerOperand() ==
Loc.Ptr && LI->getType() == LoadTy)
1326std::optional<AvailableValue>
1327GVNPass::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo,
1329 assert(
Load->isUnordered() &&
"rules below are incorrect for ordered access");
1334 const DataLayout &
DL =
Load->getDataLayout();
1341 if (
Address &&
Load->isAtomic() <= DepSI->isAtomic()) {
1357 if (DepLoad != Load &&
Address &&
1358 Load->isAtomic() <= DepLoad->isAtomic()) {
1365 DepLoad->getFunction())) {
1366 const auto ClobberOff = MD->getClobberOffset(DepLoad);
1368 Offset = (ClobberOff == std::nullopt || *ClobberOff < 0)
1395 dbgs() <<
" is clobbered by " << *DepInst <<
'\n';);
1399 return std::nullopt;
1408 if (Constant *InitVal =
1418 return std::nullopt;
1421 if (S->isAtomic() <
Load->isAtomic())
1422 return std::nullopt;
1433 return std::nullopt;
1436 if (
LD->isAtomic() <
Load->isAtomic())
1437 return std::nullopt;
1446 assert(Sel->getType() ==
Load->getPointerOperandType());
1452 return std::nullopt;
1457 return std::nullopt;
1465 dbgs() <<
" has unknown def " << *DepInst <<
'\n';);
1466 return std::nullopt;
1469void GVNPass::AnalyzeLoadAvailability(LoadInst *Load, LoadDepVect &Deps,
1470 AvailValInBlkVect &ValuesPerBlock,
1471 UnavailBlkVect &UnavailableBlocks) {
1476 for (
const auto &Dep : Deps) {
1478 MemDepResult DepInfo = Dep.getResult();
1480 if (DeadBlocks.count(DepBB)) {
1488 UnavailableBlocks.push_back(DepBB);
1495 if (
auto AV = AnalyzeLoadAvailability(Load, DepInfo, Dep.getAddress())) {
1499 ValuesPerBlock.push_back(
1502 UnavailableBlocks.push_back(DepBB);
1506 assert(Deps.size() == ValuesPerBlock.size() + UnavailableBlocks.size() &&
1507 "post condition violation");
1529LoadInst *GVNPass::findLoadToHoistIntoPred(BasicBlock *Pred, BasicBlock *LoadBB,
1533 if (
Term->getNumSuccessors() != 2 ||
Term->isSpecialTerminator())
1535 auto *SuccBB =
Term->getSuccessor(0);
1536 if (SuccBB == LoadBB)
1537 SuccBB =
Term->getSuccessor(1);
1538 if (!SuccBB->getSinglePredecessor())
1542 for (Instruction &Inst : *SuccBB) {
1543 if (Inst.isDebugOrPseudoInst())
1545 if (--NumInsts == 0)
1548 if (!Inst.isIdenticalTo(Load))
1551 MemDepResult Dep = MD->getDependency(&Inst);
1556 if (Dep.
isNonLocal() && !ICF->isDominatedByICFIFromSameBlock(&Inst))
1567void GVNPass::eliminatePartiallyRedundantLoad(
1568 LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1569 MapVector<BasicBlock *, Value *> &AvailableLoads,
1570 MapVector<BasicBlock *, LoadInst *> *CriticalEdgePredAndLoad) {
1571 for (
const auto &AvailableLoad : AvailableLoads) {
1572 BasicBlock *UnavailableBlock = AvailableLoad.first;
1573 Value *LoadPtr = AvailableLoad.second;
1575 auto *NewLoad =
new LoadInst(
1576 Load->getType(), LoadPtr,
Load->getName() +
".pre",
Load->isVolatile(),
1577 Load->getAlign(),
Load->getOrdering(),
Load->getSyncScopeID(),
1579 NewLoad->setDebugLoc(
Load->getDebugLoc());
1581 auto *NewAccess = MSSAU->createMemoryAccessInBB(
1584 MSSAU->insertDef(NewDef,
true);
1590 AAMDNodes Tags =
Load->getAAMetadata();
1592 NewLoad->setAAMetadata(Tags);
1594 if (
auto *MD =
Load->getMetadata(LLVMContext::MD_invariant_load))
1595 NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
1596 if (
auto *InvGroupMD =
Load->getMetadata(LLVMContext::MD_invariant_group))
1597 NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
1598 if (
auto *RangeMD =
Load->getMetadata(LLVMContext::MD_range))
1599 NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
1600 if (
auto *AccessMD =
Load->getMetadata(LLVMContext::MD_access_group))
1601 if (LI->getLoopFor(
Load->getParent()) == LI->getLoopFor(UnavailableBlock))
1602 NewLoad->setMetadata(LLVMContext::MD_access_group, AccessMD);
1611 ValuesPerBlock.push_back(
1613 MD->invalidateCachedPointerInfo(LoadPtr);
1618 if (CriticalEdgePredAndLoad) {
1619 auto It = CriticalEdgePredAndLoad->
find(UnavailableBlock);
1620 if (It != CriticalEdgePredAndLoad->
end()) {
1621 ++NumPRELoadMoved2CEPred;
1622 ICF->insertInstructionTo(NewLoad, UnavailableBlock);
1623 LoadInst *OldLoad = It->second;
1627 if (uint32_t ValNo = VN.lookup(OldLoad,
false))
1628 LeaderTable.erase(ValNo, OldLoad, OldLoad->
getParent());
1629 removeInstruction(OldLoad);
1637 ICF->removeUsersOf(Load);
1638 Load->replaceAllUsesWith(V);
1642 I->setDebugLoc(
Load->getDebugLoc());
1643 if (
V->getType()->isPtrOrPtrVectorTy())
1644 MD->invalidateCachedPointerInfo(V);
1646 return OptimizationRemark(
DEBUG_TYPE,
"LoadPRE", Load)
1647 <<
"load eliminated by PRE";
1652bool GVNPass::PerformLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1653 UnavailBlkVect &UnavailableBlocks) {
1662 SmallPtrSet<BasicBlock *, 4> Blockers(
llvm::from_range, UnavailableBlocks);
1684 bool MustEnsureSafetyOfSpeculativeExecution =
1685 ICF->isDominatedByICFIFromSameBlock(Load);
1689 if (TmpBB == LoadBB)
1691 if (Blockers.count(TmpBB))
1703 MustEnsureSafetyOfSpeculativeExecution =
1704 MustEnsureSafetyOfSpeculativeExecution || ICF->hasICF(TmpBB);
1712 MapVector<BasicBlock *, Value *> PredLoads;
1713 DenseMap<BasicBlock *, AvailabilityState> FullyAvailableBlocks;
1714 for (
const AvailableValueInBlock &AV : ValuesPerBlock)
1716 for (BasicBlock *UnavailableBB : UnavailableBlocks)
1724 MapVector<BasicBlock *, LoadInst *> CriticalEdgePredAndLoad;
1730 dbgs() <<
"COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
1731 << Pred->
getName() <<
"': " << *Load <<
'\n');
1742 dbgs() <<
"COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
1743 << Pred->
getName() <<
"': " << *Load <<
'\n');
1749 dbgs() <<
"COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
1750 << Pred->
getName() <<
"': " << *Load <<
'\n');
1756 if (DT->dominates(LoadBB, Pred)) {
1759 <<
"COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '"
1760 << Pred->
getName() <<
"': " << *Load <<
'\n');
1764 if (LoadInst *LI = findLoadToHoistIntoPred(Pred, LoadBB, Load))
1765 CriticalEdgePredAndLoad[Pred] = LI;
1770 PredLoads[Pred] =
nullptr;
1775 unsigned NumInsertPreds = PredLoads.
size() + CriticalEdgePredSplit.
size();
1776 unsigned NumUnavailablePreds = NumInsertPreds +
1777 CriticalEdgePredAndLoad.
size();
1778 assert(NumUnavailablePreds != 0 &&
1779 "Fully available value should already be eliminated!");
1780 (void)NumUnavailablePreds;
1786 if (NumInsertPreds > 1)
1791 if (MustEnsureSafetyOfSpeculativeExecution) {
1792 if (CriticalEdgePredSplit.
size())
1796 for (
auto &PL : PredLoads)
1800 for (
auto &CEP : CriticalEdgePredAndLoad)
1807 for (BasicBlock *OrigPred : CriticalEdgePredSplit) {
1808 BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
1809 assert(!PredLoads.count(OrigPred) &&
"Split edges shouldn't be in map!");
1810 PredLoads[NewPred] =
nullptr;
1811 LLVM_DEBUG(
dbgs() <<
"Split critical edge " << OrigPred->getName() <<
"->"
1812 << LoadBB->
getName() <<
'\n');
1815 for (
auto &CEP : CriticalEdgePredAndLoad)
1816 PredLoads[CEP.first] =
nullptr;
1819 bool CanDoPRE =
true;
1820 const DataLayout &
DL =
Load->getDataLayout();
1821 SmallVector<Instruction*, 8> NewInsts;
1822 for (
auto &PredLoad : PredLoads) {
1823 BasicBlock *UnavailablePred = PredLoad.first;
1833 Value *LoadPtr =
Load->getPointerOperand();
1835 while (Cur != LoadBB) {
1848 LoadPtr =
Address.translateWithInsertion(LoadBB, UnavailablePred, *DT,
1855 << *
Load->getPointerOperand() <<
"\n");
1860 PredLoad.second = LoadPtr;
1864 while (!NewInsts.
empty()) {
1874 return !CriticalEdgePredSplit.empty();
1880 LLVM_DEBUG(
dbgs() <<
"GVN REMOVING PRE LOAD: " << *Load <<
'\n');
1882 <<
" INSTS: " << *NewInsts.
back()
1886 for (Instruction *
I : NewInsts) {
1890 I->updateLocationAfterHoist();
1899 eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, PredLoads,
1900 &CriticalEdgePredAndLoad);
1905bool GVNPass::performLoopLoadPRE(LoadInst *Load,
1906 AvailValInBlkVect &ValuesPerBlock,
1907 UnavailBlkVect &UnavailableBlocks) {
1908 const Loop *
L = LI->getLoopFor(
Load->getParent());
1910 if (!L ||
L->getHeader() !=
Load->getParent())
1915 if (!Preheader || !Latch)
1918 Value *LoadPtr =
Load->getPointerOperand();
1920 if (!
L->isLoopInvariant(LoadPtr))
1926 if (ICF->isDominatedByICFIFromSameBlock(Load))
1930 for (
auto *Blocker : UnavailableBlocks) {
1932 if (!
L->contains(Blocker))
1944 if (L != LI->getLoopFor(Blocker))
1952 if (DT->dominates(Blocker, Latch))
1956 if (Blocker->getTerminator()->mayWriteToMemory())
1959 LoopBlock = Blocker;
1971 MapVector<BasicBlock *, Value *> AvailableLoads;
1972 AvailableLoads[LoopBlock] = LoadPtr;
1973 AvailableLoads[Preheader] = LoadPtr;
1975 LLVM_DEBUG(
dbgs() <<
"GVN REMOVING PRE LOOP LOAD: " << *Load <<
'\n');
1976 eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, AvailableLoads,
1984 using namespace ore;
1988 <<
"load of type " << NV(
"Type", Load->getType()) <<
" eliminated"
1989 << setExtraArgs() <<
" in favor of "
1996bool GVNPass::processNonLocalLoad(LoadInst *Load) {
1998 if (
Load->getParent()->getParent()->hasFnAttribute(
1999 Attribute::SanitizeAddress) ||
2000 Load->getParent()->getParent()->hasFnAttribute(
2001 Attribute::SanitizeHWAddress))
2006 MD->getNonLocalPointerDependency(Load, Deps);
2011 unsigned NumDeps = Deps.size();
2018 !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
2020 dbgs() <<
" has unknown dependencies\n";);
2026 if (GetElementPtrInst *
GEP =
2028 for (Use &U :
GEP->indices())
2034 AvailValInBlkVect ValuesPerBlock;
2035 UnavailBlkVect UnavailableBlocks;
2036 AnalyzeLoadAvailability(Load, Deps, ValuesPerBlock, UnavailableBlocks);
2040 if (ValuesPerBlock.empty())
2048 if (UnavailableBlocks.empty()) {
2049 LLVM_DEBUG(
dbgs() <<
"GVN REMOVING NONLOCAL LOAD: " << *Load <<
'\n');
2054 ICF->removeUsersOf(Load);
2055 Load->replaceAllUsesWith(V);
2063 if (
Load->getDebugLoc() &&
Load->getParent() ==
I->getParent())
2064 I->setDebugLoc(
Load->getDebugLoc());
2065 if (
V->getType()->isPtrOrPtrVectorTy())
2066 MD->invalidateCachedPointerInfo(V);
2079 if (performLoopLoadPRE(Load, ValuesPerBlock, UnavailableBlocks) ||
2080 PerformLoadPRE(Load, ValuesPerBlock, UnavailableBlocks))
2088 auto *I = dyn_cast<Instruction>(U);
2089 return I && I->getParent() == BB;
2093bool GVNPass::processAssumeIntrinsic(AssumeInst *IntrinsicI) {
2097 if (
Cond->isZero()) {
2107 const MemoryUseOrDef *FirstNonDom =
nullptr;
2109 MSSAU->getMemorySSA()->getBlockAccesses(IntrinsicI->
getParent());
2116 for (
const auto &Acc : *AL) {
2118 if (!Current->getMemoryInst()->comesBefore(NewS)) {
2119 FirstNonDom = Current;
2126 FirstNonDom ? MSSAU->createMemoryAccessBefore(
2128 const_cast<MemoryUseOrDef *
>(FirstNonDom))
2129 : MSSAU->createMemoryAccessInBB(
2158 Changed |= propagateEquality(V, True,
Edge,
false);
2164 ReplaceOperandsWithMap[
V] = True;
2187 if (CmpI->isEquivalence()) {
2188 Value *CmpLHS = CmpI->getOperand(0);
2189 Value *CmpRHS = CmpI->getOperand(1);
2203 uint32_t LVN = VN.lookupOrAdd(CmpLHS);
2204 uint32_t RVN = VN.lookupOrAdd(CmpRHS);
2215 << *CmpLHS <<
" with "
2216 << *CmpRHS <<
" in block "
2217 << IntrinsicI->
getParent()->getName() <<
"\n");
2221 ReplaceOperandsWithMap[CmpLHS] = CmpRHS;
2234 I->replaceAllUsesWith(Repl);
2239bool GVNPass::processLoad(LoadInst *L) {
2244 if (!
L->isUnordered())
2247 if (
L->use_empty()) {
2253 MemDepResult Dep = MD->getDependency(L);
2257 return processNonLocalLoad(L);
2264 dbgs() <<
"GVN: load ";
L->printAsOperand(
dbgs());
2265 dbgs() <<
" has unknown dependence\n";);
2269 auto AV = AnalyzeLoadAvailability(L, Dep,
L->getPointerOperand());
2273 Value *AvailableValue = AV->MaterializeAdjustedValue(L, L);
2276 ICF->removeUsersOf(L);
2277 L->replaceAllUsesWith(AvailableValue);
2279 MSSAU->removeMemoryAccess(L);
2286 MD->invalidateCachedPointerInfo(AvailableValue);
2292std::pair<uint32_t, bool>
2293GVNPass::ValueTable::assignExpNewValueNum(
Expression &Exp) {
2294 uint32_t &
E = ExpressionNumbering[
Exp];
2295 bool CreateNewValNum = !
E;
2296 if (CreateNewValNum) {
2297 Expressions.push_back(Exp);
2298 if (ExprIdx.size() < NextValueNumber + 1)
2299 ExprIdx.resize(NextValueNumber * 2);
2300 E = NextValueNumber;
2301 ExprIdx[NextValueNumber++] = NextExprNumber++;
2303 return {
E, CreateNewValNum};
2308bool GVNPass::ValueTable::areAllValsInBB(uint32_t Num,
const BasicBlock *BB,
2311 GVN.LeaderTable.getLeaders(Num),
2319 auto FindRes = PhiTranslateTable.find({Num, Pred});
2320 if (FindRes != PhiTranslateTable.end())
2321 return FindRes->second;
2322 uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, GVN);
2323 PhiTranslateTable.insert({{Num, Pred}, NewNum});
2334 auto Leaders = GVN.LeaderTable.getLeaders(Num);
2335 for (
const auto &Entry : Leaders) {
2337 if (
Call &&
Call->getParent() == PhiBlock)
2341 if (
AA->doesNotAccessMemory(
Call))
2344 if (!MD || !
AA->onlyReadsMemory(
Call))
2356 if (
D.getResult().isNonFuncLocal())
2364uint32_t GVNPass::ValueTable::phiTranslateImpl(
const BasicBlock *Pred,
2365 const BasicBlock *PhiBlock,
2369 if (PHINode *PN = NumberingPhi[Num]) {
2370 if (PN->getParent() != PhiBlock)
2372 for (
unsigned I = 0;
I != PN->getNumIncomingValues(); ++
I) {
2373 if (PN->getIncomingBlock(
I) != Pred)
2375 if (uint32_t TransVal =
lookup(PN->getIncomingValue(
I),
false))
2381 if (BasicBlock *BB = NumberingBB[Num]) {
2382 assert(MSSA &&
"NumberingBB is non-empty only when using MemorySSA");
2388 MemoryPhi *MPhi = MSSA->getMemoryAccess(BB);
2394 return lookupOrAdd(PredPhi->getBlock());
2395 if (MSSA->isLiveOnEntryDef(MA))
2400 "CFG/MemorySSA mismatch: predecessor not found among incoming blocks");
2406 if (!areAllValsInBB(Num, PhiBlock, GVN))
2409 if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
2413 for (
unsigned I = 0;
I <
Exp.VarArgs.size();
I++) {
2417 if ((
I > 1 &&
Exp.Opcode == Instruction::InsertValue) ||
2418 (
I > 0 &&
Exp.Opcode == Instruction::ExtractValue) ||
2419 (
I > 1 &&
Exp.Opcode == Instruction::ShuffleVector))
2421 Exp.VarArgs[
I] = phiTranslate(Pred, PhiBlock,
Exp.VarArgs[
I], GVN);
2424 if (
Exp.Commutative) {
2425 assert(
Exp.VarArgs.size() >= 2 &&
"Unsupported commutative instruction!");
2426 if (
Exp.VarArgs[0] >
Exp.VarArgs[1]) {
2428 uint32_t Opcode =
Exp.Opcode >> 8;
2429 if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
2430 Exp.Opcode = (Opcode << 8) |
2436 if (uint32_t NewNum = ExpressionNumbering[Exp]) {
2437 if (
Exp.Opcode == Instruction::Call && NewNum != Num)
2438 return areCallValsEqual(Num, NewNum, Pred, PhiBlock, GVN) ? NewNum : Num;
2446void GVNPass::ValueTable::eraseTranslateCacheEntry(
2449 PhiTranslateTable.erase({Num, Pred});
2458 auto Leaders = LeaderTable.getLeaders(Num);
2459 if (Leaders.empty())
2462 Value *Val =
nullptr;
2463 for (
const auto &Entry : Leaders) {
2464 if (DT->dominates(Entry.BB, BB)) {
2484 const BasicBlock *Pred =
E.getEnd()->getSinglePredecessor();
2485 assert((!Pred || Pred ==
E.getStart()) &&
2486 "No edge between these basic blocks!");
2487 return Pred !=
nullptr;
2490void GVNPass::assignBlockRPONumber(Function &
F) {
2491 BlockRPONumber.clear();
2492 uint32_t NextBlockNumber = 1;
2493 ReversePostOrderTraversal<Function *> RPOT(&
F);
2494 for (BasicBlock *BB : RPOT)
2495 BlockRPONumber[BB] = NextBlockNumber++;
2496 InvalidBlockRPONumbers =
false;
2499bool GVNPass::replaceOperandsForInBlockEquality(Instruction *Instr)
const {
2501 for (
unsigned OpNum = 0; OpNum <
Instr->getNumOperands(); ++OpNum) {
2502 Use &Operand =
Instr->getOperandUse(OpNum);
2503 auto It = ReplaceOperandsWithMap.find(Operand.
get());
2504 if (It != ReplaceOperandsWithMap.end()) {
2505 const DataLayout &
DL =
Instr->getDataLayout();
2510 << *It->second <<
" in instruction " << *Instr <<
'\n');
2511 Instr->setOperand(OpNum, It->second);
2524 const BasicBlockEdge &Root,
2525 bool DominatesByEdge) {
2533 while (!Worklist.
empty()) {
2534 std::pair<Value*, Value*> Item = Worklist.
pop_back_val();
2535 LHS = Item.first;
RHS = Item.second;
2549 const DataLayout &
DL =
2558 uint32_t LVN = VN.lookupOrAdd(
LHS);
2563 uint32_t RVN = VN.lookupOrAdd(
RHS);
2581 LeaderTable.insert(LVN,
RHS, Root.
getEnd());
2588 auto CanReplacePointersCallBack = [&
DL](
const Use &
U,
const Value *To) {
2591 unsigned NumReplacements =
2594 CanReplacePointersCallBack)
2596 CanReplacePointersCallBack);
2598 if (NumReplacements > 0) {
2600 NumGVNEqProp += NumReplacements;
2603 MD->invalidateCachedPointerInfo(
LHS);
2620 bool IsKnownFalse = !IsKnownTrue;
2636 Value *Op0 =
Cmp->getOperand(0), *Op1 =
Cmp->getOperand(1);
2641 if (
Cmp->isEquivalence(IsKnownFalse))
2642 Worklist.
push_back(std::make_pair(Op0, Op1));
2646 Constant *NotVal = ConstantInt::get(
Cmp->getType(), IsKnownFalse);
2650 uint32_t NextNum = VN.getNextUnusedValueNumber();
2651 uint32_t Num = VN.lookupOrAddCmp(
Cmp->getOpcode(), NotPred, Op0, Op1);
2654 if (Num < NextNum) {
2657 unsigned NumReplacements =
2662 Changed |= NumReplacements > 0;
2663 NumGVNEqProp += NumReplacements;
2666 MD->invalidateCachedPointerInfo(NotCmp);
2673 if (RootDominatesEnd)
2674 LeaderTable.insert(Num, NotVal, Root.
getEnd());
2683 Worklist.
emplace_back(
A, ConstantInt::get(
A->getType(), IsKnownTrue));
2688 Worklist.
emplace_back(
A, ConstantInt::get(
A->getType(), !IsKnownTrue));
2698bool GVNPass::processInstruction(Instruction *
I) {
2703 const DataLayout &
DL =
I->getDataLayout();
2706 if (!
I->use_empty()) {
2709 ICF->removeUsersOf(
I);
2710 I->replaceAllUsesWith(V);
2718 if (MD &&
V->getType()->isPtrOrPtrVectorTy())
2719 MD->invalidateCachedPointerInfo(V);
2726 return processAssumeIntrinsic(Assume);
2729 if (processLoad(Load))
2732 unsigned Num = VN.lookupOrAdd(Load);
2733 LeaderTable.insert(Num, Load,
Load->getParent());
2740 if (!BI->isConditional())
2744 return processFoldableCondBr(BI);
2746 Value *BranchCond = BI->getCondition();
2750 if (TrueSucc == FalseSucc)
2757 BasicBlockEdge TrueE(Parent, TrueSucc);
2758 Changed |= propagateEquality(BranchCond, TrueVal, TrueE,
true);
2761 BasicBlockEdge FalseE(Parent, FalseSucc);
2762 Changed |= propagateEquality(BranchCond, FalseVal, FalseE,
true);
2769 Value *SwitchCond =
SI->getCondition();
2774 SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
2776 ++SwitchEdges[Succ];
2778 for (
const auto &Case :
SI->cases()) {
2781 if (SwitchEdges.
lookup(Dst) == 1) {
2782 BasicBlockEdge
E(Parent, Dst);
2783 Changed |= propagateEquality(SwitchCond, Case.getCaseValue(),
E,
true);
2791 if (
I->getType()->isVoidTy())
2794 uint32_t NextNum = VN.getNextUnusedValueNumber();
2795 unsigned Num = VN.lookupOrAdd(
I);
2800 LeaderTable.insert(Num,
I,
I->getParent());
2807 if (Num >= NextNum) {
2808 LeaderTable.insert(Num,
I,
I->getParent());
2814 Value *Repl = findLeader(
I->getParent(), Num);
2817 LeaderTable.insert(Num,
I,
I->getParent());
2830 MD->invalidateCachedPointerInfo(Repl);
2836bool GVNPass::runImpl(Function &
F, AssumptionCache &RunAC, DominatorTree &RunDT,
2837 const TargetLibraryInfo &RunTLI, AAResults &RunAA,
2838 MemoryDependenceResults *RunMD, LoopInfo &LI,
2839 OptimizationRemarkEmitter *RunORE,
MemorySSA *MSSA) {
2844 VN.setAliasAnalysis(&RunAA);
2846 ImplicitControlFlowTracking ImplicitCFT;
2850 VN.setMemorySSA(MSSA);
2852 InvalidBlockRPONumbers =
true;
2853 MemorySSAUpdater Updater(MSSA);
2854 MSSAU = MSSA ? &Updater :
nullptr;
2857 bool ShouldContinue =
true;
2859 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
2871 unsigned Iteration = 0;
2872 while (ShouldContinue) {
2875 ShouldContinue = iterateOnFunction(
F);
2883 assignValNumForDeadCode();
2884 bool PREChanged =
true;
2885 while (PREChanged) {
2886 PREChanged = performPRE(
F);
2896 cleanupGlobalSets();
2907bool GVNPass::processBlock(BasicBlock *BB) {
2908 if (DeadBlocks.count(BB))
2912 ReplaceOperandsWithMap.clear();
2913 bool ChangedFunction =
false;
2919 SmallPtrSet<PHINode *, 8> PHINodesToRemove;
2921 for (PHINode *PN : PHINodesToRemove) {
2922 removeInstruction(PN);
2925 if (!ReplaceOperandsWithMap.empty())
2926 ChangedFunction |= replaceOperandsForInBlockEquality(&Inst);
2927 ChangedFunction |= processInstruction(&Inst);
2929 return ChangedFunction;
2933bool GVNPass::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
2934 BasicBlock *Curr,
unsigned int ValNo) {
2940 for (
unsigned I = 0,
E =
Instr->getNumOperands();
I !=
E; ++
I) {
2948 if (!VN.exists(
Op)) {
2953 VN.phiTranslate(Pred, Curr, VN.lookup(
Op), *
this);
2954 if (
Value *V = findLeader(Pred, TValNo)) {
2972 ICF->insertInstructionTo(Instr, Pred);
2974 unsigned Num = VN.lookupOrAdd(Instr);
2978 LeaderTable.insert(Num, Instr, Pred);
2982bool GVNPass::performScalarPRE(Instruction *CurInst) {
3008 if (CallB->isInlineAsm())
3012 uint32_t ValNo = VN.lookup(CurInst);
3020 unsigned NumWith = 0;
3021 unsigned NumWithout = 0;
3026 if (InvalidBlockRPONumbers)
3027 assignBlockRPONumber(*CurrentBlock->
getParent());
3033 if (!DT->isReachableFromEntry(
P)) {
3038 assert(BlockRPONumber.count(
P) && BlockRPONumber.count(CurrentBlock) &&
3039 "Invalid BlockRPONumber map.");
3040 if (BlockRPONumber[
P] >= BlockRPONumber[CurrentBlock]) {
3045 uint32_t TValNo = VN.phiTranslate(
P, CurrentBlock, ValNo, *
this);
3046 Value *PredV = findLeader(
P, TValNo);
3051 }
else if (PredV == CurInst) {
3063 if (NumWithout > 1 || NumWith == 0)
3071 if (NumWithout != 0) {
3077 if (ICF->isDominatedByICFIFromSameBlock(CurInst))
3090 ToSplit.push_back(std::make_pair(PREPred->
getTerminator(), SuccNum));
3094 PREInstr = CurInst->
clone();
3095 if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
3098 verifyRemoved(PREInstr);
3107 assert(PREInstr !=
nullptr || NumWithout == 0);
3113 CurInst->
getName() +
".pre-phi");
3114 Phi->insertBefore(CurrentBlock->begin());
3115 for (
auto &[V, BB] : PredMap) {
3120 Phi->addIncoming(V, BB);
3122 Phi->addIncoming(PREInstr, PREPred);
3128 VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock);
3129 LeaderTable.insert(ValNo, Phi, CurrentBlock);
3132 if (MD &&
Phi->getType()->isPtrOrPtrVectorTy())
3133 MD->invalidateCachedPointerInfo(Phi);
3134 LeaderTable.erase(ValNo, CurInst, CurrentBlock);
3137 removeInstruction(CurInst);
3145bool GVNPass::performPRE(Function &
F) {
3147 for (BasicBlock *CurrentBlock :
depth_first(&
F.getEntryBlock())) {
3149 if (CurrentBlock == &
F.getEntryBlock())
3153 if (CurrentBlock->isEHPad())
3157 BE = CurrentBlock->end();
3160 Changed |= performScalarPRE(CurInst);
3164 if (splitCriticalEdges())
3172BasicBlock *GVNPass::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
3177 CriticalEdgeSplittingOptions(DT, LI, MSSAU).unsetPreserveLoopSimplify());
3180 MD->invalidateCachedPredecessors();
3181 InvalidBlockRPONumbers =
true;
3188bool GVNPass::splitCriticalEdges() {
3189 if (ToSplit.empty())
3194 std::pair<Instruction *, unsigned>
Edge = ToSplit.pop_back_val();
3196 CriticalEdgeSplittingOptions(DT, LI, MSSAU)) !=
3198 }
while (!ToSplit.empty());
3201 MD->invalidateCachedPredecessors();
3202 InvalidBlockRPONumbers =
true;
3208bool GVNPass::iterateOnFunction(Function &
F) {
3209 cleanupGlobalSets();
3216 ReversePostOrderTraversal<Function *> RPOT(&
F);
3218 for (BasicBlock *BB : RPOT)
3224void GVNPass::cleanupGlobalSets() {
3226 LeaderTable.clear();
3227 BlockRPONumber.clear();
3229 InvalidBlockRPONumbers =
true;
3232void GVNPass::removeInstruction(Instruction *
I) {
3234 if (MD) MD->removeInstruction(
I);
3236 MSSAU->removeMemoryAccess(
I);
3240 ICF->removeInstruction(
I);
3241 I->eraseFromParent();
3246void GVNPass::verifyRemoved(
const Instruction *Inst)
const {
3247 VN.verifyRemoved(Inst);
3248 LeaderTable.verifyRemoved(Inst);
3255void GVNPass::addDeadBlock(BasicBlock *BB) {
3257 SmallSetVector<BasicBlock *, 4>
DF;
3260 while (!NewDead.
empty()) {
3262 if (DeadBlocks.count(
D))
3266 SmallVector<BasicBlock *, 8> Dom;
3267 DT->getDescendants(
D, Dom);
3268 DeadBlocks.insert_range(Dom);
3271 for (BasicBlock *
B : Dom) {
3273 if (DeadBlocks.count(S))
3276 bool AllPredDead =
true;
3278 if (!DeadBlocks.count(
P)) {
3279 AllPredDead =
false;
3299 for (BasicBlock *
B :
DF) {
3300 if (DeadBlocks.count(
B))
3306 for (BasicBlock *
P : Preds) {
3307 if (!DeadBlocks.count(
P))
3312 if (BasicBlock *S = splitCriticalEdges(
P,
B))
3313 DeadBlocks.insert(
P = S);
3319 if (!DeadBlocks.count(
P))
3321 for (PHINode &Phi :
B->phis()) {
3324 MD->invalidateCachedPointerInfo(&Phi);
3343bool GVNPass::processFoldableCondBr(BranchInst *BI) {
3357 if (DeadBlocks.count(DeadRoot))
3361 DeadRoot = splitCriticalEdges(BI->
getParent(), DeadRoot);
3363 addDeadBlock(DeadRoot);
3371void GVNPass::assignValNumForDeadCode() {
3372 for (BasicBlock *BB : DeadBlocks) {
3373 for (Instruction &Inst : *BB) {
3374 unsigned ValNum = VN.lookupOrAdd(&Inst);
3375 LeaderTable.insert(ValNum, &Inst, BB);
3387 .setMemDep(MemDepAnalysis)
3388 .setMemorySSA(MemSSAAnalysis)) {
3397 if (Impl.isMemorySSAEnabled() && !MSSAWP)
3400 return Impl.runImpl(
3405 Impl.isMemDepEnabled()
3410 MSSAWP ? &MSSAWP->getMSSA() :
nullptr);
3418 if (Impl.isMemDepEnabled())
3427 if (Impl.isMemorySSAEnabled())
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static RegisterPass< DebugifyFunctionPass > DF("debugify-function", "Attach debug info to a function")
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
early cse Early CSE w MemorySSA
static bool hasUsersIn(Value *V, BasicBlock *BB)
static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo, const DominatorTree *DT, OptimizationRemarkEmitter *ORE)
Try to locate the three instruction involved in a missed load-elimination case that is due to an inte...
static void reportLoadElim(LoadInst *Load, Value *AvailableValue, OptimizationRemarkEmitter *ORE)
static cl::opt< uint32_t > MaxNumInsnsPerBlock("gvn-max-num-insns", cl::Hidden, cl::init(100), cl::desc("Max number of instructions to scan in each basic block in GVN " "(default = 100)"))
static cl::opt< bool > GVNEnableMemDep("enable-gvn-memdep", cl::init(true))
static cl::opt< bool > GVNEnableLoadInLoopPRE("enable-load-in-loop-pre", cl::init(true))
static const Instruction * findMayClobberedPtrAccess(LoadInst *Load, const DominatorTree *DT)
static cl::opt< uint32_t > MaxNumDeps("gvn-max-num-deps", cl::Hidden, cl::init(100), cl::desc("Max number of dependences to attempt Load PRE (default = 100)"))
static cl::opt< bool > GVNEnableMemorySSA("enable-gvn-memoryssa", cl::init(false))
static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E, DominatorTree *DT)
There is an edge from 'Src' to 'Dst'.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB, DenseMap< BasicBlock *, AvailabilityState > &FullyAvailableBlocks)
Return true if we can prove that the value we're analyzing is fully available in the specified block.
static Value * findDominatingValue(const MemoryLocation &Loc, Type *LoadTy, Instruction *From, AAResults *AA)
static bool liesBetween(const Instruction *From, Instruction *Between, const Instruction *To, const DominatorTree *DT)
Assuming To can be reached from both From and Between, does Between lie on every path from From to To...
static bool isLifetimeStart(const Instruction *Inst)
static cl::opt< bool > GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre", cl::init(false))
static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl)
static void replaceValuesPerBlockEntry(SmallVectorImpl< AvailableValueInBlock > &ValuesPerBlock, Value *OldValue, Value *NewValue)
If the specified OldValue exists in ValuesPerBlock, replace its value with NewValue.
static Value * ConstructSSAForLoadSet(LoadInst *Load, SmallVectorImpl< AvailableValueInBlock > &ValuesPerBlock, GVNPass &GVN)
Given a set of loads specified by ValuesPerBlock, construct SSA form, allowing us to eliminate Load.
@ Unavailable
We know the block is not fully available. This is a fixpoint.
@ Available
We know the block is fully available. This is a fixpoint.
@ SpeculativelyAvailable
We do not know whether the block is fully available or not, but we are currently speculating that it ...
static cl::opt< bool > GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden)
static cl::opt< uint32_t > MaxNumVisitedInsts("gvn-max-num-visited-insts", cl::Hidden, cl::init(100), cl::desc("Max number of visited instructions when trying to find " "dominating value of select dependency (default = 100)"))
static cl::opt< uint32_t > MaxBBSpeculations("gvn-max-block-speculations", cl::Hidden, cl::init(600), cl::desc("Max number of blocks we're willing to speculate on (and recurse " "into) when deducing if a value is fully available or not in GVN " "(default = 600)"))
static cl::opt< bool > GVNEnableLoadPRE("enable-load-pre", cl::init(true))
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
This is the interface for a simple mod/ref and alias analysis over globals.
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
static bool lookup(const GsymReader &GR, DataExtractor &Data, uint64_t &Offset, uint64_t BaseAddr, uint64_t Addr, SourceLocations &SrcLocs, llvm::Error &Err)
A Lookup helper functions.
This file implements a map that provides insertion order iteration.
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
uint64_t IntrinsicInst * II
ppc ctr loops PowerPC CTR Loops Verify
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > & Cond
std::pair< BasicBlock *, BasicBlock * > Edge
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static const uint32_t IV[8]
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
const BasicBlock * getEnd() const
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
InstListType::iterator iterator
Instruction iterators...
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
bool isEHPad() const
Return true if this basic block is an exception handling block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
Analysis pass which computes a DominatorTree.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Class representing an expression and its matching format.
FunctionPass class - This class is used to implement most global optimizations.
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
const BasicBlock & getEntryBlock() const
Represents calls to the gc.relocate intrinsic.
This class holds the mapping between values and value numbers.
LLVM_ABI uint32_t lookupOrAdd(MemoryAccess *MA)
The core GVN pass object.
friend class gvn::GVNLegacyPass
LLVM_ABI bool isPREEnabled() const
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Run the pass over the function.
LLVM_ABI void salvageAndRemoveInstruction(Instruction *I)
This removes the specified instruction from our various maps and marks it for deletion.
AAResults * getAliasAnalysis() const
LLVM_ABI bool isLoadPREEnabled() const
GVNPass(GVNOptions Options={})
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI bool isMemorySSAEnabled() const
DominatorTree & getDominatorTree() const
LLVM_ABI bool isLoadInLoopPREEnabled() const
LLVM_ABI bool isLoadPRESplitBackedgeEnabled() const
LLVM_ABI bool isMemDepEnabled() const
Legacy wrapper pass to provide the GlobalsAAResult object.
LLVM_ABI Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following:
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
LLVM_ABI bool mayHaveSideEffects() const LLVM_READONLY
Return true if the instruction may have side effects.
bool isTerminator() const
LLVM_ABI bool mayReadFromMemory() const LLVM_READONLY
Return true if this instruction may read memory.
LLVM_ABI void dropUnknownNonDebugMetadata(ArrayRef< unsigned > KnownIDs={})
Drop all unknown metadata except for debug locations.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
A wrapper class for inspecting calls to intrinsic functions.
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
The legacy pass manager's analysis pass to compute loop information.
iterator find(const KeyT &Key)
A memory dependence query can return one of three different answers.
bool isClobber() const
Tests if this MemDepResult represents a query that is an instruction clobber dependency.
bool isNonLocal() const
Tests if this MemDepResult represents a query that is transparent to the start of the block,...
bool isDef() const
Tests if this MemDepResult represents a query that is an instruction definition dependency.
bool isLocal() const
Tests if this MemDepResult represents a valid local query (Clobber/Def).
Instruction * getInst() const
If this is a normal dependency, returns the instruction that is depended on.
This is the common base class for memset/memcpy/memmove.
BasicBlock * getBlock() const
An analysis that produces MemoryDependenceResults for a function.
std::vector< NonLocalDepEntry > NonLocalDepInfo
MemDepResult getDependency(Instruction *QueryInst)
Returns the instruction on which a memory operation depends.
const NonLocalDepInfo & getNonLocalCallDependency(CallBase *QueryCall)
Perform a full dependency query for the specified call, returning the set of blocks that the value is...
A wrapper analysis pass for the legacy pass manager that exposes a MemoryDepnedenceResults instance.
Representation for a specific memory location.
static LLVM_ABI MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
BasicBlock * getIncomingBlock(unsigned I) const
Return incoming basic block number i.
MemoryAccess * getIncomingValue(unsigned I) const
Return incoming value number x.
An analysis that produces MemorySSA for a function.
Legacy analysis pass which computes MemorySSA.
LLVM_ABI void verifyMemorySSA(VerificationLevel=VerificationLevel::Fast) const
Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right ...
This is an entry in the NonLocalDepInfo cache.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Helper class for SSA formation on a set of values defined in multiple blocks.
void Initialize(Type *Ty, StringRef Name)
Reset this object to get ready for a new set of SSA updates with type 'Ty'.
Value * GetValueInMiddleOfBlock(BasicBlock *BB)
Construct SSA form, materializing a value that is live in the middle of the specified block.
bool HasValueForBlock(BasicBlock *BB) const
Return true if the SSAUpdater already has a value for the specified block.
void AddAvailableValue(BasicBlock *BB, Value *V)
Indicate that a rewritten value is available in the specified block with the specified value.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
const Value * getCondition() const
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
SmallVector & operator=(const SmallVector &RHS)
StringRef - Represent a constant reference to a string, i.e.
Analysis pass providing the TargetLibraryInfo.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.s.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isVoidTy() const
Return true if this is 'void'.
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
bool hasUseList() const
Check if this Value has a use-list.
LLVM_ABI bool canBeFreed() const
Return true if the memory object referred to by V can by freed in the scope for which the SSA value d...
LLVM_ABI void deleteValue()
Delete a pointer to a generic Value.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
An efficient, type-erasing, non-owning reference to a callable.
GVNLegacyPass(bool MemDepAnalysis=GVNEnableMemDep, bool MemSSAAnalysis=GVNEnableMemorySSA)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
An opaque object representing a hash code.
const ParentTy * getParent() const
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
bool match(Val *V, const Pattern &P)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
NoWrapTrunc_match< OpTy, TruncInst::NoUnsignedWrap > m_NUWTrunc(const OpTy &Op)
Matches trunc nuw.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
int analyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr, StoreInst *DepSI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the store at D...
Value * getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
If analyzeLoadFromClobberingMemInst returned an offset, this function can be used to actually perform...
int analyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, LoadInst *DepLI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the load at De...
Value * getValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy, Instruction *InsertPt, Function *F)
If analyzeLoadFromClobberingStore/Load returned an offset, this function can be used to actually perf...
int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr, MemIntrinsic *DepMI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the memory int...
bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy, Function *F)
Return true if CoerceAvailableValueToLoadType would succeed if it was called.
initializer< Ty > init(const Ty &Val)
A private "module" namespace for types and utilities used by GVN.
Add a small namespace to avoid name clashes with the classes used in the streaming interface.
NodeAddr< InstrNode * > Instr
NodeAddr< PhiNode * > Phi
NodeAddr< UseNode * > Use
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
FunctionAddr VTableAddr Value
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
hash_code hash_value(const FixedPointSemantics &Val)
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
LLVM_ABI unsigned replaceDominatedUsesWithIf(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge, function_ref< bool(const Use &U, const Value *To)> ShouldReplace)
Replace each use of 'From' with 'To' if that use is dominated by the given edge and the callback Shou...
LLVM_ABI unsigned GetSuccessorNumber(const BasicBlock *BB, const BasicBlock *Succ)
Search for the specified successor of basic block BB and return its position in the terminator instru...
auto pred_end(const MachineBasicBlock *BB)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
auto successors(const MachineBasicBlock *BB)
constexpr from_range_t from_range
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
LLVM_ABI bool isAssumeWithEmptyBundle(const AssumeInst &Assume)
Return true iff the operand bundles of the provided llvm.assume doesn't contain any valuable informat...
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
LLVM_ABI bool canReplacePointersInUseIfEqual(const Use &U, const Value *To, const DataLayout &DL)
LLVM_ABI bool canReplacePointersIfEqual(const Value *From, const Value *To, const DataLayout &DL)
Returns true if a pointer value From can be replaced with another pointer value \To if they are deeme...
bool isModSet(const ModRefInfo MRI)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void patchReplacementInstruction(Instruction *I, Value *Repl)
Patch the replacement so that it is not more restrictive than the value being replaced.
LLVM_ABI void initializeGVNLegacyPassPass(PassRegistry &)
LLVM_ABI unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge)
Replace each use of 'From' with 'To' if that use is dominated by the given edge.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
@ Success
The lock was released successfully.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
RNSuccIterator< NodeRef, BlockT, RegionT > succ_begin(NodeRef Node)
LLVM_ABI void combineMetadataForCSE(Instruction *K, const Instruction *J, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
LLVM_ABI bool VerifyMemorySSA
Enables verification of MemorySSA.
RNSuccIterator< NodeRef, BlockT, RegionT > succ_end(NodeRef Node)
LLVM_ABI bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and, if the resulting llvm.assume is valid, inserts it before I.
LLVM_ABI bool MergeBlockIntoPredecessor(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, MemoryDependenceResults *MemDep=nullptr, bool PredecessorWithTwoSuccessors=false, DominatorTree *DT=nullptr)
Attempts to merge a block into its predecessor, if possible.
LLVM_ABI FunctionPass * createGVNPass()
Create a legacy GVN pass.
FunctionAddr VTableAddr Next
DWARFExpression::Operation Op
LLVM_ABI BasicBlock * SplitCriticalEdge(Instruction *TI, unsigned SuccNum, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions(), const Twine &BBName="")
If this edge is a critical edge, insert a new node to split the critical edge.
LLVM_ABI bool isCriticalEdge(const Instruction *TI, unsigned SuccNum, bool AllowIdenticalEdges=false)
Return true if the specified edge is a critical edge.
constexpr unsigned BitWidth
auto pred_begin(const MachineBasicBlock *BB)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
bool pred_empty(const BasicBlock *BB)
iterator_range< df_iterator< T > > depth_first(const T &G)
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
LLVM_ABI bool EliminateDuplicatePHINodes(BasicBlock *BB)
Check for and eliminate duplicate PHI nodes in this block.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
LLVM_ABI bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in Ex...
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static GVNPass::Expression getTombstoneKey()
static bool isEqual(const GVNPass::Expression &LHS, const GVNPass::Expression &RHS)
static GVNPass::Expression getEmptyKey()
static unsigned getHashValue(const GVNPass::Expression &E)
An information struct used to provide DenseMap with the various necessary components for a given valu...
A set of parameters to control various transforms performed by GVN pass.
bool operator==(const Expression &Other) const
friend hash_code hash_value(const Expression &Value)
SmallVector< uint32_t, 4 > VarArgs
Expression(uint32_t Op=~2U)
A CRTP mix-in to automatically provide informational APIs needed for passes.
Represents an AvailableValue which can be rematerialized at the end of the associated BasicBlock.
static AvailableValueInBlock get(BasicBlock *BB, Value *V, unsigned Offset=0)
static AvailableValueInBlock getUndef(BasicBlock *BB)
static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV)
AvailableValue AV
AV - The actual available value.
static AvailableValueInBlock getSelect(BasicBlock *BB, SelectInst *Sel, Value *V1, Value *V2)
BasicBlock * BB
BB - The basic block in question.
Value * MaterializeAdjustedValue(LoadInst *Load) const
Emit code at the end of this block to adjust the value defined here to the specified type.
Represents a particular available value that we know how to materialize.
unsigned Offset
Offset - The byte offset in Val that is interesting for the load query.
bool isSimpleValue() const
static AvailableValue getSelect(SelectInst *Sel, Value *V1, Value *V2)
bool isCoercedLoadValue() const
static AvailableValue get(Value *V, unsigned Offset=0)
ValType Kind
Kind of the live-out value.
LoadInst * getCoercedLoadValue() const
static AvailableValue getLoad(LoadInst *Load, unsigned Offset=0)
static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset=0)
bool isUndefValue() const
bool isSelectValue() const
Value * Val
Val - The value that is live out of the block.
Value * V1
V1, V2 - The dominating non-clobbered values of SelectVal.
static AvailableValue getUndef()
SelectInst * getSelectValue() const
Value * getSimpleValue() const
bool isMemIntrinValue() const
MemIntrinsic * getMemIntrinValue() const
Value * MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt) const
Emit code at the specified insertion point to adjust the value defined here to the specified type.