86using namespace PatternMatch;
88#define DEBUG_TYPE "gvn"
90STATISTIC(NumGVNInstr,
"Number of instructions deleted");
92STATISTIC(NumGVNPRE,
"Number of instructions PRE'd");
94STATISTIC(NumGVNSimpl,
"Number of instructions simplified");
95STATISTIC(NumGVNEqProp,
"Number of equalities propagated");
97STATISTIC(NumPRELoopLoad,
"Number of loop loads PRE'd");
99 "Number of loads moved to predecessor of a critical edge in PRE");
101STATISTIC(IsValueFullyAvailableInBlockNumSpeculationsMax,
102 "Number of blocks speculated as available in "
103 "IsValueFullyAvailableInBlock(), max");
105 "Number of times we we reached gvn-max-block-speculations cut-off "
106 "preventing further exploration");
121 cl::desc(
"Max number of dependences to attempt Load PRE (default = 100)"));
126 cl::desc(
"Max number of blocks we're willing to speculate on (and recurse "
127 "into) when deducing if a value is fully available or not in GVN "
132 cl::desc(
"Max number of visited instructions when trying to find "
133 "dominating value of select dependency (default = 100)"));
137 cl::desc(
"Max number of instructions to scan in each basic block in GVN "
274 return cast<LoadInst>(
Val);
279 return cast<MemIntrinsic>(
Val);
284 return cast<SelectInst>(
Val);
305 Res.
AV = std::move(
AV);
336 e.type =
I->getType();
337 e.opcode =
I->getOpcode();
342 e.varargs.push_back(
lookupOrAdd(GCR->getOperand(0)));
343 e.varargs.push_back(
lookupOrAdd(GCR->getBasePtr()));
344 e.varargs.push_back(
lookupOrAdd(GCR->getDerivedPtr()));
346 for (
Use &
Op :
I->operands())
349 if (
I->isCommutative()) {
354 assert(
I->getNumOperands() >= 2 &&
"Unsupported commutative instruction!");
355 if (
e.varargs[0] >
e.varargs[1])
357 e.commutative =
true;
360 if (
auto *
C = dyn_cast<CmpInst>(
I)) {
363 if (
e.varargs[0] >
e.varargs[1]) {
367 e.opcode = (
C->getOpcode() << 8) | Predicate;
368 e.commutative =
true;
369 }
else if (
auto *E = dyn_cast<InsertValueInst>(
I)) {
370 e.varargs.append(E->idx_begin(), E->idx_end());
371 }
else if (
auto *SVI = dyn_cast<ShuffleVectorInst>(
I)) {
373 e.varargs.append(ShuffleMask.
begin(), ShuffleMask.
end());
374 }
else if (
auto *CB = dyn_cast<CallBase>(
I)) {
375 e.attrs = CB->getAttributes();
383 assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
384 "Not a comparison!");
387 e.varargs.push_back(lookupOrAdd(LHS));
388 e.varargs.push_back(lookupOrAdd(RHS));
391 if (
e.varargs[0] >
e.varargs[1]) {
395 e.opcode = (Opcode << 8) | Predicate;
396 e.commutative =
true;
402 assert(EI &&
"Not an ExtractValueInst?");
413 e.varargs.push_back(lookupOrAdd(WO->
getLHS()));
414 e.varargs.push_back(lookupOrAdd(WO->
getRHS()));
422 e.varargs.push_back(lookupOrAdd(
Op));
431 Type *PtrTy =
GEP->getType()->getScalarType();
433 unsigned BitWidth =
DL.getIndexTypeSizeInBits(PtrTy);
436 if (
GEP->collectOffset(
DL,
BitWidth, VariableOffsets, ConstantOffset)) {
440 E.opcode =
GEP->getOpcode();
442 E.varargs.push_back(lookupOrAdd(
GEP->getPointerOperand()));
443 for (
const auto &Pair : VariableOffsets) {
444 E.varargs.push_back(lookupOrAdd(Pair.first));
445 E.varargs.push_back(lookupOrAdd(ConstantInt::get(Context, Pair.second)));
447 if (!ConstantOffset.isZero())
449 lookupOrAdd(ConstantInt::get(Context, ConstantOffset)));
453 E.opcode =
GEP->getOpcode();
454 E.type =
GEP->getSourceElementType();
456 E.varargs.push_back(lookupOrAdd(
Op));
// Out-of-line defaulted special member functions for GVNPass::ValueTable.
// NOTE(review): keeping these `= default` definitions in the .cpp rather than
// in-class presumably allows the ValueTable's member types to remain
// incomplete in the header — confirm against the class declaration, which is
// not visible in this chunk.
GVNPass::ValueTable::ValueTable() = default;
GVNPass::ValueTable::ValueTable(const ValueTable &) = default;
GVNPass::ValueTable::ValueTable(ValueTable &&) = default;
GVNPass::ValueTable::~ValueTable() = default;
474 valueNumbering.
insert(std::make_pair(V, num));
475 if (
PHINode *PN = dyn_cast<PHINode>(V))
476 NumberingPhi[num] = PN;
487 if (
C->getFunction()->isPresplitCoroutine()) {
488 valueNumbering[
C] = nextValueNumber;
489 return nextValueNumber++;
495 if (
C->isConvergent()) {
496 valueNumbering[
C] = nextValueNumber;
497 return nextValueNumber++;
500 if (AA->doesNotAccessMemory(
C)) {
502 uint32_t e = assignExpNewValueNum(exp).first;
503 valueNumbering[
C] = e;
507 if (MD && AA->onlyReadsMemory(
C)) {
509 auto ValNum = assignExpNewValueNum(exp);
511 valueNumbering[
C] = ValNum.first;
518 valueNumbering[
C] = nextValueNumber;
519 return nextValueNumber++;
522 if (local_dep.
isDef()) {
527 if (!local_cdep || local_cdep->
arg_size() !=
C->arg_size()) {
528 valueNumbering[
C] = nextValueNumber;
529 return nextValueNumber++;
532 for (
unsigned i = 0, e =
C->arg_size(); i < e; ++i) {
533 uint32_t c_vn = lookupOrAdd(
C->getArgOperand(i));
536 valueNumbering[
C] = nextValueNumber;
537 return nextValueNumber++;
542 valueNumbering[
C] =
v;
555 if (
I.getResult().isNonLocal())
560 if (!
I.getResult().isDef() || cdep !=
nullptr) {
565 CallInst *NonLocalDepCall = dyn_cast<CallInst>(
I.getResult().getInst());
568 cdep = NonLocalDepCall;
577 valueNumbering[
C] = nextValueNumber;
578 return nextValueNumber++;
582 valueNumbering[
C] = nextValueNumber;
583 return nextValueNumber++;
585 for (
unsigned i = 0, e =
C->arg_size(); i < e; ++i) {
586 uint32_t c_vn = lookupOrAdd(
C->getArgOperand(i));
589 valueNumbering[
C] = nextValueNumber;
590 return nextValueNumber++;
595 valueNumbering[
C] =
v;
599 valueNumbering[
C] = nextValueNumber;
600 return nextValueNumber++;
604bool GVNPass::ValueTable::exists(
Value *V)
const {
605 return valueNumbering.contains(V);
612 if (VI != valueNumbering.end())
615 auto *
I = dyn_cast<Instruction>(V);
617 valueNumbering[V] = nextValueNumber;
618 return nextValueNumber++;
622 switch (
I->getOpcode()) {
623 case Instruction::Call:
624 return lookupOrAddCall(cast<CallInst>(
I));
625 case Instruction::FNeg:
626 case Instruction::Add:
627 case Instruction::FAdd:
628 case Instruction::Sub:
629 case Instruction::FSub:
630 case Instruction::Mul:
631 case Instruction::FMul:
632 case Instruction::UDiv:
633 case Instruction::SDiv:
634 case Instruction::FDiv:
635 case Instruction::URem:
636 case Instruction::SRem:
637 case Instruction::FRem:
638 case Instruction::Shl:
639 case Instruction::LShr:
640 case Instruction::AShr:
641 case Instruction::And:
642 case Instruction::Or:
643 case Instruction::Xor:
644 case Instruction::ICmp:
645 case Instruction::FCmp:
646 case Instruction::Trunc:
647 case Instruction::ZExt:
648 case Instruction::SExt:
649 case Instruction::FPToUI:
650 case Instruction::FPToSI:
651 case Instruction::UIToFP:
652 case Instruction::SIToFP:
653 case Instruction::FPTrunc:
654 case Instruction::FPExt:
655 case Instruction::PtrToInt:
656 case Instruction::IntToPtr:
657 case Instruction::AddrSpaceCast:
658 case Instruction::BitCast:
659 case Instruction::Select:
660 case Instruction::Freeze:
661 case Instruction::ExtractElement:
662 case Instruction::InsertElement:
663 case Instruction::ShuffleVector:
664 case Instruction::InsertValue:
667 case Instruction::GetElementPtr:
668 exp = createGEPExpr(cast<GetElementPtrInst>(
I));
670 case Instruction::ExtractValue:
671 exp = createExtractvalueExpr(cast<ExtractValueInst>(
I));
673 case Instruction::PHI:
674 valueNumbering[V] = nextValueNumber;
675 NumberingPhi[nextValueNumber] = cast<PHINode>(V);
676 return nextValueNumber++;
678 valueNumbering[V] = nextValueNumber;
679 return nextValueNumber++;
682 uint32_t e = assignExpNewValueNum(exp).first;
683 valueNumbering[V] = e;
692 assert(VI != valueNumbering.end() &&
"Value not numbered?");
695 return (VI != valueNumbering.end()) ? VI->second : 0;
702uint32_t GVNPass::ValueTable::lookupOrAddCmp(
unsigned Opcode,
706 return assignExpNewValueNum(exp).first;
711 valueNumbering.clear();
712 expressionNumbering.clear();
713 NumberingPhi.clear();
714 PhiTranslateTable.clear();
723 uint32_t Num = valueNumbering.lookup(V);
724 valueNumbering.erase(V);
727 NumberingPhi.erase(Num);
732void GVNPass::ValueTable::verifyRemoved(
const Value *V)
const {
733 assert(!valueNumbering.contains(V) &&
734 "Inst still occurs in value numbering map!");
743 LeaderListNode &Curr = NumToLeaders[
N];
744 if (!Curr.Entry.Val) {
750 LeaderListNode *Node = TableAllocator.Allocate<LeaderListNode>();
753 Node->Next = Curr.Next;
761 LeaderListNode *Prev =
nullptr;
762 LeaderListNode *Curr = &NumToLeaders[
N];
764 while (Curr && (Curr->Entry.Val !=
I || Curr->Entry.BB != BB)) {
773 Prev->Next = Curr->Next;
776 Curr->Entry.Val =
nullptr;
777 Curr->Entry.BB =
nullptr;
779 LeaderListNode *Next = Curr->Next;
780 Curr->Entry.Val = Next->Entry.Val;
781 Curr->Entry.BB = Next->Entry.BB;
782 Curr->Next = Next->Next;
787void GVNPass::LeaderMap::verifyRemoved(
const Value *V)
const {
790 for (
const auto &
I : NumToLeaders) {
792 assert(
I.second.Entry.Val != V &&
"Inst still in value numbering scope!");
794 std::none_of(leader_iterator(&
I.second), leader_iterator(
nullptr),
795 [=](
const LeaderTableEntry &E) {
return E.Val ==
V; }) &&
796 "Inst still in value numbering scope!");
844 "On-demand computation of MemSSA implies that MemDep is disabled!");
848 bool Changed = runImpl(
F, AC, DT, TLI, AA, MemDep, LI, &ORE,
849 MSSA ? &MSSA->getMSSA() :
nullptr);
864 OS, MapClassName2PassName);
867 if (Options.
AllowPRE != std::nullopt)
868 OS << (*Options.
AllowPRE ?
"" :
"no-") <<
"pre;";
873 <<
"split-backedge-load-pre;";
881#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
885 errs() <<
I.first <<
"\n";
916 std::optional<BasicBlock *> UnavailableBB;
920 unsigned NumNewNewSpeculativelyAvailableBBs = 0;
928 while (!Worklist.
empty()) {
932 std::pair<DenseMap<BasicBlock *, AvailabilityState>::iterator,
bool>
IV =
934 CurrBB, AvailabilityState::SpeculativelyAvailable);
939 if (State == AvailabilityState::Unavailable) {
940 UnavailableBB = CurrBB;
951 ++NumNewNewSpeculativelyAvailableBBs;
957 MaxBBSpeculationCutoffReachedTimes += (int)OutOfBudget;
958 State = AvailabilityState::Unavailable;
959 UnavailableBB = CurrBB;
965 NewSpeculativelyAvailableBBs.
insert(CurrBB);
972 IsValueFullyAvailableInBlockNumSpeculationsMax.updateMax(
973 NumNewNewSpeculativelyAvailableBBs);
978 auto MarkAsFixpointAndEnqueueSuccessors =
980 auto It = FullyAvailableBlocks.
find(BB);
981 if (It == FullyAvailableBlocks.
end())
984 case AvailabilityState::Unavailable:
985 case AvailabilityState::Available:
987 case AvailabilityState::SpeculativelyAvailable:
988 State = FixpointState;
991 "Found a speculatively available successor leftover?");
1006 while (!Worklist.
empty())
1007 MarkAsFixpointAndEnqueueSuccessors(Worklist.
pop_back_val(),
1008 AvailabilityState::Unavailable);
1015 while (!Worklist.
empty())
1016 MarkAsFixpointAndEnqueueSuccessors(Worklist.
pop_back_val(),
1017 AvailabilityState::Available);
1020 "Must have fixed all the new speculatively available blocks.");
1023 return !UnavailableBB;
1032 if (V.AV.Val == OldValue)
1033 V.AV.Val = NewValue;
1034 if (V.AV.isSelectValue()) {
1035 if (V.AV.V1 == OldValue)
1037 if (V.AV.V2 == OldValue)
1052 if (ValuesPerBlock.
size() == 1 &&
1054 Load->getParent())) {
1055 assert(!ValuesPerBlock[0].AV.isUndefValue() &&
1056 "Dead BB dominate this block");
1057 return ValuesPerBlock[0].MaterializeAdjustedValue(Load, gvn);
1063 SSAUpdate.
Initialize(Load->getType(), Load->getName());
1068 if (AV.AV.isUndefValue())
1078 if (BB == Load->getParent() &&
1079 ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == Load) ||
1080 (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == Load)))
1094 Type *LoadTy = Load->getType();
1096 if (isSimpleValue()) {
1097 Res = getSimpleValue();
1098 if (Res->
getType() != LoadTy) {
1102 <<
" " << *getSimpleValue() <<
'\n'
1106 }
else if (isCoercedLoadValue()) {
1107 LoadInst *CoercedLoad = getCoercedLoadValue();
1122 if (!CoercedLoad->
hasMetadata(LLVMContext::MD_noundef))
1124 {LLVMContext::MD_dereferenceable,
1125 LLVMContext::MD_dereferenceable_or_null,
1126 LLVMContext::MD_invariant_load, LLVMContext::MD_invariant_group});
1128 <<
" " << *getCoercedLoadValue() <<
'\n'
1132 }
else if (isMemIntrinValue()) {
1136 <<
" " << *getMemIntrinValue() <<
'\n'
1139 }
else if (isSelectValue()) {
1142 assert(V1 && V2 &&
"both value operands of the select must be present");
1147 cast<SelectInst>(Res)->setDebugLoc(Load->getDebugLoc());
1151 assert(Res &&
"failed to materialize?");
1157 return II->getIntrinsicID() == Intrinsic::lifetime_start;
1177 using namespace ore;
1182 R <<
"load of type " << NV(
"Type", Load->getType()) <<
" not eliminated"
1185 for (
auto *U : Load->getPointerOperand()->users()) {
1186 if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U))) {
1187 auto *
I = cast<Instruction>(U);
1188 if (
I->getFunction() == Load->getFunction() && DT->
dominates(
I, Load)) {
1204 for (
auto *U : Load->getPointerOperand()->users()) {
1205 if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U))) {
1206 auto *
I = cast<Instruction>(U);
1207 if (
I->getFunction() == Load->getFunction() &&
1215 OtherAccess =
nullptr;
1227 R <<
" in favor of " << NV(
"OtherAccess", OtherAccess);
1229 R <<
" because it is clobbered by " << NV(
"ClobberedBy", DepInfo.
getInst());
1249 if (
auto *LI = dyn_cast<LoadInst>(Inst))
1250 if (LI->getPointerOperand() == Loc.
Ptr && LI->getType() == LoadTy)
1256std::optional<AvailableValue>
1259 assert(
Load->isUnordered() &&
"rules below are incorrect for ordered access");
1269 if (
StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
1271 if (
Address &&
Load->isAtomic() <= DepSI->isAtomic()) {
1283 if (
LoadInst *DepLoad = dyn_cast<LoadInst>(DepInst)) {
1287 if (DepLoad != Load &&
Address &&
1288 Load->isAtomic() <= DepLoad->isAtomic()) {
1297 Offset = (ClobberOff == std::nullopt || *ClobberOff < 0)
1311 if (
MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
1324 dbgs() <<
" is clobbered by " << *DepInst <<
'\n';);
1328 return std::nullopt;
1341 if (
StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
1347 return std::nullopt;
1350 if (S->isAtomic() <
Load->isAtomic())
1351 return std::nullopt;
1356 if (
LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
1361 return std::nullopt;
1364 if (
LD->isAtomic() <
Load->isAtomic())
1365 return std::nullopt;
1373 if (
auto *Sel = dyn_cast<SelectInst>(DepInst)) {
1374 assert(Sel->getType() ==
Load->getPointerOperandType());
1380 return std::nullopt;
1385 return std::nullopt;
1393 dbgs() <<
" has unknown def " << *DepInst <<
'\n';);
1394 return std::nullopt;
1397void GVNPass::AnalyzeLoadAvailability(
LoadInst *Load, LoadDepVect &Deps,
1398 AvailValInBlkVect &ValuesPerBlock,
1399 UnavailBlkVect &UnavailableBlocks) {
1404 for (
const auto &Dep : Deps) {
1408 if (DeadBlocks.count(DepBB)) {
1416 UnavailableBlocks.push_back(DepBB);
1423 if (
auto AV = AnalyzeLoadAvailability(Load, DepInfo, Dep.getAddress())) {
1427 ValuesPerBlock.push_back(
1430 UnavailableBlocks.push_back(DepBB);
1434 assert(Deps.size() == ValuesPerBlock.size() + UnavailableBlocks.size() &&
1435 "post condition violation");
1461 if (
Term->getNumSuccessors() != 2 ||
Term->isSpecialTerminator())
1463 auto *SuccBB =
Term->getSuccessor(0);
1464 if (SuccBB == LoadBB)
1465 SuccBB =
Term->getSuccessor(1);
1466 if (!SuccBB->getSinglePredecessor())
1471 if (Inst.isDebugOrPseudoInst())
1473 if (--NumInsts == 0)
1476 if (!Inst.isIdenticalTo(Load))
1485 return cast<LoadInst>(&Inst);
1495void GVNPass::eliminatePartiallyRedundantLoad(
1496 LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1499 for (
const auto &AvailableLoad : AvailableLoads) {
1500 BasicBlock *UnavailableBlock = AvailableLoad.first;
1501 Value *LoadPtr = AvailableLoad.second;
1504 Load->getType(), LoadPtr,
Load->getName() +
".pre",
Load->isVolatile(),
1505 Load->getAlign(),
Load->getOrdering(),
Load->getSyncScopeID(),
1507 NewLoad->setDebugLoc(
Load->getDebugLoc());
1511 if (
auto *NewDef = dyn_cast<MemoryDef>(NewAccess))
1514 MSSAU->
insertUse(cast<MemoryUse>(NewAccess),
true);
1520 NewLoad->setAAMetadata(Tags);
1522 if (
auto *MD =
Load->getMetadata(LLVMContext::MD_invariant_load))
1523 NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
1524 if (
auto *InvGroupMD =
Load->getMetadata(LLVMContext::MD_invariant_group))
1525 NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
1526 if (
auto *RangeMD =
Load->getMetadata(LLVMContext::MD_range))
1527 NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
1528 if (
auto *AccessMD =
Load->getMetadata(LLVMContext::MD_access_group))
1530 NewLoad->setMetadata(LLVMContext::MD_access_group, AccessMD);
1539 ValuesPerBlock.push_back(
1546 if (CriticalEdgePredAndLoad) {
1547 auto I = CriticalEdgePredAndLoad->
find(UnavailableBlock);
1548 if (
I != CriticalEdgePredAndLoad->
end()) {
1549 ++NumPRELoadMoved2CEPred;
1556 LeaderTable.erase(ValNo, OldLoad, OldLoad->
getParent());
1558 removeInstruction(OldLoad);
1567 Load->replaceAllUsesWith(V);
1568 if (isa<PHINode>(V))
1571 I->setDebugLoc(
Load->getDebugLoc());
1572 if (
V->getType()->isPtrOrPtrVectorTy())
1577 <<
"load eliminated by PRE";
1581bool GVNPass::PerformLoadPRE(
LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1582 UnavailBlkVect &UnavailableBlocks) {
1592 UnavailableBlocks.end());
1614 bool MustEnsureSafetyOfSpeculativeExecution =
1619 if (TmpBB == LoadBB)
1621 if (Blockers.count(TmpBB))
1633 MustEnsureSafetyOfSpeculativeExecution =
1634 MustEnsureSafetyOfSpeculativeExecution || ICF->
hasICF(TmpBB);
1645 FullyAvailableBlocks[AV.BB] = AvailabilityState::Available;
1646 for (
BasicBlock *UnavailableBB : UnavailableBlocks)
1647 FullyAvailableBlocks[UnavailableBB] = AvailabilityState::Unavailable;
1660 dbgs() <<
"COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
1661 << Pred->
getName() <<
"': " << *Load <<
'\n');
1672 dbgs() <<
"COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
1673 << Pred->
getName() <<
"': " << *Load <<
'\n');
1679 dbgs() <<
"COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
1680 << Pred->
getName() <<
"': " << *Load <<
'\n');
1689 <<
"COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '"
1690 << Pred->
getName() <<
"': " << *Load <<
'\n');
1694 if (
LoadInst *LI = findLoadToHoistIntoPred(Pred, LoadBB, Load))
1695 CriticalEdgePredAndLoad[Pred] = LI;
1700 PredLoads[Pred] =
nullptr;
1705 unsigned NumInsertPreds = PredLoads.
size() + CriticalEdgePredSplit.
size();
1706 unsigned NumUnavailablePreds = NumInsertPreds +
1707 CriticalEdgePredAndLoad.
size();
1708 assert(NumUnavailablePreds != 0 &&
1709 "Fully available value should already be eliminated!");
1710 (void)NumUnavailablePreds;
1716 if (NumInsertPreds > 1)
1721 if (MustEnsureSafetyOfSpeculativeExecution) {
1722 if (CriticalEdgePredSplit.
size())
1725 for (
auto &PL : PredLoads)
1729 for (
auto &CEP : CriticalEdgePredAndLoad)
1736 for (
BasicBlock *OrigPred : CriticalEdgePredSplit) {
1737 BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
1738 assert(!PredLoads.count(OrigPred) &&
"Split edges shouldn't be in map!");
1739 PredLoads[NewPred] =
nullptr;
1740 LLVM_DEBUG(
dbgs() <<
"Split critical edge " << OrigPred->getName() <<
"->"
1741 << LoadBB->
getName() <<
'\n');
1744 for (
auto &CEP : CriticalEdgePredAndLoad)
1745 PredLoads[CEP.first] =
nullptr;
1748 bool CanDoPRE =
true;
1751 for (
auto &PredLoad : PredLoads) {
1752 BasicBlock *UnavailablePred = PredLoad.first;
1762 Value *LoadPtr =
Load->getPointerOperand();
1764 while (Cur != LoadBB) {
1777 LoadPtr =
Address.translateWithInsertion(LoadBB, UnavailablePred, *DT,
1784 << *
Load->getPointerOperand() <<
"\n");
1789 PredLoad.second = LoadPtr;
1793 while (!NewInsts.
empty()) {
1803 return !CriticalEdgePredSplit.empty();
1809 LLVM_DEBUG(
dbgs() <<
"GVN REMOVING PRE LOAD: " << *Load <<
'\n');
1811 <<
" INSTS: " << *NewInsts.
back()
1819 I->updateLocationAfterHoist();
1828 eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, PredLoads,
1829 &CriticalEdgePredAndLoad);
1834bool GVNPass::performLoopLoadPRE(
LoadInst *Load,
1835 AvailValInBlkVect &ValuesPerBlock,
1836 UnavailBlkVect &UnavailableBlocks) {
1839 if (!L ||
L->getHeader() !=
Load->getParent())
1844 if (!Preheader || !Latch)
1847 Value *LoadPtr =
Load->getPointerOperand();
1849 if (!
L->isLoopInvariant(LoadPtr))
1859 for (
auto *Blocker : UnavailableBlocks) {
1861 if (!
L->contains(Blocker))
1885 if (Blocker->getTerminator()->mayWriteToMemory())
1888 LoopBlock = Blocker;
1901 AvailableLoads[LoopBlock] = LoadPtr;
1902 AvailableLoads[Preheader] = LoadPtr;
1904 LLVM_DEBUG(
dbgs() <<
"GVN REMOVING PRE LOOP LOAD: " << *Load <<
'\n');
1905 eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, AvailableLoads,
1913 using namespace ore;
1917 <<
"load of type " << NV(
"Type", Load->getType()) <<
" eliminated"
1918 << setExtraArgs() <<
" in favor of "
1925bool GVNPass::processNonLocalLoad(
LoadInst *Load) {
1927 if (
Load->getParent()->getParent()->hasFnAttribute(
1928 Attribute::SanitizeAddress) ||
1929 Load->getParent()->getParent()->hasFnAttribute(
1930 Attribute::SanitizeHWAddress))
1940 unsigned NumDeps = Deps.size();
1947 !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
1949 dbgs() <<
" has unknown dependencies\n";);
1953 bool Changed =
false;
1956 dyn_cast<GetElementPtrInst>(
Load->getOperand(0))) {
1957 for (
Use &U :
GEP->indices())
1959 Changed |= performScalarPRE(
I);
1963 AvailValInBlkVect ValuesPerBlock;
1964 UnavailBlkVect UnavailableBlocks;
1965 AnalyzeLoadAvailability(Load, Deps, ValuesPerBlock, UnavailableBlocks);
1969 if (ValuesPerBlock.empty())
1977 if (UnavailableBlocks.empty()) {
1978 LLVM_DEBUG(
dbgs() <<
"GVN REMOVING NONLOCAL LOAD: " << *Load <<
'\n');
1984 Load->replaceAllUsesWith(V);
1986 if (isa<PHINode>(V))
1992 if (
Load->getDebugLoc() &&
Load->getParent() ==
I->getParent())
1993 I->setDebugLoc(
Load->getDebugLoc());
1994 if (
V->getType()->isPtrOrPtrVectorTy())
2008 if (performLoopLoadPRE(Load, ValuesPerBlock, UnavailableBlocks) ||
2009 PerformLoadPRE(Load, ValuesPerBlock, UnavailableBlocks))
2017 auto *I = dyn_cast<Instruction>(U);
2018 return I && I->getParent() == BB;
2022bool GVNPass::processAssumeIntrinsic(
AssumeInst *IntrinsicI) {
2026 if (
Cond->isZero()) {
2045 for (
const auto &Acc : *AL) {
2046 if (
auto *Current = dyn_cast<MemoryUseOrDef>(&Acc))
2047 if (!Current->getMemoryInst()->comesBefore(NewS)) {
2048 FirstNonDom = Current;
2062 MSSAU->
insertDef(cast<MemoryDef>(NewDef),
false);
2072 if (isa<Constant>(V)) {
2080 bool Changed =
false;
2087 Changed |= propagateEquality(V, True, Edge,
false);
2093 ReplaceOperandsWithMap[
V] = True;
2115 if (
auto *CmpI = dyn_cast<CmpInst>(V)) {
2116 if (CmpI->isEquivalence()) {
2117 Value *CmpLHS = CmpI->getOperand(0);
2118 Value *CmpRHS = CmpI->getOperand(1);
2124 if (isa<Constant>(CmpLHS) && !isa<Constant>(CmpRHS))
2126 if (!isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))
2128 if ((isa<Argument>(CmpLHS) && isa<Argument>(CmpRHS)) ||
2129 (isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))) {
2140 if (isa<Constant>(CmpLHS) && isa<Constant>(CmpRHS))
2144 << *CmpLHS <<
" with "
2145 << *CmpRHS <<
" in block "
2146 << IntrinsicI->
getParent()->getName() <<
"\n");
2151 ReplaceOperandsWithMap[CmpLHS] = CmpRHS;
2164 I->replaceAllUsesWith(Repl);
2169bool GVNPass::processLoad(
LoadInst *L) {
2174 if (!
L->isUnordered())
2177 if (
L->use_empty()) {
2187 return processNonLocalLoad(L);
2194 dbgs() <<
"GVN: load ";
L->printAsOperand(
dbgs());
2195 dbgs() <<
" has unknown dependence\n";);
2199 auto AV = AnalyzeLoadAvailability(L, Dep,
L->getPointerOperand());
2222std::pair<uint32_t, bool>
2223GVNPass::ValueTable::assignExpNewValueNum(
Expression &Exp) {
2225 bool CreateNewValNum = !
e;
2226 if (CreateNewValNum) {
2227 Expressions.push_back(Exp);
2228 if (ExprIdx.size() < nextValueNumber + 1)
2229 ExprIdx.resize(nextValueNumber * 2);
2230 e = nextValueNumber;
2231 ExprIdx[nextValueNumber++] = nextExprNumber++;
2233 return {
e, CreateNewValNum};
2241 Gvn.LeaderTable.getLeaders(Num),
2242 [=](
const LeaderMap::LeaderTableEntry &L) { return L.BB == BB; });
2249 auto FindRes = PhiTranslateTable.find({Num, Pred});
2250 if (FindRes != PhiTranslateTable.end())
2251 return FindRes->second;
2252 uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
2253 PhiTranslateTable.insert({{Num, Pred}, NewNum});
2264 auto Leaders = Gvn.LeaderTable.getLeaders(Num);
2265 for (
const auto &Entry : Leaders) {
2266 Call = dyn_cast<CallInst>(Entry.Val);
2267 if (Call && Call->getParent() == PhiBlock)
2271 if (AA->doesNotAccessMemory(Call))
2274 if (!MD || !AA->onlyReadsMemory(Call))
2286 if (
D.getResult().isNonFuncLocal())
2297 if (
PHINode *PN = NumberingPhi[Num]) {
2298 for (
unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
2299 if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
2309 if (!areAllValsInBB(Num, PhiBlock, Gvn))
2312 if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
2316 for (
unsigned i = 0; i <
Exp.varargs.size(); i++) {
2320 if ((i > 1 &&
Exp.opcode == Instruction::InsertValue) ||
2321 (i > 0 &&
Exp.opcode == Instruction::ExtractValue) ||
2322 (i > 1 &&
Exp.opcode == Instruction::ShuffleVector))
2324 Exp.varargs[i] = phiTranslate(Pred, PhiBlock,
Exp.varargs[i], Gvn);
2327 if (
Exp.commutative) {
2328 assert(
Exp.varargs.size() >= 2 &&
"Unsupported commutative instruction!");
2329 if (
Exp.varargs[0] >
Exp.varargs[1]) {
2332 if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
2333 Exp.opcode = (Opcode << 8) |
2339 if (
uint32_t NewNum = expressionNumbering[Exp]) {
2340 if (
Exp.opcode == Instruction::Call && NewNum != Num)
2341 return areCallValsEqual(Num, NewNum, Pred, PhiBlock, Gvn) ? NewNum : Num;
2349void GVNPass::ValueTable::eraseTranslateCacheEntry(
2352 PhiTranslateTable.erase({Num, Pred});
2361 auto Leaders = LeaderTable.getLeaders(num);
2362 if (Leaders.empty())
2365 Value *Val =
nullptr;
2366 for (
const auto &Entry : Leaders) {
2369 if (isa<Constant>(Val))
2389 "No edge between these basic blocks!");
2390 return Pred !=
nullptr;
2393void GVNPass::assignBlockRPONumber(
Function &
F) {
2394 BlockRPONumber.clear();
2398 BlockRPONumber[BB] = NextBlockNumber++;
2399 InvalidBlockRPONumbers =
false;
2402bool GVNPass::replaceOperandsForInBlockEquality(
Instruction *Instr)
const {
2403 bool Changed =
false;
2404 for (
unsigned OpNum = 0; OpNum <
Instr->getNumOperands(); ++OpNum) {
2406 auto it = ReplaceOperandsWithMap.find(Operand);
2407 if (it != ReplaceOperandsWithMap.end()) {
2409 << *it->second <<
" in instruction " << *Instr <<
'\n');
2410 Instr->setOperand(OpNum, it->second);
2422bool GVNPass::propagateEquality(
Value *LHS,
Value *RHS,
2424 bool DominatesByEdge) {
2426 Worklist.
push_back(std::make_pair(LHS, RHS));
2427 bool Changed =
false;
2432 while (!Worklist.
empty()) {
2433 std::pair<Value*, Value*> Item = Worklist.
pop_back_val();
2434 LHS = Item.first;
RHS = Item.second;
2441 if (isa<Constant>(LHS) && isa<Constant>(RHS))
2445 if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
2447 assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) &&
"Unexpected value!");
2451 : cast<Instruction>(LHS)->getDataLayout();
2458 if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
2459 (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
2478 if (RootDominatesEnd && !isa<Instruction>(RHS) &&
2480 LeaderTable.insert(LVN, RHS, Root.
getEnd());
2487 auto canReplacePointersCallBack = [&
DL](
const Use &
U,
const Value *To) {
2490 unsigned NumReplacements =
2493 canReplacePointersCallBack)
2495 canReplacePointersCallBack);
2497 if (NumReplacements > 0) {
2499 NumGVNEqProp += NumReplacements;
2519 bool isKnownFalse = !isKnownTrue;
2534 if (
CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
2535 Value *Op0 =
Cmp->getOperand(0), *Op1 =
Cmp->getOperand(1);
2540 if (
Cmp->isEquivalence(isKnownFalse))
2541 Worklist.
push_back(std::make_pair(Op0, Op1));
2545 Constant *NotVal = ConstantInt::get(
Cmp->getType(), isKnownFalse);
2553 if (Num < NextNum) {
2555 if (NotCmp && isa<Instruction>(NotCmp)) {
2556 unsigned NumReplacements =
2561 Changed |= NumReplacements > 0;
2562 NumGVNEqProp += NumReplacements;
2572 if (RootDominatesEnd)
2573 LeaderTable.insert(Num, NotVal, Root.
getEnd());
2586 if (isa<DbgInfoIntrinsic>(
I))
2595 bool Changed =
false;
2596 if (!
I->use_empty()) {
2600 I->replaceAllUsesWith(V);
2608 if (MD &&
V->getType()->isPtrOrPtrVectorTy())
2615 if (
auto *Assume = dyn_cast<AssumeInst>(
I))
2616 return processAssumeIntrinsic(Assume);
2618 if (
LoadInst *Load = dyn_cast<LoadInst>(
I)) {
2619 if (processLoad(Load))
2623 LeaderTable.insert(Num, Load,
Load->getParent());
2630 if (!BI->isConditional())
2633 if (isa<Constant>(BI->getCondition()))
2634 return processFoldableCondBr(BI);
2636 Value *BranchCond = BI->getCondition();
2640 if (TrueSucc == FalseSucc)
2644 bool Changed =
false;
2648 Changed |= propagateEquality(BranchCond, TrueVal, TrueE,
true);
2652 Changed |= propagateEquality(BranchCond, FalseVal, FalseE,
true);
2659 Value *SwitchCond =
SI->getCondition();
2661 bool Changed =
false;
2666 ++SwitchEdges[Succ];
2672 if (SwitchEdges.
lookup(Dst) == 1) {
2674 Changed |= propagateEquality(SwitchCond, i->getCaseValue(), E,
true);
2682 if (
I->getType()->isVoidTy())
2690 if (isa<AllocaInst>(
I) ||
I->isTerminator() || isa<PHINode>(
I)) {
2691 LeaderTable.insert(Num,
I,
I->getParent());
2698 if (Num >= NextNum) {
2699 LeaderTable.insert(Num,
I,
I->getParent());
2705 Value *Repl = findLeader(
I->getParent(), Num);
2708 LeaderTable.insert(Num,
I,
I->getParent());
2742 InvalidBlockRPONumbers =
true;
2744 MSSAU = MSSA ? &Updater :
nullptr;
2746 bool Changed =
false;
2747 bool ShouldContinue =
true;
2757 Changed |= removedBlock;
2761 unsigned Iteration = 0;
2762 while (ShouldContinue) {
2765 ShouldContinue = iterateOnFunction(
F);
2766 Changed |= ShouldContinue;
2773 assignValNumForDeadCode();
2774 bool PREChanged =
true;
2775 while (PREChanged) {
2776 PREChanged = performPRE(
F);
2777 Changed |= PREChanged;
2786 cleanupGlobalSets();
2801 "We expect InstrsToErase to be empty across iterations");
2802 if (DeadBlocks.count(BB))
2806 ReplaceOperandsWithMap.clear();
2807 bool ChangedFunction =
false;
2815 for (
PHINode *PN : PHINodesToRemove) {
2817 removeInstruction(PN);
2822 if (!ReplaceOperandsWithMap.empty())
2823 ChangedFunction |= replaceOperandsForInBlockEquality(&*BI);
2824 ChangedFunction |= processInstruction(&*BI);
2826 if (InstrsToErase.
empty()) {
2832 NumGVNInstr += InstrsToErase.
size();
2835 bool AtStart = BI == BB->
begin();
2839 for (
auto *
I : InstrsToErase) {
2840 assert(
I->getParent() == BB &&
"Removing instruction from wrong block?");
2844 removeInstruction(
I);
2846 InstrsToErase.clear();
2854 return ChangedFunction;
2865 for (
unsigned i = 0, e =
Instr->getNumOperands(); i != e; ++i) {
2867 if (isa<Argument>(
Op) || isa<Constant>(
Op) || isa<GlobalValue>(
Op))
2879 if (
Value *V = findLeader(Pred, TValNo)) {
2880 Instr->setOperand(i, V);
2903 LeaderTable.insert(Num, Instr, Pred);
2907bool GVNPass::performScalarPRE(
Instruction *CurInst) {
2908 if (isa<AllocaInst>(CurInst) || CurInst->
isTerminator() ||
2911 isa<DbgInfoIntrinsic>(CurInst))
2918 if (isa<CmpInst>(CurInst))
2928 if (isa<GetElementPtrInst>(CurInst))
2931 if (
auto *CallB = dyn_cast<CallBase>(CurInst)) {
2933 if (CallB->isInlineAsm())
2945 unsigned NumWith = 0;
2946 unsigned NumWithout = 0;
2951 if (InvalidBlockRPONumbers)
2952 assignBlockRPONumber(*CurrentBlock->
getParent());
2963 assert(BlockRPONumber.count(
P) && BlockRPONumber.count(CurrentBlock) &&
2964 "Invalid BlockRPONumber map.");
2965 if (BlockRPONumber[
P] >= BlockRPONumber[CurrentBlock]) {
2971 Value *predV = findLeader(
P, TValNo);
2976 }
else if (predV == CurInst) {
2988 if (NumWithout > 1 || NumWith == 0)
2996 if (NumWithout != 0) {
3015 toSplit.push_back(std::make_pair(PREPred->
getTerminator(), SuccNum));
3019 PREInstr = CurInst->
clone();
3020 if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
3023 verifyRemoved(PREInstr);
3032 assert(PREInstr !=
nullptr || NumWithout == 0);
3038 CurInst->
getName() +
".pre-phi");
3039 Phi->insertBefore(CurrentBlock->begin());
3040 for (
unsigned i = 0, e = predMap.
size(); i != e; ++i) {
3041 if (
Value *V = predMap[i].first) {
3045 Phi->addIncoming(V, predMap[i].second);
3047 Phi->addIncoming(PREInstr, PREPred);
3054 LeaderTable.insert(ValNo, Phi, CurrentBlock);
3057 if (MD &&
Phi->getType()->isPtrOrPtrVectorTy())
3060 LeaderTable.erase(ValNo, CurInst, CurrentBlock);
3063 removeInstruction(CurInst);
3072 bool Changed =
false;
3075 if (CurrentBlock == &
F.getEntryBlock())
3079 if (CurrentBlock->isEHPad())
3083 BE = CurrentBlock->end();
3086 Changed |= performScalarPRE(CurInst);
3090 if (splitCriticalEdges())
3107 InvalidBlockRPONumbers =
true;
3114bool GVNPass::splitCriticalEdges() {
3115 if (toSplit.empty())
3118 bool Changed =
false;
3120 std::pair<Instruction *, unsigned> Edge = toSplit.pop_back_val();
3124 }
while (!toSplit.empty());
3128 InvalidBlockRPONumbers =
true;
3134bool GVNPass::iterateOnFunction(
Function &
F) {
3135 cleanupGlobalSets();
3138 bool Changed =
false;
3145 Changed |= processBlock(BB);
3150void GVNPass::cleanupGlobalSets() {
3152 LeaderTable.clear();
3153 BlockRPONumber.clear();
3155 InvalidBlockRPONumbers =
true;
3166 I->eraseFromParent();
3171void GVNPass::verifyRemoved(
const Instruction *Inst)
const {
3173 LeaderTable.verifyRemoved(Inst);
3185 while (!NewDead.
empty()) {
3187 if (DeadBlocks.count(
D))
3193 DeadBlocks.insert(Dom.
begin(), Dom.
end());
3198 if (DeadBlocks.count(S))
3201 bool AllPredDead =
true;
3203 if (!DeadBlocks.count(
P)) {
3204 AllPredDead =
false;
3225 if (DeadBlocks.count(
B))
3232 if (!DeadBlocks.count(
P))
3238 DeadBlocks.insert(
P = S);
3244 if (!DeadBlocks.count(
P))
3268bool GVNPass::processFoldableCondBr(
BranchInst *BI) {
3282 if (DeadBlocks.count(DeadRoot))
3286 DeadRoot = splitCriticalEdges(BI->
getParent(), DeadRoot);
3288 addDeadBlock(DeadRoot);
3296void GVNPass::assignValNumForDeadCode() {
3300 LeaderTable.insert(ValNum, &Inst, BB);
3312 .setMemDep(MemDepAnalysis)
3313 .setMemorySSA(MemSSAAnalysis)) {
3318 if (skipFunction(
F))
3321 auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>();
3322 if (Impl.isMemorySSAEnabled() && !MSSAWP)
3323 MSSAWP = &getAnalysis<MemorySSAWrapperPass>();
3325 return Impl.runImpl(
3326 F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
F),
3327 getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
3328 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
F),
3329 getAnalysis<AAResultsWrapperPass>().getAAResults(),
3330 Impl.isMemDepEnabled()
3331 ? &getAnalysis<MemoryDependenceWrapperPass>().getMemDep()
3333 getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
3334 &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(),
3335 MSSAWP ? &MSSAWP->getMSSA() :
nullptr);
3343 if (Impl.isMemDepEnabled())
3352 if (Impl.isMemorySSAEnabled())
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the simple types necessary to represent the attributes associated with functions a...
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static RegisterPass< DebugifyFunctionPass > DF("debugify-function", "Attach debug info to a function")
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool hasUsersIn(Value *V, BasicBlock *BB)
static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo, DominatorTree *DT, OptimizationRemarkEmitter *ORE)
Try to locate the three instruction involved in a missed load-elimination case that is due to an inte...
static void reportLoadElim(LoadInst *Load, Value *AvailableValue, OptimizationRemarkEmitter *ORE)
static cl::opt< uint32_t > MaxNumInsnsPerBlock("gvn-max-num-insns", cl::Hidden, cl::init(100), cl::desc("Max number of instructions to scan in each basic block in GVN " "(default = 100)"))
static cl::opt< bool > GVNEnableMemDep("enable-gvn-memdep", cl::init(true))
static cl::opt< bool > GVNEnableLoadInLoopPRE("enable-load-in-loop-pre", cl::init(true))
static cl::opt< uint32_t > MaxNumDeps("gvn-max-num-deps", cl::Hidden, cl::init(100), cl::desc("Max number of dependences to attempt Load PRE (default = 100)"))
static cl::opt< bool > GVNEnableMemorySSA("enable-gvn-memoryssa", cl::init(false))
static Value * ConstructSSAForLoadSet(LoadInst *Load, SmallVectorImpl< AvailableValueInBlock > &ValuesPerBlock, GVNPass &gvn)
Given a set of loads specified by ValuesPerBlock, construct SSA form, allowing us to eliminate Load.
static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E, DominatorTree *DT)
There is an edge from 'Src' to 'Dst'.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB, DenseMap< BasicBlock *, AvailabilityState > &FullyAvailableBlocks)
Return true if we can prove that the value we're analyzing is fully available in the specified block.
static Value * findDominatingValue(const MemoryLocation &Loc, Type *LoadTy, Instruction *From, AAResults *AA)
static bool isLifetimeStart(const Instruction *Inst)
static cl::opt< bool > GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre", cl::init(false))
static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl)
static void replaceValuesPerBlockEntry(SmallVectorImpl< AvailableValueInBlock > &ValuesPerBlock, Value *OldValue, Value *NewValue)
If the specified OldValue exists in ValuesPerBlock, replace its value with NewValue.
@ Unavailable
We know the block is not fully available. This is a fixpoint.
@ Available
We know the block is fully available. This is a fixpoint.
@ SpeculativelyAvailable
We do not know whether the block is fully available or not, but we are currently speculating that it ...
static cl::opt< bool > GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden)
static cl::opt< uint32_t > MaxNumVisitedInsts("gvn-max-num-visited-insts", cl::Hidden, cl::init(100), cl::desc("Max number of visited instructions when trying to find " "dominating value of select dependency (default = 100)"))
static cl::opt< uint32_t > MaxBBSpeculations("gvn-max-block-speculations", cl::Hidden, cl::init(600), cl::desc("Max number of blocks we're willing to speculate on (and recurse " "into) when deducing if a value is fully available or not in GVN " "(default = 600)"))
static bool liesBetween(const Instruction *From, Instruction *Between, const Instruction *To, DominatorTree *DT)
Assuming To can be reached from both From and Between, does Between lie on every path from From to To...
static cl::opt< bool > GVNEnableLoadPRE("enable-load-pre", cl::init(true))
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
This is the interface for a simple mod/ref and alias analysis over globals.
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
static bool lookup(const GsymReader &GR, DataExtractor &Data, uint64_t &Offset, uint64_t BaseAddr, uint64_t Addr, SourceLocations &SrcLocs, llvm::Error &Err)
A Lookup helper functions.
This file implements a map that provides insertion order iteration.
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
uint64_t IntrinsicInst * II
ppc ctr loops PowerPC CTR Loops Verify
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static const uint32_t IV[8]
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Class for arbitrary precision integers.
A container for analyses that lazily runs them and caches their results.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
This represents the llvm.assume intrinsic.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
bool isEmpty() const
Return true if there are no attributes.
std::optional< AttributeList > intersectWith(LLVMContext &C, AttributeList Other) const
Try to intersect this AttributeList with Other.
const BasicBlock * getEnd() const
const BasicBlock * getStart() const
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
LLVMContext & getContext() const
Get the context in which this basic block lives.
bool isEHPad() const
Return true if this basic block is an exception handling block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
Conditional or Unconditional Branch instruction.
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
This is the shared class of boolean and integer constants.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
static ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getFalse(LLVMContext &Context)
This is an important base class in LLVM.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Analysis pass which computes a DominatorTree.
void getDescendants(NodeT *R, SmallVectorImpl< NodeT * > &Result) const
Get all nodes dominated by R, including R itself.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Class representing an expression and its matching format.
FunctionPass class - This class is used to implement most global optimizations.
Represents calls to the gc.relocate intrinsic.
This class holds the mapping between values and value numbers.
uint32_t lookupOrAddCmp(unsigned Opcode, CmpInst::Predicate Pred, Value *LHS, Value *RHS)
Returns the value number of the given comparison, assigning it a new number if it did not have one be...
uint32_t getNextUnusedValueNumber()
uint32_t lookupOrAdd(Value *V)
lookup_or_add - Returns the value number for the specified value, assigning it a new number if it did...
uint32_t lookup(Value *V, bool Verify=true) const
Returns the value number of the specified value.
void setAliasAnalysis(AAResults *A)
void clear()
Remove all entries from the ValueTable.
bool exists(Value *V) const
Returns true if a value number exists for the specified value.
uint32_t phiTranslate(const BasicBlock *BB, const BasicBlock *PhiBlock, uint32_t Num, GVNPass &Gvn)
Wrap phiTranslateImpl to provide caching functionality.
void setMemDep(MemoryDependenceResults *M)
void erase(Value *v)
Remove a value from the value numbering.
void add(Value *V, uint32_t num)
add - Insert a value into the table with a specified value number.
void setDomTree(DominatorTree *D)
void eraseTranslateCacheEntry(uint32_t Num, const BasicBlock &CurrBlock)
Erase stale entry from phiTranslate cache so phiTranslate can be computed again.
void verifyRemoved(const Value *) const
verifyRemoved - Verify that the value is removed from all internal data structures.
The core GVN pass object.
friend class gvn::GVNLegacyPass
bool isPREEnabled() const
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Run the pass over the function.
AAResults * getAliasAnalysis() const
bool isLoadPREEnabled() const
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
bool isMemorySSAEnabled() const
DominatorTree & getDominatorTree() const
bool isLoadInLoopPREEnabled() const
bool isLoadPRESplitBackedgeEnabled() const
void markInstructionForDeletion(Instruction *I)
This removes the specified instruction from our various maps and marks it for deletion.
bool isMemDepEnabled() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Legacy wrapper pass to provide the GlobalsAAResult object.
This class allows to keep track on instructions with implicit control flow.
bool isDominatedByICFIFromSameBlock(const Instruction *Insn)
Returns true if the first ICFI of Insn's block exists and dominates Insn.
bool hasICF(const BasicBlock *BB)
Returns true if at least one instruction from the given basic block has implicit control flow.
void clear()
Invalidates all information from this tracking.
void removeUsersOf(const Instruction *Inst)
Notifies this tracking that we are going to replace all uses of Inst.
void insertInstructionTo(const Instruction *Inst, const BasicBlock *BB)
Notifies this tracking that we are going to insert a new instruction Inst to the basic block BB.
void removeInstruction(const Instruction *Inst)
Notifies this tracking that we are going to remove the instruction Inst It makes all necessary update...
Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following:
unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const Instruction * getPrevNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the previous non-debug instruction in the same basic block as 'this',...
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
bool mayHaveSideEffects() const LLVM_READONLY
Return true if the instruction may have side effects.
bool isTerminator() const
bool mayReadFromMemory() const LLVM_READONLY
Return true if this instruction may read memory.
void dropUnknownNonDebugMetadata(ArrayRef< unsigned > KnownIDs={})
Drop all unknown metadata except for debug locations.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Represents a single loop in the control flow graph.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
This class implements a map that also provides access to all stored values in a deterministic order.
iterator find(const KeyT &Key)
A memory dependence query can return one of three different answers.
bool isClobber() const
Tests if this MemDepResult represents a query that is an instruction clobber dependency.
bool isNonLocal() const
Tests if this MemDepResult represents a query that is transparent to the start of the block,...
bool isDef() const
Tests if this MemDepResult represents a query that is an instruction definition dependency.
bool isLocal() const
Tests if this MemDepResult represents a valid local query (Clobber/Def).
Instruction * getInst() const
If this is a normal dependency, returns the instruction that is depended on.
This is the common base class for memset/memcpy/memmove.
An analysis that produces MemoryDependenceResults for a function.
Provides a lazy, caching interface for making common memory aliasing information queries,...
std::vector< NonLocalDepEntry > NonLocalDepInfo
void invalidateCachedPredecessors()
Clears the PredIteratorCache info.
void invalidateCachedPointerInfo(Value *Ptr)
Invalidates cached information about the specified pointer, because it may be too conservative in mem...
std::optional< int32_t > getClobberOffset(LoadInst *DepInst) const
Return the clobber offset to dependent instruction.
void removeInstruction(Instruction *InstToRemove)
Removes an instruction from the dependence analysis, updating the dependence of instructions that pre...
MemDepResult getDependency(Instruction *QueryInst)
Returns the instruction on which a memory operation depends.
const NonLocalDepInfo & getNonLocalCallDependency(CallBase *QueryCall)
Perform a full dependency query for the specified call, returning the set of blocks that the value is...
void getNonLocalPointerDependency(Instruction *QueryInst, SmallVectorImpl< NonLocalDepResult > &Result)
Perform a full dependency query for an access to the QueryInst's specified memory location,...
A wrapper analysis pass for the legacy pass manager that exposes a MemoryDepnedenceResults instance.
Representation for a specific memory location.
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
const Value * Ptr
The address of the start of the location.
An analysis that produces MemorySSA for a function.
MemorySSA * getMemorySSA() const
Get handle on MemorySSA.
MemoryUseOrDef * createMemoryAccessBefore(Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt)
Create a MemoryAccess in MemorySSA before an existing MemoryAccess.
void insertDef(MemoryDef *Def, bool RenameUses=false)
Insert a definition into the MemorySSA IR.
void insertUse(MemoryUse *Use, bool RenameUses=false)
MemoryAccess * createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition, const BasicBlock *BB, MemorySSA::InsertionPlace Point, bool CreationMustSucceed=true)
Create a MemoryAccess in MemorySSA at a specified point in a block.
void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
Legacy analysis pass which computes MemorySSA.
Encapsulates MemorySSA, including all data associated with memory accesses.
const AccessList * getBlockAccesses(const BasicBlock *BB) const
Return the list of MemoryAccess's for a given basic block.
void verifyMemorySSA(VerificationLevel=VerificationLevel::Fast) const
Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right ...
Class that has the common methods + fields of memory uses/defs.
This is an entry in the NonLocalDepInfo cache.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
PHITransAddr - An address value which tracks and handles phi translation.
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
void preserve()
Mark an analysis as preserved.
Helper class for SSA formation on a set of values defined in multiple blocks.
void Initialize(Type *Ty, StringRef Name)
Reset this object to get ready for a new set of SSA updates with type 'Ty'.
Value * GetValueInMiddleOfBlock(BasicBlock *BB)
Construct SSA form, materializing a value that is live in the middle of the specified block.
bool HasValueForBlock(BasicBlock *BB) const
Return true if the SSAUpdater already has a value for the specified block.
void AddAvailableValue(BasicBlock *BB, Value *V)
Indicate that a rewritten value is available in the specified block with the specified value.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
const Value * getCondition() const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
SmallVector & operator=(const SmallVector &RHS)
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isVoidTy() const
Return true if this is 'void'.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
bool canBeFreed() const
Return true if the memory object referred to by V can by freed in the scope for which the SSA value d...
void deleteValue()
Delete a pointer to a generic Value.
StringRef getName() const
Return a constant reference to the value's name.
Represents an op.with.overflow intrinsic.
An efficient, type-erasing, non-owning reference to a callable.
GVNLegacyPass(bool MemDepAnalysis=GVNEnableMemDep, bool MemSSAAnalysis=GVNEnableMemorySSA)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
An opaque object representing a hash code.
const ParentTy * getParent() const
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
bool match(Val *V, const Pattern &P)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
Value * getValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
If analyzeLoadFromClobberingStore/Load returned an offset, this function can be used to actually perf...
int analyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr, StoreInst *DepSI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the store at D...
Value * getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
If analyzeLoadFromClobberingMemInst returned an offset, this function can be used to actually perform...
int analyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, LoadInst *DepLI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the load at De...
int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr, MemIntrinsic *DepMI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the memory int...
bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy, const DataLayout &DL)
Return true if CoerceAvailableValueToLoadType would succeed if it was called.
initializer< Ty > init(const Ty &Val)
A private "module" namespace for types and utilities used by GVN.
NodeAddr< InstrNode * > Instr
NodeAddr< PhiNode * > Phi
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
hash_code hash_value(const FixedPointSemantics &Val)
Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
unsigned replaceDominatedUsesWithIf(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge, function_ref< bool(const Use &U, const Value *To)> ShouldReplace)
Replace each use of 'From' with 'To' if that use is dominated by the given edge and the callback Shou...
unsigned GetSuccessorNumber(const BasicBlock *BB, const BasicBlock *Succ)
Search for the specified successor of basic block BB and return its position in the terminator instru...
auto pred_end(const MachineBasicBlock *BB)
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
auto successors(const MachineBasicBlock *BB)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
bool isAssumeWithEmptyBundle(const AssumeInst &Assume)
Return true iff the operand bundles of the provided llvm.assume doesn't contain any valuable informat...
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
void initializeGVNLegacyPassPass(PassRegistry &)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
bool canReplacePointersInUseIfEqual(const Use &U, const Value *To, const DataLayout &DL)
bool canReplacePointersIfEqual(const Value *From, const Value *To, const DataLayout &DL)
Returns true if a pointer value From can be replaced with another pointer value \To if they are deeme...
bool isModSet(const ModRefInfo MRI)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void patchReplacementInstruction(Instruction *I, Value *Repl)
Patch the replacement so that it is not more restrictive than the value being replaced.
bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge)
Replace each use of 'From' with 'To' if that use is dominated by the given edge.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
RNSuccIterator< NodeRef, BlockT, RegionT > succ_begin(NodeRef Node)
@ Global
Append to llvm.global_dtors.
void combineMetadataForCSE(Instruction *K, const Instruction *J, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
bool VerifyMemorySSA
Enables verification of MemorySSA.
RNSuccIterator< NodeRef, BlockT, RegionT > succ_end(NodeRef Node)
bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert if before I.
bool MergeBlockIntoPredecessor(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, MemoryDependenceResults *MemDep=nullptr, bool PredecessorWithTwoSuccessors=false, DominatorTree *DT=nullptr)
Attempts to merge a block into its predecessor, if possible.
FunctionPass * createGVNPass()
Create a legacy GVN pass.
BasicBlock * SplitCriticalEdge(Instruction *TI, unsigned SuccNum, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions(), const Twine &BBName="")
If this edge is a critical edge, insert a new node to split the critical edge.
bool isCriticalEdge(const Instruction *TI, unsigned SuccNum, bool AllowIdenticalEdges=false)
Return true if the specified edge is a critical edge.
constexpr unsigned BitWidth
auto pred_begin(const MachineBasicBlock *BB)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
bool pred_empty(const BasicBlock *BB)
iterator_range< df_iterator< T > > depth_first(const T &G)
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
bool EliminateDuplicatePHINodes(BasicBlock *BB)
Check for and eliminate duplicate PHI nodes in this block.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in ExclusionSet, returning true if uncertain.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
Option class for critical edge splitting.
static GVNPass::Expression getTombstoneKey()
static bool isEqual(const GVNPass::Expression &LHS, const GVNPass::Expression &RHS)
static unsigned getHashValue(const GVNPass::Expression &e)
static GVNPass::Expression getEmptyKey()
An information struct used to provide DenseMap with the various necessary components for a given value type T.
A set of parameters to control various transforms performed by GVN pass.
std::optional< bool > AllowLoadPRESplitBackedge
std::optional< bool > AllowPRE
std::optional< bool > AllowLoadInLoopPRE
std::optional< bool > AllowMemDep
std::optional< bool > AllowLoadPRE
std::optional< bool > AllowMemorySSA
SmallVector< uint32_t, 4 > varargs
bool operator==(const Expression &other) const
friend hash_code hash_value(const Expression &Value)
Expression(uint32_t o=~2U)
A CRTP mix-in to automatically provide informational APIs needed for passes.
A MapVector that performs no allocations if smaller than a certain size.
Represents an AvailableValue which can be rematerialized at the end of the associated BasicBlock.
static AvailableValueInBlock get(BasicBlock *BB, Value *V, unsigned Offset=0)
static AvailableValueInBlock getUndef(BasicBlock *BB)
static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV)
Value * MaterializeAdjustedValue(LoadInst *Load, GVNPass &gvn) const
Emit code at the end of this block to adjust the value defined here to the specified type.
AvailableValue AV
AV - The actual available value.
static AvailableValueInBlock getSelect(BasicBlock *BB, SelectInst *Sel, Value *V1, Value *V2)
BasicBlock * BB
BB - The basic block in question.
Represents a particular available value that we know how to materialize.
unsigned Offset
Offset - The byte offset in Val that is interesting for the load query.
bool isSimpleValue() const
static AvailableValue getSelect(SelectInst *Sel, Value *V1, Value *V2)
bool isCoercedLoadValue() const
static AvailableValue get(Value *V, unsigned Offset=0)
ValType Kind
Kind of the live-out value.
LoadInst * getCoercedLoadValue() const
static AvailableValue getLoad(LoadInst *Load, unsigned Offset=0)
static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset=0)
bool isUndefValue() const
bool isSelectValue() const
Value * Val
Val - The value that is live out of the block.
Value * V1
V1, V2 - The dominating non-clobbered values of SelectVal.
Value * MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt, GVNPass &gvn) const
Emit code at the specified insertion point to adjust the value defined here to the specified type.
static AvailableValue getUndef()
SelectInst * getSelectValue() const
Value * getSimpleValue() const
bool isMemIntrinValue() const
MemIntrinsic * getMemIntrinValue() const