71#define DEBUG_TYPE "machine-sink"
75 cl::desc(
"Split critical edges during machine sinking"),
80 cl::desc(
"Use block frequency info to find successors to sink"),
84 "machine-sink-split-probability-threshold",
86 "Percentage threshold for splitting single-instruction critical edge. "
87 "If the branch threshold is higher than this threshold, we allow "
88 "speculative execution of up to 1 instruction to avoid branching to "
89 "splitted critical edge"),
93 "machine-sink-load-instrs-threshold",
94 cl::desc(
"Do not try to find alias store for a load if there is a in-path "
95 "block whose instruction number is higher than this threshold."),
99 "machine-sink-load-blocks-threshold",
100 cl::desc(
"Do not try to find alias store for a load if the block number in "
101 "the straight line is higher than this threshold."),
106 cl::desc(
"Sink instructions into cycles to avoid "
111 "machine-sink-cycle-limit",
113 "The maximum number of instructions considered for cycle sinking."),
116STATISTIC(NumSunk,
"Number of machine instructions sunk");
117STATISTIC(NumCycleSunk,
"Number of machine instructions sunk into a cycle");
120STATISTIC(NumPostRACopySink,
"Number of copies sunk after RA");
126class MachineSinking {
164 using AllSuccsCache =
178 using SinkItem = std::pair<MachineInstr *, MachineBasicBlock *>;
197 CachedRegisterPressure;
199 bool EnableSinkAndFold;
208 : DT(DT), PDT(PDT), CI(CI), PSI(PSI), MBFI(MBFI), MBPI(MBPI),
AA(
AA),
209 LIS(LIS),
SI(
SI), LV(LV), MLI(MLI),
210 EnableSinkAndFold(EnableSinkAndFold) {}
214 void releaseMemory() {
215 CEBCandidates.
clear();
216 CEMergeCandidates.
clear();
246 AllSuccsCache &AllSuccessors);
256 bool &LocalUse)
const;
259 AllSuccsCache &AllSuccessors);
271 AllSuccsCache &AllSuccessors);
280 AllSuccsCache &AllSuccessors)
const;
283 bool UseCache =
true);
285 bool registerPressureSetExceedsLimit(
unsigned NRegs,
320char MachineSinkingLegacy::ID = 0;
345 if (!TII->isBasicBlockPrologue(*PI))
347 for (auto &MO : MI.operands()) {
350 Register Reg = MO.getReg();
354 if (Reg.isPhysical() &&
355 (TII->isIgnorableUse(MO) || (MRI && MRI->isConstantPhysReg(Reg))))
357 if (PI->modifiesRegister(Reg, TRI))
360 if (PI->readsRegister(Reg, TRI))
363 auto *DefOp = PI->findRegisterDefOperand(Reg, TRI, false, true);
364 if (DefOp && !DefOp->isDead())
373bool MachineSinking::PerformTrivialForwardCoalescing(
MachineInstr &
MI,
384 const TargetRegisterClass *SRC = MRI->
getRegClass(SrcReg);
385 const TargetRegisterClass *DRC = MRI->
getRegClass(DstReg);
395 MI.eraseFromParent();
405bool MachineSinking::PerformSinkAndFold(MachineInstr &
MI,
406 MachineBasicBlock *
MBB) {
407 if (
MI.isCopy() ||
MI.mayLoadOrStore() ||
408 MI.getOpcode() == TargetOpcode::REG_SEQUENCE)
416 bool SawStore =
true;
417 if (!
MI.isSafeToMove(SawStore))
422 if (
MI.isConvergent())
431 for (
const MachineOperand &MO :
MI.operands()) {
432 if (MO.isImm() || MO.isRegMask() || MO.isRegLiveOut() || MO.isMetadata() ||
433 MO.isMCSymbol() || MO.isDbgInstrRef() || MO.isCFIIndex() ||
434 MO.isIntrinsicID() || MO.isPredicate() || MO.isShuffleMask())
453 else if (UsedRegB == 0)
471 using SinkInfo = std::pair<MachineInstr *, ExtAddrMode>;
475 const TargetRegisterClass *RC = MRI->
getRegClass(DefReg);
476 const TargetRegisterClass *RCA =
477 UsedRegA == 0 ? nullptr : MRI->
getRegClass(UsedRegA);
478 const TargetRegisterClass *RCB =
479 UsedRegB == 0 ? nullptr : MRI->
getRegClass(UsedRegB);
482 while (!Worklist.
empty()) {
487 MachineInstr &UseInst = *MO.getParent();
490 if (
const MachineOperand &O = UseInst.
getOperand(0);
O.isReg())
511 return MO.isReg() && MO.getReg() == Reg;
515 if (!
TII->canFoldIntoAddrMode(UseInst,
Reg,
MI, AM))
532 if (RCA ==
nullptr) {
537 unsigned NRegs = !!RCA + !!RCB;
543 if (RCB ==
nullptr) {
544 if (registerPressureSetExceedsLimit(NRegs, RCA,
MBB))
546 }
else if (registerPressureSetExceedsLimit(1, RCA,
MBB) ||
547 registerPressureSetExceedsLimit(1, RCB,
MBB)) {
557 if (SinkInto.
empty())
561 for (
auto &[SinkDst, MaybeAM] : SinkInto) {
562 MachineInstr *
New =
nullptr;
565 if (SinkDst->isCopy()) {
578 Register DstReg = SinkDst->getOperand(0).getReg();
579 TII->reMaterialize(*SinkDst->getParent(), InsertPt, DstReg, 0,
MI);
580 New = &*std::prev(InsertPt);
581 if (!
New->getDebugLoc())
582 New->setDebugLoc(SinkDst->getDebugLoc());
593 New =
TII->emitLdStWithAddr(*SinkDst, MaybeAM);
605 if (SinkDst->mayStore() && !SinkDst->hasOrderedMemoryRef())
606 StoreInstrCache.clear();
607 SinkDst->eraseFromParent();
615 while (!Worklist.
empty()) {
619 assert((
U->isCopy() ||
U->isDebugInstr()) &&
620 "Only debug uses and copies must remain");
622 Worklist.
push_back(
U->getOperand(0).getReg());
628 for (MachineOperand *MO :
Cleanup) {
631 I->eraseFromParent();
638 MI.eraseFromParent();
646bool MachineSinking::AllUsesDominatedByBlock(
Register Reg,
647 MachineBasicBlock *
MBB,
648 MachineBasicBlock *DefMBB,
650 bool &LocalUse)
const {
672 MachineInstr *UseInst = MO.getParent();
673 unsigned OpNo = MO.getOperandNo();
674 MachineBasicBlock *UseBlock = UseInst->getParent();
675 return UseBlock == MBB && UseInst->isPHI() &&
676 UseInst->getOperand(OpNo + 1).getMBB() == DefMBB;
685 unsigned OpNo = &MO - &UseInst->
getOperand(0);
686 MachineBasicBlock *UseBlock = UseInst->
getParent();
687 if (UseInst->
isPHI()) {
691 }
else if (UseBlock == DefMBB) {
707 assert(
MI.mayLoad() &&
"Expected MI that loads!");
711 if (
MI.memoperands_empty())
716 if (PSV->isGOT() || PSV->isConstantPool())
722void MachineSinking::FindCycleSinkCandidates(
724 SmallVectorImpl<MachineInstr *> &Candidates) {
725 for (
auto &
MI : *BB) {
727 if (
MI.isMetaInstruction()) {
728 LLVM_DEBUG(
dbgs() <<
"CycleSink: not sinking meta instruction\n");
732 LLVM_DEBUG(
dbgs() <<
"CycleSink: Instruction not a candidate for this "
737 LLVM_DEBUG(
dbgs() <<
"CycleSink: Instruction is not cycle invariant\n");
740 bool DontMoveAcrossStore =
true;
741 if (!
MI.isSafeToMove(DontMoveAcrossStore)) {
742 LLVM_DEBUG(
dbgs() <<
"CycleSink: Instruction not safe to move.\n");
746 LLVM_DEBUG(
dbgs() <<
"CycleSink: Dont sink GOT or constant pool loads\n");
749 if (
MI.isConvergent())
752 const MachineOperand &MO =
MI.getOperand(0);
758 LLVM_DEBUG(
dbgs() <<
"CycleSink: Instruction added as candidate.\n");
770 .getCachedResult<ProfileSummaryAnalysis>(
783 MachineSinking Impl(EnableSinkAndFold, DT, PDT, LV, MLI,
SI, LIS, CI, PSI,
798 OS << MapClassName2PassName(
name());
799 if (EnableSinkAndFold)
800 OS <<
"<enable-sink-fold>";
810 auto *DT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
812 &getAnalysis<MachinePostDominatorTreeWrapperPass>().getPostDomTree();
813 auto *CI = &getAnalysis<MachineCycleInfoWrapperPass>().getCycleInfo();
814 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
817 ? &getAnalysis<MachineBlockFrequencyInfoWrapperPass>().getMBFI()
820 &getAnalysis<MachineBranchProbabilityInfoWrapperPass>().getMBPI();
821 auto *
AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
823 auto *LISWrapper = getAnalysisIfAvailable<LiveIntervalsWrapperPass>();
824 auto *LIS = LISWrapper ? &LISWrapper->getLIS() :
nullptr;
825 auto *SIWrapper = getAnalysisIfAvailable<SlotIndexesWrapperPass>();
826 auto *
SI = SIWrapper ? &SIWrapper->getSI() :
nullptr;
827 auto *LVWrapper = getAnalysisIfAvailable<LiveVariablesWrapperPass>();
828 auto *LV = LVWrapper ? &LVWrapper->getLV() :
nullptr;
829 auto *MLIWrapper = getAnalysisIfAvailable<MachineLoopInfoWrapperPass>();
830 auto *MLI = MLIWrapper ? &MLIWrapper->getLI() :
nullptr;
832 MachineSinking Impl(EnableSinkAndFold, DT, PDT, LV, MLI,
SI, LIS, CI, PSI,
847 bool EverMadeChange =
false;
850 bool MadeChange =
false;
853 CEBCandidates.clear();
854 CEMergeCandidates.clear();
861 MachineDomTreeUpdater::UpdateStrategy::Lazy);
862 for (
const auto &Pair : ToSplit) {
863 auto NewSucc = Pair.first->SplitCriticalEdge(
864 Pair.second, {LIS, SI, LV, MLI},
nullptr, &MDTU);
865 if (NewSucc !=
nullptr) {
882 EverMadeChange =
true;
887 SchedModel.
init(STI);
888 bool HasHighPressure;
890 DenseMap<SinkItem, MachineInstr *> SunkInstrs;
892 enum CycleSinkStage { COPY, LOW_LATENCY, AGGRESSIVE, END };
893 for (
unsigned Stage = CycleSinkStage::COPY; Stage != CycleSinkStage::END;
894 ++Stage, SunkInstrs.
clear()) {
895 HasHighPressure =
false;
897 for (
auto *
Cycle : Cycles) {
903 SmallVector<MachineInstr *, 8> Candidates;
904 FindCycleSinkCandidates(
Cycle, Preheader, Candidates);
913 if (Stage == CycleSinkStage::COPY) {
916 <<
"CycleSink: Limit reached of instructions to "
927 if (Stage == CycleSinkStage::LOW_LATENCY &&
928 !
TII->hasLowDefLatency(SchedModel, *
I, 0))
931 if (!aggressivelySinkIntoCycle(
Cycle, *
I, SunkInstrs))
933 EverMadeChange =
true;
938 if (!HasHighPressure)
939 HasHighPressure = registerPressureExceedsLimit(*Preheader);
941 if (!HasHighPressure)
946 HasStoreCache.clear();
947 StoreInstrCache.clear();
950 for (
auto I : RegsToClearKillFlags)
952 RegsToClearKillFlags.clear();
955 return EverMadeChange;
958bool MachineSinking::ProcessBlock(MachineBasicBlock &
MBB) {
968 bool MadeChange =
false;
971 AllSuccsCache AllSuccessors;
976 bool ProcessedBegin, SawStore =
false;
978 MachineInstr &
MI = *
I;
986 if (
MI.isDebugOrPseudoInstr() ||
MI.isFakeUse()) {
987 if (
MI.isDebugValue())
992 if (EnableSinkAndFold && PerformSinkAndFold(
MI, &
MBB)) {
1001 if (PerformTrivialForwardCoalescing(
MI, &
MBB)) {
1012 }
while (!ProcessedBegin);
1014 SeenDbgUsers.clear();
1015 SeenDbgVars.clear();
1017 CachedRegisterPressure.clear();
1021void MachineSinking::ProcessDbgInst(MachineInstr &
MI) {
1024 assert(
MI.isDebugValue() &&
"Expected DBG_VALUE for processing");
1026 DebugVariable Var(
MI.getDebugVariable(),
MI.getDebugExpression(),
1027 MI.getDebugLoc()->getInlinedAt());
1028 bool SeenBefore = SeenDbgVars.contains(Var);
1030 for (MachineOperand &MO :
MI.debug_operands()) {
1032 SeenDbgUsers[MO.
getReg()].push_back(SeenDbgUser(&
MI, SeenBefore));
1036 SeenDbgVars.insert(Var);
1039bool MachineSinking::isWorthBreakingCriticalEdge(
1040 MachineInstr &
MI, MachineBasicBlock *From, MachineBasicBlock *To,
1041 MachineBasicBlock *&DeferredFromBlock) {
1047 if (!CEBCandidates.insert(std::make_pair(From, To)).second)
1058 for (
const auto &MO :
MI.all_defs()) {
1063 auto Key = std::make_pair(SrcReg, To);
1064 auto Res = CEMergeCandidates.try_emplace(
Key, From);
1069 DeferredFromBlock = Res.first->second;
1082 for (
const MachineOperand &MO :
MI.all_uses()) {
1108 return TII->shouldBreakCriticalEdgeToSink(
MI);
1111bool MachineSinking::isLegalToBreakCriticalEdge(MachineInstr &
MI,
1112 MachineBasicBlock *FromBB,
1113 MachineBasicBlock *ToBB,
1114 bool BreakPHIEdge) {
1123 if (FromCycle == ToCycle && FromCycle &&
1166 if (!BreakPHIEdge) {
1168 if (Pred != FromBB && !DT->
dominates(ToBB, Pred))
1175bool MachineSinking::PostponeSplitCriticalEdge(MachineInstr &
MI,
1176 MachineBasicBlock *FromBB,
1177 MachineBasicBlock *ToBB,
1178 bool BreakPHIEdge) {
1179 bool Status =
false;
1180 MachineBasicBlock *DeferredFromBB =
nullptr;
1181 if (isWorthBreakingCriticalEdge(
MI, FromBB, ToBB, DeferredFromBB)) {
1184 if ((!DeferredFromBB ||
1185 ToSplit.count(std::make_pair(DeferredFromBB, ToBB)) ||
1186 isLegalToBreakCriticalEdge(
MI, DeferredFromBB, ToBB, BreakPHIEdge)) &&
1187 isLegalToBreakCriticalEdge(
MI, FromBB, ToBB, BreakPHIEdge)) {
1188 ToSplit.insert(std::make_pair(FromBB, ToBB));
1190 ToSplit.insert(std::make_pair(DeferredFromBB, ToBB));
1198std::vector<unsigned> &
1199MachineSinking::getBBRegisterPressure(
const MachineBasicBlock &
MBB,
1206 auto RP = CachedRegisterPressure.find(&
MBB);
1207 if (UseCache && RP != CachedRegisterPressure.end())
1210 RegionPressure Pressure;
1211 RegPressureTracker RPTracker(Pressure);
1219 MII != MIE; --MII) {
1220 const MachineInstr &
MI = *std::prev(MII);
1221 if (
MI.isDebugOrPseudoInstr())
1223 RegisterOperands RegOpers;
1225 RPTracker.recedeSkipDebugValues();
1226 assert(&*RPTracker.getPos() == &
MI &&
"RPTracker sync error!");
1227 RPTracker.recede(RegOpers);
1230 RPTracker.closeRegion();
1232 if (RP != CachedRegisterPressure.end()) {
1233 CachedRegisterPressure[&
MBB] = RPTracker.getPressure().MaxSetPressure;
1234 return CachedRegisterPressure[&
MBB];
1237 auto It = CachedRegisterPressure.insert(
1238 std::make_pair(&
MBB, RPTracker.getPressure().MaxSetPressure));
1239 return It.first->second;
1242bool MachineSinking::registerPressureSetExceedsLimit(
1243 unsigned NRegs,
const TargetRegisterClass *RC,
1244 const MachineBasicBlock &
MBB) {
1245 unsigned Weight = NRegs *
TRI->getRegClassWeight(RC).RegWeight;
1246 const int *PS =
TRI->getRegClassPressureSets(RC);
1247 std::vector<unsigned> BBRegisterPressure = getBBRegisterPressure(
MBB);
1248 for (; *PS != -1; PS++)
1249 if (Weight + BBRegisterPressure[*PS] >=
1256bool MachineSinking::registerPressureExceedsLimit(
1257 const MachineBasicBlock &
MBB) {
1258 std::vector<unsigned> BBRegisterPressure = getBBRegisterPressure(
MBB,
false);
1260 for (
unsigned PS = 0; PS < BBRegisterPressure.size(); ++PS) {
1261 if (BBRegisterPressure[PS] >=
1271bool MachineSinking::isProfitableToSinkTo(
Register Reg, MachineInstr &
MI,
1272 MachineBasicBlock *
MBB,
1273 MachineBasicBlock *SuccToSinkTo,
1274 AllSuccsCache &AllSuccessors) {
1275 assert(SuccToSinkTo &&
"Invalid SinkTo Candidate BB");
1277 if (
MBB == SuccToSinkTo)
1290 bool NonPHIUse =
false;
1292 MachineBasicBlock *UseBlock = UseInst.
getParent();
1293 if (UseBlock == SuccToSinkTo && !UseInst.
isPHI())
1301 bool BreakPHIEdge =
false;
1303 if (MachineBasicBlock *MBB2 =
1304 FindSuccToSinkTo(
MI, SuccToSinkTo, BreakPHIEdge, AllSuccessors))
1305 return isProfitableToSinkTo(
Reg,
MI, SuccToSinkTo, MBB2, AllSuccessors);
1316 for (
const MachineOperand &MO :
MI.operands()) {
1327 !
TII->isIgnorableUse(MO))
1335 bool LocalUse =
false;
1336 if (!AllUsesDominatedByBlock(
Reg, SuccToSinkTo,
MBB, BreakPHIEdge,
1356 LLVM_DEBUG(
dbgs() <<
"register pressure exceed limit, not profitable.");
1370SmallVector<MachineBasicBlock *, 4> &
1371MachineSinking::GetAllSortedSuccessors(MachineInstr &
MI, MachineBasicBlock *
MBB,
1372 AllSuccsCache &AllSuccessors)
const {
1374 auto Succs = AllSuccessors.find(
MBB);
1375 if (Succs != AllSuccessors.end())
1376 return Succs->second;
1378 SmallVector<MachineBasicBlock *, 4> AllSuccs(
MBB->
successors());
1389 if (DTChild->getIDom()->getBlock() ==
MI.getParent() &&
1392 AllSuccs.push_back(DTChild->getBlock());
1397 AllSuccs, [&](
const MachineBasicBlock *L,
const MachineBasicBlock *R) {
1401 (!LHSFreq && !RHSFreq))
1403 return LHSFreq < RHSFreq;
1406 auto it = AllSuccessors.insert(std::make_pair(
MBB, AllSuccs));
1408 return it.first->second;
1413MachineSinking::FindSuccToSinkTo(MachineInstr &
MI, MachineBasicBlock *
MBB,
1415 AllSuccsCache &AllSuccessors) {
1416 assert(
MBB &&
"Invalid MachineBasicBlock!");
1423 MachineBasicBlock *SuccToSinkTo =
nullptr;
1424 for (
const MachineOperand &MO :
MI.operands()) {
1439 }
else if (!MO.
isDead()) {
1457 bool LocalUse =
false;
1458 if (!AllUsesDominatedByBlock(
Reg, SuccToSinkTo,
MBB, BreakPHIEdge,
1469 for (MachineBasicBlock *SuccBlock :
1470 GetAllSortedSuccessors(
MI,
MBB, AllSuccessors)) {
1471 bool LocalUse =
false;
1472 if (AllUsesDominatedByBlock(
Reg, SuccBlock,
MBB, BreakPHIEdge,
1474 SuccToSinkTo = SuccBlock;
1485 if (!isProfitableToSinkTo(
Reg,
MI,
MBB, SuccToSinkTo, AllSuccessors))
1492 if (
MBB == SuccToSinkTo)
1497 if (SuccToSinkTo && SuccToSinkTo->
isEHPad())
1507 if (SuccToSinkTo && !
TII->isSafeToSink(
MI, SuccToSinkTo, CI))
1510 return SuccToSinkTo;
1525 auto *
MBB =
MI.getParent();
1526 if (
MBB->pred_size() != 1)
1529 auto *PredMBB = *
MBB->pred_begin();
1530 auto *PredBB = PredMBB->getBasicBlock();
1536 !PredBB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit))
1541 bool OffsetIsScalable;
1542 if (!
TII->getMemOperandWithOffset(
MI, BaseOp,
Offset, OffsetIsScalable,
TRI))
1545 if (!BaseOp->
isReg())
1548 if (!(
MI.mayLoad() && !
MI.isPredicable()))
1551 MachineBranchPredicate MBP;
1552 if (
TII->analyzeBranchPredicate(*PredMBB, MBP,
false))
1555 return MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
1556 (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
1557 MBP.Predicate == MachineBranchPredicate::PRED_EQ) &&
1558 MBP.LHS.getReg() == BaseOp->
getReg();
1575 auto CopyOperands =
TII.isCopyInstr(SinkInst);
1578 SrcMO = CopyOperands->Source;
1579 DstMO = CopyOperands->Destination;
1590 bool arePhysRegs = !
Reg.isVirtual();
1591 if (arePhysRegs != PostRA)
1598 if (DbgMO.getSubReg() != SrcMO->
getSubReg() ||
1599 DbgMO.getSubReg() != DstMO->getSubReg())
1605 if (PostRA &&
Reg != DstMO->getReg())
1609 DbgMO.setReg(SrcMO->
getReg());
1615using MIRegs = std::pair<MachineInstr *, SmallVector<Register, 2>>;
1623 if (!SuccToSinkTo.
empty() && InsertPos != SuccToSinkTo.
end())
1625 InsertPos->getDebugLoc()));
1631 SuccToSinkTo.
splice(InsertPos, ParentBlock,
MI,
1638 for (
const auto &DbgValueToSink : DbgValuesToSink) {
1641 SuccToSinkTo.
insert(InsertPos, NewDbgMI);
1643 bool PropagatedAllSunkOps =
true;
1647 PropagatedAllSunkOps =
false;
1652 if (!PropagatedAllSunkOps)
1659bool MachineSinking::hasStoreBetween(MachineBasicBlock *From,
1660 MachineBasicBlock *To, MachineInstr &
MI) {
1666 auto BlockPair = std::make_pair(From, To);
1670 if (
auto It = HasStoreCache.find(BlockPair); It != HasStoreCache.end())
1673 if (
auto It = StoreInstrCache.find(BlockPair); It != StoreInstrCache.end())
1675 return I->mayAlias(AA, MI, false);
1678 bool SawStore =
false;
1679 bool HasAliasedStore =
false;
1680 DenseSet<MachineBasicBlock *> HandledBlocks;
1681 DenseSet<MachineBasicBlock *> HandledDomBlocks;
1688 if (BB == To || BB == From)
1692 if (HandledBlocks.
count(BB))
1695 HandledBlocks.
insert(BB);
1698 if (!HandledDomBlocks.
count(BB))
1699 HandledDomBlocks.
insert(BB);
1705 for (
auto *DomBB : HandledDomBlocks) {
1706 if (DomBB != BB && DT->
dominates(DomBB, BB))
1707 HasStoreCache[std::make_pair(DomBB, To)] =
true;
1708 else if (DomBB != BB && DT->
dominates(BB, DomBB))
1709 HasStoreCache[std::make_pair(From, DomBB)] =
true;
1711 HasStoreCache[BlockPair] =
true;
1715 for (MachineInstr &
I : *BB) {
1718 if (
I.isCall() ||
I.hasOrderedMemoryRef()) {
1719 for (
auto *DomBB : HandledDomBlocks) {
1720 if (DomBB != BB && DT->
dominates(DomBB, BB))
1721 HasStoreCache[std::make_pair(DomBB, To)] =
true;
1722 else if (DomBB != BB && DT->
dominates(BB, DomBB))
1723 HasStoreCache[std::make_pair(From, DomBB)] =
true;
1725 HasStoreCache[BlockPair] =
true;
1735 if (
I.mayAlias(AA,
MI,
false))
1736 HasAliasedStore =
true;
1737 StoreInstrCache[BlockPair].push_back(&
I);
1744 HasStoreCache[BlockPair] =
false;
1745 return HasAliasedStore;
1753bool MachineSinking::aggressivelySinkIntoCycle(
1755 DenseMap<SinkItem, MachineInstr *> &SunkInstrs) {
1757 if (
I.getNumDefs() > 1)
1760 LLVM_DEBUG(
dbgs() <<
"AggressiveCycleSink: Finding sink block for: " <<
I);
1764 MachineOperand &DefMO =
I.getOperand(0);
1769 for (std::pair<RegSubRegPair, MachineInstr *> Entry :
Uses) {
1770 MachineInstr *
MI =
Entry.second;
1774 dbgs() <<
"AggressiveCycleSink: Not attempting to sink for PHI.\n");
1778 if (
MI->isPosition() ||
TII->isBasicBlockPrologue(*
MI)) {
1779 LLVM_DEBUG(
dbgs() <<
"AggressiveCycleSink: Use is BasicBlock prologue, "
1785 dbgs() <<
"AggressiveCycleSink: Use not in cycle, can't sink.\n");
1789 MachineBasicBlock *SinkBlock =
MI->getParent();
1790 MachineInstr *NewMI =
nullptr;
1791 SinkItem MapEntry(&
I, SinkBlock);
1793 auto SI = SunkInstrs.
find(MapEntry);
1797 if (SI != SunkInstrs.
end()) {
1798 LLVM_DEBUG(
dbgs() <<
"AggressiveCycleSink: Already sunk to block: "
1805 LLVM_DEBUG(
dbgs() <<
"AggressiveCycleSink: Sinking instruction to block: "
1808 NewMI =
I.
getMF()->CloneMachineInstr(&
I);
1817 SunkInstrs.
insert({MapEntry, NewMI});
1821 for (MachineOperand &MO : NewMI->
all_uses()) {
1823 RegsToClearKillFlags.insert(MO.
getReg());
1838 I.eraseFromParent();
1844bool MachineSinking::SinkInstruction(MachineInstr &
MI,
bool &SawStore,
1845 AllSuccsCache &AllSuccessors) {
1851 if (!
MI.isSafeToMove(SawStore))
1856 if (
MI.isConvergent())
1872 bool BreakPHIEdge =
false;
1873 MachineBasicBlock *ParentBlock =
MI.getParent();
1874 MachineBasicBlock *SuccToSinkTo =
1875 FindSuccToSinkTo(
MI, ParentBlock, BreakPHIEdge, AllSuccessors);
1884 for (
const MachineOperand &MO :
MI.all_defs()) {
1892 LLVM_DEBUG(
dbgs() <<
"Sink instr " <<
MI <<
"\tinto block " << *SuccToSinkTo);
1899 bool TryBreak =
false;
1901 MI.mayLoad() ? hasStoreBetween(ParentBlock, SuccToSinkTo,
MI) :
true;
1902 if (!
MI.isSafeToMove(Store)) {
1903 LLVM_DEBUG(
dbgs() <<
" *** NOTE: Won't sink load along critical edge.\n");
1909 if (!TryBreak && !DT->
dominates(ParentBlock, SuccToSinkTo)) {
1915 if (!TryBreak && CI->
getCycle(SuccToSinkTo) &&
1929 bool Status = PostponeSplitCriticalEdge(
MI, ParentBlock, SuccToSinkTo,
1933 "break critical edge\n");
1944 PostponeSplitCriticalEdge(
MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
1947 "break critical edge\n");
1956 LLVM_DEBUG(
dbgs() <<
" *** Not sinking: prologue interference\n");
1962 for (
auto &MO :
MI.all_defs()) {
1965 auto It = SeenDbgUsers.find(MO.
getReg());
1966 if (It == SeenDbgUsers.end())
1970 auto &
Users = It->second;
1971 for (
auto &User :
Users) {
1972 MachineInstr *DbgMI =
User.getPointer();
1973 if (
User.getInt()) {
1988 if (
MI.getMF()->getFunction().getSubprogram() &&
MI.isCopy())
1989 SalvageUnsunkDebugUsersOfCopy(
MI, SuccToSinkTo);
1998 for (MachineOperand &MO :
MI.all_uses())
1999 RegsToClearKillFlags.insert(MO.
getReg());
2004void MachineSinking::SalvageUnsunkDebugUsersOfCopy(
2005 MachineInstr &
MI, MachineBasicBlock *TargetBlock) {
2012 SmallVector<MachineInstr *, 4> DbgDefUsers;
2014 const MachineRegisterInfo &MRI =
MI.getMF()->getRegInfo();
2015 for (
auto &MO :
MI.all_defs()) {
2024 if (
User.getParent() ==
MI.getParent())
2028 "DBG_VALUE user of vreg, but has no operand for it?");
2035 for (
auto *User : DbgDefUsers) {
2036 for (
auto &
Reg : DbgUseRegs) {
2037 for (
auto &DbgOp :
User->getDebugOperandsForReg(
Reg)) {
2038 DbgOp.setReg(
MI.getOperand(1).getReg());
2039 DbgOp.setSubReg(
MI.getOperand(1).getSubReg());
2081class PostRAMachineSinkingImpl {
2083 LiveRegUnits ModifiedRegUnits, UsedRegUnits;
2089 DenseMap<MCRegUnit, SmallVector<MIRegs, 2>> SeenDbgInstrs;
2093 bool tryToSinkCopy(MachineBasicBlock &BB, MachineFunction &MF,
2094 const TargetRegisterInfo *
TRI,
const TargetInstrInfo *
TII);
2097 bool run(MachineFunction &MF);
2100class PostRAMachineSinkingLegacy :
public MachineFunctionPass {
2102 bool runOnMachineFunction(MachineFunction &MF)
override;
2105 PostRAMachineSinkingLegacy() : MachineFunctionPass(
ID) {}
2106 StringRef getPassName()
const override {
return "PostRA Machine Sink"; }
2108 void getAnalysisUsage(AnalysisUsage &AU)
const override {
2113 MachineFunctionProperties getRequiredProperties()
const override {
2114 return MachineFunctionProperties().setNoVRegs();
2120char PostRAMachineSinkingLegacy::ID = 0;
2124 "PostRA Machine Sink",
false,
false)
2133static MachineBasicBlock *
2139 for (
auto *
SI : SinkableBBs) {
2140 if (aliasWithRegsInLiveIn(*
SI,
Reg,
TRI)) {
2160static MachineBasicBlock *
2166 for (
auto DefReg : DefedRegsInCopy) {
2169 if (!BB || (SingleBB && SingleBB != BB))
2180 for (
auto U : UsedOpsInCopy) {
2186 if (UI.killsRegister(SrcReg,
TRI)) {
2187 UI.clearRegisterKills(SrcReg,
TRI);
2199 for (
Register DefReg : DefedRegsInCopy)
2202 for (
auto U : UsedOpsInCopy)
2212 bool HasRegDependency =
false;
2213 for (
unsigned i = 0, e =
MI->getNumOperands(); i != e; ++i) {
2222 HasRegDependency =
true;
2231 }
else if (MO.
isUse()) {
2233 HasRegDependency =
true;
2239 return HasRegDependency;
2242bool PostRAMachineSinkingImpl::tryToSinkCopy(MachineBasicBlock &CurBB,
2243 MachineFunction &MF,
2244 const TargetRegisterInfo *
TRI,
2245 const TargetInstrInfo *
TII) {
2246 SmallPtrSet<MachineBasicBlock *, 2> SinkableBBs;
2250 for (MachineBasicBlock *SI : CurBB.
successors())
2251 if (!
SI->livein_empty() &&
SI->pred_size() == 1)
2254 if (SinkableBBs.
empty())
2261 ModifiedRegUnits.
clear();
2262 UsedRegUnits.
clear();
2263 SeenDbgInstrs.clear();
2267 SmallVector<unsigned, 2> UsedOpsInCopy;
2273 if (
MI.isDebugValue() && !
MI.isDebugRef()) {
2274 SmallDenseMap<MCRegUnit, SmallVector<Register, 2>, 4> MIUnits;
2275 bool IsValid =
true;
2276 for (MachineOperand &MO :
MI.debug_operands()) {
2281 ModifiedRegUnits, UsedRegUnits)) {
2287 for (MCRegUnit Unit :
TRI->regunits(MO.
getReg()))
2292 for (
auto &RegOps : MIUnits)
2293 SeenDbgInstrs[RegOps.first].emplace_back(&
MI,
2294 std::move(RegOps.second));
2300 if (!
TII->shouldPostRASink(
MI))
2303 if (
MI.isDebugOrPseudoInstr())
2310 if (!
MI.isCopy() || !
MI.getOperand(0).isRenamable()) {
2318 ModifiedRegUnits, UsedRegUnits)) {
2324 "Unexpect SrcReg or DefReg");
2325 MachineBasicBlock *SuccBB =
2335 "Unexpected predecessor");
2340 MapVector<MachineInstr *, MIRegs::second_type> DbgValsToSinkMap;
2341 for (
auto &MO :
MI.all_defs()) {
2342 for (MCRegUnit Unit :
TRI->regunits(MO.
getReg())) {
2343 for (
const auto &
MIRegs : SeenDbgInstrs.lookup(Unit)) {
2344 auto &Regs = DbgValsToSinkMap[
MIRegs.first];
2349 auto DbgValsToSink = DbgValsToSinkMap.
takeVector();
2358 LLVM_DEBUG(
dbgs() <<
" *** Not sinking: prologue interference\n");
2369 ++NumPostRACopySink;
2374bool PostRAMachineSinkingImpl::run(MachineFunction &MF) {
2387bool PostRAMachineSinkingLegacy::runOnMachineFunction(MachineFunction &MF) {
2391 return PostRAMachineSinkingImpl().run(MF);
2399 if (!PostRAMachineSinkingImpl().
run(MF))
MachineInstrBuilder MachineInstrBuilder & DefMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file defines the DenseSet and SmallDenseSet classes.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
ManagedStatic< HTTPClientCleanup > Cleanup
static Register UseReg(const MachineOperand &MO)
const HexagonInstrInfo * TII
iv Induction Variable Users
static bool mayLoadFromGOTOrConstantPool(MachineInstr &MI)
Return true if this machine instruction loads from global offset table or constant pool.
static cl::opt< unsigned > SinkLoadInstsPerBlockThreshold("machine-sink-load-instrs-threshold", cl::desc("Do not try to find alias store for a load if there is a in-path " "block whose instruction number is higher than this threshold."), cl::init(2000), cl::Hidden)
static cl::opt< unsigned > SinkIntoCycleLimit("machine-sink-cycle-limit", cl::desc("The maximum number of instructions considered for cycle sinking."), cl::init(50), cl::Hidden)
TargetInstrInfo::RegSubRegPair RegSubRegPair
static void clearKillFlags(MachineInstr *MI, MachineBasicBlock &CurBB, const SmallVectorImpl< unsigned > &UsedOpsInCopy, const LiveRegUnits &UsedRegUnits, const TargetRegisterInfo *TRI)
static void performSink(MachineInstr &MI, MachineBasicBlock &SuccToSinkTo, MachineBasicBlock::iterator InsertPos, ArrayRef< MIRegs > DbgValuesToSink)
Sink an instruction and its associated debug instructions.
static cl::opt< bool > SplitEdges("machine-sink-split", cl::desc("Split critical edges during machine sinking"), cl::init(true), cl::Hidden)
static bool SinkingPreventsImplicitNullCheck(MachineInstr &MI, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI)
Return true if MI is likely to be usable as a memory operation by the implicit null check optimizatio...
static cl::opt< bool > SinkInstsIntoCycle("sink-insts-to-avoid-spills", cl::desc("Sink instructions into cycles to avoid " "register spills"), cl::init(false), cl::Hidden)
static cl::opt< unsigned > SinkLoadBlocksThreshold("machine-sink-load-blocks-threshold", cl::desc("Do not try to find alias store for a load if the block number in " "the straight line is higher than this threshold."), cl::init(20), cl::Hidden)
static void updateLiveIn(MachineInstr *MI, MachineBasicBlock *SuccBB, const SmallVectorImpl< unsigned > &UsedOpsInCopy, const SmallVectorImpl< Register > &DefedRegsInCopy)
static bool hasRegisterDependency(MachineInstr *MI, SmallVectorImpl< unsigned > &UsedOpsInCopy, SmallVectorImpl< Register > &DefedRegsInCopy, LiveRegUnits &ModifiedRegUnits, LiveRegUnits &UsedRegUnits)
Register const TargetRegisterInfo * TRI
std::pair< MachineInstr *, SmallVector< Register, 2 > > MIRegs
Machine code static false bool blockPrologueInterferes(const MachineBasicBlock *BB, MachineBasicBlock::const_iterator End, const MachineInstr &MI, const TargetRegisterInfo *TRI, const TargetInstrInfo *TII, const MachineRegisterInfo *MRI)
Return true if a target defined block prologue instruction interferes with a sink candidate.
static cl::opt< unsigned > SplitEdgeProbabilityThreshold("machine-sink-split-probability-threshold", cl::desc("Percentage threshold for splitting single-instruction critical edge. " "If the branch threshold is higher than this threshold, we allow " "speculative execution of up to 1 instruction to avoid branching to " "splitted critical edge"), cl::init(40), cl::Hidden)
static bool attemptDebugCopyProp(MachineInstr &SinkInst, MachineInstr &DbgMI, Register Reg)
If the sunk instruction is a copy, try to forward the copy instead of leaving an 'undef' DBG_VALUE in...
static cl::opt< bool > UseBlockFreqInfo("machine-sink-bfi", cl::desc("Use block frequency info to find successors to sink"), cl::init(true), cl::Hidden)
static MachineBasicBlock * getSingleLiveInSuccBB(MachineBasicBlock &CurBB, const SmallPtrSetImpl< MachineBasicBlock * > &SinkableBBs, Register Reg, const TargetRegisterInfo *TRI)
This file implements a map that provides insertion order iteration.
Promote Memory to Register
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
This file defines the PointerIntPair class.
Remove Loads Into Fake Uses
This file implements a set that has insertion order iteration characteristics.
static bool ProcessBlock(BasicBlock &BB, DominatorTree &DT, LoopInfo &LI, AAResults &AA)
static bool SinkInstruction(Instruction *Inst, SmallPtrSetImpl< Instruction * > &Stores, DominatorTree &DT, LoopInfo &LI, AAResults &AA)
SinkInstruction - Determine whether it is safe to sink the specified machine instruction out of its c...
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Target-Independent Code Generator Pass Configuration Options pass.
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
uint64_t getFrequency() const
Returns the frequency as a fixpoint number scaled by the entry frequency.
Represents analyses that only rely on functions' control flow.
static LLVM_ABI DebugLoc getMergedLocation(DebugLoc LocA, DebugLoc LocB)
When two instructions are combined into a single instruction we also need to combine the original loc...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Implements a dense probed hash-table based set.
bool dominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
dominates - Returns true iff A dominates B.
bool isReachableFromEntry(const NodeT *A) const
isReachableFromEntry - Return true if A is dominated by the entry block of the function containing it...
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
iterator_range< const_toplevel_iterator > toplevel_cycles() const
void splitCriticalEdge(BlockT *Pred, BlockT *Succ, BlockT *New)
unsigned getCycleDepth(const BlockT *Block) const
Get the depth of the cycle that contains a given block.
CycleT * getCycle(const BlockT *Block) const
Find the innermost cycle containing a given block.
BlockT * getHeader() const
bool isReducible() const
Whether the cycle is a natural loop.
BlockT * getCyclePreheader() const
Return the preheader block for this cycle.
bool contains(const BlockT *Block) const
Return whether Block is contained in the cycle.
Module * getParent()
Get the module that this global value is contained inside of...
bool isAsCheapAsAMove(const MachineInstr &MI) const override
bool shouldSink(const MachineInstr &MI) const override
A set of register units used to track register liveness.
static void accumulateUsedDefed(const MachineInstr &MI, LiveRegUnits &ModifiedRegUnits, LiveRegUnits &UsedRegUnits, const TargetRegisterInfo *TRI)
For a machine instruction MI, adds all register units used in UsedRegUnits and defined or clobbered i...
bool available(MCRegister Reg) const
Returns true if no part of physical register Reg is live.
void init(const TargetRegisterInfo &TRI)
Initialize and clear the set.
LLVM_ABI void addLiveIns(const MachineBasicBlock &MBB)
Adds registers living into block MBB.
void clear()
Clears the set.
An RAII based helper class to modify MachineFunctionProperties when running pass.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
unsigned pred_size() const
bool isEHPad() const
Returns true if the block is a landing pad.
instr_iterator instr_begin()
MachineInstrBundleIterator< const MachineInstr > const_iterator
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
LLVM_ABI iterator SkipPHIsAndLabels(iterator I)
Return the first instruction in MBB after I that is not a PHI or a label.
unsigned succ_size() const
LLVM_ABI void sortUniqueLiveIns()
Sorts and uniques the LiveIns vector.
pred_iterator pred_begin()
LLVM_ABI void removeLiveInOverlappedWith(MCRegister Reg)
Remove the specified register from any overlapped live in.
instr_iterator instr_end()
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
LLVM_ABI bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
iterator_range< pred_iterator > predecessors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
LLVM_ABI bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate machine basic b...
LLVM_ABI BlockFrequency getBlockFreq(const MachineBasicBlock *MBB) const
getBlockFreq - Return block frequency.
LLVM_ABI void onEdgeSplit(const MachineBasicBlock &NewPredecessor, const MachineBasicBlock &NewSuccessor, const MachineBranchProbabilityInfo &MBPI)
Incrementally calculate block frequencies when we split edges, to avoid a full CFG traversal.
BranchProbability getEdgeProbability(const MachineBasicBlock *Src, const MachineBasicBlock *Dst) const
Legacy analysis pass which computes a MachineCycleInfo.
Analysis pass which computes a MachineDominatorTree.
Analysis pass which computes a MachineDominatorTree.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
bool dominates(const MachineInstr *A, const MachineInstr *B) const
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Representation of each machine instruction.
bool hasDebugOperandForReg(Register Reg) const
Returns whether this debug value has at least one debug operand with the register Reg.
void setDebugValueUndef()
Sets all register debug operands in this debug value instruction to be undef.
LLVM_ABI iterator_range< filter_iterator< const MachineOperand *, std::function< bool(const MachineOperand &Op)> > > getDebugOperandsForReg(Register Reg) const
Returns a range of all of the operands that correspond to a debug use of Reg.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
bool isCopyLike() const
Return true if the instruction behaves like a copy.
bool isDebugInstr() const
LLVM_ABI void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
filtered_mop_range all_uses()
Returns an iterator range over all operands that are (explicit or implicit) register uses.
const MachineOperand & getOperand(unsigned i) const
void setDebugLoc(DebugLoc DL)
Replace current source information with new such.
Analysis pass that exposes the MachineLoopInfo for a machine function.
A description of a memory reference used in the backend.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
void setIsKill(bool Val=true)
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
Register getReg() const
getReg - Returns the register number.
MachinePostDominatorTree - an analysis pass wrapper for DominatorTree used to compute the post-domina...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI bool hasOneNonDBGUse(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register.
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI void clearKillFlags(Register Reg) const
clearKillFlags - Iterate over all the uses of the given register and clear the kill flag from the Mac...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
iterator_range< use_nodbg_iterator > use_nodbg_operands(Register Reg) const
bool use_nodbg_empty(Register RegNo) const
use_nodbg_empty - Return true if there are no non-Debug instructions using the specified register.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
iterator_range< use_instr_nodbg_iterator > use_nodbg_instructions(Register Reg) const
bool hasOneDef(Register RegNo) const
Return true if there is exactly one operand defining the specified register.
iterator_range< use_instr_iterator > use_instructions(Register Reg) const
LLVM_ABI bool isConstantPhysReg(MCRegister PhysReg) const
Returns true if PhysReg is unallocatable and constant throughout the function.
iterator_range< use_iterator > use_operands(Register Reg) const
unsigned getNumVirtRegs() const
getNumVirtRegs - Return the number of virtual registers created.
LLVM_ABI void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &)
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
VectorType takeVector()
Clear the MapVector and return the underlying vector.
PointerIntPair - This class implements a pair of a pointer and small integer.
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
Special value supplied for machine level alias analysis.
unsigned getRegPressureSetLimit(unsigned Idx) const
Get the register unit limit for the given pressure set index.
LLVM_ABI void runOnMachineFunction(const MachineFunction &MF, bool Rev=false)
runOnMachineFunction - Prepare to answer questions about MF.
LLVM_ABI void collect(const MachineInstr &MI, const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, bool TrackLaneMasks, bool IgnoreDead)
Analyze the given instruction MI and fill in the Uses, Defs and DeadDefs list based on the MachineOpe...
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
A vector that has set insertion semantics.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
TargetInstrInfo - Interface to description of machine instruction set.
Target-Independent Code Generator Pass Configuration Options.
bool getEnableSinkAndFold() const
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
LLVM_ABI void init(const TargetSubtargetInfo *TSInfo, bool EnableSModel=true, bool EnableSItins=true)
Initialize the machine model for instruction scheduling.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
std::pair< iterator, bool > insert(const ValueT &V)
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
An efficient, type-erasing, non-owning reference to a callable.
This class implements an extremely fast bulk output stream that can only output to a stream.
Abstract Attribute helper functions.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
initializer< Ty > init(const Ty &Val)
PointerTypeMap run(const Module &M)
Compute the PointerTypeMap for the module M.
@ User
could "use" a pointer
This is an optimization pass for GlobalISel generic memory operations.
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
OuterAnalysisManagerProxy< ModuleAnalysisManager, MachineFunction > ModuleAnalysisManagerMachineFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
LLVM_ABI bool isCycleInvariant(const MachineCycle *Cycle, MachineInstr &I)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
LLVM_ABI PreservedAnalyses getMachineFunctionPassPreservedAnalyses()
Returns the minimum set of Analyses that all machine function passes must preserve.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
auto reverse(ContainerTy &&C)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI char & PostRAMachineSinkingID
This pass performs post-RA machine sinking for COPY instructions.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
DomTreeNodeBase< MachineBasicBlock > MachineDomTreeNode
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
LLVM_ABI char & MachineSinkingLegacyID
MachineSinking - This pass performs sinking on machine instructions.
iterator_range< df_iterator< T > > depth_first(const T &G)
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
MachineCycleInfo::CycleT MachineCycle
LLVM_ABI Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
Represents a predicate at the MachineFunction level.
A pair composed of a register and a sub-register index.