71#define DEBUG_TYPE "machine-sink"
75 cl::desc(
"Split critical edges during machine sinking"),
80 cl::desc(
"Use block frequency info to find successors to sink"),
84 "machine-sink-split-probability-threshold",
86 "Percentage threshold for splitting single-instruction critical edge. "
87 "If the branch threshold is higher than this threshold, we allow "
88 "speculative execution of up to 1 instruction to avoid branching to "
89 "splitted critical edge"),
93 "machine-sink-load-instrs-threshold",
94 cl::desc(
"Do not try to find alias store for a load if there is a in-path "
95 "block whose instruction number is higher than this threshold."),
99 "machine-sink-load-blocks-threshold",
100 cl::desc(
"Do not try to find alias store for a load if the block number in "
101 "the straight line is higher than this threshold."),
106 cl::desc(
"Sink instructions into cycles to avoid "
111 "machine-sink-cycle-limit",
113 "The maximum number of instructions considered for cycle sinking."),
116STATISTIC(NumSunk,
"Number of machine instructions sunk");
117STATISTIC(NumCycleSunk,
"Number of machine instructions sunk into a cycle");
120STATISTIC(NumPostRACopySink,
"Number of copies sunk after RA");
126class MachineSinking {
164 using AllSuccsCache =
178 using SinkItem = std::pair<MachineInstr *, MachineBasicBlock *>;
197 CachedRegisterPressure;
199 bool EnableSinkAndFold;
208 : DT(DT), PDT(PDT), CI(CI), PSI(PSI), MBFI(MBFI), MBPI(MBPI),
AA(
AA),
209 LIS(LIS),
SI(
SI), LV(LV), MLI(MLI),
210 EnableSinkAndFold(EnableSinkAndFold) {}
214 void releaseMemory() {
215 CEBCandidates.
clear();
216 CEMergeCandidates.
clear();
246 AllSuccsCache &AllSuccessors);
256 bool &LocalUse)
const;
259 AllSuccsCache &AllSuccessors);
271 AllSuccsCache &AllSuccessors);
280 AllSuccsCache &AllSuccessors)
const;
283 bool UseCache =
true);
285 bool registerPressureSetExceedsLimit(
unsigned NRegs,
322char MachineSinkingLegacy::ID = 0;
347 if (!TII->isBasicBlockPrologue(*PI))
349 for (auto &MO : MI.operands()) {
352 Register Reg = MO.getReg();
356 if (Reg.isPhysical() &&
357 (TII->isIgnorableUse(MO) || (MRI && MRI->isConstantPhysReg(Reg))))
359 if (PI->modifiesRegister(Reg, TRI))
362 if (PI->readsRegister(Reg, TRI))
365 auto *DefOp = PI->findRegisterDefOperand(Reg, TRI, false, true);
366 if (DefOp && !DefOp->isDead())
375bool MachineSinking::PerformTrivialForwardCoalescing(
MachineInstr &
MI,
383 !
MRI->hasOneNonDBGUse(SrcReg))
386 const TargetRegisterClass *SRC =
MRI->getRegClass(SrcReg);
387 const TargetRegisterClass *DRC =
MRI->getRegClass(DstReg);
391 MachineInstr *
DefMI =
MRI->getVRegDef(SrcReg);
396 MRI->replaceRegWith(DstReg, SrcReg);
397 MI.eraseFromParent();
401 MRI->clearKillFlags(SrcReg);
407bool MachineSinking::PerformSinkAndFold(MachineInstr &
MI,
408 MachineBasicBlock *
MBB) {
409 if (
MI.isCopy() ||
MI.mayLoadOrStore() ||
410 MI.getOpcode() == TargetOpcode::REG_SEQUENCE)
418 bool SawStore =
true;
419 if (!
MI.isSafeToMove(SawStore))
424 if (
MI.isConvergent())
433 for (
const MachineOperand &MO :
MI.operands()) {
434 if (MO.isImm() || MO.isRegMask() || MO.isRegLiveOut() || MO.isMetadata() ||
435 MO.isMCSymbol() || MO.isDbgInstrRef() || MO.isCFIIndex() ||
436 MO.isIntrinsicID() || MO.isPredicate() || MO.isShuffleMask())
455 else if (UsedRegB == 0)
463 (
MRI->isConstantPhysReg(
Reg) ||
TII->isIgnorableUse(MO)))
473 using SinkInfo = std::pair<MachineInstr *, ExtAddrMode>;
477 const TargetRegisterClass *RC =
MRI->getRegClass(DefReg);
478 const TargetRegisterClass *RCA =
479 UsedRegA == 0 ? nullptr :
MRI->getRegClass(UsedRegA);
480 const TargetRegisterClass *RCB =
481 UsedRegB == 0 ? nullptr :
MRI->getRegClass(UsedRegB);
484 while (!Worklist.
empty()) {
487 for (MachineOperand &MO :
MRI->use_nodbg_operands(
Reg)) {
489 MachineInstr &UseInst = *MO.getParent();
492 if (
const MachineOperand &O = UseInst.
getOperand(0);
O.isReg())
510 if (!
TII->canFoldIntoAddrMode(UseInst,
Reg,
MI, AM))
521 const TargetRegisterClass *RCS =
MRI->getRegClass(
Reg);
527 if (RCA ==
nullptr) {
532 unsigned NRegs = !!RCA + !!RCB;
538 if (RCB ==
nullptr) {
539 if (registerPressureSetExceedsLimit(NRegs, RCA,
MBB))
541 }
else if (registerPressureSetExceedsLimit(1, RCA,
MBB) ||
542 registerPressureSetExceedsLimit(1, RCB,
MBB)) {
552 if (SinkInto.
empty())
556 for (
auto &[SinkDst, MaybeAM] : SinkInto) {
557 MachineInstr *
New =
nullptr;
560 if (SinkDst->isCopy()) {
573 Register DstReg = SinkDst->getOperand(0).getReg();
574 TII->reMaterialize(*SinkDst->getParent(), InsertPt, DstReg, 0,
MI);
575 New = &*std::prev(InsertPt);
576 if (!
New->getDebugLoc())
577 New->setDebugLoc(SinkDst->getDebugLoc());
583 MRI->clearKillFlags(UsedRegA);
585 MRI->clearKillFlags(UsedRegB);
588 New =
TII->emitLdStWithAddr(*SinkDst, MaybeAM);
594 MRI->clearKillFlags(R);
596 MRI->clearKillFlags(R);
600 if (SinkDst->mayStore() && !SinkDst->hasOrderedMemoryRef())
601 StoreInstrCache.clear();
602 SinkDst->eraseFromParent();
610 while (!Worklist.
empty()) {
612 for (MachineOperand &MO :
MRI->use_operands(
Reg)) {
613 MachineInstr *
U = MO.getParent();
614 assert((
U->isCopy() ||
U->isDebugInstr()) &&
615 "Only debug uses and copies must remain");
617 Worklist.
push_back(
U->getOperand(0).getReg());
623 for (MachineOperand *MO :
Cleanup) {
624 MachineInstr *
I = MO->getParent();
626 I->eraseFromParent();
633 MI.eraseFromParent();
641bool MachineSinking::AllUsesDominatedByBlock(
Register Reg,
642 MachineBasicBlock *
MBB,
643 MachineBasicBlock *DefMBB,
645 bool &LocalUse)
const {
649 if (
MRI->use_nodbg_empty(
Reg))
666 if (
all_of(
MRI->use_nodbg_operands(
Reg), [&](MachineOperand &MO) {
667 MachineInstr *UseInst = MO.getParent();
668 unsigned OpNo = MO.getOperandNo();
669 MachineBasicBlock *UseBlock = UseInst->getParent();
670 return UseBlock == MBB && UseInst->isPHI() &&
671 UseInst->getOperand(OpNo + 1).getMBB() == DefMBB;
677 for (MachineOperand &MO :
MRI->use_nodbg_operands(
Reg)) {
680 unsigned OpNo = &MO - &UseInst->
getOperand(0);
681 MachineBasicBlock *UseBlock = UseInst->
getParent();
682 if (UseInst->
isPHI()) {
686 }
else if (UseBlock == DefMBB) {
702 assert(
MI.mayLoad() &&
"Expected MI that loads!");
706 if (
MI.memoperands_empty())
711 if (PSV->isGOT() || PSV->isConstantPool())
717void MachineSinking::FindCycleSinkCandidates(
719 SmallVectorImpl<MachineInstr *> &Candidates) {
720 for (
auto &
MI : *BB) {
722 if (
MI.isMetaInstruction()) {
723 LLVM_DEBUG(
dbgs() <<
"CycleSink: not sinking meta instruction\n");
727 LLVM_DEBUG(
dbgs() <<
"CycleSink: Instruction not a candidate for this "
732 LLVM_DEBUG(
dbgs() <<
"CycleSink: Instruction is not cycle invariant\n");
735 bool DontMoveAcrossStore =
true;
736 if (!
MI.isSafeToMove(DontMoveAcrossStore)) {
737 LLVM_DEBUG(
dbgs() <<
"CycleSink: Instruction not safe to move.\n");
741 LLVM_DEBUG(
dbgs() <<
"CycleSink: Dont sink GOT or constant pool loads\n");
744 if (
MI.isConvergent())
747 const MachineOperand &MO =
MI.getOperand(0);
753 LLVM_DEBUG(
dbgs() <<
"CycleSink: Instruction added as candidate.\n");
765 .getCachedResult<ProfileSummaryAnalysis>(
778 MachineSinking Impl(EnableSinkAndFold, DT, PDT, LV, MLI,
SI, LIS, CI, PSI,
793 OS << MapClassName2PassName(
name());
794 if (EnableSinkAndFold)
795 OS <<
"<enable-sink-fold>";
805 auto *DT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
807 &getAnalysis<MachinePostDominatorTreeWrapperPass>().getPostDomTree();
808 auto *CI = &getAnalysis<MachineCycleInfoWrapperPass>().getCycleInfo();
809 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
812 ? &getAnalysis<MachineBlockFrequencyInfoWrapperPass>().getMBFI()
815 &getAnalysis<MachineBranchProbabilityInfoWrapperPass>().getMBPI();
816 auto *
AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
818 auto *LISWrapper = getAnalysisIfAvailable<LiveIntervalsWrapperPass>();
819 auto *LIS = LISWrapper ? &LISWrapper->getLIS() :
nullptr;
820 auto *SIWrapper = getAnalysisIfAvailable<SlotIndexesWrapperPass>();
821 auto *
SI = SIWrapper ? &SIWrapper->getSI() :
nullptr;
822 auto *LVWrapper = getAnalysisIfAvailable<LiveVariablesWrapperPass>();
823 auto *LV = LVWrapper ? &LVWrapper->getLV() :
nullptr;
824 auto *MLIWrapper = getAnalysisIfAvailable<MachineLoopInfoWrapperPass>();
825 auto *MLI = MLIWrapper ? &MLIWrapper->getLI() :
nullptr;
827 MachineSinking Impl(EnableSinkAndFold, DT, PDT, LV, MLI,
SI, LIS, CI, PSI,
842 bool EverMadeChange =
false;
845 bool MadeChange =
false;
848 CEBCandidates.clear();
849 CEMergeCandidates.clear();
856 MachineDomTreeUpdater::UpdateStrategy::Lazy);
857 for (
const auto &Pair : ToSplit) {
858 auto NewSucc = Pair.first->SplitCriticalEdge(
859 Pair.second, {LIS, SI, LV, MLI},
nullptr, &MDTU);
860 if (NewSucc !=
nullptr) {
877 EverMadeChange =
true;
882 SchedModel.
init(STI);
883 bool HasHighPressure;
885 DenseMap<SinkItem, MachineInstr *> SunkInstrs;
887 enum CycleSinkStage { COPY, LOW_LATENCY, AGGRESSIVE, END };
888 for (
unsigned Stage = CycleSinkStage::COPY; Stage != CycleSinkStage::END;
889 ++Stage, SunkInstrs.
clear()) {
890 HasHighPressure =
false;
892 for (
auto *
Cycle : Cycles) {
898 SmallVector<MachineInstr *, 8> Candidates;
899 FindCycleSinkCandidates(
Cycle, Preheader, Candidates);
908 if (Stage == CycleSinkStage::COPY) {
911 <<
"CycleSink: Limit reached of instructions to "
922 if (Stage == CycleSinkStage::LOW_LATENCY &&
923 !
TII->hasLowDefLatency(SchedModel, *
I, 0))
926 if (!aggressivelySinkIntoCycle(
Cycle, *
I, SunkInstrs))
928 EverMadeChange =
true;
933 if (!HasHighPressure)
934 HasHighPressure = registerPressureExceedsLimit(*Preheader);
936 if (!HasHighPressure)
941 HasStoreCache.clear();
942 StoreInstrCache.clear();
945 for (
auto I : RegsToClearKillFlags)
946 MRI->clearKillFlags(
I);
947 RegsToClearKillFlags.clear();
950 return EverMadeChange;
953bool MachineSinking::ProcessBlock(MachineBasicBlock &
MBB) {
963 bool MadeChange =
false;
966 AllSuccsCache AllSuccessors;
971 bool ProcessedBegin, SawStore =
false;
973 MachineInstr &
MI = *
I;
981 if (
MI.isDebugOrPseudoInstr() ||
MI.isFakeUse()) {
982 if (
MI.isDebugValue())
987 if (EnableSinkAndFold && PerformSinkAndFold(
MI, &
MBB)) {
996 if (PerformTrivialForwardCoalescing(
MI, &
MBB)) {
1007 }
while (!ProcessedBegin);
1009 SeenDbgUsers.clear();
1010 SeenDbgVars.clear();
1012 CachedRegisterPressure.clear();
1016void MachineSinking::ProcessDbgInst(MachineInstr &
MI) {
1019 assert(
MI.isDebugValue() &&
"Expected DBG_VALUE for processing");
1021 DebugVariable Var(
MI.getDebugVariable(),
MI.getDebugExpression(),
1022 MI.getDebugLoc()->getInlinedAt());
1023 bool SeenBefore = SeenDbgVars.contains(Var);
1025 for (MachineOperand &MO :
MI.debug_operands()) {
1027 SeenDbgUsers[MO.
getReg()].push_back(SeenDbgUser(&
MI, SeenBefore));
1031 SeenDbgVars.insert(Var);
1034bool MachineSinking::isWorthBreakingCriticalEdge(
1035 MachineInstr &
MI, MachineBasicBlock *From, MachineBasicBlock *To,
1036 MachineBasicBlock *&DeferredFromBlock) {
1042 if (!CEBCandidates.insert(std::make_pair(From, To)).second)
1053 for (
const auto &MO :
MI.all_defs()) {
1058 auto Key = std::make_pair(SrcReg, To);
1059 auto Res = CEMergeCandidates.try_emplace(
Key, From);
1064 DeferredFromBlock = Res.first->second;
1077 for (
const MachineOperand &MO :
MI.all_uses()) {
1090 if (
MRI->hasOneNonDBGUse(
Reg)) {
1103 return TII->shouldBreakCriticalEdgeToSink(
MI);
1106bool MachineSinking::isLegalToBreakCriticalEdge(MachineInstr &
MI,
1107 MachineBasicBlock *FromBB,
1108 MachineBasicBlock *ToBB,
1109 bool BreakPHIEdge) {
1118 if (FromCycle == ToCycle && FromCycle &&
1161 if (!BreakPHIEdge) {
1163 if (Pred != FromBB && !DT->
dominates(ToBB, Pred))
1170bool MachineSinking::PostponeSplitCriticalEdge(MachineInstr &
MI,
1171 MachineBasicBlock *FromBB,
1172 MachineBasicBlock *ToBB,
1173 bool BreakPHIEdge) {
1174 bool Status =
false;
1175 MachineBasicBlock *DeferredFromBB =
nullptr;
1176 if (isWorthBreakingCriticalEdge(
MI, FromBB, ToBB, DeferredFromBB)) {
1179 if ((!DeferredFromBB ||
1180 ToSplit.count(std::make_pair(DeferredFromBB, ToBB)) ||
1181 isLegalToBreakCriticalEdge(
MI, DeferredFromBB, ToBB, BreakPHIEdge)) &&
1182 isLegalToBreakCriticalEdge(
MI, FromBB, ToBB, BreakPHIEdge)) {
1183 ToSplit.insert(std::make_pair(FromBB, ToBB));
1185 ToSplit.insert(std::make_pair(DeferredFromBB, ToBB));
1193std::vector<unsigned> &
1194MachineSinking::getBBRegisterPressure(
const MachineBasicBlock &
MBB,
1201 auto RP = CachedRegisterPressure.find(&
MBB);
1202 if (UseCache && RP != CachedRegisterPressure.end())
1205 RegionPressure Pressure;
1206 RegPressureTracker RPTracker(Pressure);
1214 MII != MIE; --MII) {
1215 const MachineInstr &
MI = *std::prev(MII);
1216 if (
MI.isDebugOrPseudoInstr())
1218 RegisterOperands RegOpers;
1220 RPTracker.recedeSkipDebugValues();
1221 assert(&*RPTracker.getPos() == &
MI &&
"RPTracker sync error!");
1222 RPTracker.recede(RegOpers);
1225 RPTracker.closeRegion();
1227 if (RP != CachedRegisterPressure.end()) {
1228 CachedRegisterPressure[&
MBB] = RPTracker.getPressure().MaxSetPressure;
1229 return CachedRegisterPressure[&
MBB];
1232 auto It = CachedRegisterPressure.insert(
1233 std::make_pair(&
MBB, RPTracker.getPressure().MaxSetPressure));
1234 return It.first->second;
1237bool MachineSinking::registerPressureSetExceedsLimit(
1238 unsigned NRegs,
const TargetRegisterClass *RC,
1239 const MachineBasicBlock &
MBB) {
1240 unsigned Weight = NRegs *
TRI->getRegClassWeight(RC).RegWeight;
1241 const int *PS =
TRI->getRegClassPressureSets(RC);
1242 std::vector<unsigned> BBRegisterPressure = getBBRegisterPressure(
MBB);
1243 for (; *PS != -1; PS++)
1244 if (Weight + BBRegisterPressure[*PS] >=
1251bool MachineSinking::registerPressureExceedsLimit(
1252 const MachineBasicBlock &
MBB) {
1253 std::vector<unsigned> BBRegisterPressure = getBBRegisterPressure(
MBB,
false);
1255 for (
unsigned PS = 0; PS < BBRegisterPressure.size(); ++PS) {
1256 if (BBRegisterPressure[PS] >=
1266bool MachineSinking::isProfitableToSinkTo(
Register Reg, MachineInstr &
MI,
1267 MachineBasicBlock *
MBB,
1268 MachineBasicBlock *SuccToSinkTo,
1269 AllSuccsCache &AllSuccessors) {
1270 assert(SuccToSinkTo &&
"Invalid SinkTo Candidate BB");
1272 if (
MBB == SuccToSinkTo)
1285 bool NonPHIUse =
false;
1286 for (MachineInstr &UseInst :
MRI->use_nodbg_instructions(
Reg)) {
1287 MachineBasicBlock *UseBlock = UseInst.
getParent();
1288 if (UseBlock == SuccToSinkTo && !UseInst.
isPHI())
1296 bool BreakPHIEdge =
false;
1298 if (MachineBasicBlock *MBB2 =
1299 FindSuccToSinkTo(
MI, SuccToSinkTo, BreakPHIEdge, AllSuccessors))
1300 return isProfitableToSinkTo(
Reg,
MI, SuccToSinkTo, MBB2, AllSuccessors);
1311 for (
const MachineOperand &MO :
MI.operands()) {
1322 !
TII->isIgnorableUse(MO))
1330 bool LocalUse =
false;
1331 if (!AllUsesDominatedByBlock(
Reg, SuccToSinkTo,
MBB, BreakPHIEdge,
1349 if (registerPressureSetExceedsLimit(1,
MRI->getRegClass(
Reg),
1351 LLVM_DEBUG(
dbgs() <<
"register pressure exceed limit, not profitable.");
1365SmallVector<MachineBasicBlock *, 4> &
1366MachineSinking::GetAllSortedSuccessors(MachineInstr &
MI, MachineBasicBlock *
MBB,
1367 AllSuccsCache &AllSuccessors)
const {
1369 auto Succs = AllSuccessors.find(
MBB);
1370 if (Succs != AllSuccessors.end())
1371 return Succs->second;
1373 SmallVector<MachineBasicBlock *, 4> AllSuccs(
MBB->
successors());
1384 if (DTChild->getIDom()->getBlock() ==
MI.getParent() &&
1387 AllSuccs.push_back(DTChild->getBlock());
1392 AllSuccs, [&](
const MachineBasicBlock *L,
const MachineBasicBlock *R) {
1396 (!LHSFreq && !RHSFreq))
1398 return LHSFreq < RHSFreq;
1401 auto it = AllSuccessors.insert(std::make_pair(
MBB, AllSuccs));
1403 return it.first->second;
1408MachineSinking::FindSuccToSinkTo(MachineInstr &
MI, MachineBasicBlock *
MBB,
1410 AllSuccsCache &AllSuccessors) {
1411 assert(
MBB &&
"Invalid MachineBasicBlock!");
1418 MachineBasicBlock *SuccToSinkTo =
nullptr;
1419 for (
const MachineOperand &MO :
MI.operands()) {
1432 if (!
MRI->isConstantPhysReg(
Reg) && !
TII->isIgnorableUse(MO))
1434 }
else if (!MO.
isDead()) {
1444 if (!
TII->isSafeToMoveRegClassDefs(
MRI->getRegClass(
Reg)))
1452 bool LocalUse =
false;
1453 if (!AllUsesDominatedByBlock(
Reg, SuccToSinkTo,
MBB, BreakPHIEdge,
1464 for (MachineBasicBlock *SuccBlock :
1465 GetAllSortedSuccessors(
MI,
MBB, AllSuccessors)) {
1466 bool LocalUse =
false;
1467 if (AllUsesDominatedByBlock(
Reg, SuccBlock,
MBB, BreakPHIEdge,
1469 SuccToSinkTo = SuccBlock;
1480 if (!isProfitableToSinkTo(
Reg,
MI,
MBB, SuccToSinkTo, AllSuccessors))
1487 if (
MBB == SuccToSinkTo)
1492 if (SuccToSinkTo && SuccToSinkTo->
isEHPad())
1502 if (SuccToSinkTo && !
TII->isSafeToSink(
MI, SuccToSinkTo, CI))
1505 return SuccToSinkTo;
1520 auto *
MBB =
MI.getParent();
1521 if (
MBB->pred_size() != 1)
1524 auto *PredMBB = *
MBB->pred_begin();
1525 auto *PredBB = PredMBB->getBasicBlock();
1531 !PredBB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit))
1536 bool OffsetIsScalable;
1537 if (!
TII->getMemOperandWithOffset(
MI, BaseOp,
Offset, OffsetIsScalable,
TRI))
1540 if (!BaseOp->
isReg())
1543 if (!(
MI.mayLoad() && !
MI.isPredicable()))
1546 MachineBranchPredicate MBP;
1547 if (
TII->analyzeBranchPredicate(*PredMBB, MBP,
false))
1550 return MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
1551 (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
1552 MBP.Predicate == MachineBranchPredicate::PRED_EQ) &&
1553 MBP.LHS.getReg() == BaseOp->
getReg();
1570 auto CopyOperands =
TII.isCopyInstr(SinkInst);
1573 SrcMO = CopyOperands->Source;
1574 DstMO = CopyOperands->Destination;
1577 bool PostRA =
MRI.getNumVirtRegs() == 0;
1585 bool arePhysRegs = !
Reg.isVirtual();
1586 if (arePhysRegs != PostRA)
1593 if (DbgMO.getSubReg() != SrcMO->
getSubReg() ||
1594 DbgMO.getSubReg() != DstMO->getSubReg())
1600 if (PostRA &&
Reg != DstMO->getReg())
1604 DbgMO.setReg(SrcMO->
getReg());
1610using MIRegs = std::pair<MachineInstr *, SmallVector<Register, 2>>;
1618 if (!SuccToSinkTo.
empty() && InsertPos != SuccToSinkTo.
end())
1620 InsertPos->getDebugLoc()));
1626 SuccToSinkTo.
splice(InsertPos, ParentBlock,
MI,
1633 for (
const auto &DbgValueToSink : DbgValuesToSink) {
1636 SuccToSinkTo.
insert(InsertPos, NewDbgMI);
1638 bool PropagatedAllSunkOps =
true;
1642 PropagatedAllSunkOps =
false;
1647 if (!PropagatedAllSunkOps)
1654bool MachineSinking::hasStoreBetween(MachineBasicBlock *From,
1655 MachineBasicBlock *To, MachineInstr &
MI) {
1661 auto BlockPair = std::make_pair(From, To);
1665 if (
auto It = HasStoreCache.find(BlockPair); It != HasStoreCache.end())
1668 if (
auto It = StoreInstrCache.find(BlockPair); It != StoreInstrCache.end())
1670 return I->mayAlias(AA, MI, false);
1673 bool SawStore =
false;
1674 bool HasAliasedStore =
false;
1675 DenseSet<MachineBasicBlock *> HandledBlocks;
1676 DenseSet<MachineBasicBlock *> HandledDomBlocks;
1683 if (BB == To || BB == From)
1687 if (HandledBlocks.
count(BB))
1690 HandledBlocks.
insert(BB);
1693 if (!HandledDomBlocks.
count(BB))
1694 HandledDomBlocks.
insert(BB);
1700 for (
auto *DomBB : HandledDomBlocks) {
1701 if (DomBB != BB && DT->
dominates(DomBB, BB))
1702 HasStoreCache[std::make_pair(DomBB, To)] =
true;
1703 else if (DomBB != BB && DT->
dominates(BB, DomBB))
1704 HasStoreCache[std::make_pair(From, DomBB)] =
true;
1706 HasStoreCache[BlockPair] =
true;
1710 for (MachineInstr &
I : *BB) {
1713 if (
I.isCall() ||
I.hasOrderedMemoryRef()) {
1714 for (
auto *DomBB : HandledDomBlocks) {
1715 if (DomBB != BB && DT->
dominates(DomBB, BB))
1716 HasStoreCache[std::make_pair(DomBB, To)] =
true;
1717 else if (DomBB != BB && DT->
dominates(BB, DomBB))
1718 HasStoreCache[std::make_pair(From, DomBB)] =
true;
1720 HasStoreCache[BlockPair] =
true;
1730 if (
I.mayAlias(AA,
MI,
false))
1731 HasAliasedStore =
true;
1732 StoreInstrCache[BlockPair].push_back(&
I);
1739 HasStoreCache[BlockPair] =
false;
1740 return HasAliasedStore;
1748bool MachineSinking::aggressivelySinkIntoCycle(
1750 DenseMap<SinkItem, MachineInstr *> &SunkInstrs) {
1752 if (
I.getNumDefs() > 1)
1755 LLVM_DEBUG(
dbgs() <<
"AggressiveCycleSink: Finding sink block for: " <<
I);
1759 MachineOperand &DefMO =
I.getOperand(0);
1760 for (MachineInstr &
MI :
MRI->use_instructions(DefMO.
getReg())) {
1764 for (std::pair<RegSubRegPair, MachineInstr *> Entry :
Uses) {
1765 MachineInstr *
MI =
Entry.second;
1769 dbgs() <<
"AggressiveCycleSink: Not attempting to sink for PHI.\n");
1773 if (
MI->isPosition() ||
TII->isBasicBlockPrologue(*
MI)) {
1774 LLVM_DEBUG(
dbgs() <<
"AggressiveCycleSink: Use is BasicBlock prologue, "
1780 dbgs() <<
"AggressiveCycleSink: Use not in cycle, can't sink.\n");
1784 MachineBasicBlock *SinkBlock =
MI->getParent();
1785 MachineInstr *NewMI =
nullptr;
1786 SinkItem MapEntry(&
I, SinkBlock);
1788 auto SI = SunkInstrs.
find(MapEntry);
1792 if (SI != SunkInstrs.
end()) {
1793 LLVM_DEBUG(
dbgs() <<
"AggressiveCycleSink: Already sunk to block: "
1800 LLVM_DEBUG(
dbgs() <<
"AggressiveCycleSink: Sinking instruction to block: "
1803 NewMI =
I.
getMF()->CloneMachineInstr(&
I);
1805 const TargetRegisterClass *TRC =
MRI->getRegClass(DefMO.
getReg());
1806 Register DestReg =
MRI->createVirtualRegister(TRC);
1812 SunkInstrs.
insert({MapEntry, NewMI});
1816 for (MachineOperand &MO : NewMI->
all_uses()) {
1818 RegsToClearKillFlags.insert(MO.
getReg());
1833 I.eraseFromParent();
1839bool MachineSinking::SinkInstruction(MachineInstr &
MI,
bool &SawStore,
1840 AllSuccsCache &AllSuccessors) {
1846 if (!
MI.isSafeToMove(SawStore))
1851 if (
MI.isConvergent())
1867 bool BreakPHIEdge =
false;
1868 MachineBasicBlock *ParentBlock =
MI.getParent();
1869 MachineBasicBlock *SuccToSinkTo =
1870 FindSuccToSinkTo(
MI, ParentBlock, BreakPHIEdge, AllSuccessors);
1879 for (
const MachineOperand &MO :
MI.all_defs()) {
1887 LLVM_DEBUG(
dbgs() <<
"Sink instr " <<
MI <<
"\tinto block " << *SuccToSinkTo);
1894 bool TryBreak =
false;
1896 MI.mayLoad() ? hasStoreBetween(ParentBlock, SuccToSinkTo,
MI) :
true;
1897 if (!
MI.isSafeToMove(Store)) {
1898 LLVM_DEBUG(
dbgs() <<
" *** NOTE: Won't sink load along critical edge.\n");
1904 if (!TryBreak && !DT->
dominates(ParentBlock, SuccToSinkTo)) {
1910 if (!TryBreak && CI->
getCycle(SuccToSinkTo) &&
1924 bool Status = PostponeSplitCriticalEdge(
MI, ParentBlock, SuccToSinkTo,
1928 "break critical edge\n");
1939 PostponeSplitCriticalEdge(
MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
1942 "break critical edge\n");
1951 LLVM_DEBUG(
dbgs() <<
" *** Not sinking: prologue interference\n");
1957 for (
auto &MO :
MI.all_defs()) {
1960 auto It = SeenDbgUsers.find(MO.
getReg());
1961 if (It == SeenDbgUsers.end())
1965 auto &
Users = It->second;
1966 for (
auto &User :
Users) {
1967 MachineInstr *DbgMI =
User.getPointer();
1968 if (
User.getInt()) {
1983 if (
MI.getMF()->getFunction().getSubprogram() &&
MI.isCopy())
1984 SalvageUnsunkDebugUsersOfCopy(
MI, SuccToSinkTo);
1993 for (MachineOperand &MO :
MI.all_uses())
1994 RegsToClearKillFlags.insert(MO.
getReg());
1999void MachineSinking::SalvageUnsunkDebugUsersOfCopy(
2000 MachineInstr &
MI, MachineBasicBlock *TargetBlock) {
2007 SmallVector<MachineInstr *, 4> DbgDefUsers;
2009 const MachineRegisterInfo &
MRI =
MI.getMF()->getRegInfo();
2010 for (
auto &MO :
MI.all_defs()) {
2014 for (
auto &User :
MRI.use_instructions(MO.
getReg())) {
2019 if (
User.getParent() ==
MI.getParent())
2023 "DBG_VALUE user of vreg, but has no operand for it?");
2030 for (
auto *User : DbgDefUsers) {
2031 for (
auto &
Reg : DbgUseRegs) {
2032 for (
auto &DbgOp :
User->getDebugOperandsForReg(
Reg)) {
2033 DbgOp.setReg(
MI.getOperand(1).getReg());
2034 DbgOp.setSubReg(
MI.getOperand(1).getSubReg());
2076class PostRAMachineSinkingImpl {
2078 LiveRegUnits ModifiedRegUnits, UsedRegUnits;
2084 DenseMap<MCRegUnit, SmallVector<MIRegs, 2>> SeenDbgInstrs;
2088 bool tryToSinkCopy(MachineBasicBlock &BB, MachineFunction &MF,
2089 const TargetRegisterInfo *
TRI,
const TargetInstrInfo *
TII);
2092 bool run(MachineFunction &MF);
2095class PostRAMachineSinkingLegacy :
public MachineFunctionPass {
2097 bool runOnMachineFunction(MachineFunction &MF)
override;
2100 PostRAMachineSinkingLegacy() : MachineFunctionPass(
ID) {}
2101 StringRef getPassName()
const override {
return "PostRA Machine Sink"; }
2103 void getAnalysisUsage(AnalysisUsage &AU)
const override {
2108 MachineFunctionProperties getRequiredProperties()
const override {
2109 return MachineFunctionProperties().setNoVRegs();
2115char PostRAMachineSinkingLegacy::ID = 0;
2119 "PostRA Machine Sink",
false,
false)
2128static MachineBasicBlock *
2134 for (
auto *
SI : SinkableBBs) {
2135 if (aliasWithRegsInLiveIn(*
SI,
Reg,
TRI)) {
2155static MachineBasicBlock *
2161 for (
auto DefReg : DefedRegsInCopy) {
2164 if (!BB || (SingleBB && SingleBB != BB))
2175 for (
auto U : UsedOpsInCopy) {
2181 if (UI.killsRegister(SrcReg,
TRI)) {
2182 UI.clearRegisterKills(SrcReg,
TRI);
2194 for (
Register DefReg : DefedRegsInCopy)
2197 for (
auto U : UsedOpsInCopy)
2207 bool HasRegDependency =
false;
2208 for (
unsigned i = 0, e =
MI->getNumOperands(); i != e; ++i) {
2217 HasRegDependency =
true;
2226 }
else if (MO.
isUse()) {
2228 HasRegDependency =
true;
2234 return HasRegDependency;
2237bool PostRAMachineSinkingImpl::tryToSinkCopy(MachineBasicBlock &CurBB,
2238 MachineFunction &MF,
2239 const TargetRegisterInfo *
TRI,
2240 const TargetInstrInfo *
TII) {
2241 SmallPtrSet<MachineBasicBlock *, 2> SinkableBBs;
2245 for (MachineBasicBlock *SI : CurBB.
successors())
2246 if (!
SI->livein_empty() &&
SI->pred_size() == 1)
2249 if (SinkableBBs.
empty())
2256 ModifiedRegUnits.
clear();
2257 UsedRegUnits.
clear();
2258 SeenDbgInstrs.clear();
2262 SmallVector<unsigned, 2> UsedOpsInCopy;
2268 if (
MI.isDebugValue() && !
MI.isDebugRef()) {
2269 SmallDenseMap<MCRegUnit, SmallVector<Register, 2>, 4> MIUnits;
2270 bool IsValid =
true;
2271 for (MachineOperand &MO :
MI.debug_operands()) {
2276 ModifiedRegUnits, UsedRegUnits)) {
2282 for (MCRegUnit Unit :
TRI->regunits(MO.
getReg()))
2287 for (
auto &RegOps : MIUnits)
2288 SeenDbgInstrs[RegOps.first].emplace_back(&
MI,
2289 std::move(RegOps.second));
2295 if (!
TII->shouldPostRASink(
MI))
2298 if (
MI.isDebugOrPseudoInstr())
2305 if (!
MI.isCopy() || !
MI.getOperand(0).isRenamable()) {
2313 ModifiedRegUnits, UsedRegUnits)) {
2319 "Unexpect SrcReg or DefReg");
2320 MachineBasicBlock *SuccBB =
2330 "Unexpected predecessor");
2335 MapVector<MachineInstr *, MIRegs::second_type> DbgValsToSinkMap;
2336 for (
auto &MO :
MI.all_defs()) {
2337 for (MCRegUnit Unit :
TRI->regunits(MO.
getReg())) {
2338 for (
const auto &
MIRegs : SeenDbgInstrs.lookup(Unit)) {
2339 auto &Regs = DbgValsToSinkMap[
MIRegs.first];
2344 auto DbgValsToSink = DbgValsToSinkMap.
takeVector();
2353 LLVM_DEBUG(
dbgs() <<
" *** Not sinking: prologue interference\n");
2364 ++NumPostRACopySink;
2369bool PostRAMachineSinkingImpl::run(MachineFunction &MF) {
2382bool PostRAMachineSinkingLegacy::runOnMachineFunction(MachineFunction &MF) {
2386 return PostRAMachineSinkingImpl().run(MF);
2394 if (!PostRAMachineSinkingImpl().
run(MF))
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file defines the DenseSet and SmallDenseSet classes.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static const HTTPClientCleanup Cleanup
static Register UseReg(const MachineOperand &MO)
const HexagonInstrInfo * TII
iv Induction Variable Users
static bool mayLoadFromGOTOrConstantPool(MachineInstr &MI)
Return true if this machine instruction loads from global offset table or constant pool.
static cl::opt< unsigned > SinkLoadInstsPerBlockThreshold("machine-sink-load-instrs-threshold", cl::desc("Do not try to find alias store for a load if there is a in-path " "block whose instruction number is higher than this threshold."), cl::init(2000), cl::Hidden)
static cl::opt< unsigned > SinkIntoCycleLimit("machine-sink-cycle-limit", cl::desc("The maximum number of instructions considered for cycle sinking."), cl::init(50), cl::Hidden)
TargetInstrInfo::RegSubRegPair RegSubRegPair
static void clearKillFlags(MachineInstr *MI, MachineBasicBlock &CurBB, const SmallVectorImpl< unsigned > &UsedOpsInCopy, const LiveRegUnits &UsedRegUnits, const TargetRegisterInfo *TRI)
static void performSink(MachineInstr &MI, MachineBasicBlock &SuccToSinkTo, MachineBasicBlock::iterator InsertPos, ArrayRef< MIRegs > DbgValuesToSink)
Sink an instruction and its associated debug instructions.
static cl::opt< bool > SplitEdges("machine-sink-split", cl::desc("Split critical edges during machine sinking"), cl::init(true), cl::Hidden)
static bool SinkingPreventsImplicitNullCheck(MachineInstr &MI, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI)
Return true if MI is likely to be usable as a memory operation by the implicit null check optimizatio...
static cl::opt< bool > SinkInstsIntoCycle("sink-insts-to-avoid-spills", cl::desc("Sink instructions into cycles to avoid " "register spills"), cl::init(false), cl::Hidden)
static cl::opt< unsigned > SinkLoadBlocksThreshold("machine-sink-load-blocks-threshold", cl::desc("Do not try to find alias store for a load if the block number in " "the straight line is higher than this threshold."), cl::init(20), cl::Hidden)
static void updateLiveIn(MachineInstr *MI, MachineBasicBlock *SuccBB, const SmallVectorImpl< unsigned > &UsedOpsInCopy, const SmallVectorImpl< Register > &DefedRegsInCopy)
static bool hasRegisterDependency(MachineInstr *MI, SmallVectorImpl< unsigned > &UsedOpsInCopy, SmallVectorImpl< Register > &DefedRegsInCopy, LiveRegUnits &ModifiedRegUnits, LiveRegUnits &UsedRegUnits)
Register const TargetRegisterInfo * TRI
std::pair< MachineInstr *, SmallVector< Register, 2 > > MIRegs
Machine code static false bool blockPrologueInterferes(const MachineBasicBlock *BB, MachineBasicBlock::const_iterator End, const MachineInstr &MI, const TargetRegisterInfo *TRI, const TargetInstrInfo *TII, const MachineRegisterInfo *MRI)
Return true if a target defined block prologue instruction interferes with a sink candidate.
static cl::opt< unsigned > SplitEdgeProbabilityThreshold("machine-sink-split-probability-threshold", cl::desc("Percentage threshold for splitting single-instruction critical edge. " "If the branch threshold is higher than this threshold, we allow " "speculative execution of up to 1 instruction to avoid branching to " "splitted critical edge"), cl::init(40), cl::Hidden)
static bool attemptDebugCopyProp(MachineInstr &SinkInst, MachineInstr &DbgMI, Register Reg)
If the sunk instruction is a copy, try to forward the copy instead of leaving an 'undef' DBG_VALUE in...
static cl::opt< bool > UseBlockFreqInfo("machine-sink-bfi", cl::desc("Use block frequency info to find successors to sink"), cl::init(true), cl::Hidden)
static MachineBasicBlock * getSingleLiveInSuccBB(MachineBasicBlock &CurBB, const SmallPtrSetImpl< MachineBasicBlock * > &SinkableBBs, Register Reg, const TargetRegisterInfo *TRI)
This file implements a map that provides insertion order iteration.
Promote Memory to Register
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
This file defines the PointerIntPair class.
Remove Loads Into Fake Uses
This file implements a set that has insertion order iteration characteristics.
static bool ProcessBlock(BasicBlock &BB, DominatorTree &DT, LoopInfo &LI, AAResults &AA)
static bool SinkInstruction(Instruction *Inst, SmallPtrSetImpl< Instruction * > &Stores, DominatorTree &DT, LoopInfo &LI, AAResults &AA)
SinkInstruction - Determine whether it is safe to sink the specified machine instruction out of its c...
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Target-Independent Code Generator Pass Configuration Options pass.
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass if and only if it does not:
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
uint64_t getFrequency() const
Returns the frequency as a fixpoint number scaled by the entry frequency.
Represents analyses that only rely on functions' control flow.
static LLVM_ABI DebugLoc getMergedLocation(DebugLoc LocA, DebugLoc LocB)
When two instructions are combined into a single instruction we also need to combine the original loc...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Implements a dense probed hash-table based set.
bool dominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
dominates - Returns true iff A dominates B.
bool isReachableFromEntry(const NodeT *A) const
isReachableFromEntry - Return true if A is dominated by the entry block of the function containing it...
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
iterator_range< const_toplevel_iterator > toplevel_cycles() const
void splitCriticalEdge(BlockT *Pred, BlockT *Succ, BlockT *New)
unsigned getCycleDepth(const BlockT *Block) const
get the depth for the cycle which containing a given block.
CycleT * getCycle(const BlockT *Block) const
Find the innermost cycle containing a given block.
BlockT * getHeader() const
bool isReducible() const
Whether the cycle is a natural loop.
BlockT * getCyclePreheader() const
Return the preheader block for this cycle.
bool contains(const BlockT *Block) const
Return whether Block is contained in the cycle.
Module * getParent()
Get the module that this global value is contained inside of...
bool isAsCheapAsAMove(const MachineInstr &MI) const override
bool shouldSink(const MachineInstr &MI) const override
A set of register units used to track register liveness.
static void accumulateUsedDefed(const MachineInstr &MI, LiveRegUnits &ModifiedRegUnits, LiveRegUnits &UsedRegUnits, const TargetRegisterInfo *TRI)
For a machine instruction MI, adds all register units used in UsedRegUnits and defined or clobbered i...
bool available(MCRegister Reg) const
Returns true if no part of physical register Reg is live.
void init(const TargetRegisterInfo &TRI)
Initialize and clear the set.
LLVM_ABI void addLiveIns(const MachineBasicBlock &MBB)
Adds registers living into block MBB.
void clear()
Clears the set.
An RAII based helper class to modify MachineFunctionProperties when running pass.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
unsigned pred_size() const
bool isEHPad() const
Returns true if the block is a landing pad.
instr_iterator instr_begin()
MachineInstrBundleIterator< const MachineInstr > const_iterator
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
LLVM_ABI iterator SkipPHIsAndLabels(iterator I)
Return the first instruction in MBB after I that is not a PHI or a label.
unsigned succ_size() const
LLVM_ABI void sortUniqueLiveIns()
Sorts and uniques the LiveIns vector.
pred_iterator pred_begin()
LLVM_ABI void removeLiveInOverlappedWith(MCRegister Reg)
Remove the specified register from any overlapped live in.
instr_iterator instr_end()
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
LLVM_ABI bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
iterator_range< pred_iterator > predecessors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
LLVM_ABI bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate machine basic b...
LLVM_ABI BlockFrequency getBlockFreq(const MachineBasicBlock *MBB) const
getBlockFreq - Return block frequency.
LLVM_ABI void onEdgeSplit(const MachineBasicBlock &NewPredecessor, const MachineBasicBlock &NewSuccessor, const MachineBranchProbabilityInfo &MBPI)
incrementally calculate block frequencies when we split edges, to avoid full CFG traversal.
BranchProbability getEdgeProbability(const MachineBasicBlock *Src, const MachineBasicBlock *Dst) const
Legacy analysis pass which computes a MachineCycleInfo.
Analysis pass which computes a MachineDominatorTree.
Analysis pass which computes a MachineDominatorTree.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
bool dominates(const MachineInstr *A, const MachineInstr *B) const
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Representation of each machine instruction.
bool hasDebugOperandForReg(Register Reg) const
Returns whether this debug value has at least one debug operand with the register Reg.
void setDebugValueUndef()
Sets all register debug operands in this debug value instruction to be undef.
LLVM_ABI iterator_range< filter_iterator< const MachineOperand *, std::function< bool(const MachineOperand &Op)> > > getDebugOperandsForReg(Register Reg) const
Returns a range of all of the operands that correspond to a debug use of Reg.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
bool isCopyLike() const
Return true if the instruction behaves like a copy.
bool isDebugInstr() const
LLVM_ABI void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
filtered_mop_range all_uses()
Returns an iterator range over all operands that are (explicit or implicit) register uses.
const MachineOperand & getOperand(unsigned i) const
void setDebugLoc(DebugLoc DL)
Replace current source information with new such.
Analysis pass that exposes the MachineLoopInfo for a machine function.
A description of a memory reference used in the backend.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
void setIsKill(bool Val=true)
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
Register getReg() const
getReg - Returns the register number.
MachinePostDominatorTree - an analysis pass wrapper for DominatorTree used to compute the post-domina...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &)
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
VectorType takeVector()
Clear the MapVector and return the underlying vector.
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
PointerIntPair - This class implements a pair of a pointer and small integer.
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
Special value supplied for machine level alias analysis.
unsigned getRegPressureSetLimit(unsigned Idx) const
Get the register unit limit for the given pressure set index.
LLVM_ABI void runOnMachineFunction(const MachineFunction &MF, bool Rev=false)
runOnFunction - Prepare to answer questions about MF.
LLVM_ABI void collect(const MachineInstr &MI, const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, bool TrackLaneMasks, bool IgnoreDead)
Analyze the given instruction MI and fill in the Uses, Defs and DeadDefs list based on the MachineOpe...
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
A vector that has set insertion semantics.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
TargetInstrInfo - Interface to description of machine instruction set.
Target-Independent Code Generator Pass Configuration Options.
bool getEnableSinkAndFold() const
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
LLVM_ABI void init(const TargetSubtargetInfo *TSInfo, bool EnableSModel=true, bool EnableSItins=true)
Initialize the machine model for instruction scheduling.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
std::pair< iterator, bool > insert(const ValueT &V)
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
An efficient, type-erasing, non-owning reference to a callable.
This class implements an extremely fast bulk output stream that can only output to a stream.
Abstract Attribute helper functions.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
initializer< Ty > init(const Ty &Val)
PointerTypeMap run(const Module &M)
Compute the PointerTypeMap for the module M.
@ User
could "use" a pointer
This is an optimization pass for GlobalISel generic memory operations.
void stable_sort(R &&Range)
LLVM_ABI void initializeMachineSinkingLegacyPass(PassRegistry &)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
OuterAnalysisManagerProxy< ModuleAnalysisManager, MachineFunction > ModuleAnalysisManagerMachineFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
LLVM_ABI bool isCycleInvariant(const MachineCycle *Cycle, MachineInstr &I)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
LLVM_ABI PreservedAnalyses getMachineFunctionPassPreservedAnalyses()
Returns the minimum set of Analyses that all machine function passes must preserve.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
auto reverse(ContainerTy &&C)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI char & PostRAMachineSinkingID
This pass perform post-ra machine sink for COPY instructions.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
DomTreeNodeBase< MachineBasicBlock > MachineDomTreeNode
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI char & MachineSinkingLegacyID
MachineSinking - This pass performs sinking on machine instructions.
iterator_range< df_iterator< T > > depth_first(const T &G)
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
GenericCycleInfo< MachineSSAContext > MachineCycleInfo
MachineCycleInfo::CycleT MachineCycle
LLVM_ABI Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
Represents a predicate at the MachineFunction level.
A pair composed of a register and a sub-register index.