79#define DEBUG_TYPE "regalloc"
81STATISTIC(NumGlobalSplits,
"Number of split global live ranges");
82STATISTIC(NumLocalSplits,
"Number of split local live ranges");
83STATISTIC(NumEvicted,
"Number of interferences evicted");
87 cl::desc(
"Spill mode for splitting live ranges"),
95 cl::desc(
"Last chance recoloring max depth"),
100 cl::desc(
"Last chance recoloring maximum number of considered"
101 " interference at a time"),
106 cl::desc(
"Exhaustive Search for registers bypassing the depth "
107 "and interference cutoffs of last chance recoloring"),
114 cl::desc(
"Cost for first time use of callee-saved register."),
118 "regalloc-csr-cost-scale",
119 cl::desc(
"Scale for the callee-saved register cost, in percentage."),
123 "grow-region-complexity-budget",
124 cl::desc(
"growRegion() does not scale with the number of BB edges, so "
125 "limit its budget and bail out once we reach the limit."),
129 "greedy-regclass-priority-trumps-globalness",
130 cl::desc(
"Change the greedy register allocator's live range priority "
131 "calculation to make the AllocationPriority of the register class "
132 "more important then whether the range is global"),
136 "greedy-reverse-local-assignment",
137 cl::desc(
"Reverse allocation order of local live ranges, such that "
138 "shorter local live ranges will tend to be allocated first"),
142 "split-threshold-for-reg-with-hint",
143 cl::desc(
"The threshold for splitting a virtual register with a hint, in "
// Human-readable pass name shown by the legacy pass manager (e.g. in
// -debug-pass / timing output). Identifies this pass as the greedy
// register allocator.
// NOTE(review): the leading "159" below looks like extraction residue
// (a fused original line number), not real source — confirm against the
// upstream file before building.
159 StringRef getPassName()
const override {
return "Greedy Register Allocator"; }
// Declares the analyses this legacy pass requires/preserves; the
// out-of-line definition appears later in this file.
// NOTE(review): leading "162" appears to be extraction residue — confirm.
162 void getAnalysisUsage(AnalysisUsage &AU)
const override;
// Legacy-pass entry point: performs greedy register allocation on the
// given MachineFunction. Defined out of line.
// NOTE(review): leading "164" appears to be extraction residue — confirm.
164 bool runOnMachineFunction(MachineFunction &mf)
override;
166 MachineFunctionProperties getRequiredProperties()
const override {
167 return MachineFunctionProperties().setNoPHIs();
170 MachineFunctionProperties getClearedProperties()
const override {
171 return MachineFunctionProperties().setIsSSA();
210 MBFI = Analyses.
MBFI;
212 Loops = Analyses.
Loops;
225 StringRef FilterName = Opts.FilterName.
empty() ?
"all" : Opts.FilterName;
226 OS <<
"greedy<" << FilterName <<
'>';
253 RAGreedy Impl(Analyses, Opts.Filter);
// Pass identification: the legacy pass manager keys on the *address* of
// this static, so its value (0) is irrelevant.
// NOTE(review): "295" fused to "char" looks like extraction residue — the
// real declaration is presumably `char RAGreedyLegacy::ID = 0;`; confirm.
295char RAGreedyLegacy::ID = 0;
319const char *
const RAGreedy::StageName[] = {
334 return new RAGreedyLegacy();
338 return new RAGreedyLegacy(Ftor);
341void RAGreedyLegacy::getAnalysisUsage(
AnalysisUsage &AU)
const {
373bool RAGreedy::LRE_CanEraseVirtReg(
Register VirtReg) {
374 LiveInterval &LI =
LIS->getInterval(VirtReg);
375 if (
VRM->hasPhys(VirtReg)) {
388void RAGreedy::LRE_WillShrinkVirtReg(
Register VirtReg) {
389 if (!
VRM->hasPhys(VirtReg))
393 LiveInterval &LI =
LIS->getInterval(VirtReg);
399 ExtraInfo->LRE_DidCloneVirtReg(New, Old);
404 if (!Info.inBounds(Old))
413 Info[New] = Info[Old];
417 SpillerInstance.reset();
423void RAGreedy::enqueue(PQueue &CurQueue,
const LiveInterval *LI) {
427 assert(Reg.isVirtual() &&
"Can only enqueue virtual registers");
429 auto Stage = ExtraInfo->getOrInitStage(Reg);
432 ExtraInfo->setStage(Reg, Stage);
435 unsigned Ret = PriorityAdvisor->getPriority(*LI);
439 CurQueue.push(std::make_pair(Ret, ~
Reg.id()));
442unsigned DefaultPriorityAdvisor::getPriority(
const LiveInterval &LI)
const {
455 const TargetRegisterClass &RC = *
MRI->getRegClass(
Reg);
457 (!ReverseLocalAssignment &&
460 unsigned GlobalBit = 0;
463 LIS->intervalIsInOneMBB(LI)) {
467 if (!ReverseLocalAssignment)
473 Prio = Indexes->getZeroIndex().getApproxInstrDistance(LI.
endIndex());
495 Prio = std::min(Prio, (
unsigned)
maxUIntN(24));
498 if (RegClassPriorityTrumpsGlobalness)
507 if (
VRM->hasKnownPreference(
Reg))
514unsigned DummyPriorityAdvisor::getPriority(
const LiveInterval &LI)
const {
523 if (CurQueue.empty())
540 for (
auto I = Order.
begin(),
E = Order.
end();
I !=
E && !PhysReg; ++
I) {
542 if (!
Matrix->checkInterference(VirtReg, *
I)) {
558 MCRegister PhysHint =
Hint.asMCReg();
561 if (EvictAdvisor->canEvictHintInterference(VirtReg, PhysHint,
563 evictInterference(VirtReg, PhysHint, NewVRegs);
568 if (trySplitAroundHintReg(PhysHint, VirtReg, NewVRegs, Order))
573 SetOfBrokenHints.insert(&VirtReg);
577 uint8_t
Cost = RegCosts[PhysReg.
id()];
584 << (
unsigned)
Cost <<
'\n');
585 MCRegister CheapReg = tryEvict(VirtReg, Order, NewVRegs,
Cost, FixedRegisters);
586 return CheapReg ? CheapReg : PhysReg;
595 auto HasRegUnitInterference = [&](MCRegUnit Unit) {
598 VirtReg,
Matrix->getLiveUnions()[
static_cast<unsigned>(Unit)]);
607 if (
none_of(
TRI->regunits(Reg), HasRegUnitInterference)) {
620void RAGreedy::evictInterference(
const LiveInterval &VirtReg,
626 unsigned Cascade = ExtraInfo->getOrAssignNewCascade(VirtReg.
reg());
629 <<
" interference: Cascade " << Cascade <<
'\n');
633 for (MCRegUnit Unit :
TRI->regunits(PhysReg)) {
650 assert((ExtraInfo->getCascade(Intf->reg()) < Cascade ||
652 "Cannot decrease cascade number, illegal eviction");
653 ExtraInfo->setCascade(Intf->reg(), Cascade);
666 return !
Matrix->isPhysRegUsed(PhysReg);
669std::optional<unsigned>
672 unsigned CostPerUseLimit)
const {
673 unsigned OrderLimit = Order.
getOrder().size();
675 if (CostPerUseLimit <
uint8_t(~0u)) {
679 if (MinCost >= CostPerUseLimit) {
681 << MinCost <<
", no cheaper registers to be found.\n");
698 if (
RegCosts[PhysReg.
id()] >= CostPerUseLimit)
724 MCRegister BestPhys = EvictAdvisor->tryFindEvictionCandidate(
725 VirtReg, Order, CostPerUseLimit, FixedRegisters);
727 evictInterference(VirtReg, BestPhys, NewVRegs);
745 SplitConstraints.resize(UseBlocks.
size());
747 for (
unsigned I = 0;
I != UseBlocks.
size(); ++
I) {
768 if (Intf.
first() <= Indexes->getMBBStartIdx(BC.
Number)) {
782 SA->getFirstSplitPoint(BC.
Number)))
788 if (Intf.
last() >= SA->getLastSplitPoint(BC.
Number)) {
801 StaticCost += SpillPlacer->getBlockFrequency(BC.
Number);
807 SpillPlacer->addConstraints(SplitConstraints);
808 return SpillPlacer->scanActiveBundles();
813bool RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
814 ArrayRef<unsigned> Blocks) {
815 const unsigned GroupSize = 8;
816 SpillPlacement::BlockConstraint BCS[GroupSize];
817 unsigned TBS[GroupSize];
818 unsigned B = 0,
T = 0;
820 for (
unsigned Number : Blocks) {
824 assert(
T < GroupSize &&
"Array overflow");
826 if (++
T == GroupSize) {
833 assert(
B < GroupSize &&
"Array overflow");
837 MachineBasicBlock *
MBB = MF->getBlockNumbered(
Number);
839 if (FirstNonDebugInstr !=
MBB->
end() &&
841 SA->getFirstSplitPoint(
Number)))
844 if (Intf.
first() <= Indexes->getMBBStartIdx(
Number))
850 if (Intf.
last() >= SA->getLastSplitPoint(
Number))
855 if (++
B == GroupSize) {
856 SpillPlacer->addConstraints(
ArrayRef(BCS,
B));
861 SpillPlacer->addConstraints(
ArrayRef(BCS,
B));
866bool RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
868 BitVector Todo = SA->getThroughBlocks();
869 SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
870 unsigned AddedTo = 0;
872 unsigned Visited = 0;
877 ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
879 for (
unsigned Bundle : NewBundles) {
881 ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
883 if (Blocks.
size() >= Budget)
885 Budget -= Blocks.
size();
886 for (
unsigned Block : Blocks) {
898 if (ActiveBlocks.
size() == AddedTo)
903 auto NewBlocks =
ArrayRef(ActiveBlocks).slice(AddedTo);
905 if (!addThroughConstraints(Cand.Intf, NewBlocks))
913 bool PrefSpill =
true;
914 if (SA->looksLikeLoopIV() && NewBlocks.size() >= 2) {
919 MachineLoop *
L = Loops->getLoopFor(MF->getBlockNumbered(NewBlocks[0]));
920 if (L &&
L->getHeader()->getNumber() == (
int)NewBlocks[0] &&
921 all_of(NewBlocks.drop_front(), [&](
unsigned Block) {
922 return L == Loops->getLoopFor(MF->getBlockNumbered(Block));
927 SpillPlacer->addPrefSpill(NewBlocks,
true);
929 AddedTo = ActiveBlocks.
size();
932 SpillPlacer->iterate();
945bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
947 if (!SA->getNumThroughBlocks())
957 SpillPlacer->prepare(Cand.LiveBundles);
961 if (!addSplitConstraints(Cand.Intf,
Cost)) {
966 if (!growRegion(Cand)) {
971 SpillPlacer->finish();
973 if (!Cand.LiveBundles.any()) {
979 for (
int I : Cand.LiveBundles.set_bits())
980 dbgs() <<
" EB#" <<
I;
988BlockFrequency RAGreedy::calcBlockSplitCost() {
989 BlockFrequency
Cost = BlockFrequency(0);
991 for (
const SplitAnalysis::BlockInfo &BI : UseBlocks) {
994 Cost += SpillPlacer->getBlockFrequency(
Number);
998 Cost += SpillPlacer->getBlockFrequency(
Number);
1007BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand,
1008 const AllocationOrder &Order) {
1009 BlockFrequency GlobalCost = BlockFrequency(0);
1010 const BitVector &LiveBundles = Cand.LiveBundles;
1012 for (
unsigned I = 0;
I != UseBlocks.
size(); ++
I) {
1013 const SplitAnalysis::BlockInfo &BI = UseBlocks[
I];
1014 SpillPlacement::BlockConstraint &BC = SplitConstraints[
I];
1015 bool RegIn = LiveBundles[Bundles->getBundle(BC.
Number,
false)];
1016 bool RegOut = LiveBundles[Bundles->getBundle(BC.
Number,
true)];
1019 Cand.Intf.moveToBlock(BC.
Number);
1026 GlobalCost += SpillPlacer->getBlockFrequency(BC.
Number);
1029 for (
unsigned Number : Cand.ActiveBlocks) {
1030 bool RegIn = LiveBundles[Bundles->getBundle(
Number,
false)];
1031 bool RegOut = LiveBundles[Bundles->getBundle(
Number,
true)];
1032 if (!RegIn && !RegOut)
1034 if (RegIn && RegOut) {
1036 Cand.Intf.moveToBlock(
Number);
1037 if (Cand.Intf.hasInterference()) {
1038 GlobalCost += SpillPlacer->getBlockFrequency(
Number);
1039 GlobalCost += SpillPlacer->getBlockFrequency(
Number);
1044 GlobalCost += SpillPlacer->getBlockFrequency(
Number);
1061void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
1062 ArrayRef<unsigned> UsedCands) {
1065 const unsigned NumGlobalIntvs = LREdit.
size();
1068 assert(NumGlobalIntvs &&
"No global intervals configured");
1078 for (
const SplitAnalysis::BlockInfo &BI : UseBlocks) {
1080 unsigned IntvIn = 0, IntvOut = 0;
1081 SlotIndex IntfIn, IntfOut;
1083 unsigned CandIn = BundleCand[Bundles->getBundle(
Number,
false)];
1084 if (CandIn != NoCand) {
1085 GlobalSplitCandidate &Cand = GlobalCand[CandIn];
1086 IntvIn = Cand.IntvIdx;
1087 Cand.Intf.moveToBlock(
Number);
1088 IntfIn = Cand.Intf.first();
1092 unsigned CandOut = BundleCand[Bundles->getBundle(
Number,
true)];
1093 if (CandOut != NoCand) {
1094 GlobalSplitCandidate &Cand = GlobalCand[CandOut];
1095 IntvOut = Cand.IntvIdx;
1096 Cand.Intf.moveToBlock(
Number);
1097 IntfOut = Cand.Intf.last();
1102 if (!IntvIn && !IntvOut) {
1104 if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
1105 SE->splitSingleBlock(BI);
1109 if (IntvIn && IntvOut)
1110 SE->splitLiveThroughBlock(
Number, IntvIn, IntfIn, IntvOut, IntfOut);
1112 SE->splitRegInBlock(BI, IntvIn, IntfIn);
1114 SE->splitRegOutBlock(BI, IntvOut, IntfOut);
1120 BitVector Todo = SA->getThroughBlocks();
1121 for (
unsigned UsedCand : UsedCands) {
1122 ArrayRef<unsigned> Blocks = GlobalCand[UsedCand].ActiveBlocks;
1123 for (
unsigned Number : Blocks) {
1128 unsigned IntvIn = 0, IntvOut = 0;
1129 SlotIndex IntfIn, IntfOut;
1131 unsigned CandIn = BundleCand[Bundles->getBundle(
Number,
false)];
1132 if (CandIn != NoCand) {
1133 GlobalSplitCandidate &Cand = GlobalCand[CandIn];
1134 IntvIn = Cand.IntvIdx;
1135 Cand.Intf.moveToBlock(
Number);
1136 IntfIn = Cand.Intf.first();
1139 unsigned CandOut = BundleCand[Bundles->getBundle(
Number,
true)];
1140 if (CandOut != NoCand) {
1141 GlobalSplitCandidate &Cand = GlobalCand[CandOut];
1142 IntvOut = Cand.IntvIdx;
1143 Cand.Intf.moveToBlock(
Number);
1144 IntfOut = Cand.Intf.last();
1146 if (!IntvIn && !IntvOut)
1148 SE->splitLiveThroughBlock(
Number, IntvIn, IntfIn, IntvOut, IntfOut);
1154 SmallVector<unsigned, 8> IntvMap;
1155 SE->finish(&IntvMap);
1156 DebugVars->splitRegister(
Reg, LREdit.
regs(), *
LIS);
1158 unsigned OrigBlocks = SA->getNumLiveBlocks();
1165 for (
unsigned I = 0,
E = LREdit.
size();
I !=
E; ++
I) {
1166 const LiveInterval &
Reg =
LIS->getInterval(LREdit.
get(
I));
1169 if (ExtraInfo->getOrInitStage(
Reg.reg()) !=
RS_New)
1174 if (IntvMap[
I] == 0) {
1181 if (IntvMap[
I] < NumGlobalIntvs) {
1182 if (SA->countLiveBlocks(&
Reg) >= OrigBlocks) {
1183 LLVM_DEBUG(
dbgs() <<
"Main interval covers the same " << OrigBlocks
1184 <<
" blocks as original.\n");
1196 MF->verify(
LIS, Indexes,
"After splitting live range around region",
1200MCRegister RAGreedy::tryRegionSplit(
const LiveInterval &VirtReg,
1201 AllocationOrder &Order,
1202 SmallVectorImpl<Register> &NewVRegs) {
1203 if (!
TRI->shouldRegionSplitForVirtReg(*MF, VirtReg))
1205 unsigned NumCands = 0;
1206 BlockFrequency SpillCost = calcBlockSplitCost();
1207 BlockFrequency BestCost;
1210 bool HasCompact = calcCompactRegion(GlobalCand.front());
1218 BestCost = SpillCost;
1223 unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost,
1227 if (!HasCompact && BestCand == NoCand)
1230 return doRegionSplit(VirtReg, BestCand, HasCompact, NewVRegs);
1233unsigned RAGreedy::calculateRegionSplitCostAroundReg(MCRegister PhysReg,
1234 AllocationOrder &Order,
1235 BlockFrequency &BestCost,
1237 unsigned &BestCand) {
1240 if (NumCands == IntfCache.getMaxCursors()) {
1241 unsigned WorstCount = ~0
u;
1243 for (
unsigned CandIndex = 0; CandIndex != NumCands; ++CandIndex) {
1244 if (CandIndex == BestCand || !GlobalCand[CandIndex].PhysReg)
1246 unsigned Count = GlobalCand[CandIndex].LiveBundles.count();
1247 if (
Count < WorstCount) {
1253 GlobalCand[Worst] = GlobalCand[NumCands];
1254 if (BestCand == NumCands)
1258 if (GlobalCand.size() <= NumCands)
1259 GlobalCand.resize(NumCands+1);
1260 GlobalSplitCandidate &Cand = GlobalCand[NumCands];
1261 Cand.reset(IntfCache, PhysReg);
1263 SpillPlacer->prepare(Cand.LiveBundles);
1264 BlockFrequency
Cost;
1265 if (!addSplitConstraints(Cand.Intf,
Cost)) {
1271 if (
Cost >= BestCost) {
1273 if (BestCand == NoCand)
1274 dbgs() <<
" worse than no bundles\n";
1276 dbgs() <<
" worse than "
1277 <<
printReg(GlobalCand[BestCand].PhysReg,
TRI) <<
'\n';
1281 if (!growRegion(Cand)) {
1286 SpillPlacer->finish();
1289 if (!Cand.LiveBundles.any()) {
1294 Cost += calcGlobalSplitCost(Cand, Order);
1297 for (
int I : Cand.LiveBundles.set_bits())
1298 dbgs() <<
" EB#" <<
I;
1301 if (
Cost < BestCost) {
1302 BestCand = NumCands;
1310unsigned RAGreedy::calculateRegionSplitCost(
const LiveInterval &VirtReg,
1311 AllocationOrder &Order,
1312 BlockFrequency &BestCost,
1315 unsigned BestCand = NoCand;
1316 for (MCRegister PhysReg : Order) {
1318 if (IgnoreCSR && EvictAdvisor->isUnusedCalleeSavedReg(PhysReg))
1321 calculateRegionSplitCostAroundReg(PhysReg, Order, BestCost, NumCands,
1328MCRegister RAGreedy::doRegionSplit(
const LiveInterval &VirtReg,
1329 unsigned BestCand,
bool HasCompact,
1330 SmallVectorImpl<Register> &NewVRegs) {
1331 SmallVector<unsigned, 8> UsedCands;
1333 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *
LIS,
VRM,
this, &
DeadRemats);
1337 BundleCand.assign(Bundles->getNumBundles(), NoCand);
1340 if (BestCand != NoCand) {
1341 GlobalSplitCandidate &Cand = GlobalCand[BestCand];
1342 if (
unsigned B = Cand.getBundles(BundleCand, BestCand)) {
1344 Cand.IntvIdx = SE->openIntv();
1346 <<
B <<
" bundles, intv " << Cand.IntvIdx <<
".\n");
1353 GlobalSplitCandidate &Cand = GlobalCand.front();
1354 assert(!Cand.PhysReg &&
"Compact region has no physreg");
1355 if (
unsigned B = Cand.getBundles(BundleCand, 0)) {
1357 Cand.IntvIdx = SE->openIntv();
1359 <<
" bundles, intv " << Cand.IntvIdx <<
".\n");
1364 splitAroundRegion(LREdit, UsedCands);
1365 return MCRegister();
1370bool RAGreedy::trySplitAroundHintReg(MCRegister Hint,
1371 const LiveInterval &VirtReg,
1372 SmallVectorImpl<Register> &NewVRegs,
1373 AllocationOrder &Order) {
1377 if (MF->getFunction().hasOptSize())
1381 if (ExtraInfo->getStage(VirtReg) >=
RS_Split2)
1384 BlockFrequency
Cost = BlockFrequency(0);
1394 for (
const MachineOperand &Opnd :
MRI->reg_nodbg_operands(
Reg)) {
1395 const MachineInstr &
Instr = *Opnd.getParent();
1396 if (!
Instr.isCopy() || Opnd.isImplicit())
1400 const bool IsDef = Opnd.isDef();
1401 const MachineOperand &OtherOpnd =
Instr.getOperand(IsDef);
1404 if (OtherReg ==
Reg)
1407 unsigned SubReg = Opnd.getSubReg();
1408 unsigned OtherSubReg = OtherOpnd.
getSubReg();
1413 if (Opnd.readsReg()) {
1414 SlotIndex
Index =
LIS->getInstructionIndex(Instr).getRegSlot();
1421 if (
any_of(VirtReg.
subranges(), [=](
const LiveInterval::SubRange &S) {
1422 return (S.LaneMask & Mask).any() && S.liveAt(Index);
1427 if (VirtReg.
liveAt(Index))
1432 MCRegister OtherPhysReg =
1435 if (OtherPhysReg == ThisHint)
1436 Cost += MBFI->getBlockFreq(
Instr.getParent());
1442 if (
Cost == BlockFrequency(0))
1445 unsigned NumCands = 0;
1446 unsigned BestCand = NoCand;
1447 SA->analyze(&VirtReg);
1448 calculateRegionSplitCostAroundReg(Hint, Order,
Cost, NumCands, BestCand);
1449 if (BestCand == NoCand)
1452 doRegionSplit(VirtReg, BestCand,
false, NewVRegs);
1463MCRegister RAGreedy::tryBlockSplit(
const LiveInterval &VirtReg,
1464 AllocationOrder &Order,
1465 SmallVectorImpl<Register> &NewVRegs) {
1466 assert(&SA->getParent() == &VirtReg &&
"Live range wasn't analyzed");
1469 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *
LIS,
VRM,
this, &
DeadRemats);
1472 for (
const SplitAnalysis::BlockInfo &BI : UseBlocks) {
1473 if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
1474 SE->splitSingleBlock(BI);
1478 return MCRegister();
1481 SmallVector<unsigned, 8> IntvMap;
1482 SE->finish(&IntvMap);
1485 DebugVars->splitRegister(
Reg, LREdit.
regs(), *
LIS);
1489 for (
unsigned I = 0,
E = LREdit.
size();
I !=
E; ++
I) {
1490 const LiveInterval &LI =
LIS->getInterval(LREdit.
get(
I));
1491 if (ExtraInfo->getOrInitStage(LI.
reg()) ==
RS_New && IntvMap[
I] == 0)
1496 MF->verify(
LIS, Indexes,
"After splitting live range around basic blocks",
1498 return MCRegister();
1511 assert(SuperRC &&
"Invalid register class");
1514 MI->getRegClassConstraintEffectForVReg(
Reg, SuperRC,
TII,
TRI,
1536 return MRI.getMaxLaneMaskForVReg(
Reg);
1542 Mask |= ~SubRegMask;
1559 auto DestSrc =
TII->isCopyInstr(*
MI);
1560 if (DestSrc && !
MI->isBundled() &&
1561 DestSrc->Destination->getSubReg() == DestSrc->Source->getSubReg())
1570 LiveAtMask |= S.LaneMask;
1575 return (ReadMask & ~(LiveAtMask &
TRI->getCoveringLanes())).
any();
1585MCRegister RAGreedy::tryInstructionSplit(
const LiveInterval &VirtReg,
1586 AllocationOrder &Order,
1587 SmallVectorImpl<Register> &NewVRegs) {
1588 const TargetRegisterClass *CurRC =
MRI->getRegClass(VirtReg.
reg());
1591 bool SplitSubClass =
true;
1594 return MCRegister();
1595 SplitSubClass =
false;
1600 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *
LIS,
VRM,
this, &
DeadRemats);
1604 if (
Uses.size() <= 1)
1605 return MCRegister();
1608 <<
" individual instrs.\n");
1610 const TargetRegisterClass *SuperRC =
1611 TRI->getLargestLegalSuperClass(CurRC, *MF);
1612 unsigned SuperRCNumAllocatableRegs =
1618 for (
const SlotIndex Use :
Uses) {
1619 if (
const MachineInstr *
MI = Indexes->getInstructionFromIndex(Use)) {
1620 if (TII->isFullCopyInstr(*
MI) ||
1622 SuperRCNumAllocatableRegs ==
1633 SlotIndex SegStart = SE->enterIntvBefore(Use);
1634 SlotIndex SegStop = SE->leaveIntvAfter(Use);
1635 SE->useIntv(SegStart, SegStop);
1638 if (LREdit.
empty()) {
1640 return MCRegister();
1643 SmallVector<unsigned, 8> IntvMap;
1644 SE->finish(&IntvMap);
1645 DebugVars->splitRegister(VirtReg.
reg(), LREdit.
regs(), *
LIS);
1648 return MCRegister();
1660void RAGreedy::calcGapWeights(MCRegister PhysReg,
1661 SmallVectorImpl<float> &GapWeight) {
1662 assert(SA->getUseBlocks().size() == 1 &&
"Not a local interval");
1663 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
1665 const unsigned NumGaps =
Uses.size()-1;
1668 SlotIndex StartIdx =
1673 GapWeight.
assign(NumGaps, 0.0f);
1676 for (MCRegUnit Unit :
TRI->regunits(PhysReg)) {
1677 if (!
Matrix->query(
const_cast<LiveInterval &
>(SA->getParent()), Unit)
1678 .checkInterference())
1689 Matrix->getLiveUnions()[
static_cast<unsigned>(
Unit)].
find(StartIdx);
1690 for (
unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
1692 while (
Uses[Gap+1].getBoundaryIndex() < IntI.start())
1693 if (++Gap == NumGaps)
1699 const float weight = IntI.value()->weight();
1700 for (; Gap != NumGaps; ++Gap) {
1701 GapWeight[Gap] = std::max(GapWeight[Gap], weight);
1702 if (
Uses[Gap+1].getBaseIndex() >= IntI.stop())
1711 for (MCRegUnit Unit :
TRI->regunits(PhysReg)) {
1717 for (
unsigned Gap = 0;
I !=
E &&
I->start < StopIdx; ++
I) {
1718 while (
Uses[Gap+1].getBoundaryIndex() <
I->start)
1719 if (++Gap == NumGaps)
1724 for (; Gap != NumGaps; ++Gap) {
1726 if (
Uses[Gap+1].getBaseIndex() >=
I->end)
1738MCRegister RAGreedy::tryLocalSplit(
const LiveInterval &VirtReg,
1739 AllocationOrder &Order,
1740 SmallVectorImpl<Register> &NewVRegs) {
1743 if (SA->getUseBlocks().size() != 1)
1744 return MCRegister();
1746 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
1756 if (
Uses.size() <= 2)
1757 return MCRegister();
1758 const unsigned NumGaps =
Uses.size()-1;
1761 dbgs() <<
"tryLocalSplit: ";
1762 for (
const auto &Use :
Uses)
1769 SmallVector<unsigned, 8> RegMaskGaps;
1770 if (
Matrix->checkRegMaskInterference(VirtReg)) {
1777 unsigned RE = RMS.
size();
1778 for (
unsigned I = 0;
I != NumGaps && RI != RE; ++
I) {
1789 RegMaskGaps.push_back(
I);
1816 bool ProgressRequired = ExtraInfo->getStage(VirtReg) >=
RS_Split2;
1819 unsigned BestBefore = NumGaps;
1820 unsigned BestAfter = 0;
1823 const float blockFreq =
1824 SpillPlacer->getBlockFrequency(BI.
MBB->
getNumber()).getFrequency() *
1825 (1.0f / MBFI->getEntryFreq().getFrequency());
1828 for (MCRegister PhysReg : Order) {
1832 calcGapWeights(PhysReg, GapWeight);
1835 if (
Matrix->checkRegMaskInterference(VirtReg, PhysReg))
1836 for (
unsigned Gap : RegMaskGaps)
1843 unsigned SplitBefore = 0, SplitAfter = 1;
1847 float MaxGap = GapWeight[0];
1851 const bool LiveBefore = SplitBefore != 0 || BI.
LiveIn;
1852 const bool LiveAfter = SplitAfter != NumGaps || BI.
LiveOut;
1855 <<
'-' <<
Uses[SplitAfter] <<
" I=" << MaxGap);
1858 if (!LiveBefore && !LiveAfter) {
1866 unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;
1869 bool Legal = !ProgressRequired || NewGaps < NumGaps;
1878 blockFreq * (NewGaps + 1),
1879 Uses[SplitBefore].distance(
Uses[SplitAfter]) +
1887 float Diff = EstWeight - MaxGap;
1888 if (Diff > BestDiff) {
1891 BestBefore = SplitBefore;
1892 BestAfter = SplitAfter;
1899 if (++SplitBefore < SplitAfter) {
1902 if (GapWeight[SplitBefore - 1] >= MaxGap) {
1903 MaxGap = GapWeight[SplitBefore];
1904 for (
unsigned I = SplitBefore + 1;
I != SplitAfter; ++
I)
1905 MaxGap = std::max(MaxGap, GapWeight[
I]);
1913 if (SplitAfter >= NumGaps) {
1919 MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
1924 if (BestBefore == NumGaps)
1925 return MCRegister();
1928 <<
Uses[BestAfter] <<
", " << BestDiff <<
", "
1929 << (BestAfter - BestBefore + 1) <<
" instrs\n");
1931 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *
LIS,
VRM,
this, &
DeadRemats);
1935 SlotIndex SegStart = SE->enterIntvBefore(
Uses[BestBefore]);
1936 SlotIndex SegStop = SE->leaveIntvAfter(
Uses[BestAfter]);
1937 SE->useIntv(SegStart, SegStop);
1938 SmallVector<unsigned, 8> IntvMap;
1939 SE->finish(&IntvMap);
1940 DebugVars->splitRegister(VirtReg.
reg(), LREdit.
regs(), *
LIS);
1944 bool LiveBefore = BestBefore != 0 || BI.
LiveIn;
1945 bool LiveAfter = BestAfter != NumGaps || BI.
LiveOut;
1946 unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
1947 if (NewGaps >= NumGaps) {
1949 assert(!ProgressRequired &&
"Didn't make progress when it was required.");
1950 for (
unsigned I = 0,
E = IntvMap.
size();
I !=
E; ++
I)
1951 if (IntvMap[
I] == 1) {
1959 return MCRegister();
1969MCRegister RAGreedy::trySplit(
const LiveInterval &VirtReg,
1970 AllocationOrder &Order,
1971 SmallVectorImpl<Register> &NewVRegs,
1974 if (ExtraInfo->getStage(VirtReg) >=
RS_Spill)
1975 return MCRegister();
1978 if (
LIS->intervalIsInOneMBB(VirtReg)) {
1981 SA->analyze(&VirtReg);
1982 MCRegister PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
1983 if (PhysReg || !NewVRegs.
empty())
1985 return tryInstructionSplit(VirtReg, Order, NewVRegs);
1988 NamedRegionTimer
T(
"global_split",
"Global Splitting",
TimerGroupName,
1991 SA->analyze(&VirtReg);
1996 if (ExtraInfo->getStage(VirtReg) <
RS_Split2) {
1997 MCRegister PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
1998 if (PhysReg || !NewVRegs.
empty())
2003 return tryBlockSplit(VirtReg, Order, NewVRegs);
2026 if (PhysReg == AssignedReg)
2028 return TRI.regsOverlap(PhysReg, AssignedReg);
2039bool RAGreedy::mayRecolorAllInterferences(
2040 MCRegister PhysReg,
const LiveInterval &VirtReg,
2041 SmallLISet &RecoloringCandidates,
const SmallVirtRegSet &FixedRegisters) {
2042 const TargetRegisterClass *CurRC =
MRI->getRegClass(VirtReg.
reg());
2044 for (MCRegUnit Unit :
TRI->regunits(PhysReg)) {
2045 LiveIntervalUnion::Query &Q =
Matrix->query(VirtReg, Unit);
2052 CutOffInfo |= CO_Interf;
2067 if (((ExtraInfo->getStage(*Intf) ==
RS_Done &&
2068 MRI->getRegClass(Intf->reg()) == CurRC &&
2072 FixedRegisters.
count(Intf->reg())) {
2074 dbgs() <<
"Early abort: the interference is not recolorable.\n");
2077 RecoloringCandidates.insert(Intf);
2126MCRegister RAGreedy::tryLastChanceRecoloring(
2127 const LiveInterval &VirtReg, AllocationOrder &Order,
2129 RecoloringStack &RecolorStack,
unsigned Depth) {
2130 if (!
TRI->shouldUseLastChanceRecoloringForVirtReg(*MF, VirtReg))
2133 LLVM_DEBUG(
dbgs() <<
"Try last chance recoloring for " << VirtReg <<
'\n');
2135 const ssize_t EntryStackSize = RecolorStack.size();
2139 "Last chance recoloring should really be last chance");
2145 LLVM_DEBUG(
dbgs() <<
"Abort because max depth has been reached.\n");
2146 CutOffInfo |= CO_Depth;
2151 SmallLISet RecoloringCandidates;
2159 for (MCRegister PhysReg : Order) {
2163 RecoloringCandidates.clear();
2164 CurrentNewVRegs.
clear();
2167 if (
Matrix->checkInterference(VirtReg, PhysReg) >
2170 dbgs() <<
"Some interferences are not with virtual registers.\n");
2177 if (!mayRecolorAllInterferences(PhysReg, VirtReg, RecoloringCandidates,
2179 LLVM_DEBUG(
dbgs() <<
"Some interferences cannot be recolored.\n");
2186 PQueue RecoloringQueue;
2187 for (
const LiveInterval *RC : RecoloringCandidates) {
2189 enqueue(RecoloringQueue, RC);
2191 "Interferences are supposed to be with allocated variables");
2194 RecolorStack.push_back(std::make_pair(RC,
VRM->getPhys(ItVirtReg)));
2203 Matrix->assign(VirtReg, PhysReg);
2212 if (tryRecoloringCandidates(RecoloringQueue, CurrentNewVRegs,
2213 FixedRegisters, RecolorStack,
Depth)) {
2218 if (
VRM->hasPhys(ThisVirtReg)) {
2219 Matrix->unassign(VirtReg);
2224 LLVM_DEBUG(
dbgs() <<
"tryRecoloringCandidates deleted a fixed register "
2226 FixedRegisters.
erase(ThisVirtReg);
2227 return MCRegister();
2234 FixedRegisters = SaveFixedRegisters;
2235 Matrix->unassign(VirtReg);
2241 for (
Register R : CurrentNewVRegs) {
2242 if (RecoloringCandidates.count(&
LIS->getInterval(R)))
2253 for (ssize_t
I = RecolorStack.size() - 1;
I >= EntryStackSize; --
I) {
2254 const LiveInterval *LI;
2256 std::tie(LI, PhysReg) = RecolorStack[
I];
2258 if (
VRM->hasPhys(LI->
reg()))
2262 for (
size_t I = EntryStackSize;
I != RecolorStack.size(); ++
I) {
2263 const LiveInterval *LI;
2265 std::tie(LI, PhysReg) = RecolorStack[
I];
2266 if (!LI->
empty() && !
MRI->reg_nodbg_empty(LI->
reg()))
2267 Matrix->assign(*LI, PhysReg);
2271 RecolorStack.resize(EntryStackSize);
2286bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
2287 SmallVectorImpl<Register> &NewVRegs,
2289 RecoloringStack &RecolorStack,
2291 while (!RecoloringQueue.empty()) {
2292 const LiveInterval *LI =
dequeue(RecoloringQueue);
2294 MCRegister PhysReg = selectOrSplitImpl(*LI, NewVRegs, FixedRegisters,
2295 RecolorStack,
Depth + 1);
2300 if (PhysReg == ~0u || (!PhysReg && !LI->
empty()))
2304 assert(LI->
empty() &&
"Only empty live-range do not require a register");
2306 <<
" succeeded. Empty LI.\n");
2310 <<
" succeeded with: " <<
printReg(PhysReg,
TRI) <<
'\n');
2312 Matrix->assign(*LI, PhysReg);
2324 CutOffInfo = CO_None;
2325 LLVMContext &Ctx = MF->getFunction().getContext();
2327 RecoloringStack RecolorStack;
2329 selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters, RecolorStack);
2330 if (Reg == ~0U && (CutOffInfo != CO_None)) {
2331 uint8_t CutOffEncountered = CutOffInfo & (CO_Depth | CO_Interf);
2332 if (CutOffEncountered == CO_Depth)
2333 Ctx.emitError(
"register allocation failed: maximum depth for recoloring "
2334 "reached. Use -fexhaustive-register-search to skip "
2336 else if (CutOffEncountered == CO_Interf)
2337 Ctx.emitError(
"register allocation failed: maximum interference for "
2338 "recoloring reached. Use -fexhaustive-register-search "
2340 else if (CutOffEncountered == (CO_Depth | CO_Interf))
2341 Ctx.emitError(
"register allocation failed: maximum interference and "
2342 "depth for recoloring reached. Use "
2343 "-fexhaustive-register-search to skip cutoffs");
2355 I =
MRI->reg_instr_nodbg_begin(LI.
reg()),
2356 E =
MRI->reg_instr_nodbg_end();
2359 if (
MI->isMetaInstruction())
2364 auto [Reads, Writes] =
MI->readsWritesVirtualRegister(LI.
reg());
2365 auto MBBFreq = SpillPlacer->getBlockFrequency(
MI->getParent()->getNumber());
2366 SpillCost += (Reads + Writes) * MBBFreq.getFrequency();
2378MCRegister RAGreedy::tryAssignCSRFirstTime(
2379 const LiveInterval &VirtReg, AllocationOrder &Order, MCRegister PhysReg,
2380 uint8_t &CostPerUseLimit, SmallVectorImpl<Register> &NewVRegs) {
2384 SA->analyze(&VirtReg);
2385 if (calcSpillCost(VirtReg) >= CSRCost)
2390 CostPerUseLimit = 1;
2391 return MCRegister();
2393 if (ExtraInfo->getStage(VirtReg) <
RS_Split) {
2396 SA->analyze(&VirtReg);
2397 unsigned NumCands = 0;
2398 BlockFrequency BestCost = CSRCost;
2399 unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost,
2401 if (BestCand == NoCand)
2406 doRegionSplit(VirtReg, BestCand,
false, NewVRegs);
2407 return MCRegister();
2414 SetOfBrokenHints.remove(&LI);
2417void RAGreedy::initializeCSRCost() {
2427 if (!CSRCost.getFrequency())
2431 uint64_t ActualEntry = MBFI->getEntryFreq().getFrequency();
2437 if (ActualEntry < FixedEntry) {
2439 }
else if (ActualEntry <= UINT32_MAX) {
2441 CSRCost /= BranchProbability(FixedEntry, ActualEntry);
2445 BlockFrequency(CSRCost.getFrequency() * (ActualEntry / FixedEntry));
2448 uint64_t EntryFreq = MBFI->getEntryFreq().getFrequency();
2449 CSRCost = BlockFrequency(
TRI->getCSRFirstUseCost() * EntryFreq);
2460void RAGreedy::collectHintInfo(
Register Reg, HintsInfo &Out) {
2461 const TargetRegisterClass *RC =
MRI->getRegClass(
Reg);
2463 for (
const MachineOperand &Opnd :
MRI->reg_nodbg_operands(
Reg)) {
2464 const MachineInstr &
Instr = *Opnd.getParent();
2465 if (!
Instr.isCopy() || Opnd.isImplicit())
2469 const MachineOperand &OtherOpnd =
Instr.getOperand(Opnd.isDef());
2471 if (OtherReg ==
Reg)
2473 unsigned OtherSubReg = OtherOpnd.
getSubReg();
2474 unsigned SubReg = Opnd.getSubReg();
2477 MCRegister OtherPhysReg;
2480 OtherPhysReg =
TRI->getMatchingSuperReg(OtherReg, OtherSubReg, RC);
2482 OtherPhysReg =
TRI->getMatchingSuperReg(OtherReg,
SubReg, RC);
2484 OtherPhysReg = OtherReg;
2486 OtherPhysReg =
VRM->getPhys(OtherReg);
2496 Out.push_back(HintInfo(MBFI->getBlockFreq(
Instr.getParent()), OtherReg,
2505BlockFrequency RAGreedy::getBrokenHintFreq(
const HintsInfo &
List,
2506 MCRegister PhysReg) {
2507 BlockFrequency
Cost = BlockFrequency(0);
2508 for (
const HintInfo &Info :
List) {
2509 if (
Info.PhysReg != PhysReg)
2523void RAGreedy::tryHintRecoloring(
const LiveInterval &VirtReg) {
2529 MCRegister PhysReg =
VRM->getPhys(
Reg);
2532 SmallSet<Register, 4> Visited = {
Reg};
2541 MCRegister CurrPhys =
VRM->getPhys(
Reg);
2546 "We have an unallocated variable which should have been handled");
2552 LiveInterval &LI =
LIS->getInterval(
Reg);
2555 if (CurrPhys != PhysReg && (!
MRI->getRegClass(
Reg)->contains(PhysReg) ||
2556 Matrix->checkInterference(LI, PhysReg)))
2560 <<
") is recolorable.\n");
2564 collectHintInfo(
Reg, Info);
2567 if (CurrPhys != PhysReg) {
2569 BlockFrequency OldCopiesCost = getBrokenHintFreq(Info, CurrPhys);
2570 BlockFrequency NewCopiesCost = getBrokenHintFreq(Info, PhysReg);
2574 if (OldCopiesCost < NewCopiesCost) {
2584 Matrix->assign(LI, PhysReg);
2588 for (
const HintInfo &HI : Info) {
2590 if (
HI.Reg.isVirtual() && Visited.
insert(
HI.Reg).second)
2593 }
while (!RecoloringCandidates.
empty());
2632void RAGreedy::tryHintsRecoloring() {
2633 for (
const LiveInterval *LI : SetOfBrokenHints) {
2635 "Recoloring is possible only for virtual registers");
2638 if (!
VRM->hasPhys(LI->
reg()))
2640 tryHintRecoloring(*LI);
2644MCRegister RAGreedy::selectOrSplitImpl(
const LiveInterval &VirtReg,
2645 SmallVectorImpl<Register> &NewVRegs,
2647 RecoloringStack &RecolorStack,
2649 uint8_t CostPerUseLimit = uint8_t(~0u);
2653 if (MCRegister PhysReg =
2654 tryAssign(VirtReg, Order, NewVRegs, FixedRegisters)) {
2658 if (CSRCost.getFrequency() &&
2659 EvictAdvisor->isUnusedCalleeSavedReg(PhysReg) && NewVRegs.
empty()) {
2660 MCRegister CSRReg = tryAssignCSRFirstTime(VirtReg, Order, PhysReg,
2661 CostPerUseLimit, NewVRegs);
2662 if (CSRReg || !NewVRegs.
empty())
2670 if (!NewVRegs.
empty())
2671 return MCRegister();
2675 << ExtraInfo->getCascade(VirtReg.
reg()) <<
'\n');
2681 if (MCRegister PhysReg =
2682 tryEvict(VirtReg, Order, NewVRegs, CostPerUseLimit,
2690 if (Hint && Hint != PhysReg)
2691 SetOfBrokenHints.insert(&VirtReg);
2696 assert((NewVRegs.
empty() ||
Depth) &&
"Cannot append to existing NewVRegs");
2702 ExtraInfo->setStage(VirtReg,
RS_Split);
2705 return MCRegister();
2710 unsigned NewVRegSizeBefore = NewVRegs.
size();
2711 MCRegister PhysReg = trySplit(VirtReg, Order, NewVRegs, FixedRegisters);
2712 if (PhysReg || (NewVRegs.
size() - NewVRegSizeBefore))
2719 return tryLastChanceRecoloring(VirtReg, Order, NewVRegs, FixedRegisters,
2720 RecolorStack,
Depth);
2734 DebugVars->splitRegister(r, LRE.regs(), *
LIS);
2736 DebugVars->splitRegister(r, LRE.regs(), *
LIS);
2739 MF->verify(
LIS, Indexes,
"After spilling", &
errs());
2743 return MCRegister();
2746void RAGreedy::RAGreedyStats::report(MachineOptimizationRemarkMissed &R) {
2747 using namespace ore;
2749 R <<
NV(
"NumSpills", Spills) <<
" spills ";
2750 R <<
NV(
"TotalSpillsCost", SpillsCost) <<
" total spills cost ";
2753 R <<
NV(
"NumFoldedSpills", FoldedSpills) <<
" folded spills ";
2754 R <<
NV(
"TotalFoldedSpillsCost", FoldedSpillsCost)
2755 <<
" total folded spills cost ";
2758 R <<
NV(
"NumReloads", Reloads) <<
" reloads ";
2759 R <<
NV(
"TotalReloadsCost", ReloadsCost) <<
" total reloads cost ";
2761 if (FoldedReloads) {
2762 R <<
NV(
"NumFoldedReloads", FoldedReloads) <<
" folded reloads ";
2763 R <<
NV(
"TotalFoldedReloadsCost", FoldedReloadsCost)
2764 <<
" total folded reloads cost ";
2766 if (ZeroCostFoldedReloads)
2767 R <<
NV(
"NumZeroCostFoldedReloads", ZeroCostFoldedReloads)
2768 <<
" zero cost folded reloads ";
2770 R <<
NV(
"NumVRCopies",
Copies) <<
" virtual registers copies ";
2771 R <<
NV(
"TotalCopiesCost", CopiesCost) <<
" total copies cost ";
2775RAGreedy::RAGreedyStats RAGreedy::computeStats(MachineBasicBlock &
MBB) {
2776 RAGreedyStats
Stats;
2777 const MachineFrameInfo &MFI = MF->getFrameInfo();
2780 auto isSpillSlotAccess = [&MFI](
const MachineMemOperand *
A) {
2782 A->getPseudoValue())->getFrameIndex());
2784 auto isPatchpointInstr = [](
const MachineInstr &
MI) {
2785 return MI.getOpcode() == TargetOpcode::PATCHPOINT ||
2786 MI.getOpcode() == TargetOpcode::STACKMAP ||
2787 MI.getOpcode() == TargetOpcode::STATEPOINT;
2789 for (MachineInstr &
MI :
MBB) {
2790 auto DestSrc = TII->isCopyInstr(
MI);
2792 const MachineOperand &Dest = *DestSrc->Destination;
2793 const MachineOperand &Src = *DestSrc->Source;
2799 SrcReg =
VRM->getPhys(SrcReg);
2800 if (SrcReg && Src.getSubReg())
2801 SrcReg =
TRI->getSubReg(SrcReg, Src.getSubReg());
2804 DestReg =
VRM->getPhys(DestReg);
2808 if (SrcReg != DestReg)
2814 SmallVector<const MachineMemOperand *, 2>
Accesses;
2823 if (TII->hasLoadFromStackSlot(
MI,
Accesses) &&
2825 if (!isPatchpointInstr(
MI)) {
2830 std::pair<unsigned, unsigned> NonZeroCostRange =
2831 TII->getPatchpointUnfoldableRange(
MI);
2832 SmallSet<unsigned, 16> FoldedReloads;
2833 SmallSet<unsigned, 16> ZeroCostFoldedReloads;
2834 for (
unsigned Idx = 0,
E =
MI.getNumOperands(); Idx <
E; ++Idx) {
2835 MachineOperand &MO =
MI.getOperand(Idx);
2838 if (Idx >= NonZeroCostRange.first && Idx < NonZeroCostRange.second)
2844 for (
unsigned Slot : FoldedReloads)
2845 ZeroCostFoldedReloads.
erase(Slot);
2846 Stats.FoldedReloads += FoldedReloads.size();
2847 Stats.ZeroCostFoldedReloads += ZeroCostFoldedReloads.
size();
2851 if (TII->hasStoreToStackSlot(
MI,
Accesses) &&
2858 float RelFreq = MBFI->getBlockFreqRelativeToEntryBlock(&
MBB);
2860 Stats.FoldedReloadsCost = RelFreq *
Stats.FoldedReloads;
2862 Stats.FoldedSpillsCost = RelFreq *
Stats.FoldedSpills;
2867RAGreedy::RAGreedyStats RAGreedy::reportStats(MachineLoop *L) {
2868 RAGreedyStats
Stats;
2871 for (MachineLoop *SubLoop : *L)
2872 Stats.add(reportStats(SubLoop));
2874 for (MachineBasicBlock *
MBB :
L->getBlocks())
2876 if (Loops->getLoopFor(
MBB) == L)
2879 if (!
Stats.isEmpty()) {
2880 using namespace ore;
2883 MachineOptimizationRemarkMissed
R(
DEBUG_TYPE,
"LoopSpillReloadCopies",
2884 L->getStartLoc(),
L->getHeader());
2886 R <<
"generated in loop";
2893void RAGreedy::reportStats() {
2896 RAGreedyStats
Stats;
2897 for (MachineLoop *L : *Loops)
2898 Stats.add(reportStats(L));
2900 for (MachineBasicBlock &
MBB : *MF)
2901 if (!Loops->getLoopFor(&
MBB))
2903 if (!
Stats.isEmpty()) {
2904 using namespace ore;
2908 if (
auto *SP = MF->getFunction().getSubprogram())
2910 MachineOptimizationRemarkMissed
R(
DEBUG_TYPE,
"SpillReloadCopies", Loc,
2913 R <<
"generated in function";
2919bool RAGreedy::hasVirtRegAlloc() {
2920 for (
unsigned I = 0,
E =
MRI->getNumVirtRegs();
I !=
E; ++
I) {
2922 if (
MRI->reg_nodbg_empty(
Reg))
2932 LLVM_DEBUG(
dbgs() <<
"********** GREEDY REGISTER ALLOCATION **********\n"
2933 <<
"********** Function: " << mf.
getName() <<
'\n');
2939 MF->verify(
LIS, Indexes,
"Before greedy register allocator", &
errs());
2945 if (!hasVirtRegAlloc())
2950 Indexes->packIndexes();
2952 initializeCSRCost();
2954 RegCosts =
TRI->getRegisterCosts(*MF);
2955 RegClassPriorityTrumpsGlobalness =
2958 :
TRI->regClassPriorityTrumpsGlobalness(*MF);
2962 :
TRI->reverseLocalAssignment();
2964 ExtraInfo.emplace();
2966 EvictAdvisor = EvictProvider->getAdvisor(*MF, *
this, MBFI, Loops);
2967 PriorityAdvisor = PriorityProvider->getAdvisor(*MF, *
this, *Indexes);
2969 VRAI = std::make_unique<VirtRegAuxInfo>(*MF, *
LIS, *
VRM, *Loops, *MBFI);
2973 VRAI->calculateSpillWeightsAndHints();
2980 IntfCache.init(MF,
Matrix->getLiveUnions(), Indexes,
LIS,
TRI);
2981 GlobalCand.resize(32);
2982 SetOfBrokenHints.clear();
2985 tryHintsRecoloring();
2988 MF->verify(
LIS, Indexes,
"Before post optimization", &
errs());
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
DXIL Forward Handle Accesses
const HexagonInstrInfo * TII
This file implements an indexed map.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
block placement Basic Block Placement Stats
Register const TargetRegisterInfo * TRI
Promote Memory to Register
MachineInstr unsigned OpIdx
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
static bool hasTiedDef(MachineRegisterInfo *MRI, Register reg)
Return true if reg has any tied def operand.
static cl::opt< bool > GreedyRegClassPriorityTrumpsGlobalness("greedy-regclass-priority-trumps-globalness", cl::desc("Change the greedy register allocator's live range priority " "calculation to make the AllocationPriority of the register class " "more important then whether the range is global"), cl::Hidden)
static cl::opt< bool > ExhaustiveSearch("exhaustive-register-search", cl::NotHidden, cl::desc("Exhaustive Search for registers bypassing the depth " "and interference cutoffs of last chance recoloring"), cl::Hidden)
static cl::opt< unsigned > CSRCostScale("regalloc-csr-cost-scale", cl::desc("Scale for the callee-saved register cost, in percentage."), cl::init(80), cl::Hidden)
static cl::opt< unsigned > LastChanceRecoloringMaxInterference("lcr-max-interf", cl::Hidden, cl::desc("Last chance recoloring maximum number of considered" " interference at a time"), cl::init(8))
static bool readsLaneSubset(const MachineRegisterInfo &MRI, const MachineInstr *MI, const LiveInterval &VirtReg, const TargetRegisterInfo *TRI, SlotIndex Use, const TargetInstrInfo *TII)
Return true if MI at \P Use reads a subset of the lanes live in VirtReg.
static bool assignedRegPartiallyOverlaps(const TargetRegisterInfo &TRI, const VirtRegMap &VRM, MCRegister PhysReg, const LiveInterval &Intf)
Return true if the existing assignment of Intf overlaps, but is not the same, as PhysReg.
static cl::opt< unsigned > CSRFirstTimeCost("regalloc-csr-first-time-cost", cl::desc("Cost for first time use of callee-saved register."), cl::init(0), cl::Hidden)
static cl::opt< unsigned > LastChanceRecoloringMaxDepth("lcr-max-depth", cl::Hidden, cl::desc("Last chance recoloring max depth"), cl::init(5))
static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator", createGreedyRegisterAllocator)
static cl::opt< unsigned long > GrowRegionComplexityBudget("grow-region-complexity-budget", cl::desc("growRegion() does not scale with the number of BB edges, so " "limit its budget and bail out once we reach the limit."), cl::init(10000), cl::Hidden)
static cl::opt< unsigned > SplitThresholdForRegWithHint("split-threshold-for-reg-with-hint", cl::desc("The threshold for splitting a virtual register with a hint, in " "percentage"), cl::init(75), cl::Hidden)
static cl::opt< SplitEditor::ComplementSpillMode > SplitSpillMode("split-spill-mode", cl::Hidden, cl::desc("Spill mode for splitting live ranges"), cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"), clEnumValN(SplitEditor::SM_Size, "size", "Optimize for size"), clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed")), cl::init(SplitEditor::SM_Speed))
static unsigned getNumAllocatableRegsForConstraints(const MachineInstr *MI, Register Reg, const TargetRegisterClass *SuperRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, const RegisterClassInfo &RCI)
Get the number of allocatable registers that match the constraints of Reg on MI and that are also in ...
static cl::opt< bool > GreedyReverseLocalAssignment("greedy-reverse-local-assignment", cl::desc("Reverse allocation order of local live ranges, such that " "shorter local live ranges will tend to be allocated first"), cl::Hidden)
static LaneBitmask getInstReadLaneMask(const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const MachineInstr &FirstMI, Register Reg)
Remove Loads Into Fake Uses
SI optimize exec mask operations pre RA
SI Optimize VGPR LiveRange
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName) const
PreservedAnalyses run(MachineFunction &F, MachineFunctionAnalysisManager &AM)
bool isHint(Register Reg) const
Return true if Reg is a preferred physical register.
ArrayRef< MCPhysReg > getOrder() const
Get the allocation order without reordered hints.
static AllocationOrder create(Register VirtReg, const VirtRegMap &VRM, const RegisterClassInfo &RegClassInfo, const LiveRegMatrix *Matrix)
Create a new AllocationOrder for VirtReg.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool test(unsigned Idx) const
static BlockFrequency max()
Returns the maximum possible frequency, the saturation value.
Represents analyses that only rely on functions' control flow.
FunctionPass class - This class is used to implement most global optimizations.
Cursor - The primary query interface for the block interference cache.
SlotIndex first()
first - Return the starting index of the first interfering range in the current block.
SlotIndex last()
last - Return the ending index of the last interfering range in the current block.
bool hasInterference()
hasInterference - Return true if the current block has any interference.
void moveToBlock(unsigned MBBNum)
moveTo - Move cursor to basic block MBBNum.
This is an important class for using LLVM in a threaded context.
Query interferences between a single live virtual register and a live interval union.
const SmallVectorImpl< const LiveInterval * > & interferingVRegs(unsigned MaxInterferingRegs=std::numeric_limits< unsigned >::max())
LiveSegments::iterator SegmentIter
A live range for subregisters.
LiveInterval - This class represents the liveness of a register, or stack slot.
bool isSpillable() const
isSpillable - Can this interval be spilled?
bool hasSubRanges() const
Returns true if subregister liveness information is available.
LLVM_ABI unsigned getSize() const
getSize - Returns the sum of sizes of all the LiveRange's.
iterator_range< subrange_iterator > subranges()
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction associated with the given index.
LiveInterval & getInterval(Register Reg)
Register get(unsigned idx) const
ArrayRef< Register > regs() const
Segments::const_iterator const_iterator
bool liveAt(SlotIndex index) const
SlotIndex beginIndex() const
beginIndex - Return the lowest numbered slot covered.
SlotIndex endIndex() const
endNumber - return the maximum point of the range of the whole, exclusive.
LLVM_ABI iterator find(SlotIndex Pos)
find - Return an iterator pointing to the first segment that ends after Pos, or end().
@ IK_VirtReg
Virtual register interference.
Wrapper class representing physical registers. Should be passed by value.
constexpr bool isValid() const
static constexpr unsigned NoRegister
constexpr unsigned id() const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
An RAII based helper class to modify MachineFunctionProperties when running pass.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
LLVM_ABI iterator getFirstNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the first non-debug instruction in the basic block, or end().
MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate machine basic b...
Analysis pass which computes a MachineDominatorTree.
Analysis pass which computes a MachineDominatorTree.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
bool isSpillSlotObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a spill slot.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
Representation of each machine instruction.
bool isImplicitDef() const
Analysis pass that exposes the MachineLoopInfo for a machine function.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
defusechain_instr_iterator< true, true, true, true > reg_instr_nodbg_iterator
reg_instr_nodbg_iterator/reg_instr_nodbg_begin/reg_instr_nodbg_end - Walk all defs and uses of the sp...
Pass interface - Implemented by all 'passes'.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
bool run(MachineFunction &mf)
Perform register allocation.
Spiller & spiller() override
MCRegister selectOrSplit(const LiveInterval &, SmallVectorImpl< Register > &) override
RAGreedy(RequiredAnalyses &Analyses, const RegAllocFilterFunc F=nullptr)
const LiveInterval * dequeue() override
dequeue - Return the next unassigned register, or NULL.
void enqueueImpl(const LiveInterval *LI) override
enqueue - Add VirtReg to the priority queue of unassigned registers.
void aboutToRemoveInterval(const LiveInterval &) override
Method called when the allocator is about to remove a LiveInterval.
RegAllocBase(const RegAllocFilterFunc F=nullptr)
void enqueue(const LiveInterval *LI)
enqueue - Add VirtReg to the priority queue of unassigned registers.
void init(VirtRegMap &vrm, LiveIntervals &lis, LiveRegMatrix &mat)
SmallPtrSet< MachineInstr *, 32 > DeadRemats
Inst which is a def of an original reg and whose defs are already all dead after remat is saved in De...
const TargetRegisterInfo * TRI
static const char TimerGroupName[]
static const char TimerGroupDescription[]
virtual void postOptimization()
RegisterClassInfo RegClassInfo
MachineRegisterInfo * MRI
bool shouldAllocateRegister(Register Reg)
Get whether a given register should be allocated.
static bool VerifyEnabled
VerifyEnabled - True when -verify-regalloc is given.
ImmutableAnalysis abstraction for fetching the Eviction Advisor.
A MachineFunction analysis for fetching the Eviction Advisor.
Common provider for legacy and new pass managers.
const TargetRegisterInfo *const TRI
std::optional< unsigned > getOrderLimit(const LiveInterval &VirtReg, const AllocationOrder &Order, unsigned CostPerUseLimit) const
const ArrayRef< uint8_t > RegCosts
MachineRegisterInfo *const MRI
const RegisterClassInfo & RegClassInfo
bool isUnusedCalleeSavedReg(MCRegister PhysReg) const
Returns true if the given PhysReg is a callee saved register and has not been used for allocation yet...
bool canReassign(const LiveInterval &VirtReg, MCRegister FromReg) const
bool canAllocatePhysReg(unsigned CostPerUseLimit, MCRegister PhysReg) const
LiveRegMatrix *const Matrix
Common provider for getting the priority advisor and logging rewards.
unsigned getNumAllocatableRegs(const TargetRegisterClass *RC) const
getNumAllocatableRegs - Returns the number of actually allocatable registers in RC in the current fun...
Wrapper class representing virtual and physical registers.
static Register index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
unsigned virtRegIndex() const
Convert a virtual register number to a 0-based index.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
SlotIndex - An opaque wrapper around machine indexes.
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
static bool isEarlierInstr(SlotIndex A, SlotIndex B)
isEarlierInstr - Return true if A refers to an instruction earlier than B.
@ InstrDist
The default distance between instructions as returned by distance().
bool isValid() const
Returns true if this is a valid index.
SlotIndex getBoundaryIndex() const
Returns the boundary index for associated with this index.
SlotIndex getBaseIndex() const
Returns the base index for associated with this index.
int getApproxInstrDistance(SlotIndex other) const
Return the scaled distance from this index to the given one, where all slots on the same instruction ...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
@ MustSpill
A register is impossible, variable must be spilled.
@ DontCare
Block doesn't care / variable not live.
@ PrefReg
Block entry/exit prefers a register.
@ PrefSpill
Block entry/exit prefers a stack slot.
virtual void spill(LiveRangeEdit &LRE, AllocationOrder *Order=nullptr)=0
spill - Spill the LRE.getParent() live interval.
SplitAnalysis - Analyze a LiveInterval, looking for live range splitting opportunities.
SplitEditor - Edit machine code and LiveIntervals for live range splitting.
@ SM_Partition
SM_Partition(Default) - Try to create the complement interval so it doesn't overlap any other interva...
@ SM_Speed
SM_Speed - Overlap intervals to minimize the expected execution frequency of the inserted copies.
@ SM_Size
SM_Size - Overlap intervals to minimize the number of inserted COPY instructions.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
TargetInstrInfo - Interface to description of machine instruction set.
const bool GlobalPriority
const uint8_t AllocationPriority
Classes with a higher priority value are assigned first by register allocators using a greedy heurist...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
A Use represents the edge between a Value definition and its users.
MCRegister getPhys(Register virtReg) const
returns the physical register mapped to the specified virtual register
bool hasPhys(Register virtReg) const
returns true if the specified virtual register is mapped to a physical register
An efficient, type-erasing, non-owning reference to a callable.
This class implements an extremely fast bulk output stream that can only output to a stream.
Pass manager infrastructure for declaring and invalidating analyses.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< InstrNode * > Instr
NodeAddr< UseNode * > Use
This is an optimization pass for GlobalISel generic memory operations.
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
std::function< bool(const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, const Register Reg)> RegAllocFilterFunc
Filter function for register classes during regalloc.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
constexpr uint64_t maxUIntN(uint64_t N)
Gets the maximum value for a N-bit unsigned integer.
SmallSet< Register, 16 > SmallVirtRegSet
LLVM_ABI FunctionPass * createGreedyRegisterAllocator()
Greedy register allocation pass - This pass implements a global register allocator for optimized buil...
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
LLVM_ABI bool TimePassesIsEnabled
If the user specifies the -time-passes argument on an LLVM tool command line then the value of this b...
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
LLVM_ABI PreservedAnalyses getMachineFunctionPassPreservedAnalyses()
Returns the minimum set of Analyses that all machine function passes must preserve.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
auto reverse(ContainerTy &&C)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
@ RS_Split2
Attempt more aggressive live range splitting that is guaranteed to make progress.
@ RS_Spill
Live range will be spilled. No more splitting will be attempted.
@ RS_Split
Attempt live range splitting if assignment is impossible.
@ RS_New
Newly created live range that has never been queued.
@ RS_Done
There is nothing more we can do to this live range.
@ RS_Assign
Only attempt assignment and eviction. Then requeue as RS_Split.
FunctionAddr VTableAddr Count
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
LLVM_ABI VirtRegInfo AnalyzeVirtRegInBundle(MachineInstr &MI, Register Reg, SmallVectorImpl< std::pair< MachineInstr *, unsigned > > *Ops=nullptr)
AnalyzeVirtRegInBundle - Analyze how the current instruction or bundle uses a virtual register.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI const float huge_valf
Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
Spiller * createInlineSpiller(const Spiller::RequiredAnalyses &Analyses, MachineFunction &MF, VirtRegMap &VRM, VirtRegAuxInfo &VRAI, LiveRegMatrix *Matrix=nullptr)
Create and return a spiller that will insert spill code directly instead of deferring though VirtRegM...
ArrayRef(const T &OneElt) -> ArrayRef< T >
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI Printable printBlockFreq(const BlockFrequencyInfo &BFI, BlockFrequency Freq)
Print the block frequency Freq relative to the current functions entry frequency.
LLVM_ABI char & RAGreedyLegacyID
Greedy register allocator.
static float normalizeSpillWeight(float UseDefFreq, unsigned Size, unsigned NumInstr)
Normalize the spill weight of a live interval.
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
LLVM_ABI Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
Implement std::hash so that hash_code can be used in STL containers.
MachineBlockFrequencyInfo * MBFI
RegAllocEvictionAdvisorProvider * EvictProvider
MachineOptimizationRemarkEmitter * ORE
LiveDebugVariables * DebugVars
SpillPlacement * SpillPlacer
RegAllocPriorityAdvisorProvider * PriorityProvider
MachineDominatorTree * DomTree
RequiredAnalyses()=delete
constexpr bool any() const
This class is basically a combination of TimeRegion and Timer.
BlockConstraint - Entry and exit constraints for a basic block.
BorderConstraint Exit
Constraint on block exit.
bool ChangesValue
True when this block changes the value of the live range.
BorderConstraint Entry
Constraint on block entry.
unsigned Number
Basic block number (from MBB::getNumber()).
Additional information about basic blocks where the current variable is live.
SlotIndex FirstDef
First non-phi valno->def, or SlotIndex().
bool LiveOut
Current reg is live out.
bool LiveIn
Current reg is live in.
SlotIndex LastInstr
Last instr accessing current reg.
SlotIndex FirstInstr
First instr accessing current reg.