53 #define DEBUG_TYPE "regalloc"
55 STATISTIC(NumGlobalSplits,
"Number of split global live ranges");
56 STATISTIC(NumLocalSplits,
"Number of split local live ranges");
57 STATISTIC(NumEvicted,
"Number of interferences evicted");
61 cl::desc(
"Spill mode for splitting live ranges"),
69 cl::desc(
"Last chance recoloring max depth"),
74 cl::desc(
"Last chance recoloring maximum number of considered"
75 " interference at a time"),
80 cl::desc(
"Exhaustive Search for registers bypassing the depth "
81 "and interference cutoffs of last chance recoloring"));
85 cl::desc(
"Local reassignment can yield better allocation decisions, but "
86 "may be compile time intensive"),
91 cl::desc(
"Instead of spilling a variable right away, defer the actual "
92 "code insertion to the end of the allocation. That way the "
93 "allocator might still find a suitable coloring for this "
94 "variable because of other evicted variables."),
100 cl::desc(
"Cost for first time use of callee-saved register."),
111 typedef std::priority_queue<std::pair<unsigned, unsigned> > PQueue;
134 std::unique_ptr<Spiller> SpillerInstance;
136 unsigned NextCascade;
151 enum LiveRangeStage {
196 static const char *
const StageName[];
201 LiveRangeStage Stage;
206 RegInfo() : Stage(RS_New), Cascade(0) {}
211 LiveRangeStage getStage(
const LiveInterval &VirtReg)
const {
212 return ExtraRegInfo[VirtReg.
reg].Stage;
215 void setStage(
const LiveInterval &VirtReg, LiveRangeStage Stage) {
216 ExtraRegInfo.
resize(
MRI->getNumVirtRegs());
217 ExtraRegInfo[VirtReg.
reg].Stage = Stage;
220 template<
typename Iterator>
221 void setStage(Iterator Begin, Iterator
End, LiveRangeStage NewStage) {
222 ExtraRegInfo.resize(
MRI->getNumVirtRegs());
223 for (;Begin !=
End; ++Begin) {
224 unsigned Reg = *Begin;
225 if (ExtraRegInfo[Reg].Stage == RS_New)
226 ExtraRegInfo[
Reg].Stage = NewStage;
231 struct EvictionCost {
232 unsigned BrokenHints;
235 EvictionCost(): BrokenHints(0), MaxWeight(0) {}
237 bool isMax()
const {
return BrokenHints == ~0u; }
239 void setMax() { BrokenHints = ~0u; }
241 void setBrokenHints(
unsigned NHints) { BrokenHints = NHints; }
243 bool operator<(
const EvictionCost &O)
const {
244 return std::tie(BrokenHints, MaxWeight) <
245 std::tie(O.BrokenHints, O.MaxWeight);
250 std::unique_ptr<SplitAnalysis> SA;
251 std::unique_ptr<SplitEditor> SE;
260 struct GlobalSplitCandidate {
277 Intf.setPhysReg(Cache, Reg);
279 ActiveBlocks.clear();
285 for (
int i = LiveBundles.find_first();
i >= 0;
286 i = LiveBundles.find_next(
i))
300 enum :
unsigned {
NoCand = ~0u };
311 bool EnableLocalReassign;
320 StringRef getPassName()
const override {
return "Greedy Register Allocator"; }
324 void releaseMemory()
override;
325 Spiller &spiller()
override {
return *SpillerInstance; }
343 SmallVirtRegSet &,
unsigned = 0);
345 bool LRE_CanEraseVirtReg(
unsigned)
override;
346 void LRE_WillShrinkVirtReg(
unsigned)
override;
347 void LRE_DidCloneVirtReg(
unsigned,
unsigned)
override;
354 void growRegion(GlobalSplitCandidate &Cand);
356 bool calcCompactRegion(GlobalSplitCandidate&);
359 unsigned canReassign(
LiveInterval &VirtReg,
unsigned PhysReg);
361 bool canEvictInterference(
LiveInterval&,
unsigned,
bool, EvictionCost&);
364 bool mayRecolorAllInterferences(
unsigned PhysReg,
LiveInterval &VirtReg,
365 SmallLISet &RecoloringCandidates,
366 const SmallVirtRegSet &FixedRegisters);
375 unsigned calculateRegionSplitCost(
LiveInterval &VirtReg,
378 unsigned &NumCands,
bool IgnoreCSR);
380 unsigned doRegionSplit(
LiveInterval &VirtReg,
unsigned BestCand,
386 unsigned PhysReg,
unsigned &CostPerUseLimit,
388 void initializeCSRCost();
399 SmallVirtRegSet &,
unsigned);
401 SmallVirtRegSet &,
unsigned);
403 void tryHintsRecoloring();
415 : Freq(Freq), Reg(Reg), PhysReg(PhysReg) {}
419 void collectHintInfo(
unsigned, HintsInfo &);
421 bool isUnusedCalleeSavedReg(
unsigned PhysReg)
const;
429 "Greedy Register Allocator",
false,
false)
446 const char *
const RAGreedy::StageName[] = {
463 return new RAGreedy();
493 MachineFunctionPass::getAnalysisUsage(AU);
501 bool RAGreedy::LRE_CanEraseVirtReg(
unsigned VirtReg) {
502 if (VRM->hasPhys(VirtReg)) {
505 aboutToRemoveInterval(LI);
513 void RAGreedy::LRE_WillShrinkVirtReg(
unsigned VirtReg) {
514 if (!VRM->hasPhys(VirtReg))
523 void RAGreedy::LRE_DidCloneVirtReg(
unsigned New,
unsigned Old) {
525 if (!ExtraRegInfo.inBounds(Old))
532 ExtraRegInfo[Old].Stage = RS_Assign;
533 ExtraRegInfo.grow(New);
534 ExtraRegInfo[New] = ExtraRegInfo[Old];
537 void RAGreedy::releaseMemory() {
538 SpillerInstance.reset();
539 ExtraRegInfo.clear();
545 void RAGreedy::enqueue(PQueue &CurQueue,
LiveInterval *LI) {
548 const unsigned Size = LI->
getSize();
549 const unsigned Reg = LI->
reg;
550 assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
551 "Can only enqueue virtual registers");
554 ExtraRegInfo.grow(Reg);
555 if (ExtraRegInfo[Reg].Stage == RS_New)
556 ExtraRegInfo[
Reg].Stage = RS_Assign;
558 if (ExtraRegInfo[Reg].Stage == RS_Split) {
562 }
else if (ExtraRegInfo[Reg].Stage == RS_Memory) {
567 static unsigned MemOp = 0;
572 bool ReverseLocal = TRI->reverseLocalAssignment();
574 bool ForceGlobal = !ReverseLocal &&
575 (Size / SlotIndex::InstrDist) > (2 * RC.
getNumRegs());
577 if (ExtraRegInfo[Reg].Stage == RS_Assign && !ForceGlobal && !LI->
empty() &&
578 LIS->intervalIsInOneMBB(*LI)) {
588 Prio = Indexes->getZeroIndex().getInstrDistance(LI->
endIndex());
595 Prio = (1u << 29) + Size;
601 if (VRM->hasKnownPreference(Reg))
606 CurQueue.push(std::make_pair(Prio, ~Reg));
612 if (CurQueue.empty())
614 LiveInterval *LI = &LIS->getInterval(~CurQueue.top().second);
630 while ((PhysReg = Order.
next()))
631 if (!
Matrix->checkInterference(VirtReg, PhysReg))
633 if (!PhysReg || Order.
isHint())
640 if (
unsigned Hint =
MRI->getSimpleHint(VirtReg.
reg))
643 EvictionCost MaxCost;
644 MaxCost.setBrokenHints(1);
645 if (canEvictInterference(VirtReg, Hint,
true, MaxCost)) {
646 evictInterference(VirtReg, Hint, NewVRegs);
651 SetOfBrokenHints.insert(&VirtReg);
655 unsigned Cost = TRI->getCostPerUse(PhysReg);
663 unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
664 return CheapReg ? CheapReg : PhysReg;
672 unsigned RAGreedy::canReassign(
LiveInterval &VirtReg,
unsigned PrevReg) {
675 while ((PhysReg = Order.
next())) {
676 if (PhysReg == PrevReg)
680 for (; Units.isValid(); ++Units) {
683 if (subQ.checkInterference())
687 if (!Units.isValid())
691 DEBUG(
dbgs() <<
"can reassign: " << VirtReg <<
" from "
712 bool CanSplit = getStage(B) < RS_Spill;
716 if (CanSplit && IsHint && !BreaksHint)
735 bool RAGreedy::canEvictInterference(
LiveInterval &VirtReg,
unsigned PhysReg,
736 bool IsHint, EvictionCost &MaxCost) {
738 if (
Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg)
741 bool IsLocal = LIS->intervalIsInOneMBB(VirtReg);
750 unsigned Cascade = ExtraRegInfo[VirtReg.
reg].Cascade;
752 Cascade = NextCascade;
764 assert(TargetRegisterInfo::isVirtualRegister(Intf->
reg) &&
765 "Only expecting virtual register interference from query");
767 if (getStage(*Intf) == RS_Done)
777 RegClassInfo.getNumAllocatableRegs(
MRI->getRegClass(VirtReg.
reg)) <
778 RegClassInfo.getNumAllocatableRegs(
MRI->getRegClass(Intf->
reg)));
780 unsigned IntfCascade = ExtraRegInfo[Intf->
reg].Cascade;
781 if (Cascade <= IntfCascade) {
786 Cost.BrokenHints += 10;
789 bool BreaksHint = VRM->hasPreferredPhys(Intf->
reg);
791 Cost.BrokenHints += BreaksHint;
792 Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->
weight);
794 if (!(Cost < MaxCost))
799 if (!shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
804 if (!MaxCost.isMax() && IsLocal && LIS->intervalIsInOneMBB(*Intf) &&
805 (!EnableLocalReassign || !canReassign(*Intf, PhysReg))) {
817 void RAGreedy::evictInterference(
LiveInterval &VirtReg,
unsigned PhysReg,
822 unsigned Cascade = ExtraRegInfo[VirtReg.
reg].Cascade;
824 Cascade = ExtraRegInfo[VirtReg.
reg].Cascade = NextCascade++;
827 <<
" interference: Cascade " << Cascade <<
'\n');
835 Intfs.
append(IVR.begin(), IVR.end());
839 for (
unsigned i = 0, e = Intfs.
size();
i != e; ++
i) {
842 if (!VRM->hasPhys(Intf->
reg))
845 assert((ExtraRegInfo[Intf->
reg].Cascade < Cascade ||
847 "Cannot decrease cascade number, illegal eviction");
848 ExtraRegInfo[Intf->
reg].Cascade = Cascade;
856 bool RAGreedy::isUnusedCalleeSavedReg(
unsigned PhysReg)
const {
857 unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg);
861 return !
Matrix->isPhysRegUsed(PhysReg);
871 unsigned CostPerUseLimit) {
876 EvictionCost BestCost;
878 unsigned BestPhys = 0;
883 if (CostPerUseLimit < ~0u) {
884 BestCost.BrokenHints = 0;
885 BestCost.MaxWeight = VirtReg.
weight;
889 unsigned MinCost = RegClassInfo.getMinCost(RC);
890 if (MinCost >= CostPerUseLimit) {
891 DEBUG(
dbgs() << TRI->getRegClassName(RC) <<
" minimum cost = " << MinCost
892 <<
", no cheaper registers to be found.\n");
898 if (TRI->getCostPerUse(Order.
getOrder().
back()) >= CostPerUseLimit) {
899 OrderLimit = RegClassInfo.getLastCostChange(RC);
900 DEBUG(
dbgs() <<
"Only trying the first " << OrderLimit <<
" regs.\n");
905 while (
unsigned PhysReg = Order.
next(OrderLimit)) {
906 if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
910 if (CostPerUseLimit == 1 && isUnusedCalleeSavedReg(PhysReg)) {
912 <<
PrintReg(RegClassInfo.getLastCalleeSavedAlias(PhysReg), TRI)
917 if (!canEvictInterference(VirtReg, PhysReg,
false, BestCost))
931 evictInterference(VirtReg, BestPhys, NewVRegs);
950 SplitConstraints.resize(UseBlocks.
size());
952 for (
unsigned i = 0;
i != UseBlocks.
size(); ++
i) {
958 BC.
Entry = BI.
LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
959 BC.
Exit = BI.
LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
970 if (Intf.
first() <= Indexes->getMBBStartIdx(BC.
Number)) {
971 BC.
Entry = SpillPlacement::MustSpill;
974 BC.
Entry = SpillPlacement::PrefSpill;
983 if (Intf.
last() >= SA->getLastSplitPoint(BC.
Number)) {
984 BC.
Exit = SpillPlacement::MustSpill;
987 BC.
Exit = SpillPlacement::PrefSpill;
996 StaticCost += SpillPlacer->getBlockFrequency(BC.
Number);
1002 SpillPlacer->addConstraints(SplitConstraints);
1003 return SpillPlacer->scanActiveBundles();
1011 const unsigned GroupSize = 8;
1013 unsigned TBS[GroupSize];
1014 unsigned B = 0,
T = 0;
1016 for (
unsigned i = 0;
i != Blocks.
size(); ++
i) {
1021 assert(
T < GroupSize &&
"Array overflow");
1023 if (++
T == GroupSize) {
1030 assert(B < GroupSize &&
"Array overflow");
1034 if (Intf.
first() <= Indexes->getMBBStartIdx(Number))
1035 BCS[B].Entry = SpillPlacement::MustSpill;
1037 BCS[
B].
Entry = SpillPlacement::PrefSpill;
1040 if (Intf.
last() >= SA->getLastSplitPoint(Number))
1041 BCS[B].Exit = SpillPlacement::MustSpill;
1043 BCS[
B].
Exit = SpillPlacement::PrefSpill;
1045 if (++B == GroupSize) {
1055 void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
1057 BitVector Todo = SA->getThroughBlocks();
1059 unsigned AddedTo = 0;
1061 unsigned Visited = 0;
1067 for (
int i = 0, e = NewBundles.
size();
i != e; ++
i) {
1068 unsigned Bundle = NewBundles[
i];
1074 if (!Todo.
test(Block))
1085 if (ActiveBlocks.
size() == AddedTo)
1090 auto NewBlocks =
makeArrayRef(ActiveBlocks).slice(AddedTo);
1092 addThroughConstraints(Cand.Intf, NewBlocks);
1096 SpillPlacer->addPrefSpill(NewBlocks,
true);
1097 AddedTo = ActiveBlocks.
size();
1100 SpillPlacer->iterate();
1112 bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
1114 if (!SA->getNumThroughBlocks())
1118 Cand.reset(IntfCache, 0);
1120 DEBUG(
dbgs() <<
"Compact region bundles");
1124 SpillPlacer->prepare(Cand.LiveBundles);
1128 if (!addSplitConstraints(Cand.Intf, Cost)) {
1134 SpillPlacer->finish();
1136 if (!Cand.LiveBundles.any()) {
1142 for (
int i = Cand.LiveBundles.find_first();
i>=0;
1143 i = Cand.LiveBundles.find_next(
i))
1144 dbgs() <<
" EB#" <<
i;
1155 for (
unsigned i = 0;
i != UseBlocks.
size(); ++
i) {
1159 Cost += SpillPlacer->getBlockFrequency(Number);
1163 Cost += SpillPlacer->getBlockFrequency(Number);
1172 BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
1174 const BitVector &LiveBundles = Cand.LiveBundles;
1176 for (
unsigned i = 0;
i != UseBlocks.
size(); ++
i) {
1179 bool RegIn = LiveBundles[Bundles->getBundle(BC.
Number, 0)];
1180 bool RegOut = LiveBundles[Bundles->getBundle(BC.
Number, 1)];
1184 Ins += RegIn != (BC.
Entry == SpillPlacement::PrefReg);
1186 Ins += RegOut != (BC.
Exit == SpillPlacement::PrefReg);
1188 GlobalCost += SpillPlacer->getBlockFrequency(BC.
Number);
1191 for (
unsigned i = 0, e = Cand.ActiveBlocks.size();
i != e; ++
i) {
1192 unsigned Number = Cand.ActiveBlocks[
i];
1193 bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
1194 bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
1195 if (!RegIn && !RegOut)
1197 if (RegIn && RegOut) {
1199 Cand.Intf.moveToBlock(Number);
1200 if (Cand.Intf.hasInterference()) {
1201 GlobalCost += SpillPlacer->getBlockFrequency(Number);
1202 GlobalCost += SpillPlacer->getBlockFrequency(Number);
1207 GlobalCost += SpillPlacer->getBlockFrequency(Number);
1228 const unsigned NumGlobalIntvs = LREdit.
size();
1229 DEBUG(
dbgs() <<
"splitAroundRegion with " << NumGlobalIntvs <<
" globals.\n");
1230 assert(NumGlobalIntvs &&
"No global intervals configured");
1235 unsigned Reg = SA->getParent().reg;
1236 bool SingleInstrs = RegClassInfo.isProperSubClass(
MRI->getRegClass(Reg));
1240 for (
unsigned i = 0;
i != UseBlocks.
size(); ++
i) {
1243 unsigned IntvIn = 0, IntvOut = 0;
1246 unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
1248 GlobalSplitCandidate &Cand = GlobalCand[CandIn];
1249 IntvIn = Cand.IntvIdx;
1250 Cand.Intf.moveToBlock(Number);
1251 IntfIn = Cand.Intf.first();
1255 unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
1257 GlobalSplitCandidate &Cand = GlobalCand[CandOut];
1258 IntvOut = Cand.IntvIdx;
1259 Cand.Intf.moveToBlock(Number);
1260 IntfOut = Cand.Intf.last();
1265 if (!IntvIn && !IntvOut) {
1267 if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
1268 SE->splitSingleBlock(BI);
1272 if (IntvIn && IntvOut)
1273 SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
1275 SE->splitRegInBlock(BI, IntvIn, IntfIn);
1277 SE->splitRegOutBlock(BI, IntvOut, IntfOut);
1283 BitVector Todo = SA->getThroughBlocks();
1284 for (
unsigned c = 0; c != UsedCands.
size(); ++c) {
1286 for (
unsigned i = 0, e = Blocks.
size();
i != e; ++
i) {
1287 unsigned Number = Blocks[
i];
1288 if (!Todo.
test(Number))
1292 unsigned IntvIn = 0, IntvOut = 0;
1295 unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
1297 GlobalSplitCandidate &Cand = GlobalCand[CandIn];
1298 IntvIn = Cand.IntvIdx;
1299 Cand.Intf.moveToBlock(Number);
1300 IntfIn = Cand.Intf.first();
1303 unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
1305 GlobalSplitCandidate &Cand = GlobalCand[CandOut];
1306 IntvOut = Cand.IntvIdx;
1307 Cand.Intf.moveToBlock(Number);
1308 IntfOut = Cand.Intf.last();
1310 if (!IntvIn && !IntvOut)
1312 SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
1319 SE->finish(&IntvMap);
1320 DebugVars->splitRegister(Reg, LREdit.
regs(), *LIS);
1322 ExtraRegInfo.
resize(
MRI->getNumVirtRegs());
1323 unsigned OrigBlocks = SA->getNumLiveBlocks();
1330 for (
unsigned i = 0, e = LREdit.
size();
i != e; ++
i) {
1334 if (getStage(Reg) != RS_New)
1339 if (IntvMap[
i] == 0) {
1340 setStage(Reg, RS_Spill);
1346 if (IntvMap[
i] < NumGlobalIntvs) {
1347 if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
1348 DEBUG(
dbgs() <<
"Main interval covers the same " << OrigBlocks
1349 <<
" blocks as original.\n");
1351 setStage(Reg, RS_Split2);
1361 MF->verify(
this,
"After splitting live range around region");
1366 unsigned NumCands = 0;
1370 bool HasCompact = calcCompactRegion(GlobalCand.front());
1374 BestCost = BlockFrequency::getMaxFrequency();
1378 BestCost = calcSpillCost();
1379 DEBUG(
dbgs() <<
"Cost of isolating all blocks = ";
1380 MBFI->printBlockFreq(
dbgs(), BestCost) <<
'\n');
1384 calculateRegionSplitCost(VirtReg, Order, BestCost, NumCands,
1388 if (!HasCompact && BestCand ==
NoCand)
1391 return doRegionSplit(VirtReg, BestCand, HasCompact, NewVRegs);
1394 unsigned RAGreedy::calculateRegionSplitCost(
LiveInterval &VirtReg,
1399 unsigned BestCand =
NoCand;
1401 while (
unsigned PhysReg = Order.
next()) {
1402 if (IgnoreCSR && isUnusedCalleeSavedReg(PhysReg))
1407 if (NumCands == IntfCache.getMaxCursors()) {
1408 unsigned WorstCount = ~0u;
1410 for (
unsigned i = 0;
i != NumCands; ++
i) {
1411 if (
i == BestCand || !GlobalCand[
i].PhysReg)
1413 unsigned Count = GlobalCand[
i].LiveBundles.count();
1414 if (Count < WorstCount) {
1420 GlobalCand[Worst] = GlobalCand[NumCands];
1421 if (BestCand == NumCands)
1425 if (GlobalCand.size() <= NumCands)
1426 GlobalCand.resize(NumCands+1);
1427 GlobalSplitCandidate &Cand = GlobalCand[NumCands];
1428 Cand.reset(IntfCache, PhysReg);
1430 SpillPlacer->prepare(Cand.LiveBundles);
1432 if (!addSplitConstraints(Cand.Intf, Cost)) {
1437 MBFI->printBlockFreq(
dbgs(), Cost));
1438 if (Cost >= BestCost) {
1441 dbgs() <<
" worse than no bundles\n";
1443 dbgs() <<
" worse than "
1444 <<
PrintReg(GlobalCand[BestCand].PhysReg, TRI) <<
'\n';
1450 SpillPlacer->finish();
1453 if (!Cand.LiveBundles.any()) {
1458 Cost += calcGlobalSplitCost(Cand);
1460 dbgs() <<
", total = "; MBFI->printBlockFreq(
dbgs(), Cost)
1462 for (
int i = Cand.LiveBundles.find_first();
i>=0;
1463 i = Cand.LiveBundles.find_next(
i))
1464 dbgs() <<
" EB#" <<
i;
1467 if (Cost < BestCost) {
1468 BestCand = NumCands;
1476 unsigned RAGreedy::doRegionSplit(
LiveInterval &VirtReg,
unsigned BestCand,
1481 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM,
this, &DeadRemats);
1485 BundleCand.assign(Bundles->getNumBundles(),
NoCand);
1488 if (BestCand !=
NoCand) {
1489 GlobalSplitCandidate &Cand = GlobalCand[BestCand];
1490 if (
unsigned B = Cand.getBundles(BundleCand, BestCand)) {
1492 Cand.IntvIdx = SE->openIntv();
1494 << B <<
" bundles, intv " << Cand.IntvIdx <<
".\n");
1501 GlobalSplitCandidate &Cand = GlobalCand.front();
1502 assert(!Cand.PhysReg &&
"Compact region has no physreg");
1503 if (
unsigned B = Cand.getBundles(BundleCand, 0)) {
1505 Cand.IntvIdx = SE->openIntv();
1506 DEBUG(
dbgs() <<
"Split for compact region in " << B <<
" bundles, intv "
1507 << Cand.IntvIdx <<
".\n");
1512 splitAroundRegion(LREdit, UsedCands);
1526 assert(&SA->getParent() == &VirtReg &&
"Live range wasn't analyzed");
1527 unsigned Reg = VirtReg.
reg;
1528 bool SingleInstrs = RegClassInfo.isProperSubClass(
MRI->getRegClass(Reg));
1529 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM,
this, &DeadRemats);
1532 for (
unsigned i = 0;
i != UseBlocks.
size(); ++
i) {
1534 if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
1535 SE->splitSingleBlock(BI);
1543 SE->finish(&IntvMap);
1546 DebugVars->splitRegister(Reg, LREdit.
regs(), *LIS);
1548 ExtraRegInfo.
resize(
MRI->getNumVirtRegs());
1552 for (
unsigned i = 0, e = LREdit.
size();
i != e; ++
i) {
1554 if (getStage(LI) == RS_New && IntvMap[
i] == 0)
1555 setStage(LI, RS_Spill);
1559 MF->
verify(
this,
"After splitting live range around basic blocks");
1574 assert(SuperRC &&
"Invalid register class");
1596 if (!RegClassInfo.isProperSubClass(CurRC))
1601 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM,
this, &DeadRemats);
1602 SE->reset(LREdit, SplitEditor::SM_Size);
1605 if (Uses.
size() <= 1)
1608 DEBUG(
dbgs() <<
"Split around " << Uses.
size() <<
" individual instrs.\n");
1611 TRI->getLargestLegalSuperClass(CurRC, *MF);
1612 unsigned SuperRCNumAllocatableRegs = RCI.getNumAllocatableRegs(SuperRC);
1617 for (
unsigned i = 0;
i != Uses.
size(); ++
i) {
1618 if (
const MachineInstr *
MI = Indexes->getInstructionFromIndex(Uses[
i]))
1619 if (
MI->isFullCopy() ||
1620 SuperRCNumAllocatableRegs ==
1623 DEBUG(
dbgs() <<
" skip:\t" << Uses[i] <<
'\t' << *
MI);
1627 SlotIndex SegStart = SE->enterIntvBefore(Uses[i]);
1628 SlotIndex SegStop = SE->leaveIntvAfter(Uses[i]);
1629 SE->useIntv(SegStart, SegStop);
1632 if (LREdit.
empty()) {
1633 DEBUG(
dbgs() <<
"All uses were copies.\n");
1638 SE->finish(&IntvMap);
1639 DebugVars->splitRegister(VirtReg.
reg, LREdit.
regs(), *LIS);
1640 ExtraRegInfo.
resize(
MRI->getNumVirtRegs());
1643 setStage(LREdit.
begin(), LREdit.
end(), RS_Spill);
1658 void RAGreedy::calcGapWeights(
unsigned PhysReg,
1660 assert(SA->getUseBlocks().size() == 1 &&
"Not a local interval");
1663 const unsigned NumGaps = Uses.
size()-1;
1667 BI.LiveIn ? BI.FirstInstr.
getBaseIndex() : BI.FirstInstr;
1671 GapWeight.
assign(NumGaps, 0.0f);
1675 if (!
Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units)
1676 .checkInterference())
1687 Matrix->getLiveUnions()[*Units] .find(StartIdx);
1688 for (
unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
1690 while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
1691 if (++Gap == NumGaps)
1697 const float weight = IntI.value()->weight;
1698 for (; Gap != NumGaps; ++Gap) {
1699 GapWeight[Gap] = std::max(GapWeight[Gap], weight);
1700 if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
1710 const LiveRange &LR = LIS->getRegUnit(*Units);
1715 for (
unsigned Gap = 0; I != E && I->start < StopIdx; ++
I) {
1716 while (Uses[Gap+1].getBoundaryIndex() < I->start)
1717 if (++Gap == NumGaps)
1722 for (; Gap != NumGaps; ++Gap) {
1724 if (Uses[Gap+1].getBaseIndex() >= I->end)
1738 assert(SA->getUseBlocks().size() == 1 &&
"Not a local interval");
1749 if (Uses.
size() <= 2)
1751 const unsigned NumGaps = Uses.
size()-1;
1754 dbgs() <<
"tryLocalSplit: ";
1755 for (
unsigned i = 0, e = Uses.
size(); i != e; ++
i)
1756 dbgs() <<
' ' << Uses[
i];
1763 if (
Matrix->checkRegMaskInterference(VirtReg)) {
1768 unsigned ri = std::lower_bound(RMS.
begin(), RMS.
end(),
1770 unsigned re = RMS.
size();
1771 for (
unsigned i = 0; i != NumGaps && ri != re; ++
i) {
1773 assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
1774 if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
1778 if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
1780 DEBUG(
dbgs() <<
' ' << RMS[ri] <<
':' << Uses[i] <<
'-' << Uses[i+1]);
1781 RegMaskGaps.push_back(i);
1784 while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
1808 bool ProgressRequired = getStage(VirtReg) >= RS_Split2;
1811 unsigned BestBefore = NumGaps;
1812 unsigned BestAfter = 0;
1815 const float blockFreq =
1816 SpillPlacer->getBlockFrequency(BI.MBB->getNumber()).getFrequency() *
1817 (1.0f / MBFI->getEntryFreq());
1821 while (
unsigned PhysReg = Order.
next()) {
1824 calcGapWeights(PhysReg, GapWeight);
1827 if (
Matrix->checkRegMaskInterference(VirtReg, PhysReg))
1828 for (
unsigned i = 0, e = RegMaskGaps.size(); i != e; ++
i)
1835 unsigned SplitBefore = 0, SplitAfter = 1;
1839 float MaxGap = GapWeight[0];
1843 const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
1844 const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;
1847 << Uses[SplitBefore] <<
'-' << Uses[SplitAfter]
1848 <<
" i=" << MaxGap);
1851 if (!LiveBefore && !LiveAfter) {
1859 unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;
1862 bool Legal = !ProgressRequired || NewGaps < NumGaps;
1871 blockFreq * (NewGaps + 1),
1872 Uses[SplitBefore].distance(Uses[SplitAfter]) +
1873 (LiveBefore + LiveAfter) * SlotIndex::InstrDist,
1880 float Diff = EstWeight - MaxGap;
1881 if (Diff > BestDiff) {
1884 BestBefore = SplitBefore;
1885 BestAfter = SplitAfter;
1892 if (++SplitBefore < SplitAfter) {
1895 if (GapWeight[SplitBefore - 1] >= MaxGap) {
1896 MaxGap = GapWeight[SplitBefore];
1897 for (
unsigned i = SplitBefore + 1; i != SplitAfter; ++
i)
1898 MaxGap = std::max(MaxGap, GapWeight[i]);
1906 if (SplitAfter >= NumGaps) {
1912 MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
1917 if (BestBefore == NumGaps)
1920 DEBUG(
dbgs() <<
"Best local split range: " << Uses[BestBefore]
1921 <<
'-' << Uses[BestAfter] <<
", " << BestDiff
1922 <<
", " << (BestAfter - BestBefore + 1) <<
" instrs\n");
1924 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM,
this, &DeadRemats);
1928 SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
1929 SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
1930 SE->useIntv(SegStart, SegStop);
1932 SE->finish(&IntvMap);
1933 DebugVars->splitRegister(VirtReg.
reg, LREdit.
regs(), *LIS);
1938 bool LiveBefore = BestBefore != 0 || BI.LiveIn;
1939 bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
1940 unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
1941 if (NewGaps >= NumGaps) {
1942 DEBUG(
dbgs() <<
"Tagging non-progress ranges: ");
1943 assert(!ProgressRequired &&
"Didn't make progress when it was required.");
1944 for (
unsigned i = 0, e = IntvMap.
size(); i != e; ++
i)
1945 if (IntvMap[i] == 1) {
1946 setStage(LIS->getInterval(LREdit.
get(i)), RS_Split2);
1966 if (getStage(VirtReg) >= RS_Spill)
1970 if (LIS->intervalIsInOneMBB(VirtReg)) {
1973 SA->analyze(&VirtReg);
1974 unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
1975 if (PhysReg || !NewVRegs.
empty())
1977 return tryInstructionSplit(VirtReg, Order, NewVRegs);
1983 SA->analyze(&VirtReg);
1989 if (SA->didRepairRange()) {
1991 Matrix->invalidateVirtRegs();
1992 if (
unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
1999 if (getStage(VirtReg) < RS_Split2) {
2000 unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
2001 if (PhysReg || !NewVRegs.
empty())
2006 return tryBlockSplit(VirtReg, Order, NewVRegs);
2022 RAGreedy::mayRecolorAllInterferences(
unsigned PhysReg,
LiveInterval &VirtReg,
2023 SmallLISet &RecoloringCandidates,
2024 const SmallVirtRegSet &FixedRegisters) {
2033 DEBUG(
dbgs() <<
"Early abort: too many interferences.\n");
2034 CutOffInfo |= CO_Interf;
2041 if ((getStage(*Intf) == RS_Done &&
2042 MRI->getRegClass(Intf->
reg) == CurRC) ||
2043 FixedRegisters.count(Intf->
reg)) {
2044 DEBUG(
dbgs() <<
"Early abort: the inteference is not recolorable.\n");
2047 RecoloringCandidates.insert(Intf);
2092 unsigned RAGreedy::tryLastChanceRecoloring(
LiveInterval &VirtReg,
2095 SmallVirtRegSet &FixedRegisters,
2097 DEBUG(
dbgs() <<
"Try last chance recoloring for " << VirtReg <<
'\n');
2100 "Last chance recoloring should really be last chance");
2106 DEBUG(
dbgs() <<
"Abort because max depth has been reached.\n");
2107 CutOffInfo |= CO_Depth;
2112 SmallLISet RecoloringCandidates;
2122 while (
unsigned PhysReg = Order.
next()) {
2123 DEBUG(
dbgs() <<
"Try to assign: " << VirtReg <<
" to "
2124 <<
PrintReg(PhysReg, TRI) <<
'\n');
2125 RecoloringCandidates.clear();
2126 VirtRegToPhysReg.
clear();
2127 CurrentNewVRegs.
clear();
2130 if (
Matrix->checkInterference(VirtReg, PhysReg) >
2131 LiveRegMatrix::IK_VirtReg) {
2132 DEBUG(
dbgs() <<
"Some inteferences are not with virtual registers.\n");
2139 if (!mayRecolorAllInterferences(PhysReg, VirtReg, RecoloringCandidates,
2141 DEBUG(
dbgs() <<
"Some inteferences cannot be recolored.\n");
2148 PQueue RecoloringQueue;
2149 for (SmallLISet::iterator It = RecoloringCandidates.begin(),
2150 EndIt = RecoloringCandidates.end();
2151 It != EndIt; ++It) {
2152 unsigned ItVirtReg = (*It)->reg;
2153 enqueue(RecoloringQueue, *It);
2154 assert(VRM->hasPhys(ItVirtReg) &&
2155 "Interferences are supposed to be with allocated vairables");
2158 VirtRegToPhysReg[ItVirtReg] = VRM->getPhys(ItVirtReg);
2166 Matrix->assign(VirtReg, PhysReg);
2171 SmallVirtRegSet SaveFixedRegisters(FixedRegisters);
2172 if (tryRecoloringCandidates(RecoloringQueue, CurrentNewVRegs,
2173 FixedRegisters, Depth)) {
2175 for (
unsigned NewVReg : CurrentNewVRegs)
2179 Matrix->unassign(VirtReg);
2183 DEBUG(
dbgs() <<
"Fail to assign: " << VirtReg <<
" to "
2184 <<
PrintReg(PhysReg, TRI) <<
'\n');
2187 FixedRegisters = SaveFixedRegisters;
2188 Matrix->unassign(VirtReg);
2195 End = CurrentNewVRegs.end();
2196 Next !=
End; ++Next) {
2197 if (RecoloringCandidates.count(&LIS->getInterval(*Next)))
2202 for (SmallLISet::iterator It = RecoloringCandidates.begin(),
2203 EndIt = RecoloringCandidates.end();
2204 It != EndIt; ++It) {
2205 unsigned ItVirtReg = (*It)->reg;
2206 if (VRM->hasPhys(ItVirtReg))
2208 unsigned ItPhysReg = VirtRegToPhysReg[ItVirtReg];
2209 Matrix->assign(**It, ItPhysReg);
2225 bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
2227 SmallVirtRegSet &FixedRegisters,
2229 while (!RecoloringQueue.empty()) {
2231 DEBUG(
dbgs() <<
"Try to recolor: " << *LI <<
'\n');
2233 PhysReg = selectOrSplitImpl(*LI, NewVRegs, FixedRegisters, Depth + 1);
2238 if (PhysReg == ~0u || (!PhysReg && !LI->
empty()))
2242 assert(LI->
empty() &&
"Only empty live-range do not require a register");
2243 DEBUG(
dbgs() <<
"Recoloring of " << *LI <<
" succeeded. Empty LI.\n");
2247 <<
" succeeded with: " <<
PrintReg(PhysReg, TRI) <<
'\n');
2249 Matrix->assign(*LI, PhysReg);
2250 FixedRegisters.insert(LI->
reg);
2259 unsigned RAGreedy::selectOrSplit(
LiveInterval &VirtReg,
2261 CutOffInfo = CO_None;
2262 LLVMContext &Ctx = MF->getFunction()->getContext();
2263 SmallVirtRegSet FixedRegisters;
2264 unsigned Reg = selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters);
2265 if (Reg == ~0U && (CutOffInfo != CO_None)) {
2266 uint8_t CutOffEncountered = CutOffInfo & (CO_Depth | CO_Interf);
2267 if (CutOffEncountered == CO_Depth)
2268 Ctx.
emitError(
"register allocation failed: maximum depth for recoloring "
2269 "reached. Use -fexhaustive-register-search to skip "
2271 else if (CutOffEncountered == CO_Interf)
2272 Ctx.
emitError(
"register allocation failed: maximum interference for "
2273 "recoloring reached. Use -fexhaustive-register-search "
2275 else if (CutOffEncountered == (CO_Depth | CO_Interf))
2276 Ctx.
emitError(
"register allocation failed: maximum interference and "
2277 "depth for recoloring reached. Use "
2278 "-fexhaustive-register-search to skip cutoffs");
2289 unsigned RAGreedy::tryAssignCSRFirstTime(
LiveInterval &VirtReg,
2292 unsigned &CostPerUseLimit,
2294 if (getStage(VirtReg) == RS_Spill && VirtReg.
isSpillable()) {
2297 SA->analyze(&VirtReg);
2298 if (calcSpillCost() >= CSRCost)
2303 CostPerUseLimit = 1;
2306 if (getStage(VirtReg) < RS_Split) {
2309 SA->analyze(&VirtReg);
2310 unsigned NumCands = 0;
2312 unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost,
2319 doRegionSplit(VirtReg, BestCand,
false, NewVRegs);
2325 void RAGreedy::aboutToRemoveInterval(
LiveInterval &LI) {
2327 SetOfBrokenHints.remove(&LI);
2330 void RAGreedy::initializeCSRCost() {
2335 if (!CSRCost.getFrequency())
2339 uint64_t ActualEntry = MBFI->getEntryFreq();
2344 uint64_t FixedEntry = 1 << 14;
2345 if (ActualEntry < FixedEntry)
2347 else if (ActualEntry <= UINT32_MAX)
2352 CSRCost = CSRCost.getFrequency() * (ActualEntry / FixedEntry);
2358 void RAGreedy::collectHintInfo(
unsigned Reg, HintsInfo &Out) {
2360 if (!Instr.isFullCopy())
2363 unsigned OtherReg = Instr.getOperand(0).getReg();
2364 if (OtherReg == Reg) {
2365 OtherReg = Instr.getOperand(1).getReg();
2366 if (OtherReg == Reg)
2370 unsigned OtherPhysReg = TargetRegisterInfo::isPhysicalRegister(OtherReg)
2372 : VRM->getPhys(OtherReg);
2374 Out.push_back(HintInfo(MBFI->getBlockFreq(Instr.getParent()), OtherReg,
2385 for (
const HintInfo &Info : List) {
2386 if (Info.PhysReg != PhysReg)
2400 void RAGreedy::tryHintRecoloring(
LiveInterval &VirtReg) {
2407 unsigned Reg = VirtReg.
reg;
2408 unsigned PhysReg = VRM->getPhys(Reg);
2415 <<
PrintReg(PhysReg, TRI) <<
")\n");
2421 if (TargetRegisterInfo::isPhysicalRegister(Reg))
2424 assert(VRM->hasPhys(Reg) &&
"We have unallocated variable!!");
2429 unsigned CurrPhys = VRM->getPhys(Reg);
2432 if (CurrPhys != PhysReg && (!
MRI->getRegClass(Reg)->contains(PhysReg) ||
2433 Matrix->checkInterference(LI, PhysReg)))
2437 <<
") is recolorable.\n");
2441 collectHintInfo(Reg, Info);
2444 if (CurrPhys != PhysReg) {
2445 DEBUG(
dbgs() <<
"Checking profitability:\n");
2446 BlockFrequency OldCopiesCost = getBrokenHintFreq(Info, CurrPhys);
2449 <<
"\nNew Cost: " << NewCopiesCost.
getFrequency() <<
'\n');
2450 if (OldCopiesCost < NewCopiesCost) {
2451 DEBUG(
dbgs() <<
"=> Not profitable.\n");
2460 Matrix->assign(LI, PhysReg);
2464 for (
const HintInfo &
HI : Info) {
2468 }
while (!RecoloringCandidates.
empty());
2507 void RAGreedy::tryHintsRecoloring() {
2509 assert(TargetRegisterInfo::isVirtualRegister(LI->reg) &&
2510 "Recoloring is possible only for virtual registers");
2513 if (!VRM->hasPhys(LI->reg))
2515 tryHintRecoloring(*LI);
2519 unsigned RAGreedy::selectOrSplitImpl(
LiveInterval &VirtReg,
2521 SmallVirtRegSet &FixedRegisters,
2523 unsigned CostPerUseLimit = ~0u;
2526 if (
unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs)) {
2530 if (CSRCost.getFrequency() && isUnusedCalleeSavedReg(PhysReg) &&
2532 unsigned CSRReg = tryAssignCSRFirstTime(VirtReg, Order, PhysReg,
2533 CostPerUseLimit, NewVRegs);
2534 if (CSRReg || !NewVRegs.
empty())
2542 LiveRangeStage Stage = getStage(VirtReg);
2544 <<
" Cascade " << ExtraRegInfo[VirtReg.
reg].Cascade <<
'\n');
2549 if (Stage != RS_Split)
2550 if (
unsigned PhysReg =
2551 tryEvict(VirtReg, Order, NewVRegs, CostPerUseLimit)) {
2552 unsigned Hint =
MRI->getSimpleHint(VirtReg.
reg);
2558 if (Hint && Hint != PhysReg)
2559 SetOfBrokenHints.insert(&VirtReg);
2563 assert((NewVRegs.
empty() ||
Depth) &&
"Cannot append to existing NewVRegs");
2568 if (Stage < RS_Split) {
2569 setStage(VirtReg, RS_Split);
2570 DEBUG(
dbgs() <<
"wait for second round\n");
2575 if (Stage < RS_Spill) {
2577 unsigned NewVRegSizeBefore = NewVRegs.
size();
2578 unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
2579 if (PhysReg || (NewVRegs.
size() - NewVRegSizeBefore))
2586 return tryLastChanceRecoloring(VirtReg, Order, NewVRegs, FixedRegisters,
2595 setStage(VirtReg, RS_Memory);
2596 DEBUG(
dbgs() <<
"Do as if this register is in memory\n");
2601 LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM,
this, &DeadRemats);
2602 spiller().spill(LRE);
2603 setStage(NewVRegs.
begin(), NewVRegs.
end(), RS_Done);
2606 MF->verify(
this,
"After spilling");
2615 DEBUG(
dbgs() <<
"********** GREEDY REGISTER ALLOCATION **********\n"
2616 <<
"********** Function: " << mf.
getName() <<
'\n');
2620 TII = MF->getSubtarget().getInstrInfo();
2621 RCI.runOnMachineFunction(mf);
2624 MF->getSubtarget().enableRALocalReassignment(
2625 MF->getTarget().getOptLevel());
2628 MF->verify(
this,
"Before greedy register allocator");
2631 getAnalysis<LiveIntervals>(),
2632 getAnalysis<LiveRegMatrix>());
2633 Indexes = &getAnalysis<SlotIndexes>();
2634 MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
2635 DomTree = &getAnalysis<MachineDominatorTree>();
2637 Loops = &getAnalysis<MachineLoopInfo>();
2638 Bundles = &getAnalysis<EdgeBundles>();
2639 SpillPlacer = &getAnalysis<SpillPlacement>();
2640 DebugVars = &getAnalysis<LiveDebugVariables>();
2641 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2643 initializeCSRCost();
2650 SE.reset(
new SplitEditor(*SA, *AA, *LIS, *VRM, *DomTree, *MBFI));
2651 ExtraRegInfo.clear();
2652 ExtraRegInfo.resize(
MRI->getNumVirtRegs());
2654 IntfCache.init(MF,
Matrix->getLiveUnions(), Indexes, LIS, TRI);
2655 GlobalCand.resize(32);
2656 SetOfBrokenHints.clear();
2659 tryHintsRecoloring();
bool seenAllInterferences() const
void push_back(const T &Elt)
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
void rewind()
Start over from the beginning.
STATISTIC(NumFunctions,"Total number of functions")
bool isValid() const
isValid - returns true if this iterator is not yet at the end.
SlotIndex getBoundaryIndex() const
Returns the boundary index associated with this index.
int getInstrDistance(SlotIndex other) const
Return the scaled distance from this index to the given one, where all slots on the same instruction ...
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFunction yet, in which case this will be -1.
SlotIndex getBaseIndex() const
Returns the base index associated with this index.
const T & front() const
front - Get the first element.
LiveInterval - This class represents the liveness of a register, or stack slot.
ArrayRef< MCPhysReg > getOrder() const
Get the allocation order without reordered hints.
bool isSpillable() const
isSpillable - Can this interval be spilled?
SM_Speed - Overlap intervals to minimize the expected execution frequency of the inserted copies...
SlotIndex FirstDef
First non-phi valno->def, or SlotIndex().
MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate machine basic b...
unsigned next(unsigned Limit=0)
Return the next physical register in the allocation order, or 0.
unsigned getNumAllocatableRegs(const TargetRegisterClass *RC) const
getNumAllocatableRegs - Returns the number of actually allocatable registers in RC in the current fun...
uint64_t getFrequency() const
Returns the frequency as a fixpoint number scaled by the entry frequency.
char & RAGreedyID
Greedy register allocator.
Callback methods for LiveRangeEdit owners.
This class represents the liveness of a register, stack slot, etc.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
AnalysisUsage & addRequired()
#define INITIALIZE_PASS_DEPENDENCY(depName)
Query interferences between a single live virtual register and a live interval union.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
const HexagonInstrInfo * TII
static cl::opt< unsigned > LastChanceRecoloringMaxDepth("lcr-max-depth", cl::Hidden, cl::desc("Last chance recoloring max depth"), cl::init(5))
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
This class is basically a combination of TimeRegion and Timer.
Reg
All possible values of the reg field in the ModR/M byte.
unsigned getSize() const
getSize - Returns the sum of sizes of all the LiveRange's.
RegAllocBase provides the register allocation driver and interface that can be extended to add intere...
LLVM_NODISCARD bool empty() const
BorderConstraint Exit
Constraint on block exit.
void assign(size_type NumElts, const T &Elt)
void emitError(unsigned LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
const SmallVectorImpl< LiveInterval * > & interferingVRegs() const
SplitEditor - Edit machine code and LiveIntervals for live range splitting.
const float huge_valf
Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
static GCRegistry::Add< OcamlGC > B("ocaml","ocaml 3.10-compatible GC")
RegisterRegAlloc class - Track the registration of register allocators.
BlockConstraint - Entry and exit constraints for a basic block.
Printable PrintReg(unsigned Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubRegIdx=0)
Prints virtual and physical registers with or without a TRI instance.
size_t size() const
size - Get the array size.
SlotIndex LastInstr
Last instr accessing current reg.
unsigned get(unsigned idx) const
static GCRegistry::Add< CoreCLRGC > E("coreclr","CoreCLR-compatible GC")
TargetInstrInfo - Interface to description of machine instruction set.
static cl::opt< unsigned > CSRFirstTimeCost("regalloc-csr-first-time-cost", cl::desc("Cost for first time use of callee-saved register."), cl::init(0), cl::Hidden)
static cl::opt< SplitEditor::ComplementSpillMode > SplitSpillMode("split-spill-mode", cl::Hidden, cl::desc("Spill mode for splitting live ranges"), cl::values(clEnumValN(SplitEditor::SM_Partition,"default","Default"), clEnumValN(SplitEditor::SM_Size,"size","Optimize for size"), clEnumValN(SplitEditor::SM_Speed,"speed","Optimize for speed")), cl::init(SplitEditor::SM_Speed))
ValuesClass values(OptsTy...Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
SlotIndex last()
last - Return the ending index of the last interfering range in the current block.
initializer< Ty > init(const Ty &Val)
const uint8_t AllocationPriority
Classes with a higher priority value are assigned first by register allocators using a greedy heurist...
bool isValid() const
Returns true if this is a valid index.
unsigned const MachineRegisterInfo * MRI
This is an important class for using LLVM in a threaded context.
Cursor - The primary query interface for the block interference cache.
SM_Partition(Default) - Try to create the complement interval so it doesn't overlap any other interva...
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
SM_Size - Overlap intervals to minimize the number of inserted COPY instructions. ...
INITIALIZE_PASS_BEGIN(RAGreedy,"greedy","Greedy Register Allocator", false, false) INITIALIZE_PASS_END(RAGreedy
Represent the analysis usage information of a pass.
Greedy Register Allocator
static unsigned getNumAllocatableRegsForConstraints(const MachineInstr *MI, unsigned Reg, const TargetRegisterClass *SuperRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, const RegisterClassInfo &RCI)
Get the number of allocatable registers that match the constraints of Reg on MI and that are also in ...
unsigned Number
Basic block number (from MBB::getNumber()).
INITIALIZE_PASS_END(RegBankSelect, DEBUG_TYPE,"Assign register bank of generic virtual registers", false, false) RegBankSelect
void resize(typename StorageT::size_type s)
static const unsigned End
FunctionPass class - This class is used to implement most global optimizations.
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
BorderConstraint Entry
Constraint on block entry.
static cl::opt< bool > EnableLocalReassignment("enable-local-reassign", cl::Hidden, cl::desc("Local reassignment can yield better allocation decisions, but ""may be compile time intensive"), cl::init(false))
const T & back() const
back - Get the last element.
iterator find(SlotIndex Pos)
find - Return an iterator pointing to the first segment that ends after Pos, or end().
static cl::opt< unsigned > LastChanceRecoloringMaxInterference("lcr-max-interf", cl::Hidden, cl::desc("Last chance recoloring maximum number of considered"" interference at a time"), cl::init(8))
A SetVector that performs no allocations if smaller than a certain size.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
void calculateSpillWeightsAndHints(LiveIntervals &LIS, MachineFunction &MF, VirtRegMap *VRM, const MachineLoopInfo &MLI, const MachineBlockFrequencyInfo &MBFI, VirtRegAuxInfo::NormalizingFn norm=normalizeSpillWeight)
Compute spill weights and allocation hints for all virtual register live intervals.
SlotIndex FirstInstr
First instr accessing current reg.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
bool test(unsigned Idx) const
Additional information about basic blocks where the current variable is live.
Promote Memory to Register
SplitAnalysis - Analyze a LiveInterval, looking for live range splitting opportunities.
LLVM_NODISCARD T pop_back_val()
void setPreservesCFG()
This function should be called by the pass, iff they do not:
bool hasInterference()
hasInterference - Return true if the current block has any interference.
static GCRegistry::Add< ShadowStackGC > C("shadow-stack","Very portable GC for uncooperative code generators")
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static RegisterRegAlloc greedyRegAlloc("greedy","greedy register allocator", createGreedyRegisterAllocator)
unsigned getNumRegs() const
Return the number of registers in this class.
LiveSegments::iterator SegmentIter
bool LiveOut
Current reg is live out.
void moveToBlock(unsigned MBBNum)
moveTo - Move cursor to basic block MBBNum.
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
MachineFunctionProperties & set(Property P)
Spiller * createInlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
Create and return a spiller that will insert spill code directly instead of deferring though VirtRegM...
SlotIndex beginIndex() const
beginIndex - Return the lowest numbered slot covered.
Representation of each machine instruction.
ArrayRef< unsigned > regs() const
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
SlotIndex endIndex() const
endIndex - return the maximum point of the range of the whole, exclusive.
FunctionPass * createGreedyRegisterAllocator()
Greedy register allocation pass - This pass implements a global register allocator for optimized buil...
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
unsigned collectInterferingVRegs(unsigned MaxInterferingRegs=UINT_MAX)
const TargetRegisterClass * getRegClassConstraintEffectForVReg(unsigned Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ExploreBundle=false) const
Applies the constraints (def/use) implied by this MI on Reg to the given CurRC.
void verify(const MachineRegisterInfo *MRI=nullptr) const
Walks the interval and assert if any invariants fail to hold.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def...
static cl::opt< bool > EnableDeferredSpilling("enable-deferred-spilling", cl::Hidden, cl::desc("Instead of spilling a variable right away, defer the actual ""code insertion to the end of the allocation. That way the ""allocator might still find a suitable coloring for this ""variable because of other evicted variables."), cl::init(false))
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static float normalizeSpillWeight(float UseDefFreq, unsigned Size, unsigned NumInstr)
Normalize the spill weight of a live interval.
bool operator<(int64_t V1, const APSInt &V2)
bool isHint() const
Return true if the last register returned from next() was a preferred register.
static cl::opt< bool > ExhaustiveSearch("exhaustive-register-search", cl::NotHidden, cl::desc("Exhaustive Search for registers bypassing the depth ""and interference cutoffs of last chance recoloring"))
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
StringRef - Represent a constant reference to a string, i.e.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object...
bool TimePassesIsEnabled
If the user specifies the -time-passes argument on an LLVM tool command line then the value of this b...
bool LiveIn
Current reg is live in.
SlotIndex - An opaque wrapper around machine indexes.
static GCRegistry::Add< ErlangGC > A("erlang","erlang-compatible garbage collector")
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
SlotIndex first()
first - Return the starting index of the first interfering range in the current block.
Properties which a MachineFunction may have at a given point in time.
bool ChangesValue
True when this block changes the value of the live range.