#include "llvm/Config/llvm-config.h"

#define DEBUG_TYPE "regalloc"

STATISTIC(NumSpilledRanges,  "Number of spilled live ranges");
STATISTIC(NumSnippets,       "Number of spilled snippets");
STATISTIC(NumSpillsRemoved,  "Number of spills removed");
STATISTIC(NumReloadsRemoved, "Number of reloads removed");
STATISTIC(NumFolded,         "Number of folded stack accesses");
STATISTIC(NumRemats,         "Number of rematerialized defs for spilling");
static cl::opt<bool>
    RestrictStatepointRemat("restrict-statepoint-remat", cl::init(false),
                            cl::Hidden,
                            cl::desc("Restrict remat for statepoint operands"));
  // (Members of class HoistSpillHelper.)
  // Map from a (StackSlot, original value number) pair to the set of
  // equal-valued, mergeable spill instructions.
  using MergeableSpillsMap =
      MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
  MergeableSpillsMap MergeableSpills;
  void rmRedundantSpills(/* Spills, SpillsToRm, SpillBBToSpill */);

  HoistSpillHelper(const Spiller::RequiredAnalyses &Analyses,
                   MachineFunction &mf, VirtRegMap &vrm)
      : MF(mf), LIS(Analyses.LIS), LSS(Analyses.LSS), MDT(Analyses.MDT),
        VRM(vrm), MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
        TRI(*mf.getSubtarget().getRegisterInfo()), MBFI(Analyses.MBFI),
        IPA(LIS, mf.getNumBlockIDs()) {}

  void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                            unsigned Original);
  bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
  void hoistAllSpills();
  // ...
};
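Illustrative sketch, not part of the original file: MergeableSpills groups equal-valued spills under a (stack slot, value number) key, so the add/rm pair declared above boils down to set insertion and removal; StackSlot, OrigVNI and Spill stand in for the caller's values:

    std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
    MergeableSpills[MIdx].insert(&Spill); // addToMergeableSpills
    MergeableSpills[MIdx].erase(&Spill);  // rmFromMergeableSpills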
class InlineSpiller : public Spiller {
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
  LiveRegMatrix *Matrix = nullptr;

  LiveRangeEdit *Edit = nullptr;
  LiveInterval *StackInt = nullptr;

  AllocationOrder *Order = nullptr;

  SmallPtrSet<MachineInstr *, 8> SnippetCopies;
  SmallPtrSet<VNInfo *, 8> UsedValues;
  SmallVector<MachineInstr *, 8> DeadDefs;

  HoistSpillHelper HSpiller;
  VirtRegAuxInfo &VRAI;

  ~InlineSpiller() override = default;
public:
  InlineSpiller(const Spiller::RequiredAnalyses &Analyses, MachineFunction &MF,
                VirtRegMap &VRM, VirtRegAuxInfo &VRAI, LiveRegMatrix *Matrix)
      : MF(MF), LIS(Analyses.LIS), LSS(Analyses.LSS), VRM(VRM),
        MRI(MF.getRegInfo()), TII(*MF.getSubtarget().getInstrInfo()),
        TRI(*MF.getSubtarget().getRegisterInfo()), Matrix(Matrix),
        HSpiller(Analyses, MF, VRM), VRAI(VRAI) {}

  void spill(LiveRangeEdit &, AllocationOrder *Order = nullptr) override;
  void postOptimization() override;

private:
  bool isSnippet(const LiveInterval &SnipLI);
  void collectRegsToSpill();

  bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);

  void markValueUsed(LiveInterval *, VNInfo *);
  bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI);
  bool hasPhysRegAvailable(const MachineInstr &MI);
  bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
  void reMaterializeAll();

  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
                         MachineInstr *LoadMI = nullptr);
  // ...
};
void Spiller::anchor() {}

Spiller *
llvm::createInlineSpiller(const Spiller::RequiredAnalyses &Analyses,
                          MachineFunction &MF, VirtRegMap &VRM,
                          VirtRegAuxInfo &VRAI, LiveRegMatrix *Matrix) {
  return new InlineSpiller(Analyses, MF, VRM, VRAI, Matrix);
}
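Illustrative sketch, not part of the original file: a register allocator owns the spiller produced by this factory and feeds it one LiveRangeEdit per virtual register it decides to spill; the analyses bundle, Matrix pointer and LRE below are assumed to be set up by the allocator:

    std::unique_ptr<Spiller> SpillerInstance(
        createInlineSpiller(Analyses, MF, VRM, VRAI, Matrix));
    // For each register chosen for spilling, build a LiveRangeEdit
    // (construction omitted) and let the spiller rewrite its uses.
    SpillerInstance->spill(LRE);
    // Once all assignments are final, hoist and remove redundant spills.
    SpillerInstance->postOptimization();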
/// If MI is a COPY to or from Reg, return the other register, otherwise
/// return 0.
static Register isCopyOf(const MachineInstr &MI, Register Reg,
                         const TargetInstrInfo &TII) {
  if (!TII.isCopyInstr(MI))
    return Register();
  // ...
}

/// Check for a copy bundle as formed by SplitKit.
static Register isCopyOfBundle(const MachineInstr &FirstMI, Register Reg,
                               const TargetInstrInfo &TII) {
  // ...
  assert(/* ... */ && "expected to see first instruction in bundle");
  // ...
  while (I->isBundledWithSucc()) {
    // ...
    auto CopyInst = TII.isCopyInstr(MI);
    // ...
  }
  // ...
}

static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
  // ...
    if (MO.getReg().isVirtual())
      // ...
}

/// isSnippet - Identify if a live interval is a snippet that should be
/// spilled together with its parent.
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
  // ...
  if (!LIS.intervalIsInOneMBB(SnipLI))
    return false;

  // ...
  for (auto *VNI : SnipLI.vnis()) {
    // ...
    if (MI->getOpcode() == TargetOpcode::STATEPOINT)
      return false;
  }

  // ...
  for (MachineRegisterInfo::reg_bundle_nodbg_iterator
           RI = MRI.reg_bundle_nodbg_begin(SnipLI.reg()),
           E = MRI.reg_bundle_nodbg_end();
       /* ... */;) {
    // ...
  }
  // ...
}
void InlineSpiller::collectRegsToSpill() {
  // ...
  RegsToSpill.assign(1, Reg);
  SnippetCopies.clear();
  RegsReplaced.clear();

  // ...
    if (!isSibling(SnipReg))
      continue;
    // ...
    if (!isSnippet(SnipLI))
      continue;
    SnippetCopies.insert(&MI);
    if (isRegToSpill(SnipReg))
      continue;
    // ...
    RegsToSpill.push_back(SnipReg);
  // ...
}
bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
                                       MachineInstr &CopyMI) {
  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
  // ...
  assert(StackInt && "No stack slot assigned yet.");
  // ...
  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
  LLVM_DEBUG(dbgs() << /* ... */ << *StackInt << '\n');

  // ...
  eliminateRedundantSpills(SrcLI, SrcVNI);
  // ...
  assert(DefMI && "Defining instruction disappeared");
  // ...
  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
  // ...
  if (MIS.begin() == MII)
    HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
  // ...
}
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
  assert(VNI && "Missing value");
  // ...
  WorkList.push_back(std::make_pair(&SLI, VNI));
  assert(StackInt && "No stack slot assigned yet.");

  do {
    // ...
    LLVM_DEBUG(dbgs() << /* ... */ << VNI->def << " in " << *LI << '\n');

    // ...
    if (isRegToSpill(Reg))
      continue;

    // Add all of VNI's live range to StackInt.
    StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
    LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

    // ...
      if (!MI.mayStore() && !TII.isCopyInstr(MI))
        continue;

      // ...
      if (isSibling(DstReg)) {
        // ...
        assert(DstVNI && "Missing defined value");
        // ...
        WorkList.push_back(std::make_pair(&DstLI, DstVNI));
        // ...
      }

      // A redundant spill: replace it with a KILL and record it as dead.
      // ...
      MI.setDesc(TII.get(TargetOpcode::KILL));
      DeadDefs.push_back(&MI);
      // ...
      if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
        // ...
    }
  } while (!WorkList.empty());
}

void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
  // ...
  WorkList.push_back(std::make_pair(LI, VNI));
  do {
    // ...
    if (!UsedValues.insert(VNI).second)
      continue;

    // ...
        WorkList.push_back(std::make_pair(LI, PVNI));
    // ...
    if (!SnippetCopies.count(MI))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
    assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy");
    // ...
    assert(SnipVNI && "Snippet undefined before copy");
    WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
  } while (!WorkList.empty());
}
bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg,
                                                     MachineInstr &MI) {
  // ...
  if (MI.getOpcode() != TargetOpcode::STATEPOINT)
    // ...
  for (/* ... */, EndIdx = MI.getNumOperands(); Idx < EndIdx; ++Idx) {
    // ...
  }
  // ...
}

bool InlineSpiller::hasPhysRegAvailable(const MachineInstr &MI) {
  // ...
    if (!Matrix->checkInterference(PrevIdx, UseIdx, PhysReg))
      // ...
  // ...
}

bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
  // ...
  if (SnippetCopies.count(&MI)) {
    LLVM_DEBUG(dbgs() << "\tskipping remat snippet copy for " << UseIdx << '\t'
                      << /* ... */);
    // ...
  }

  // ...
  assert(OrigVNI && "corrupted sub-interval");
  // ...
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat missing def for " << UseIdx << '\t'
                      << /* ... */);
    // ...

  if (!Edit->canRematerializeAt(RM, UseIdx)) {
    markValueUsed(&VirtReg, ParentVNI);
    // ...
  }

  // ...
    markValueUsed(&VirtReg, ParentVNI);
    // ...

  // Try to fold a load into the using instruction instead of allocating a
  // new register.
  if (RM.OrigMI->canFoldAsLoad() &&
      (RM.OrigMI->mayLoad() || !hasPhysRegAvailable(MI)) &&
      foldMemoryOperand(Ops, RM.OrigMI)) {
    Edit->markRematerialized(RM.ParentVNI);
    // ...
  }

  if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) {
    markValueUsed(&VirtReg, ParentVNI);
    // ...
  }

  // Allocate a new register for the remat.
  Register NewVReg = Edit->createFrom(Original);
  // ...
  MRI.constrainRegClass(NewVReg, MRI.getRegClass(VirtReg.reg()));

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx =
      Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);
  // ...
  auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
  NewMI->setDebugLoc(MI.getDebugLoc());

  LLVM_DEBUG(dbgs() << /* ... */ << *LIS.getInstructionFromIndex(DefIdx));

  // Replace operands.
  for (const auto &OpPair : Ops) {
    // ...
  }
  // ...
}
void InlineSpiller::reMaterializeAll() {
  // ...
  bool anyRemat = false;
  // ...
      if (MI.isDebugValue())
        continue;

      assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
                                   "instruction that isn't a DBG_VALUE");

      anyRemat |= reMaterializeFor(LI, MI);
  // ...
    if (!MI->allDefsAreDead())
      continue;
    // ...
    DeadDefs.push_back(MI);
    // ...
    if (MI->isBundledWithSucc() && !MI->isBundledWithPred()) {
      // ...
      /* ... */ EndIt = MI->getParent()->instr_end();
      // ...
      bool OnlyDeadCopies = true;
      for (/* ... */; It != EndIt && It->isBundledWithPred(); ++It) {
        auto DestSrc = TII.isCopyInstr(*It);
        bool IsCopyToDeadReg =
            DestSrc && DestSrc->Destination->getReg() == Reg;
        if (!IsCopyToDeadReg) {
          OnlyDeadCopies = false;
          // ...
        }
      }
      if (OnlyDeadCopies) {
        for (/* ... */; It != EndIt && It->isBundledWithPred(); ++It) {
          It->addRegisterDead(Reg, &TRI);
          DeadDefs.push_back(&*It);
        }
      }
    }

  // Eliminate dead code after remat. Note that some snippet copies may be
  // deleted too.
  if (DeadDefs.empty())
    return;
  LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);

  // ...
  unsigned ResultPos = 0;
  // ...
    if (MRI.reg_nodbg_empty(Reg)) {
      Edit->eraseVirtReg(Reg);
      RegsReplaced.push_back(Reg);
      continue;
    }

    assert((!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
           "Empty and not used live-range?!");

    RegsToSpill[ResultPos++] = Reg;
  // ...
  RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
  LLVM_DEBUG(dbgs() << /* ... */ << " registers to spill after remat.\n");
}
/// If MI is a load or store of StackSlot, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) {
  // ...
  bool IsLoad = InstrReg.isValid();
  // ...

  // We have a stack access. Is it the right register and slot?
  if (InstrReg != Reg || FI != StackSlot)
    return false;

  // ...
  HSpiller.rmFromMergeableSpills(*MI, StackSlot);
  // ...
  LIS.RemoveMachineInstrFromMaps(*MI);
  MI->eraseFromParent();
  // ...
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Dump the range of instructions from B to E with their slot indexes.
static LLVM_DUMP_METHOD void
dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
                                   MachineBasicBlock::iterator E,
                                   LiveIntervals const &LIS,
                                   const char *const header,
                                   Register VReg = Register()) {
  char NextLine = '\n';
  char SlotIndent = '\t';

  if (std::next(B) == E) {
    // ...
  }

  dbgs() << '\t' << header << ": " << NextLine;
  // ...
    dbgs() << SlotIndent << Idx << '\t' << *I;
  // ...
}
#endif
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
                  MachineInstr *LoadMI) {
  // ...
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = TII.isCopyInstr(*MI).has_value();
  // ...
  bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT;
  // ...
  bool SpillSubRegs = TII.isSubregFoldable() ||
                      MI->getOpcode() == TargetOpcode::STATEPOINT ||
                      MI->getOpcode() == TargetOpcode::PATCHPOINT ||
                      MI->getOpcode() == TargetOpcode::STACKMAP;
  // ...
  for (const auto &OpPair : Ops) {
    unsigned Idx = OpPair.second;
    assert(MI == OpPair.first && "Instruction conflict during operand folding");
    // ...
    if (LoadMI && MO.isDef())
      // ...
    if (UntieRegs || !MI->isRegTiedToDefOperand(Idx))
      // ...
  }

  for (unsigned Idx : FoldOps) {
    // ...
    unsigned Tied = MI->findTiedOperandIdx(Idx);
    // ...
    MI->untieRegOperand(Idx);
  }

  MachineInstr *FoldMI =
      LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
             : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
  // ...
  for (auto Tied : TiedOps)
    MI->tieOperands(Tied.first, Tied.second);
  // ...
  if (/* ... */
      HSpiller.rmFromMergeableSpills(*MI, FI))
    // ...

  if (MI->isCandidateForAdditionalCallInfo())
    MI->getMF()->moveAdditionalCallInfo(MI, FoldMI);
  // ...
  if (MI->peekDebugInstrNum() && Ops[0].second == 0) {
    // ...
    auto MakeSubstitution = [this, FoldMI, MI, &Ops]() {
      // ...
      unsigned OldOperandNum = Ops[0].second;
      // ...
      unsigned OldNum = MI->getDebugInstrNum();
      MF.makeDebugValueSubstitution({OldNum, OldOperandNum},
                                    /* ... */);
      // ...
    };

    // ...
    if (Ops.size() == 1 && Op0.isDef()) {
      // ...
    } else if (Ops.size() == 2 && Op0.isDef() && MI->getOperand(1).isTied() &&
               Op0.getReg() == MI->getOperand(1).getReg()) {
      // ...
    }
  } else if (MI->peekDebugInstrNum()) {
    // ...
    MF.substituteDebugValuesForInst(*MI, *FoldMI, Ops[0].second);
  }

  MI->eraseFromParent();
  // ...
  assert(!MIS.empty() && "Unexpected empty span of instructions!");
  // ...
      if (MO.getReg() == ImpReg)
        // ...

  // ...
  } else if (Ops.front().second == 0) {
    // ...
    if (std::distance(MIS.begin(), MIS.end()) <= 1)
      HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
  }
  // ...
}
void InlineSpiller::insertReload(Register NewVReg, SlotIndex Idx,
                                 MachineBasicBlock::iterator MI) {
  // ...
}

/// Check if \p Def fully defines a VReg with an undefined value.
static bool isRealSpill(const MachineInstr &Def) {
  if (!Def.isImplicitDef())
    return true;
  // ...
  return Def.getOperand(0).getSubReg();
}

void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
                                MachineBasicBlock::iterator MI) {
  // ...
  assert(!MI->isTerminator() && "Inserting a spill after a terminator");
  // ...
    BuildMI(MBB, SpillBefore, MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
        .addReg(NewVReg, getKillRegState(isKill));
  // ...
  if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1)
    HSpiller.addToMergeableSpills(*Spill, StackSlot, Original);
  // ...
}
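Illustrative sketch, not part of the original file: conceptually, the spill insertion above asks the target to store NewVReg into the assigned stack slot right after MI (the real code also handles bundles and undef values via the KILL path). Using the storeRegToStackSlot interface referenced by this file:

    MachineBasicBlock &MBB = *MI->getParent();
    TII.storeRegToStackSlot(MBB, std::next(MI), NewVReg, isKill, StackSlot,
                            MRI.getRegClass(NewVReg), &TRI, Register());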
void InlineSpiller::spillAroundUses(Register Reg) {
  // ...
    if (MI.isDebugValue()) {
      // ...
    }

    assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
                                 "instruction that isn't a DBG_VALUE");

    // ...
    if (SnippetCopies.count(&MI))
      continue;

    // ...
    if (coalesceStackAccess(&MI, Reg))
      continue;

    // ...
    if (SibReg && isSibling(SibReg)) {
      // ...
      if (isRegToSpill(SibReg)) {
        // ...
        SnippetCopies.insert(&MI);
        // ...
      }
      // ...
        if (hoistSpillInsideBB(OldLI, MI)) {
          // ...
          MI.getOperand(0).setIsDead();
          DeadDefs.push_back(&MI);
          // ...
        }
      // ...
        eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
      // ...
    }

    // ...
    if (foldMemoryOperand(Ops))
      continue;

    // ...
    insertReload(NewVReg, Idx, &MI);

    // ...
    bool hasLiveDef = false;
    for (const auto &OpPair : Ops) {
      // ...
      if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
        // ...
    }

    // ...
      insertSpill(NewVReg, true, &MI);
  // ...
}
void InlineSpiller::spillAll() {
  // ...
    StackSlot = VRM.assignVirt2StackSlot(Original);
    StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
    StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
  // ...
    StackInt = &LSS.getInterval(StackSlot);

  if (Original != Edit->getReg())
    VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);

  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
  // ...
  LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');

  // ...
    spillAroundUses(Reg);
  // ...
    VRM.assignVirt2StackSlot(Reg, StackSlot);

  if (!DeadDefs.empty()) {
    LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
    Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);
  }

  // ...
      assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
      // ...
      MI.eraseFromBundle();
  // ...
    Edit->eraseVirtReg(Reg);
  // ...
}
void InlineSpiller::spill(LiveRangeEdit &edit, AllocationOrder *order) {
  // ...
  Original = VRM.getOriginal(edit.getReg());
  StackSlot = VRM.getStackSlot(Original);
  // ...

  LLVM_DEBUG(dbgs() << /* ... */ << ':' << edit.getParent()
                    << "\nFrom original " << /* ... */);
  assert(/* ... */ && "Attempting to spill already spilled value.");
  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");

  collectRegsToSpill();
  // ...
  if (!RegsToSpill.empty())
    // ...

  Edit->calculateRegClassAndHint(MF, VRAI);
}

/// Optimizations after all the reg selections and spills are done.
void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }
void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                                            unsigned Original) {
  // ...
  auto [Place, Inserted] = StackSlotToOrigLI.try_emplace(StackSlot);
  if (Inserted) {
    auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight());
    // ...
    Place->second = std::move(LI);
  }
  // ...
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  MergeableSpills[MIdx].insert(&Spill);
}

bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
                                             int StackSlot) {
  auto It = StackSlotToOrigLI.find(StackSlot);
  if (It == StackSlotToOrigLI.end())
    return false;
  // ...
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  return MergeableSpills[MIdx].erase(&Spill);
}
bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
                                     MachineBasicBlock &BB, Register &LiveReg) {
  SlotIndex Idx = IPA.getLastInsertPoint(OrigLI, BB);
  // ...
  if (Idx < OrigVNI.def) {
    LLVM_DEBUG(dbgs() << "can't spill in root block - def after LIP\n");
    return false;
  }
  // ...
  for (const Register &SibReg : Siblings) {
    // ...
  }
  // ...
}

/// Remove redundant spills in the same basic block.
void HoistSpillHelper::rmRedundantSpills(
    /* Spills, SpillsToRm, SpillBBToSpill */) {
  // ...
  for (auto *const CurrentSpill : Spills) {
    // ...
      MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
      MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
      // ...
      SpillBBToSpill[MDT.getNode(Block)] = SpillToKeep;
    // ...
      SpillBBToSpill[MDT.getNode(Block)] = CurrentSpill;
  }
  for (auto *const SpillToRm : SpillsToRm)
    Spills.erase(SpillToRm);
}
void HoistSpillHelper::getVisitOrders(
    /* Root, Spills, Orders, SpillsToRm, SpillsToKeep, SpillBBToSpill */) {
  // ...
  for (auto *const Spill : Spills) {
    // ...
    while (Node != RootIDomNode) {
      // ...
      if (Node != MDT[Block] && SpillBBToSpill[Node]) {
        SpillToRm = SpillBBToSpill[MDT[Block]];
        // ...
      } else if (WorkSet.count(Node)) {
        // ...
      }
      // ...
      NodesOnPath.insert(Node);
      // ...
    }
    // ...
    NodesOnPath.clear();
  }

  // ...
  do {
    // ...
      if (WorkSet.count(Child))
        // ...
  } while (idx != Orders.size());

  assert(Orders.size() == WorkSet.size() &&
         "Orders have different size with WorkSet");

  // ...
  for (; RIt != Orders.rend(); RIt++)
    LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
  // ...
}
void HoistSpillHelper::runHoistSpills(
    /* OrigLI, OrigVNI, Spills, SpillsToRm, SpillsToIns */) {
  // ...
  rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);
  // ...
  getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
                 SpillBBToSpill);

  // ...
  using NodesCostPair =
      std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
  // ...
  for (; RIt != Orders.rend(); RIt++) {
    // ...
    if (auto It = SpillsToKeep.find(*RIt);
        It != SpillsToKeep.end() && !It->second) {
      auto &SIt = SpillsInSubTreeMap[*RIt];
      // ...
      SIt.second = MBFI.getBlockFreq(Block);
    }

    // Collect the spills and the cost of the subtrees rooted at the children.
    // ...
      if (!SpillsInSubTreeMap.contains(Child))
        continue;

      auto &[SpillsInSubTree, SubTreeCost] = SpillsInSubTreeMap[*RIt];
      auto ChildIt = SpillsInSubTreeMap.find(Child);
      SubTreeCost += ChildIt->second.second;
      auto BI = ChildIt->second.first.begin();
      auto EI = ChildIt->second.first.end();
      SpillsInSubTree.insert(BI, EI);
      SpillsInSubTreeMap.erase(ChildIt);
    // ...

    auto &[SpillsInSubTree, SubTreeCost] = SpillsInSubTreeMap[*RIt];
    // No spills in the subtree, nothing to hoist.
    if (SpillsInSubTree.empty())
      continue;
    // ...
    if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
      continue;

    // If the frequency-weighted cost of the spills in the subtree exceeds
    // the cost of spilling once in the current block, hoist them here.
    if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
      for (auto *const SpillBB : SpillsInSubTree) {
        // ...
        if (auto It = SpillsToKeep.find(SpillBB);
            It != SpillsToKeep.end() && !It->second) {
          MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
          SpillsToRm.push_back(SpillToRm);
          // ...
        }
        // ...
        SpillsToKeep.erase(SpillBB);
      }
      // ...
      SpillsToKeep[*RIt] = LiveReg;
      LLVM_DEBUG({
        dbgs() << "spills in BB: ";
        for (const auto Rspill : SpillsInSubTree)
          dbgs() << Rspill->getBlock()->getNumber() << " ";
        dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
               << /* ... */;
      });
      SpillsInSubTree.clear();
      SpillsInSubTree.insert(*RIt);
      SubTreeCost = MBFI.getBlockFreq(Block);
    }
  }

  // ...
  for (const auto &Ent : SpillsToKeep) {
    // ...
      SpillsToIns[Ent.first->getBlock()] = Ent.second;
  }
}
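Illustrative sketch, not part of the original file: the hoisting test above is a frequency-weighted cost comparison. Assuming the standard BlockFrequency and BranchProbability APIs, and a hypothetical SubtreeBlocks container, the shape of the decision is:

    BlockFrequency SubTreeCost;
    for (MachineBasicBlock *ChildBB : SubtreeBlocks)
      SubTreeCost += MBFI.getBlockFreq(ChildBB); // cost of spilling in each child
    // Bias slightly toward hoisting when several spills would be merged.
    BranchProbability MarginProb = SubtreeBlocks.size() > 1
                                       ? BranchProbability(9, 10)
                                       : BranchProbability(1, 1);
    if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
      // One spill in the dominating Block is cheaper: hoist it there.
    }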
void HoistSpillHelper::hoistAllSpills() {
  // ...
  for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
    // ...
    Virt2SiblingsMap[Original].insert(Reg);
  }

  // Each entry in MergeableSpills is a group of equal-valued spills.
  for (auto &Ent : MergeableSpills) {
    int Slot = Ent.first.first;
    // ...
    VNInfo *OrigVNI = Ent.first.second;
    // ...
    if (Ent.second.empty())
      continue;

    LLVM_DEBUG({
      dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
             << "Equal spills in BB: ";
      for (const auto spill : EqValSpills)
        dbgs() << spill->getParent()->getNumber() << " ";
      // ...
    });

    // ...
    runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);

    LLVM_DEBUG({
      dbgs() << "Finally inserted spills in BB: ";
      for (const auto &Ispill : SpillsToIns)
        dbgs() << Ispill.first->getNumber() << " ";
      dbgs() << "\nFinally removed spills in BB: ";
      for (const auto Rspill : SpillsToRm)
        dbgs() << Rspill->getParent()->getNumber() << " ";
      // ...
    });

    // ...
    if (!SpillsToIns.empty() || !SpillsToRm.empty())
      StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
                                     StackIntvl.getValNumInfo(0));

    // ...
    for (auto const &Insert : SpillsToIns) {
      // ...
    }

    // ...
    NumSpills -= SpillsToRm.size();
    for (auto *const RMEnt : SpillsToRm) {
      RMEnt->setDesc(TII.get(TargetOpcode::KILL));
      for (unsigned i = RMEnt->getNumOperands(); i; --i) {
        // ...
          RMEnt->removeOperand(i - 1);
      }
    }
    Edit.eliminateDeadDefs(SpillsToRm, {});
  }
}

/// For a VirtReg clone, the \p New register should have the same physreg or
/// stack slot as the \p Old register.
void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) {
  if (VRM.hasPhys(Old))
    VRM.assignVirt2Phys(New, VRM.getPhys(Old));
  // ...
    VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
  // ...
  if (VRM.hasShape(Old))
    VRM.assignVirt2Shape(New, VRM.getShape(Old));
}