#include "llvm/Config/llvm-config.h"

#define DEBUG_TYPE "regalloc"

STATISTIC(NumSpilledRanges,   "Number of spilled live ranges");
STATISTIC(NumSnippets,        "Number of spilled snippets");
STATISTIC(NumSpillsRemoved,   "Number of spills removed");
STATISTIC(NumReloadsRemoved,  "Number of reloads removed");
STATISTIC(NumFolded,          "Number of folded stack accesses");
STATISTIC(NumRemats,          "Number of rematerialized defs for spilling");

static cl::opt<bool>
    RestrictStatepointRemat("restrict-statepoint-remat", cl::init(false),
                            cl::Hidden,
                            cl::desc("Restrict remat for statepoint operands"));
  using MergeableSpillsMap =
      MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
  MergeableSpillsMap MergeableSpills;

  void rmRedundantSpills(SmallPtrSet<MachineInstr *, 16> &Spills,
                         SmallVectorImpl<MachineInstr *> &SpillsToRm,
                         DenseMap<MachineDomTreeNode *, MachineInstr *>
                             &SpillBBToSpill);

  HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
                   VirtRegMap &vrm)
      : /* ... */ VRM(vrm), MRI(mf.getRegInfo()),
        TII(*mf.getSubtarget().getInstrInfo()),
        TRI(*mf.getSubtarget().getRegisterInfo()),
        /* ... */ IPA(LIS, mf.getNumBlockIDs()) {}

  void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                            unsigned Original);
  bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
  void hoistAllSpills();
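// InlineSpiller inserts spill and reload code directly around each use
// instead of deferring to VirtRegMap, rematerializing values where that is
// cheaper than a reload from the stack slot.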
class InlineSpiller : public Spiller {
  // ...
  HoistSpillHelper HSpiller;
  // ...

  ~InlineSpiller() override = default;

  InlineSpiller(MachineFunctionPass &Pass, MachineFunction &MF,
                VirtRegMap &VRM, VirtRegAuxInfo &VRAI)
      : /* ... */ VRM(VRM), MRI(MF.getRegInfo()),
        TII(*MF.getSubtarget().getInstrInfo()),
        TRI(*MF.getSubtarget().getRegisterInfo()),
        /* ... */ HSpiller(Pass, MF, VRM), VRAI(VRAI) {}

  void collectRegsToSpill();
  // ...
  void reMaterializeAll();
  // ...
  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
                         MachineInstr *LoadMI = nullptr);
  // ...
};
void Spiller::anchor() {}

Spiller *llvm::createInlineSpiller(MachineFunctionPass &Pass,
                                   MachineFunction &MF, VirtRegMap &VRM,
                                   VirtRegAuxInfo &VRAI) {
  return new InlineSpiller(Pass, MF, VRM, VRAI);
}
/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return 0.
static Register isCopyOf(const MachineInstr &MI, Register Reg,
                         const TargetInstrInfo &TII) {
  if (!TII.isCopyInstr(MI))
    return Register();
  // ...
}

/// Check for a copy bundle as formed by SplitKit.
static Register isCopyOfBundle(const MachineInstr &FirstMI, Register Reg,
                               const TargetInstrInfo &TII) {
  // ...
  assert(!FirstMI.isBundledWithPred() &&
         "expected to see first instruction in bundle");
  // ...
  while (I->isBundledWithSucc()) {
    const MachineInstr &MI = *I;
    auto CopyInst = TII.isCopyInstr(MI);
    // ...
  }
  // ...
}

static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
  for (const MachineOperand &MO : MI.all_defs())
    if (MO.getReg().isVirtual())
      LIS.getInterval(MO.getReg());
}
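// A "snippet" is a tiny live interval that is local to one basic block and is
// connected to the register being spilled only through copies. Spilling the
// snippets together with the main register avoids pointless copies between
// sibling registers and the stack slot.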
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
  // ...
  // A snippet is defined and used in a single basic block.
  if (!LIS.intervalIsInOneMBB(SnipLI))
    return false;

  for (auto *VNI : SnipLI.vnis()) {
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (MI->getOpcode() == TargetOpcode::STATEPOINT)
      return false;
  }
  // ...
  for (MachineRegisterInfo::reg_bundle_nodbg_iterator
           RI = MRI.reg_bundle_nodbg_begin(SnipLI.reg()),
           E = MRI.reg_bundle_nodbg_end();
       RI != E;) {
    // ...
  }
  // ...
}
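// collectRegsToSpill seeds RegsToSpill with the register being spilled, then
// adds any sibling registers that qualify as snippets and remembers the
// connecting copies in SnippetCopies so they can be deleted later.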
void InlineSpiller::collectRegsToSpill() {
  Register Reg = Edit->getReg();

  // The main register always spills.
  RegsToSpill.assign(1, Reg);
  SnippetCopies.clear();
  RegsReplaced.clear();
  // ...
    if (!isSibling(SnipReg))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(SnipReg);
    if (!isSnippet(SnipLI))
      continue;
    SnippetCopies.insert(&MI);
    if (isRegToSpill(SnipReg))
      continue;
    RegsToSpill.push_back(SnipReg);
  // ...
}
bool InlineSpiller::isSibling(Register Reg) {
  return Reg.isVirtual() && VRM.getOriginal(Reg) == Original;
}
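// When the source of a sibling copy is defined in the same block, the spill
// can be hoisted to just after that definition, making later spills of the
// same value in the block redundant.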
bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
                                       MachineInstr &CopyMI) {
  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
  // ...
  assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
  // ...
  assert(StackInt && "No stack slot assigned yet.");
  // ...
  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
  LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

  // Already-existing spills of this value are now redundant.
  eliminateRedundantSpills(SrcLI, SrcVNI);
  // ...
  assert(DefMI && "Defining instruction disappeared");
  // ...
  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
  // ...
  if (MIS.begin() == MII)
    HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
  ++NumSpills;
  return true;
}
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
  assert(VNI && "Missing value");
  SmallVector<std::pair<LiveInterval *, VNInfo *>, 8> WorkList;
  WorkList.push_back(std::make_pair(&SLI, VNI));
  assert(StackInt && "No stack slot assigned yet.");

  do {
    // ...
    LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
                      << VNI->def << " in " << *LI << '\n');

    // Regs to spill are taken care of.
    if (isRegToSpill(Reg))
      continue;

    // Add all of VNI's live range to StackInt.
    StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
    LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

    // Find all spills and copies of VNI.
    for (/* ... */) {
      if (!MI.mayStore() && !TII.isCopyInstr(MI))
        continue;
      // ...
      // Follow sibling copies down the dominator tree.
      if (isSibling(DstReg)) {
        // ...
        assert(DstVNI && "Missing defined value");
        assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
        WorkList.push_back(std::make_pair(&DstLI, DstVNI));
      }
      // ...
      // Erase redundant spills: eliminateDeadDefs won't normally remove
      // stores, so turn them into KILLs and let it clean them up.
        MI.setDesc(TII.get(TargetOpcode::KILL));
        DeadDefs.push_back(&MI);
        if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
          --NumSpills;
      // ...
    }
  } while (!WorkList.empty());
}
void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
  SmallVector<std::pair<LiveInterval *, VNInfo *>, 8> WorkList;
  WorkList.push_back(std::make_pair(LI, VNI));
  do {
    std::tie(LI, VNI) = WorkList.pop_back_val();
    if (!UsedValues.insert(VNI).second)
      continue;

    if (VNI->isPHIDef()) {
      // Follow the PHI's incoming values into the predecessors.
      // ...
          WorkList.push_back(std::make_pair(LI, PVNI));
      continue;
    }

    // Follow snippet copies.
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (!SnippetCopies.count(MI))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
    assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy");
    // ...
    assert(SnipVNI && "Snippet undefined before copy");
    WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
  } while (!WorkList.empty());
}
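// Rematerializing a value used by a STATEPOINT can add register pressure at a
// point with very many live values; with -restrict-statepoint-remat the check
// below conservatively refuses remat when the register appears in the
// statepoint's variable (gc/deopt) operand section.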
bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg,
                                                     MachineInstr &MI) {
  // ...
  if (MI.getOpcode() != TargetOpcode::STATEPOINT)
    return true;
  // For STATEPOINTs, allow remat only if the value does not appear in the
  // variable operand section.
  for (unsigned Idx = StatepointOpers(&MI).getVarIdx(),
                EndIdx = MI.getNumOperands();
       Idx < EndIdx; ++Idx) {
    // ...
  }
  return true;
}
/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
  // ...
  if (SnippetCopies.count(&MI))
    return false;
  // ...
  LiveRangeEdit::Remat RM(ParentVNI);
  RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);

  if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
    markValueUsed(&VirtReg, ParentVNI);
    // ...
    return false;
  }
  // ...
    markValueUsed(&VirtReg, ParentVNI);
  // ...

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->canFoldAsLoad() &&
      foldMemoryOperand(Ops, RM.OrigMI)) {
    Edit->markRematerialized(RM.ParentVNI);
    // ...
    return true;
  }

  // If we can't guarantee that we'll be able to actually assign the new vreg,
  // we can't remat.
  if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) {
    markValueUsed(&VirtReg, ParentVNI);
    // ...
    return false;
  }

  // Allocate a new register for the remat.
  Register NewVReg = Edit->createFrom(Original);

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx =
      Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);

  // We take the DebugLoc from MI, since OrigMI may be attributed to a
  // different source location.
  auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
  NewMI->setDebugLoc(MI.getDebugLoc());

  LLVM_DEBUG(dbgs() << "\tremat:  " << DefIdx << '\t'
                    << *LIS.getInstructionFromIndex(DefIdx));

  // Replace operands with the rematerialized register.
  for (const auto &OpPair : Ops) {
    // ...
  }
  // ...
}
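// reMaterializeAll tries to rematerialize every use of every register in
// RegsToSpill. Defs whose uses were all rematerialized become dead and are
// deleted, and registers that end up with no remaining references are dropped
// from RegsToSpill entirely.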
void InlineSpiller::reMaterializeAll() {
  if (!Edit->anyRematerializable())
    return;

  UsedValues.clear();

  // Try to remat before all uses of snippets.
  bool anyRemat = false;
  for (Register Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
      // Debug values are not allowed to affect codegen.
      if (MI.isDebugValue())
        continue;

      assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
             "instruction that isn't a DBG_VALUE");

      anyRemat |= reMaterializeFor(LI, MI);
    }
  }
  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  for (Register Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (VNInfo *VNI : LI.vnis()) {
      if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
        continue;
      MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
      MI->addRegisterDead(Reg, &TRI);
      if (!MI->allDefsAreDead())
        continue;
      DeadDefs.push_back(MI);
      // If MI is a bundle header, also mark the copies inside the bundle dead,
      // but only when every bundled instruction is a copy into the dead Reg.
      if (MI->isBundledWithSucc() && !MI->isBundledWithPred()) {
        MachineBasicBlock::instr_iterator BeginIt = MI->getIterator(),
                                          EndIt = MI->getParent()->instr_end();
        bool OnlyDeadCopies = true;
        for (MachineBasicBlock::instr_iterator It = /* ... */;
             It != EndIt && It->isBundledWithPred(); ++It) {
          auto DestSrc = TII.isCopyInstr(*It);
          bool IsCopyToDeadReg =
              DestSrc && DestSrc->Destination->getReg() == Reg;
          if (!IsCopyToDeadReg) {
            OnlyDeadCopies = false;
            break;
          }
        }
        if (OnlyDeadCopies) {
          for (MachineBasicBlock::instr_iterator It = /* ... */;
               It != EndIt && It->isBundledWithPred(); ++It) {
            It->addRegisterDead(Reg, &TRI);
            DeadDefs.push_back(&*It);
          }
        }
      }
    }
  }

  // Eliminate dead code after remat. Note that some snippet copies may be
  // deleted here.
  if (DeadDefs.empty())
    return;
  LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);

  // Drop registers that are no longer referenced after remat; keep the rest.
  unsigned ResultPos = 0;
  for (Register Reg : RegsToSpill) {
    if (MRI.reg_nodbg_empty(Reg)) {
      Edit->eraseVirtReg(Reg);
      RegsReplaced.push_back(Reg);
      continue;
    }

    assert(LIS.hasInterval(Reg) &&
           (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
           "Empty and not used live-range?!");

    RegsToSpill[ResultPos++] = Reg;
  }
  RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
  LLVM_DEBUG(dbgs() << RegsToSpill.size()
                    << " registers to spill after remat.\n");
}
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) {
  // Is this instruction a load from or a store to StackSlot?
  int FI = 0;
  Register InstrReg = TII.isLoadFromStackSlot(*MI, FI);
  bool IsLoad = InstrReg;
  if (!IsLoad)
    InstrReg = TII.isStoreToStackSlot(*MI, FI);

  // We have a stack access. Is it the right register and slot?
  if (InstrReg != Reg || FI != StackSlot)
    return false;

  if (!IsLoad)
    HSpiller.rmFromMergeableSpills(*MI, StackSlot);
  // ...
  LIS.RemoveMachineInstrFromMaps(*MI);
  MI->eraseFromParent();
  // ...
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Dump the range of instructions from B to E with their slot indexes.
static LLVM_DUMP_METHOD void
dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
                                   MachineBasicBlock::iterator E,
                                   LiveIntervals const &LIS,
                                   const char *const header,
                                   Register VReg = Register()) {
  char NextLine = '\n';
  char SlotIndent = '\t';

  if (std::next(B) == E) {
    NextLine = ' ';
    SlotIndent = ' ';
  }

  dbgs() << '\t' << header << ": " << NextLine;

  for (MachineBasicBlock::iterator I = B; I != E; ++I) {
    SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();
    // Use the early-clobber slot when VReg is defined early-clobber here.
    // ...
      Idx = Idx.getRegSlot(true);

    dbgs() << SlotIndent << Idx << '\t' << *I;
  }
}
#endif
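// foldMemoryOperand rewrites MI so that it accesses the stack slot (or the
// rematerialized LoadMI) directly, turning a reload-then-use or def-then-spill
// pair into a single folded instruction. Tied STATEPOINT operands are untied
// for the fold and re-tied if folding fails.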
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;
  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = TII.isCopyInstr(*MI).has_value();
  Register ImpReg;

  // For statepoints, untied defs and uses are allowed to be folded.
  bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT;

  // Spill subregs if the target allows it.
  // We always want to spill subregs for stackmap/patchpoint pseudos.
  bool SpillSubRegs = TII.isSubregFoldable() ||
                      MI->getOpcode() == TargetOpcode::STATEPOINT ||
                      MI->getOpcode() == TargetOpcode::PATCHPOINT ||
                      MI->getOpcode() == TargetOpcode::STACKMAP;

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (const auto &OpPair : Ops) {
    unsigned Idx = OpPair.second;
    assert(MI == OpPair.first && "Instruction conflict during operand folding");
    // ...
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (UntieRegs || !MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }
  // ...
  MachineInstrSpan MIS(MI, MI->getParent());

  SmallVector<std::pair<unsigned, unsigned>, 8> TiedOps;
  if (UntieRegs)
    for (unsigned Idx : FoldOps) {
      // ...
      unsigned Tied = MI->findTiedOperandIdx(Idx);
      // ...
      MI->untieRegOperand(Idx);
    }

  MachineInstr *FoldMI =
      LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
             : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
  if (!FoldMI) {
    // Re-tie operands.
    for (auto Tied : TiedOps)
      MI->tieOperands(Tied.first, Tied.second);
    return false;
  }

  // Remove LIS for any dead defs in the original MI not in FoldMI.
  for (/* ... */) {
    // ...
    if (!Reg || Reg.isVirtual() || MRI.isReserved(Reg)) {
      continue;
    }
    // ...
  }

  int FI;
  if (TII.isStoreToStackSlot(*MI, FI) &&
      HSpiller.rmFromMergeableSpills(*MI, FI))
    --NumSpills;
  LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
  // Update the call site info.
  if (MI->isCandidateForCallSiteEntry())
    MI->getMF()->moveCallSiteInfo(MI, FoldMI);

  // If we've folded a store into an instruction labelled with debug-info,
  // record a substitution from the old operand to the memory operand.
  if (MI->peekDebugInstrNum() && Ops[0].second == 0) {
    auto MakeSubstitution = [this, FoldMI, MI, &Ops]() {
      // Substitute old operand zero to the new instruction's memory operand.
      unsigned OldOperandNum = Ops[0].second;
      unsigned NewNum = FoldMI->getDebugInstrNum();
      unsigned OldNum = MI->getDebugInstrNum();
      MF.makeDebugValueSubstitution(
          {OldNum, OldOperandNum},
          {NewNum, MachineFunction::DebugOperandMemNumber});
    };

    const MachineOperand &Op0 = MI->getOperand(Ops[0].second);
    if (Ops.size() == 1 && Op0.isDef()) {
      MakeSubstitution();
    } else if (Ops.size() == 2 && Op0.isDef() && MI->getOperand(1).isTied() &&
               Op0.getReg() == MI->getOperand(1).getReg()) {
      MakeSubstitution();
    }
  } else if (MI->peekDebugInstrNum()) {
    // The folded operand is not operand zero; substitute register defs from
    // MI onto FoldMI instead.
    MF.substituteDebugValuesForInst(*MI, *FoldMI, Ops[0].second);
  }

  MI->eraseFromParent();

  // Insert any new instructions other than FoldMI into the LIS maps.
  assert(!MIS.empty() && "Unexpected empty span of instructions!");
  // ...
  // Strip any implicit operands that foldMemoryOperand left behind.
  // ...
      if (MO.getReg() == ImpReg)
        FoldMI->removeOperand(i - 1);
  // ...
  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0) {
    ++NumSpills;
    // Only a single store instruction is added to the mergeable list.
    if (std::distance(MIS.begin(), MIS.end()) <= 1)
      HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
  }
  // ...
  return true;
}
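// insertReload emits a load from the stack slot immediately before the use;
// isRealSpill below recognizes values defined only by an IMPLICIT_DEF, for
// which no actual store is needed.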
void InlineSpiller::insertReload(Register NewVReg, SlotIndex Idx,
                                 MachineBasicBlock::iterator MI) {
  // ...
}

/// Check if Def fully defines a VReg with an undefined value.
static bool isRealSpill(const MachineInstr &Def) {
  if (!Def.isImplicitDef())
    return true;
  // ...
  return Def.getOperand(0).getSubReg();
}
void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
                                MachineBasicBlock::iterator MI) {
  // Spills are not terminators; inserting a spill after a terminator would
  // violate MachineVerifier invariants.
  assert(!MI->isTerminator() && "Inserting a spill after a terminator");
  // ...
  if (IsRealSpill)
    /* ... TII.storeRegToStackSlot(...) ... */;
  else
    // Don't spill an undef value; anything works for undef, and keeping the
    // memory uninitialized saves code size and run time.
    BuildMI(MBB, SpillBefore, MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
        .addReg(NewVReg, getKillRegState(isKill));
  // ...
  ++NumSpills;
  if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1)
    HSpiller.addToMergeableSpills(*Spill, StackSlot, Original);
}
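// spillAroundUses rewrites every instruction using Reg: stack accesses are
// coalesced away, sibling copies become snippet copies or hoisted spills,
// foldable operands are folded, and whatever remains gets a reload before the
// use and, if the instruction writes the register, a spill after it.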
void InlineSpiller::spillAroundUses(Register Reg) {
  LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
  LiveInterval &OldLI = LIS.getInterval(Reg);

  // Iterate over instructions using Reg.
  for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
    // Debug values are not allowed to affect codegen.
    if (MI.isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      // ...
      continue;
    }

    assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
           "instruction that isn't a DBG_VALUE");

    // Ignore copies to/from snippets. We'll delete them.
    if (SnippetCopies.count(&MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(&MI, Reg))
      continue;

    // Analyze instruction.
    SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
    VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, Reg, &Ops);
    SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
    // ...

    // Check for a sibling copy.
    Register SibReg = isCopyOfBundle(MI, Reg, TII);
    if (SibReg && isSibling(SibReg)) {
      // This may actually be a copy between snippets.
      if (isRegToSpill(SibReg)) {
        SnippetCopies.insert(&MI);
        continue;
      }
      if (RI.Writes) {
        if (hoistSpillInsideBB(OldLI, MI)) {
          // This COPY is now dead; the value is already in the stack slot.
          MI.getOperand(0).setIsDead();
          DeadDefs.push_back(&MI);
          continue;
        }
      }
      // ...
    }

    // Attempt to fold memory ops.
    if (foldMemoryOperand(Ops))
      continue;

    // Create a new virtual register for spill/fill.
    Register NewVReg = Edit->createFrom(Reg);

    if (RI.Reads)
      insertReload(NewVReg, Idx, &MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (const auto &OpPair : Ops) {
      MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
          MO.setIsKill();
      } else if (!MO.isDead())
        hasLiveDef = true;
    }
    // ...
    if (RI.Writes && hasLiveDef)
      insertSpill(NewVReg, true, &MI);
  }
}
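// spillAll assigns the stack slot, merges the live ranges of the original
// register and all of its snippets into the stack-slot interval, spills
// around every remaining use, and finally deletes the snippet copies.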
void InlineSpiller::spillAll() {
  // Update LiveStacks now that we are committed to spilling.
  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
    StackSlot = VRM.assignVirt2StackSlot(Original);
    StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
    StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
  } else
    StackInt = &LSS.getInterval(StackSlot);

  if (Original != Edit->getReg())
    VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);

  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
  for (Register Reg : RegsToSpill)
    StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
                                     StackInt->getValNumInfo(0));
  LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');

  // Spill around uses of all RegsToSpill.
  for (Register Reg : RegsToSpill) {
    spillAroundUses(Reg);
    // Assign all of the spilled registers to the slot so that
    // LiveDebugVariables knows about these locations later on.
    // ...
      VRM.assignVirt2StackSlot(Reg, StackSlot);
  }

  // Hoisted spills may cause dead code.
  if (!DeadDefs.empty()) {
    LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
    Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);
  }

  // Finally delete the SnippetCopies.
  for (Register Reg : RegsToSpill) {
    for (MachineInstr &MI :
         llvm::make_early_inc_range(MRI.reg_instructions(Reg))) {
      assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
      LIS.getSlotIndexes()->removeSingleMachineInstrFromMaps(MI);
      MI.eraseFromBundle();
    }
  }

  // Delete all spilled registers.
  for (Register Reg : RegsToSpill)
    Edit->eraseVirtReg(Reg);
}
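// spill() is the driver: collect the registers to spill, rematerialize what
// is cheap to recompute, and only emit stack accesses for what is still live.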
1299 "Trying to spill a stack slot.");
1301 Original = VRM.getOriginal(edit.
getReg());
1302 StackSlot = VRM.getStackSlot(Original);
1307 <<
':' << edit.
getParent() <<
"\nFrom original "
1310 "Attempting to spill already spilled value.");
1311 assert(DeadDefs.empty() &&
"Previous spill didn't remove dead defs");
1313 collectRegsToSpill();
1317 if (!RegsToSpill.empty())
1320 Edit->calculateRegClassAndHint(MF, VRAI);
void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }
/// When a spill is inserted, add it to the MergeableSpills map.
void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                                            unsigned Original) {
  BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
  LiveInterval &OrigLI = LIS.getInterval(Original);
  // Save a copy of the LiveInterval in StackSlotToOrigLI because the original
  // LiveInterval may be cleared after all of its references are spilled.
  if (!StackSlotToOrigLI.contains(StackSlot)) {
    auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight());
    LI->assign(OrigLI, Allocator);
    StackSlotToOrigLI[StackSlot] = std::move(LI);
  }
  SlotIndex Idx = LIS.getInstructionIndex(Spill);
  VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  MergeableSpills[MIdx].insert(&Spill);
}
/// When a spill is removed, remove it from the MergeableSpills map. Return
/// true if the spill was found and removed.
bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
                                             int StackSlot) {
  auto It = StackSlotToOrigLI.find(StackSlot);
  if (It == StackSlotToOrigLI.end())
    return false;
  SlotIndex Idx = LIS.getInstructionIndex(Spill);
  VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  return MergeableSpills[MIdx].erase(&Spill);
}
/// Check whether BB is a possible block in which to place a hoisted spill: a
/// sibling of the original register must be live at the last insert point.
bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
                                     MachineBasicBlock &BB, Register &LiveReg) {
  // ...
    LLVM_DEBUG(dbgs() << "can't spill in root block - def after LIP\n");
  // ...
  for (const Register &SibReg : Siblings) {
    // ...
  }
  return false;
}
/// Remove redundant spills in the same basic block: if a block contains more
/// than one spill of the same value, only the earliest one is kept.
void HoistSpillHelper::rmRedundantSpills(
    SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
  // For each spill, check SpillBBToSpill[] to see if its BB already has
  // another spill. If so, only keep the earlier spill (smaller SlotIndex).
  for (auto *const CurrentSpill : Spills) {
    MachineBasicBlock *Block = CurrentSpill->getParent();
    // ...
    if (PrevSpill) {
      SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
      SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
      MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
      MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
      SpillsToRm.push_back(SpillToRm);
      SpillBBToSpill[MDT.getNode(Block)] = SpillToKeep;
    } else {
      SpillBBToSpill[MDT.getNode(Block)] = CurrentSpill;
    }
  }
  for (auto *const SpillToRm : SpillsToRm)
    Spills.erase(SpillToRm);
}
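// getVisitOrders collects the dominator-tree nodes lying between the spill
// blocks and their common dominator (the block of the defining instruction)
// and returns them in an order whose reverse is bottom-up, ready for the
// hoisting walk in runHoistSpills.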
void HoistSpillHelper::getVisitOrders(
    /* ... */) {
  // ...
  // For each spill, walk up the dominator tree from its block towards Root.
  // Spills dominated by another spill on the way are marked for removal; the
  // nodes on the path are collected as candidate hoisting points.
  for (auto *const Spill : Spills) {
    MachineBasicBlock *Block = Spill->getParent();
    MachineDomTreeNode *Node = MDT[Block];
    MachineInstr *SpillToRm = nullptr;
    while (Node != RootIDomNode) {
      // ...
        SpillToRm = SpillBBToSpill[MDT[Block]];
      // ...
      Node = Node->getIDom();
    }
    // ...
      SpillsToKeep[MDT[Block]] = 0;
    // ...
    NodesOnPath.clear();
  }

  // Sort the nodes in WorkSet in top-down order and save them in Orders,
  // which will be used for hoisting in runHoistSpills.
  unsigned idx = 0;
  Orders.push_back(MDT[Root]);
  do {
    MachineDomTreeNode *Node = Orders[idx++];
    for (MachineDomTreeNode *Child : Node->children()) {
      if (WorkSet.count(Child))
        Orders.push_back(Child);
    }
  } while (idx != Orders.size());
  assert(Orders.size() == WorkSet.size() &&
         "Orders have different size with WorkSet");

#ifndef NDEBUG
  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
  for (; RIt != Orders.rend(); RIt++)
    LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
#endif
}
void HoistSpillHelper::runHoistSpills(
    /* ... */) {
  // Visit order of dominator tree nodes.
  SmallVector<MachineDomTreeNode *, 32> Orders;
  // ...

  // Remove the spills dominated by a spill in the same block.
  rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);

  // Get the visit order of the nodes to consider for hoisting.
  getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
                 SpillBBToSpill);

  // SpillsInSubTreeMap maps a dom tree node to the spill locations in its
  // subtree and the total cost of those spills.
  using NodesCostPair =
      std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
  DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;

  // Iterate Orders in reverse, i.e. bottom-up in the dominator tree: when a
  // node is visited, the spill locations in all of its children's subtrees
  // are already known.
  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
  for (; RIt != Orders.rend(); RIt++) {
    MachineBasicBlock *Block = (*RIt)->getBlock();

    // If Block contains an original spill, simply continue.
    if (SpillsToKeep.contains(*RIt) && !SpillsToKeep[*RIt]) {
      SpillsInSubTreeMap[*RIt].first.insert(*RIt);
      // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
      SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
    }

    // Collect spills in the subtree of the current node into
    // SpillsInSubTreeMap[*RIt].first.
    for (MachineDomTreeNode *Child : (*RIt)->children()) {
      if (!SpillsInSubTreeMap.contains(Child))
        continue;
      // Bind the references after SpillsInSubTreeMap[*RIt] exists, or the
      // iterators below could be invalidated by the map growing.
      SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
          SpillsInSubTreeMap[*RIt].first;
      BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
      SubTreeCost += SpillsInSubTreeMap[Child].second;
      auto BI = SpillsInSubTreeMap[Child].first.begin();
      auto EI = SpillsInSubTreeMap[Child].first.end();
      SpillsInSubTree.insert(BI, EI);
      SpillsInSubTreeMap.erase(Child);
    }

    SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
        SpillsInSubTreeMap[*RIt].first;
    BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
    // No spills in subtree, simply continue.
    if (SpillsInSubTree.empty())
      continue;

    // Check whether Block is a possible candidate to insert a spill.
    Register LiveReg;
    if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
      continue;

    // Hoist when the cost of the subtree's spills exceeds the cost of a
    // single spill in Block (MarginProb slightly biases toward hoisting).
    if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
      for (auto *const SpillBB : SpillsInSubTree) {
        // When SpillBB contains an original spill, that spill is removed.
        if (SpillsToKeep.contains(SpillBB) && !SpillsToKeep[SpillBB]) {
          MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
          SpillsToRm.push_back(SpillToRm);
        }
        // SpillBB will not contain a spill anymore.
        SpillsToKeep.erase(SpillBB);
      }
      // The current block is chosen as the hoisting point.
      SpillsToKeep[*RIt] = LiveReg;
      LLVM_DEBUG({
        dbgs() << "spills in BB: ";
        for (const auto Rspill : SpillsInSubTree)
          dbgs() << Rspill->getBlock()->getNumber() << " ";
        dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
               << "\n";
      });
      SpillsInSubTree.clear();
      SpillsInSubTree.insert(*RIt);
      SubTreeCost = MBFI.getBlockFreq(Block);
    }
  }
  // For entries in SpillsToKeep with a live register set (i.e. hoisted
  // spills), record them in SpillsToIns.
  for (const auto &Ent : SpillsToKeep) {
    if (Ent.second)
      SpillsToIns[Ent.first->getBlock()] = Ent.second;
  }
}
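// hoistAllSpills processes each set of mergeable spills gathered during
// spilling: redundant spills are turned into KILLs and deleted, and the
// surviving ones are re-emitted in the blocks chosen by runHoistSpills.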
void HoistSpillHelper::hoistAllSpills() {
  SmallVector<Register, 4> NewVRegs;
  LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);

  for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
    Register Reg = Register::index2VirtReg(i);
    Register Original = VRM.getPreSplitReg(Reg);
    if (!MRI.def_empty(Reg))
      Virt2SiblingsMap[Original].insert(Reg);
  }

  // Each entry in MergeableSpills is a set of spills of the same value,
  // keyed by (stack slot, original VNI).
  for (auto &Ent : MergeableSpills) {
    int Slot = Ent.first.first;
    LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
    VNInfo *OrigVNI = Ent.first.second;
    SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
    if (Ent.second.empty())
      continue;

    LLVM_DEBUG({
      dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
             << "Equal spills in BB: ";
      for (const auto spill : EqValSpills)
        dbgs() << spill->getParent()->getNumber() << " ";
      dbgs() << "\n";
    });

    // SpillsToRm is the spill set to be removed from EqValSpills.
    SmallVector<MachineInstr *, 16> SpillsToRm;
    // SpillsToIns is the spill set to be newly inserted after hoisting.
    DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;

    runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);

    LLVM_DEBUG({
      dbgs() << "Finally inserted spills in BB: ";
      for (const auto &Ispill : SpillsToIns)
        dbgs() << Ispill.first->getNumber() << " ";
      dbgs() << "\nFinally removed spills in BB: ";
      for (const auto Rspill : SpillsToRm)
        dbgs() << Rspill->getParent()->getNumber() << " ";
      dbgs() << "\n";
    });

    // Stack live range update.
    LiveInterval &StackIntvl = LSS.getInterval(Slot);
    if (!SpillsToIns.empty() || !SpillsToRm.empty())
      StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
                                     StackIntvl.getValNumInfo(0));

    // Insert hoisted spills.
    for (auto const &Insert : SpillsToIns) {
      MachineBasicBlock *BB = Insert.first;
      Register LiveReg = Insert.second;
      /* ... TII.storeRegToStackSlot(...) at the last insert point of BB ... */
      ++NumSpills;
    }

    // Remove redundant spills or change them to dead instructions.
    NumSpills -= SpillsToRm.size();
    for (auto *const RMEnt : SpillsToRm) {
      RMEnt->setDesc(TII.get(TargetOpcode::KILL));
      for (unsigned i = RMEnt->getNumOperands(); i; --i) {
        MachineOperand &MO = RMEnt->getOperand(i - 1);
        if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
          RMEnt->removeOperand(i - 1);
      }
    }
    Edit.eliminateDeadDefs(SpillsToRm, {});
  }
}

/// For a cloned virtual register, the new register should get the same
/// physreg or stack slot as the old one.
void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) {
  if (VRM.hasPhys(Old))
    VRM.assignVirt2Phys(New, VRM.getPhys(Old));
  else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
    VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
  else
    llvm_unreachable("VReg should be assigned either physreg or stackslot");
  if (VRM.hasShape(Old))
    VRM.assignVirt2Shape(New, VRM.getShape(Old));
}