#define DEBUG_TYPE "regalloc"

// Pass-wide counters, reported when -stats is enabled.
STATISTIC(numJoins,
          "Number of interval joins performed");
STATISTIC(numCrossRCs,
          "Number of cross class joins performed");
STATISTIC(numCommutes,
          "Number of instruction commuting performed");
STATISTIC(NumReMats,
          "Number of instructions re-materialized");
STATISTIC(NumInflated,
          "Number of register classes inflated");
STATISTIC(NumLaneConflicts,
          "Number of dead lane conflicts tested");
STATISTIC(NumLaneResolves,
          "Number of dead lane conflicts resolved");
STATISTIC(NumShrinkToUses,
          "Number of shrinkToUses called");
79 cl::desc(
"Coalesce copies (default=true)"),
94 cl::desc(
"Coalesce copies that span blocks (default=subtarget)"),
99 cl::desc(
"Verify machine instrs before and after register coalescing"),
104 cl::desc(
"During rematerialization for a copy, if the def instruction has "
105 "many other copy uses to be rematerialized, delay the multiple "
106 "separate live interval update work and do them all at once after "
107 "all those rematerialization are done. It will save a lot of "
113 cl::desc(
"If the valnos size of an interval is larger than the threshold, "
114 "it is regarded as a large interval. "),
119 cl::desc(
"For a large interval, if it is coalesced with other live "
120 "intervals many times more than the threshold, stop its "
121 "coalescing to control the compile time. "),
  // Maps a PHI instruction number to its tracked position record.
  // NOTE(review): PHIValPos is declared elsewhere in this file.
  DenseMap<unsigned, PHIValPos> PHIValToPos;
  // For each virtual register, the IDs of the PHIValToPos entries that
  // mention it, so those records can be retargeted when the register is
  // coalesced away (see the RegToPHIIdx maintenance in joinVirtRegs).
  DenseMap<Register, SmallVector<unsigned, 2>> RegToPHIIdx;

  // A debug-value user of a vreg plus the slot index at which it reads it.
  using DbgValueLoc = std::pair<SlotIndex, MachineInstr *>;
  // All DBG_VALUE users of each virtual register, gathered up front so
  // merges can check whether they change debug-value locations.
  DenseMap<Register, std::vector<DbgValueLoc>> DbgVRegToValues;

  // Subregister lanes whose live ranges must be shrunk after a join.
  LaneBitmask ShrinkMask;

  // True when the main live range must be recomputed after coalescing.
  bool ShrinkMainRange =
      false;

  // Whether copies that span basic blocks are coalesced too
  // (default comes from the subtarget, overridable by a cl::opt).
  bool JoinGlobalCopies =
      false;

  // Whether split edges should be considered during joining.
  // TODO(review): confirm — set elsewhere; default is off.
  bool JoinSplitEdges =
      false;

  // Copy instructions still awaiting coalescing; LocalWorkList holds the
  // block-local copies handled by coalesceLocals().
  SmallVector<MachineInstr *, 8> WorkList;
  SmallVector<MachineInstr *, 8> LocalWorkList;

  // Instructions already erased; stale pointers on the work lists are
  // filtered by membership here (see deleteInstr / eraseInstrs).
  SmallPtrSet<MachineInstr *, 8> ErasedInstrs;

  // Defs that became dead during coalescing, consumed by
  // eliminateDeadDefs().
  SmallVector<MachineInstr *, 8> DeadDefs;

  // Source registers of rematerialized copies whose live-interval update is
  // deferred and performed in one batch later (see lateLiveIntervalUpdate).
  DenseSet<Register> ToBeUpdated;

  // Per-register coalescing visit counts, used by isHighCostLiveInterval()
  // to cap compile time on very large intervals.
  DenseMap<Register, unsigned long> LargeLIVisitCounter;
198 void eliminateDeadDefs(LiveRangeEdit *Edit =
nullptr);
201 void LRE_WillEraseInstruction(MachineInstr *
MI)
override;
204 void coalesceLocals();
207 void joinAllIntervals();
211 void copyCoalesceInMBB(MachineBasicBlock *
MBB);
222 void lateLiveIntervalUpdate();
227 bool copyValueUndefInPredecessors(
LiveRange &S,
const MachineBasicBlock *
MBB,
228 LiveQueryResult SLRQ);
232 void setUndefOnPrunedSubRegUses(LiveInterval &LI,
Register Reg,
233 LaneBitmask PrunedLanes);
240 bool joinCopy(MachineInstr *CopyMI,
bool &Again,
241 SmallPtrSetImpl<MachineInstr *> &CurrentErasedInstrs);
246 bool joinIntervals(CoalescerPair &CP);
249 bool joinVirtRegs(CoalescerPair &CP);
254 bool isHighCostLiveInterval(LiveInterval &LI);
257 bool joinReservedPhysReg(CoalescerPair &CP);
264 void mergeSubRangeInto(LiveInterval &LI,
const LiveRange &ToMerge,
265 LaneBitmask LaneMask, CoalescerPair &CP,
271 LaneBitmask LaneMask,
const CoalescerPair &CP);
277 bool adjustCopiesBackFrom(
const CoalescerPair &CP, MachineInstr *CopyMI);
281 bool hasOtherReachingDefs(LiveInterval &IntA, LiveInterval &IntB,
282 VNInfo *AValNo, VNInfo *BValNo);
292 std::pair<bool, bool> removeCopyByCommutingDef(
const CoalescerPair &CP,
293 MachineInstr *CopyMI);
296 bool removePartialRedundancy(
const CoalescerPair &CP, MachineInstr &CopyMI);
300 bool reMaterializeDef(
const CoalescerPair &CP, MachineInstr *CopyMI,
304 bool canJoinPhys(
const CoalescerPair &CP);
319 void addUndefFlag(
const LiveInterval &
Int, SlotIndex UseIdx,
320 MachineOperand &MO,
unsigned SubRegIdx);
326 MachineInstr *eliminateUndefCopy(MachineInstr *CopyMI);
340 bool applyTerminalRule(
const MachineInstr &Copy)
const;
346 SmallVectorImpl<MachineInstr *> *Dead =
nullptr) {
348 if (LIS->shrinkToUses(LI, Dead)) {
352 LIS->splitSeparateComponents(*LI, SplitLIs);
360 void deleteInstr(MachineInstr *
MI) {
361 ErasedInstrs.insert(
MI);
362 LIS->RemoveMachineInstrFromMaps(*
MI);
363 MI->eraseFromParent();
372 void checkMergingChangesDbgValues(CoalescerPair &CP,
LiveRange &
LHS,
381 RegisterCoalescer() =
default;
382 RegisterCoalescer &operator=(RegisterCoalescer &&
Other) =
default;
  /// Construct a coalescer over precomputed analyses. The pointers are
  /// stored, not owned.
  RegisterCoalescer(LiveIntervals *LIS, SlotIndexes *SI,
                    const MachineLoopInfo *Loops)
      : LIS(LIS), SI(SI), Loops(Loops) {}
388 bool run(MachineFunction &MF);
395 RegisterCoalescerLegacy() : MachineFunctionPass(ID) {
399 void getAnalysisUsage(AnalysisUsage &AU)
const override;
401 MachineFunctionProperties getClearedProperties()
const override {
402 return MachineFunctionProperties().setIsSSA();
406 bool runOnMachineFunction(MachineFunction &)
override;
// Unique pass identity used by the legacy pass manager (passed to the
// MachineFunctionPass(ID) base constructor).
char RegisterCoalescerLegacy::ID = 0;
416 "Register Coalescer",
false,
false)
428 Dst = MI->getOperand(0).getReg();
429 DstSub = MI->getOperand(0).getSubReg();
430 Src = MI->getOperand(1).getReg();
431 SrcSub = MI->getOperand(1).getSubReg();
432 }
else if (
MI->isSubregToReg()) {
433 Dst = MI->getOperand(0).getReg();
434 DstSub = tri.composeSubRegIndices(MI->getOperand(0).getSubReg(),
435 MI->getOperand(3).getImm());
436 Src = MI->getOperand(2).getReg();
437 SrcSub = MI->getOperand(2).getSubReg();
449 if (
MBB->pred_size() != 1 ||
MBB->succ_size() != 1)
452 for (
const auto &
MI : *
MBB) {
453 if (!
MI.isCopyLike() && !
MI.isUnconditionalBranch())
463 Flipped = CrossClass =
false;
466 unsigned SrcSub = 0, DstSub = 0;
469 Partial = SrcSub || DstSub;
472 if (Src.isPhysical()) {
473 if (Dst.isPhysical())
483 if (Dst.isPhysical()) {
486 Dst = TRI.getSubReg(Dst, DstSub);
494 Dst = TRI.getMatchingSuperReg(Dst, SrcSub, SrcRC);
505 if (SrcSub && DstSub) {
507 if (Src == Dst && SrcSub != DstSub)
510 NewRC = TRI.getCommonSuperRegClass(SrcRC, SrcSub, DstRC, DstSub, SrcIdx,
517 NewRC = TRI.getMatchingSuperRegClass(DstRC, SrcRC, DstSub);
521 NewRC = TRI.getMatchingSuperRegClass(SrcRC, DstRC, SrcSub);
524 NewRC = TRI.getCommonSubClass(DstRC, SrcRC);
533 if (DstIdx && !SrcIdx) {
539 CrossClass = NewRC != DstRC || NewRC != SrcRC;
542 assert(Src.isVirtual() &&
"Src must be virtual");
543 assert(!(Dst.isPhysical() && DstSub) &&
"Cannot have a physical SubIdx");
550 if (DstReg.isPhysical())
562 unsigned SrcSub = 0, DstSub = 0;
570 }
else if (Src != SrcReg) {
575 if (DstReg.isPhysical()) {
576 if (!Dst.isPhysical())
578 assert(!DstIdx && !SrcIdx &&
"Inconsistent CoalescerPair state.");
581 Dst = TRI.getSubReg(Dst, DstSub);
584 return DstReg == Dst;
586 return Register(TRI.getSubReg(DstReg, SrcSub)) == Dst;
593 return TRI.composeSubRegIndices(SrcIdx, SrcSub) ==
594 TRI.composeSubRegIndices(DstIdx, DstSub);
597void RegisterCoalescerLegacy::getAnalysisUsage(
AnalysisUsage &AU)
const {
609void RegisterCoalescer::eliminateDeadDefs(
LiveRangeEdit *Edit) {
619void RegisterCoalescer::LRE_WillEraseInstruction(
MachineInstr *
MI) {
624bool RegisterCoalescer::adjustCopiesBackFrom(
const CoalescerPair &CP,
626 assert(!
CP.isPartial() &&
"This doesn't work for partial copies.");
627 assert(!
CP.isPhys() &&
"This doesn't work for physreg copies.");
652 if (BS == IntB.
end())
654 VNInfo *BValNo = BS->valno;
659 if (BValNo->
def != CopyIdx)
666 if (AS == IntA.
end())
668 VNInfo *AValNo = AS->valno;
674 if (!
CP.isCoalescable(ACopyMI) || !ACopyMI->
isFullCopy())
680 if (ValS == IntB.
end())
698 SlotIndex FillerStart = ValS->end, FillerEnd = BS->start;
702 BValNo->
def = FillerStart;
710 if (BValNo != ValS->valno)
719 S.removeSegment(*SS,
true);
723 if (!S.getVNInfoAt(FillerStart)) {
726 S.extendInBlock(BBStart, FillerStart);
728 VNInfo *SubBValNo = S.getVNInfoAt(CopyIdx);
731 if (SubBValNo != SubValSNo)
732 S.MergeValueNumberInto(SubBValNo, SubValSNo);
749 bool RecomputeLiveRange = AS->end == CopyIdx;
750 if (!RecomputeLiveRange) {
753 if (SS != S.end() &&
SS->end == CopyIdx) {
754 RecomputeLiveRange =
true;
759 if (RecomputeLiveRange)
766bool RegisterCoalescer::hasOtherReachingDefs(
LiveInterval &IntA,
775 if (ASeg.
valno != AValNo)
778 if (BI != IntB.
begin())
780 for (; BI != IntB.
end() && ASeg.
end >= BI->start; ++BI) {
781 if (BI->valno == BValNo)
783 if (BI->start <= ASeg.
start && BI->end > ASeg.
start)
785 if (BI->start > ASeg.
start && BI->start < ASeg.
end)
799 bool MergedWithDead =
false;
801 if (S.
valno != SrcValNo)
812 MergedWithDead =
true;
815 return std::make_pair(
Changed, MergedWithDead);
819RegisterCoalescer::removeCopyByCommutingDef(
const CoalescerPair &CP,
852 assert(BValNo !=
nullptr && BValNo->
def == CopyIdx);
858 return {
false,
false};
861 return {
false,
false};
863 return {
false,
false};
870 return {
false,
false};
876 return DefMO.getReg() == IntA.reg() && !DefMO.getSubReg();
878 return {
false,
false};
890 if (!
TII->findCommutedOpIndices(*
DefMI, UseOpIdx, NewDstIdx))
891 return {
false,
false};
896 return {
false,
false};
900 if (hasOtherReachingDefs(IntA, IntB, AValNo, BValNo))
901 return {
false,
false};
910 if (US == IntA.
end() || US->valno != AValNo)
914 return {
false,
false};
924 TII->commuteInstruction(*
DefMI,
false, UseOpIdx, NewDstIdx);
926 return {
false,
false};
928 !
MRI->constrainRegClass(IntB.
reg(),
MRI->getRegClass(IntA.
reg())))
929 return {
false,
false};
930 if (NewMI !=
DefMI) {
955 UseMO.setReg(NewReg);
960 assert(US != IntA.
end() &&
"Use must be live");
961 if (US->valno != AValNo)
964 UseMO.setIsKill(
false);
966 UseMO.substPhysReg(NewReg, *
TRI);
968 UseMO.setReg(NewReg);
987 VNInfo *SubDVNI = S.getVNInfoAt(DefIdx);
990 VNInfo *SubBValNo = S.getVNInfoAt(CopyIdx);
992 S.MergeValueNumberInto(SubDVNI, SubBValNo);
1000 bool ShrinkB =
false;
1014 VNInfo *ASubValNo = SA.getVNInfoAt(AIdx);
1023 MaskA |= SA.LaneMask;
1029 VNInfo *BSubValNo = SR.empty() ? SR.getNextValue(CopyIdx, Allocator)
1030 : SR.getVNInfoAt(CopyIdx);
1031 assert(BSubValNo != nullptr);
1032 auto P = addSegmentsWithValNo(SR, BSubValNo, SA, ASubValNo);
1033 ShrinkB |= P.second;
1035 BSubValNo->def = ASubValNo->def;
1043 if ((SB.LaneMask & MaskA).any())
1047 SB.removeSegment(*S,
true);
1051 BValNo->
def = AValNo->
def;
1053 ShrinkB |=
P.second;
1060 return {
true, ShrinkB};
1110bool RegisterCoalescer::removePartialRedundancy(
const CoalescerPair &CP,
1143 bool FoundReverseCopy =
false;
1162 bool ValB_Changed =
false;
1163 for (
auto *VNI : IntB.
valnos) {
1164 if (VNI->isUnused())
1167 ValB_Changed =
true;
1175 FoundReverseCopy =
true;
1179 if (!FoundReverseCopy)
1189 if (CopyLeftBB && CopyLeftBB->
succ_size() > 1)
1200 if (InsPos != CopyLeftBB->
end()) {
1206 LLVM_DEBUG(
dbgs() <<
"\tremovePartialRedundancy: Move the copy to "
1211 TII->get(TargetOpcode::COPY), IntB.
reg())
1222 ErasedInstrs.
erase(NewCopyMI);
1224 LLVM_DEBUG(
dbgs() <<
"\tremovePartialRedundancy: Remove the copy from "
1235 deleteInstr(&CopyMI);
1251 if (!IntB.
liveAt(UseIdx))
1252 MO.setIsUndef(
true);
1262 VNInfo *BValNo = SR.Query(CopyIdx).valueOutOrDead();
1263 assert(BValNo &&
"All sublanes should be live");
1272 for (
unsigned I = 0;
I != EndPoints.
size();) {
1274 EndPoints[
I] = EndPoints.
back();
1296 assert(!
Reg.isPhysical() &&
"This code cannot handle physreg aliasing");
1299 if (
Op.getReg() !=
Reg)
1303 if (
Op.getSubReg() == 0 ||
Op.isUndef())
1309bool RegisterCoalescer::reMaterializeDef(
const CoalescerPair &CP,
1313 Register SrcReg =
CP.isFlipped() ?
CP.getDstReg() :
CP.getSrcReg();
1314 unsigned SrcIdx =
CP.isFlipped() ?
CP.getDstIdx() :
CP.getSrcIdx();
1315 Register DstReg =
CP.isFlipped() ?
CP.getSrcReg() :
CP.getDstReg();
1316 unsigned DstIdx =
CP.isFlipped() ?
CP.getSrcIdx() :
CP.getDstIdx();
1337 if (!
TII->isReMaterializable(*
DefMI))
1342 bool SawStore =
false;
1346 if (
MCID.getNumDefs() != 1)
1354 if (SrcIdx && DstIdx)
1370 for (MCRegUnit Unit :
TRI->regunits(DstReg)) {
1389 unsigned NewDstIdx =
TRI->composeSubRegIndices(
CP.getSrcIdx(), DefSubIdx);
1391 NewDstReg =
TRI->getSubReg(DstReg, NewDstIdx);
1401 "Only expect to deal with virtual or physical registers");
1415 LiveRangeEdit Edit(&SrcInt, NewRegs, *MF, *LIS,
nullptr,
this);
1429 assert(SrcIdx == 0 &&
CP.isFlipped() &&
1430 "Shouldn't have SrcIdx+DstIdx at this point");
1433 TRI->getCommonSubClass(DefRC, DstRC);
1434 if (CommonRC !=
nullptr) {
1442 if (MO.isReg() && MO.getReg() == DstReg && MO.getSubReg() == DstIdx) {
1464 "No explicit operands after implicit operands.");
1467 "unexpected implicit virtual register def");
1473 ErasedInstrs.
insert(CopyMI);
1497 ((
TRI->getSubReg(MO.
getReg(), DefSubIdx) ==
1510 assert(!
MRI->shouldTrackSubRegLiveness(DstReg) &&
1511 "subrange update for implicit-def of super register may not be "
1512 "properly handled");
1520 if (DefRC !=
nullptr) {
1522 NewRC =
TRI->getMatchingSuperRegClass(NewRC, DefRC, NewIdx);
1524 NewRC =
TRI->getCommonSubClass(NewRC, DefRC);
1525 assert(NewRC &&
"subreg chosen for remat incompatible with instruction");
1531 SR.LaneMask =
TRI->composeSubRegIndexLaneMask(DstIdx, SR.LaneMask);
1533 MRI->setRegClass(DstReg, NewRC);
1536 updateRegDefsUses(DstReg, DstReg, DstIdx);
1555 MRI->shouldTrackSubRegLiveness(DstReg)) {
1585 if (!SR.liveAt(DefIndex))
1586 SR.createDeadDef(DefIndex,
Alloc);
1587 MaxMask &= ~SR.LaneMask;
1589 if (MaxMask.
any()) {
1607 bool UpdatedSubRanges =
false;
1622 if (!SR.
liveAt(DefIndex))
1628 if ((SR.
LaneMask & DstMask).none()) {
1630 <<
"Removing undefined SubRange "
1643 UpdatedSubRanges =
true;
1646 if (UpdatedSubRanges)
1653 "Only expect virtual or physical registers in remat");
1661 bool HasDefMatchingCopy =
false;
1668 if (DstReg != CopyDstReg)
1671 HasDefMatchingCopy =
true;
1675 if (!HasDefMatchingCopy)
1677 CopyDstReg,
true ,
true ,
false ));
1719 if (
MRI->use_nodbg_empty(SrcReg)) {
1725 UseMO.substPhysReg(DstReg, *
TRI);
1727 UseMO.setReg(DstReg);
1736 if (ToBeUpdated.
count(SrcReg))
1739 unsigned NumCopyUses = 0;
1741 if (UseMO.getParent()->isCopyLike())
1747 if (!DeadDefs.
empty())
1748 eliminateDeadDefs(&Edit);
1750 ToBeUpdated.
insert(SrcReg);
1768 unsigned SrcSubIdx = 0, DstSubIdx = 0;
1769 if (!
isMoveInstr(*
TRI, CopyMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
1778 if ((SR.
LaneMask & SrcMask).none())
1783 }
else if (SrcLI.
liveAt(Idx))
1791 assert(Seg !=
nullptr &&
"No segment for defining instruction");
1796 if (((V &&
V->isPHIDef()) || (!V && !DstLI.
liveAt(Idx)))) {
1804 CopyMI->
getOpcode() == TargetOpcode::SUBREG_TO_REG);
1809 CopyMI->
setDesc(
TII->get(TargetOpcode::IMPLICIT_DEF));
1826 if ((SR.
LaneMask & DstMask).none())
1848 if ((SR.
LaneMask & UseMask).none())
1856 isLive = DstLI.
liveAt(UseIdx);
1869 if (MO.
getReg() == DstReg)
1881 bool IsUndef =
true;
1883 if ((S.LaneMask & Mask).none())
1885 if (S.liveAt(UseIdx)) {
1898 ShrinkMainRange =
true;
1907 if (DstInt && DstInt->
hasSubRanges() && DstReg != SrcReg) {
1916 if (
MI.isDebugInstr())
1919 addUndefFlag(*DstInt, UseIdx, MO,
SubReg);
1925 E =
MRI->reg_instr_end();
1934 if (SrcReg == DstReg && !Visited.
insert(
UseMI).second)
1947 for (
unsigned Op :
Ops) {
1953 if (SubIdx && MO.
isDef())
1959 unsigned SubUseIdx =
TRI->composeSubRegIndices(SubIdx, MO.
getSubReg());
1960 if (SubUseIdx != 0 &&
MRI->shouldTrackSubRegLiveness(DstReg)) {
1977 addUndefFlag(*DstInt, UseIdx, MO, SubUseIdx);
1988 dbgs() <<
"\t\tupdated: ";
1996bool RegisterCoalescer::canJoinPhys(
const CoalescerPair &CP) {
2000 if (!
MRI->isReserved(
CP.getDstReg())) {
2001 LLVM_DEBUG(
dbgs() <<
"\tCan only merge into reserved registers.\n");
2010 dbgs() <<
"\tCannot join complex intervals into reserved register.\n");
2014bool RegisterCoalescer::copyValueUndefInPredecessors(
2028void RegisterCoalescer::setUndefOnPrunedSubRegUses(
LiveInterval &LI,
2035 if (SubRegIdx == 0 || MO.
isUndef())
2041 if (!S.
liveAt(Pos) && (PrunedLanes & SubRegMask).any()) {
2057bool RegisterCoalescer::joinCopy(
2064 if (!
CP.setRegisters(CopyMI)) {
2069 if (
CP.getNewRC()) {
2072 <<
"are available for allocation\n");
2076 auto SrcRC =
MRI->getRegClass(
CP.getSrcReg());
2077 auto DstRC =
MRI->getRegClass(
CP.getDstReg());
2078 unsigned SrcIdx =
CP.getSrcIdx();
2079 unsigned DstIdx =
CP.getDstIdx();
2080 if (
CP.isFlipped()) {
2084 if (!
TRI->shouldCoalesce(CopyMI, SrcRC, SrcIdx, DstRC, DstIdx,
2085 CP.getNewRC(), *LIS)) {
2097 eliminateDeadDefs();
2104 if (
MachineInstr *UndefMI = eliminateUndefCopy(CopyMI)) {
2105 if (UndefMI->isImplicitDef())
2107 deleteInstr(CopyMI);
2115 if (
CP.getSrcReg() ==
CP.getDstReg()) {
2117 LLVM_DEBUG(
dbgs() <<
"\tCopy already coalesced: " << LI <<
'\n');
2122 assert(ReadVNI &&
"No value before copy and no <undef> flag.");
2123 assert(ReadVNI != DefVNI &&
"Cannot read and define the same value.");
2138 if (copyValueUndefInPredecessors(S,
MBB, SLRQ)) {
2139 LLVM_DEBUG(
dbgs() <<
"Incoming sublane value is undef at copy\n");
2140 PrunedLanes |= S.LaneMask;
2147 if (PrunedLanes.
any()) {
2148 LLVM_DEBUG(
dbgs() <<
"Pruning undef incoming lanes: " << PrunedLanes
2150 setUndefOnPrunedSubRegUses(LI,
CP.getSrcReg(), PrunedLanes);
2155 deleteInstr(CopyMI);
2164 if (!canJoinPhys(CP)) {
2167 bool IsDefCopy =
false;
2168 if (reMaterializeDef(CP, CopyMI, IsDefCopy))
2181 dbgs() <<
"\tConsidering merging to "
2182 <<
TRI->getRegClassName(
CP.getNewRC()) <<
" with ";
2183 if (
CP.getDstIdx() &&
CP.getSrcIdx())
2185 <<
TRI->getSubRegIndexName(
CP.getDstIdx()) <<
" and "
2187 <<
TRI->getSubRegIndexName(
CP.getSrcIdx()) <<
'\n';
2195 ShrinkMainRange =
false;
2201 if (!joinIntervals(CP)) {
2205 bool IsDefCopy =
false;
2206 if (reMaterializeDef(CP, CopyMI, IsDefCopy))
2211 if (!
CP.isPartial() && !
CP.isPhys()) {
2212 bool Changed = adjustCopiesBackFrom(CP, CopyMI);
2213 bool Shrink =
false;
2215 std::tie(
Changed, Shrink) = removeCopyByCommutingDef(CP, CopyMI);
2217 deleteInstr(CopyMI);
2219 Register DstReg =
CP.isFlipped() ?
CP.getSrcReg() :
CP.getDstReg();
2231 if (!
CP.isPartial() && !
CP.isPhys())
2232 if (removePartialRedundancy(CP, *CopyMI))
2243 if (
CP.isCrossClass()) {
2245 MRI->setRegClass(
CP.getDstReg(),
CP.getNewRC());
2256 if (ErasedInstrs.
erase(CopyMI))
2258 CurrentErasedInstrs.
insert(CopyMI);
2263 updateRegDefsUses(
CP.getDstReg(),
CP.getDstReg(),
CP.getDstIdx());
2264 updateRegDefsUses(
CP.getSrcReg(),
CP.getDstReg(),
CP.getSrcIdx());
2267 if (ShrinkMask.
any()) {
2270 if ((S.LaneMask & ShrinkMask).none())
2275 ShrinkMainRange =
true;
2283 if (ToBeUpdated.
count(
CP.getSrcReg()))
2284 ShrinkMainRange =
true;
2286 if (ShrinkMainRange) {
2296 TRI->updateRegAllocHint(
CP.getSrcReg(),
CP.getDstReg(), *MF);
2301 dbgs() <<
"\tResult = ";
2313bool RegisterCoalescer::joinReservedPhysReg(
CoalescerPair &CP) {
2316 assert(
CP.isPhys() &&
"Must be a physreg copy");
2317 assert(
MRI->isReserved(DstReg) &&
"Not a reserved register");
2321 assert(
RHS.containsOneValue() &&
"Invalid join with reserved register");
2330 if (!
MRI->isConstantPhysReg(DstReg)) {
2331 for (MCRegUnit Unit :
TRI->regunits(DstReg)) {
2334 if (!
MRI->isReserved(*RI))
2347 !RegMaskUsable.
test(DstReg.
id())) {
2360 if (
CP.isFlipped()) {
2368 CopyMI =
MRI->getVRegDef(SrcReg);
2369 deleteInstr(CopyMI);
2378 if (!
MRI->hasOneNonDBGUse(SrcReg)) {
2389 CopyMI = &*
MRI->use_instr_nodbg_begin(SrcReg);
2393 if (!
MRI->isConstantPhysReg(DstReg)) {
2401 if (
MI->readsRegister(DstReg,
TRI)) {
2411 <<
printReg(DstReg,
TRI) <<
" at " << CopyRegIdx <<
"\n");
2414 deleteInstr(CopyMI);
2417 for (MCRegUnit Unit :
TRI->regunits(DstReg)) {
2424 MRI->clearKillFlags(
CP.getSrcReg());
2509 const unsigned SubIdx;
2513 const LaneBitmask LaneMask;
2517 const bool SubRangeJoin;
2520 const bool TrackSubRegLiveness;
2523 SmallVectorImpl<VNInfo *> &NewVNInfo;
2525 const CoalescerPair &
CP;
2527 SlotIndexes *Indexes;
2528 const TargetRegisterInfo *
TRI;
2532 SmallVector<int, 8> Assignments;
2536 enum ConflictResolution {
2568 ConflictResolution Resolution = CR_Keep;
2571 LaneBitmask WriteLanes;
2575 LaneBitmask ValidLanes;
2578 VNInfo *RedefVNI =
nullptr;
2581 VNInfo *OtherVNI =
nullptr;
2594 bool ErasableImplicitDef =
false;
2598 bool Pruned =
false;
2601 bool PrunedComputed =
false;
2608 bool Identical =
false;
    /// True once analyzeValue() has processed this value; analysis records
    /// the written lanes, so a non-empty WriteLanes mask marks completion.
    bool isAnalyzed() const { return WriteLanes.any(); }
2616 void mustKeepImplicitDef(
const TargetRegisterInfo &
TRI,
2617 const MachineInstr &ImpDef) {
2619 ErasableImplicitDef =
false;
2630 LaneBitmask computeWriteLanes(
const MachineInstr *
DefMI,
bool &Redef)
const;
2633 std::pair<const VNInfo *, Register> followCopyChain(
const VNInfo *VNI)
const;
2635 bool valuesIdentical(VNInfo *Value0, VNInfo *Value1,
2636 const JoinVals &
Other)
const;
2645 ConflictResolution analyzeValue(
unsigned ValNo, JoinVals &
Other);
2650 void computeAssignment(
unsigned ValNo, JoinVals &
Other);
2668 taintExtent(
unsigned ValNo, LaneBitmask TaintedLanes, JoinVals &
Other,
2669 SmallVectorImpl<std::pair<SlotIndex, LaneBitmask>> &TaintExtent);
2673 bool usesLanes(
const MachineInstr &
MI,
Register,
unsigned, LaneBitmask)
const;
2681 bool isPrunedValue(
unsigned ValNo, JoinVals &
Other);
2685 SmallVectorImpl<VNInfo *> &newVNInfo,
const CoalescerPair &cp,
2686 LiveIntervals *lis,
const TargetRegisterInfo *
TRI,
bool SubRangeJoin,
2687 bool TrackSubRegLiveness)
2688 : LR(LR),
Reg(
Reg), SubIdx(SubIdx), LaneMask(LaneMask),
2689 SubRangeJoin(SubRangeJoin), TrackSubRegLiveness(TrackSubRegLiveness),
2690 NewVNInfo(newVNInfo),
CP(cp), LIS(lis), Indexes(LIS->getSlotIndexes()),
2691 TRI(
TRI), Assignments(LR.getNumValNums(), -1),
2692 Vals(LR.getNumValNums()) {}
2696 bool mapValues(JoinVals &
Other);
2700 bool resolveConflicts(JoinVals &
Other);
2705 void pruneValues(JoinVals &
Other, SmallVectorImpl<SlotIndex> &EndPoints,
2711 void pruneSubRegValues(LiveInterval &LI, LaneBitmask &ShrinkMask);
2720 void pruneMainSegments(LiveInterval &LI,
bool &ShrinkMainRange);
2726 void eraseInstrs(SmallPtrSetImpl<MachineInstr *> &ErasedInstrs,
2727 SmallVectorImpl<Register> &ShrinkRegs,
2728 LiveInterval *LI =
nullptr);
2731 void removeImplicitDefs();
  /// Raw assignment table, indexed by value number: each entry is the index
  /// of the merged value in NewVNInfo, or -1 while still unassigned.
  const int *getAssignments() const { return Assignments.data(); }
2737 ConflictResolution getResolution(
unsigned Num)
const {
2738 return Vals[Num].Resolution;
2745 bool &Redef)
const {
2750 L |=
TRI->getSubRegIndexLaneMask(
2758std::pair<const VNInfo *, Register>
2759JoinVals::followCopyChain(
const VNInfo *VNI)
const {
2765 assert(
MI &&
"No defining instruction");
2766 if (!
MI->isFullCopy())
2767 return std::make_pair(VNI, TrackReg);
2768 Register SrcReg =
MI->getOperand(1).getReg();
2770 return std::make_pair(VNI, TrackReg);
2784 LaneBitmask SMask =
TRI->composeSubRegIndexLaneMask(SubIdx, S.LaneMask);
2785 if ((SMask & LaneMask).
none())
2793 return std::make_pair(VNI, TrackReg);
2796 if (ValueIn ==
nullptr) {
2803 return std::make_pair(
nullptr, SrcReg);
2808 return std::make_pair(VNI, TrackReg);
2811bool JoinVals::valuesIdentical(
VNInfo *Value0,
VNInfo *Value1,
2812 const JoinVals &
Other)
const {
2815 std::tie(Orig0, Reg0) = followCopyChain(Value0);
2816 if (Orig0 == Value1 && Reg0 ==
Other.Reg)
2821 std::tie(Orig1, Reg1) =
Other.followCopyChain(Value1);
2825 if (Orig0 ==
nullptr || Orig1 ==
nullptr)
2826 return Orig0 == Orig1 && Reg0 == Reg1;
2832 return Orig0->
def == Orig1->
def && Reg0 == Reg1;
2835JoinVals::ConflictResolution JoinVals::analyzeValue(
unsigned ValNo,
2837 Val &
V = Vals[ValNo];
2838 assert(!
V.isAnalyzed() &&
"Value has already been analyzed!");
2850 :
TRI->getSubRegIndexLaneMask(SubIdx);
2851 V.ValidLanes =
V.WriteLanes = Lanes;
2860 V.ErasableImplicitDef =
true;
2864 V.ValidLanes =
V.WriteLanes = computeWriteLanes(
DefMI, Redef);
2883 assert((TrackSubRegLiveness ||
V.RedefVNI) &&
2884 "Instruction is reading nonexistent value");
2885 if (
V.RedefVNI !=
nullptr) {
2886 computeAssignment(
V.RedefVNI->id,
Other);
2887 V.ValidLanes |= Vals[
V.RedefVNI->id].ValidLanes;
2899 V.ErasableImplicitDef =
true;
2916 if (OtherVNI->
def < VNI->
def)
2917 Other.computeAssignment(OtherVNI->
id, *
this);
2922 return CR_Impossible;
2924 V.OtherVNI = OtherVNI;
2925 Val &OtherV =
Other.Vals[OtherVNI->
id];
2929 if (!OtherV.isAnalyzed() ||
Other.Assignments[OtherVNI->
id] == -1)
2936 if ((
V.ValidLanes & OtherV.ValidLanes).any())
2938 return CR_Impossible;
2952 Other.computeAssignment(
V.OtherVNI->id, *
this);
2953 Val &OtherV =
Other.Vals[
V.OtherVNI->id];
2955 if (OtherV.ErasableImplicitDef) {
2975 <<
", keeping it.\n");
2976 OtherV.mustKeepImplicitDef(*
TRI, *OtherImpDef);
2983 dbgs() <<
"IMPLICIT_DEF defined at " <<
V.OtherVNI->def
2984 <<
" may be live into EH pad successors, keeping it.\n");
2985 OtherV.mustKeepImplicitDef(*
TRI, *OtherImpDef);
2988 OtherV.ValidLanes &= ~OtherV.WriteLanes;
3003 if (
CP.isCoalescable(
DefMI)) {
3006 V.ValidLanes &= ~V.WriteLanes | OtherV.ValidLanes;
3021 valuesIdentical(VNI,
V.OtherVNI,
Other)) {
3044 if ((
V.WriteLanes & OtherV.ValidLanes).none())
3057 "Only early clobber defs can overlap a kill");
3058 return CR_Impossible;
3065 if ((
TRI->getSubRegIndexLaneMask(
Other.SubIdx) & ~
V.WriteLanes).none())
3066 return CR_Impossible;
3068 if (TrackSubRegLiveness) {
3073 if (!OtherLI.hasSubRanges()) {
3075 return (OtherMask &
V.WriteLanes).none() ? CR_Replace : CR_Impossible;
3083 TRI->composeSubRegIndexLaneMask(
Other.SubIdx, OtherSR.LaneMask);
3084 if ((OtherMask &
V.WriteLanes).none())
3087 auto OtherSRQ = OtherSR.Query(VNI->
def);
3088 if (OtherSRQ.valueIn() && OtherSRQ.endPoint() > VNI->
def) {
3090 return CR_Impossible;
3103 return CR_Impossible;
3112 return CR_Unresolved;
3115void JoinVals::computeAssignment(
unsigned ValNo, JoinVals &
Other) {
3116 Val &
V = Vals[ValNo];
3117 if (
V.isAnalyzed()) {
3120 assert(Assignments[ValNo] != -1 &&
"Bad recursion?");
3123 switch ((
V.Resolution = analyzeValue(ValNo,
Other))) {
3127 assert(
V.OtherVNI &&
"OtherVNI not assigned, can't merge.");
3128 assert(
Other.Vals[
V.OtherVNI->id].isAnalyzed() &&
"Missing recursion");
3129 Assignments[ValNo] =
Other.Assignments[
V.OtherVNI->id];
3133 <<
V.OtherVNI->def <<
" --> @"
3134 << NewVNInfo[Assignments[ValNo]]->def <<
'\n');
3137 case CR_Unresolved: {
3139 assert(
V.OtherVNI &&
"OtherVNI not assigned, can't prune");
3140 Val &OtherV =
Other.Vals[
V.OtherVNI->id];
3141 OtherV.Pruned =
true;
3146 Assignments[ValNo] = NewVNInfo.
size();
3152bool JoinVals::mapValues(JoinVals &
Other) {
3154 computeAssignment(i,
Other);
3155 if (Vals[i].Resolution == CR_Impossible) {
3164bool JoinVals::taintExtent(
3173 assert(OtherI !=
Other.LR.end() &&
"No conflict?");
3178 if (End >= MBBEnd) {
3180 << OtherI->valno->id <<
'@' << OtherI->start <<
'\n');
3184 << OtherI->valno->id <<
'@' << OtherI->start <<
" to "
3189 TaintExtent.push_back(std::make_pair(End, TaintedLanes));
3192 if (++OtherI ==
Other.LR.end() || OtherI->start >= MBBEnd)
3196 const Val &OV =
Other.Vals[OtherI->valno->id];
3197 TaintedLanes &= ~OV.WriteLanes;
3200 }
while (TaintedLanes.
any());
3206 if (
MI.isDebugOrPseudoInstr())
3213 unsigned S =
TRI->composeSubRegIndices(SubIdx, MO.
getSubReg());
3214 if ((Lanes &
TRI->getSubRegIndexLaneMask(S)).any())
3220bool JoinVals::resolveConflicts(JoinVals &
Other) {
3223 assert(
V.Resolution != CR_Impossible &&
"Unresolvable conflict");
3224 if (
V.Resolution != CR_Unresolved)
3233 assert(
V.OtherVNI &&
"Inconsistent conflict resolution.");
3235 const Val &OtherV =
Other.Vals[
V.OtherVNI->id];
3240 LaneBitmask TaintedLanes =
V.WriteLanes & OtherV.ValidLanes;
3242 if (!taintExtent(i, TaintedLanes,
Other, TaintExtent))
3246 assert(!TaintExtent.
empty() &&
"There should be at least one conflict.");
3259 "Interference ends on VNI->def. Should have been handled earlier");
3262 assert(LastMI &&
"Range must end at a proper instruction");
3263 unsigned TaintNum = 0;
3266 if (usesLanes(*
MI,
Other.Reg,
Other.SubIdx, TaintedLanes)) {
3271 if (&*
MI == LastMI) {
3272 if (++TaintNum == TaintExtent.
size())
3275 assert(LastMI &&
"Range must end at a proper instruction");
3276 TaintedLanes = TaintExtent[TaintNum].second;
3282 V.Resolution = CR_Replace;
3288bool JoinVals::isPrunedValue(
unsigned ValNo, JoinVals &
Other) {
3289 Val &
V = Vals[ValNo];
3290 if (
V.Pruned ||
V.PrunedComputed)
3293 if (
V.Resolution != CR_Erase &&
V.Resolution != CR_Merge)
3298 V.PrunedComputed =
true;
3299 V.Pruned =
Other.isPrunedValue(
V.OtherVNI->id, *
this);
3303void JoinVals::pruneValues(JoinVals &
Other,
3305 bool changeInstrs) {
3308 switch (Vals[i].Resolution) {
3318 Val &OtherV =
Other.Vals[Vals[i].OtherVNI->id];
3320 OtherV.ErasableImplicitDef && OtherV.Resolution == CR_Keep;
3321 if (!
Def.isBlock()) {
3341 <<
": " <<
Other.LR <<
'\n');
3346 if (isPrunedValue(i,
Other)) {
3353 << Def <<
": " << LR <<
'\n');
3411 bool DidPrune =
false;
3416 if (
V.Resolution != CR_Erase &&
3417 (
V.Resolution != CR_Keep || !
V.ErasableImplicitDef || !
V.Pruned))
3424 OtherDef =
V.OtherVNI->def;
3427 LLVM_DEBUG(
dbgs() <<
"\t\tExpecting instruction removal at " << Def
3435 if (ValueOut !=
nullptr &&
3437 (
V.Identical &&
V.Resolution == CR_Erase && ValueOut->
def == Def))) {
3439 <<
" at " << Def <<
"\n");
3446 if (
V.Identical && S.Query(OtherDef).valueOutOrDead()) {
3456 ShrinkMask |= S.LaneMask;
3470 ShrinkMask |= S.LaneMask;
3482 if (VNI->
def == Def)
3488void JoinVals::pruneMainSegments(
LiveInterval &LI,
bool &ShrinkMainRange) {
3492 if (Vals[i].Resolution != CR_Keep)
3497 Vals[i].Pruned =
true;
3498 ShrinkMainRange =
true;
3502void JoinVals::removeImplicitDefs() {
3505 if (
V.Resolution != CR_Keep || !
V.ErasableImplicitDef || !
V.Pruned)
3521 switch (Vals[i].Resolution) {
3526 if (!Vals[i].ErasableImplicitDef || !Vals[i].Pruned)
3538 if (LI !=
nullptr) {
3563 ED = ED.
isValid() ? std::min(ED,
I->start) :
I->start;
3565 LE =
LE.isValid() ? std::max(LE,
I->end) :
I->
end;
3568 NewEnd = std::min(NewEnd, LE);
3570 NewEnd = std::min(NewEnd, ED);
3576 if (S != LR.
begin())
3577 std::prev(S)->end = NewEnd;
3581 dbgs() <<
"\t\tremoved " << i <<
'@' <<
Def <<
": " << LR <<
'\n';
3583 dbgs() <<
"\t\t LHS = " << *LI <<
'\n';
3590 assert(
MI &&
"No instruction to erase");
3599 MI->eraseFromParent();
3612 JoinVals RHSVals(RRange,
CP.getSrcReg(),
CP.getSrcIdx(), LaneMask, NewVNInfo,
3613 CP, LIS,
TRI,
true,
true);
3614 JoinVals LHSVals(LRange,
CP.getDstReg(),
CP.getDstIdx(), LaneMask, NewVNInfo,
3615 CP, LIS,
TRI,
true,
true);
3622 if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals)) {
3627 if (!LHSVals.resolveConflicts(RHSVals) ||
3628 !RHSVals.resolveConflicts(LHSVals)) {
3639 LHSVals.pruneValues(RHSVals, EndPoints,
false);
3640 RHSVals.pruneValues(LHSVals, EndPoints,
false);
3642 LHSVals.removeImplicitDefs();
3643 RHSVals.removeImplicitDefs();
3648 LRange.
join(RRange, LHSVals.getAssignments(), RHSVals.getAssignments(),
3653 if (EndPoints.
empty())
3659 dbgs() <<
"\t\trestoring liveness to " << EndPoints.
size() <<
" points: ";
3660 for (
unsigned i = 0, n = EndPoints.
size(); i != n; ++i) {
3661 dbgs() << EndPoints[i];
3665 dbgs() <<
": " << LRange <<
'\n';
3670void RegisterCoalescer::mergeSubRangeInto(
LiveInterval &LI,
3674 unsigned ComposeSubRegIdx) {
3684 joinSubRegRanges(SR, RangeCopy, SR.
LaneMask, CP);
3690bool RegisterCoalescer::isHighCostLiveInterval(
LiveInterval &LI) {
3693 auto &Counter = LargeLIVisitCounter[LI.
reg()];
3705 bool TrackSubRegLiveness =
MRI->shouldTrackSubRegLiveness(*
CP.getNewRC());
3707 NewVNInfo, CP, LIS,
TRI,
false, TrackSubRegLiveness);
3709 NewVNInfo, CP, LIS,
TRI,
false, TrackSubRegLiveness);
3713 if (isHighCostLiveInterval(
LHS) || isHighCostLiveInterval(
RHS))
3718 if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals))
3722 if (!LHSVals.resolveConflicts(RHSVals) || !RHSVals.resolveConflicts(LHSVals))
3726 if (
RHS.hasSubRanges() ||
LHS.hasSubRanges()) {
3731 unsigned DstIdx =
CP.getDstIdx();
3732 if (!
LHS.hasSubRanges()) {
3734 :
TRI->getSubRegIndexLaneMask(DstIdx);
3738 }
else if (DstIdx != 0) {
3749 unsigned SrcIdx =
CP.getSrcIdx();
3750 if (!
RHS.hasSubRanges()) {
3752 :
TRI->getSubRegIndexLaneMask(SrcIdx);
3753 mergeSubRangeInto(
LHS,
RHS, Mask, CP, DstIdx);
3758 mergeSubRangeInto(
LHS, R, Mask, CP, DstIdx);
3765 LHSVals.pruneMainSegments(
LHS, ShrinkMainRange);
3767 LHSVals.pruneSubRegValues(
LHS, ShrinkMask);
3768 RHSVals.pruneSubRegValues(
LHS, ShrinkMask);
3769 }
else if (TrackSubRegLiveness && !
CP.getDstIdx() &&
CP.getSrcIdx()) {
3771 CP.getNewRC()->getLaneMask(),
LHS);
3772 mergeSubRangeInto(
LHS,
RHS,
TRI->getSubRegIndexLaneMask(
CP.getSrcIdx()), CP,
3774 LHSVals.pruneMainSegments(
LHS, ShrinkMainRange);
3775 LHSVals.pruneSubRegValues(
LHS, ShrinkMask);
3783 LHSVals.pruneValues(RHSVals, EndPoints,
true);
3784 RHSVals.pruneValues(LHSVals, EndPoints,
true);
3789 LHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs, &
LHS);
3790 RHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs);
3791 while (!ShrinkRegs.
empty())
3795 checkMergingChangesDbgValues(CP,
LHS, LHSVals,
RHS, RHSVals);
3799 auto RegIt = RegToPHIIdx.
find(
CP.getSrcReg());
3800 if (RegIt != RegToPHIIdx.
end()) {
3802 for (
unsigned InstID : RegIt->second) {
3803 auto PHIIt = PHIValToPos.
find(InstID);
3808 auto LII =
RHS.find(
SI);
3809 if (LII ==
RHS.end() || LII->start >
SI)
3824 if (
CP.getSrcIdx() != 0 ||
CP.getDstIdx() != 0)
3827 if (PHIIt->second.SubReg && PHIIt->second.SubReg !=
CP.getSrcIdx())
3831 PHIIt->second.Reg =
CP.getDstReg();
3835 if (
CP.getSrcIdx() != 0)
3836 PHIIt->second.SubReg =
CP.getSrcIdx();
3842 auto InstrNums = RegIt->second;
3843 RegToPHIIdx.
erase(RegIt);
3847 RegIt = RegToPHIIdx.
find(
CP.getDstReg());
3848 if (RegIt != RegToPHIIdx.
end())
3851 RegToPHIIdx.
insert({
CP.getDstReg(), InstrNums});
3855 LHS.join(
RHS, LHSVals.getAssignments(), RHSVals.getAssignments(), NewVNInfo);
3860 MRI->clearKillFlags(
LHS.reg());
3861 MRI->clearKillFlags(
RHS.reg());
3863 if (!EndPoints.
empty()) {
3867 dbgs() <<
"\t\trestoring liveness to " << EndPoints.
size() <<
" points: ";
3868 for (
unsigned i = 0, n = EndPoints.
size(); i != n; ++i) {
3869 dbgs() << EndPoints[i];
3873 dbgs() <<
": " <<
LHS <<
'\n';
3882 return CP.isPhys() ? joinReservedPhysReg(CP) : joinVirtRegs(
CP);
3892 for (
auto *
X : ToInsert) {
3893 for (
const auto &
Op :
X->debug_operands()) {
3894 if (
Op.isReg() &&
Op.getReg().isVirtual())
3895 DbgVRegToValues[
Op.getReg()].push_back({
Slot,
X});
3905 for (
auto &
MBB : MF) {
3908 for (
auto &
MI :
MBB) {
3909 if (
MI.isDebugValue()) {
3911 return MO.isReg() && MO.getReg().isVirtual();
3913 ToInsert.push_back(&
MI);
3914 }
else if (!
MI.isDebugOrPseudoInstr()) {
3916 CloseNewDVRange(CurrentSlot);
3925 for (
auto &Pair : DbgVRegToValues)
3929void RegisterCoalescer::checkMergingChangesDbgValues(
CoalescerPair &CP,
3933 JoinVals &RHSVals) {
3935 checkMergingChangesDbgValuesImpl(
Reg,
RHS,
LHS, LHSVals);
3939 checkMergingChangesDbgValuesImpl(
Reg,
LHS,
RHS, RHSVals);
3943 ScanForSrcReg(
CP.getSrcReg());
3944 ScanForDstReg(
CP.getDstReg());
3947void RegisterCoalescer::checkMergingChangesDbgValuesImpl(
Register Reg,
3950 JoinVals &RegVals) {
3952 auto VRegMapIt = DbgVRegToValues.
find(
Reg);
3953 if (VRegMapIt == DbgVRegToValues.
end())
3956 auto &DbgValueSet = VRegMapIt->second;
3957 auto DbgValueSetIt = DbgValueSet.begin();
3958 auto SegmentIt = OtherLR.
begin();
3960 bool LastUndefResult =
false;
3965 auto ShouldUndef = [&RegVals, &
RegLR, &LastUndefResult,
3970 if (LastUndefIdx == Idx)
3971 return LastUndefResult;
3977 auto OtherIt =
RegLR.find(Idx);
3978 if (OtherIt ==
RegLR.end())
3987 auto Resolution = RegVals.getResolution(OtherIt->valno->id);
3989 Resolution != JoinVals::CR_Keep && Resolution != JoinVals::CR_Erase;
3991 return LastUndefResult;
3997 while (DbgValueSetIt != DbgValueSet.end() && SegmentIt != OtherLR.
end()) {
3998 if (DbgValueSetIt->first < SegmentIt->end) {
4001 if (DbgValueSetIt->first >= SegmentIt->start) {
4002 bool HasReg = DbgValueSetIt->second->hasDebugOperandForReg(
Reg);
4003 bool ShouldUndefReg = ShouldUndef(DbgValueSetIt->first);
4004 if (HasReg && ShouldUndefReg) {
4006 DbgValueSetIt->second->setDebugValueUndef();
4020struct MBBPriorityInfo {
4021 MachineBasicBlock *
MBB;
4025 MBBPriorityInfo(MachineBasicBlock *mbb,
unsigned depth,
bool issplit)
4026 :
MBB(mbb),
Depth(depth), IsSplit(issplit) {}
4036 const MBBPriorityInfo *
RHS) {
4038 if (
LHS->Depth !=
RHS->Depth)
4039 return LHS->Depth >
RHS->Depth ? -1 : 1;
4042 if (
LHS->IsSplit !=
RHS->IsSplit)
4043 return LHS->IsSplit ? -1 : 1;
4047 unsigned cl =
LHS->MBB->pred_size() +
LHS->MBB->succ_size();
4048 unsigned cr =
RHS->MBB->pred_size() +
RHS->MBB->succ_size();
4050 return cl > cr ? -1 : 1;
4053 return LHS->MBB->getNumber() <
RHS->MBB->getNumber() ? -1 : 1;
4058 if (!Copy->isCopy())
4061 if (Copy->getOperand(1).isUndef())
4064 Register SrcReg = Copy->getOperand(1).getReg();
4065 Register DstReg = Copy->getOperand(0).getReg();
4073void RegisterCoalescer::lateLiveIntervalUpdate() {
4079 if (!DeadDefs.
empty())
4080 eliminateDeadDefs();
4082 ToBeUpdated.
clear();
4085bool RegisterCoalescer::copyCoalesceWorkList(
4087 bool Progress =
false;
4099 bool Success = joinCopy(
MI, Again, CurrentErasedInstrs);
4105 if (!CurrentErasedInstrs.
empty()) {
4107 if (
MI && CurrentErasedInstrs.
count(
MI))
4111 if (
MI && CurrentErasedInstrs.
count(
MI))
4122 assert(Copy.isCopyLike());
4125 if (&
MI != &Copy &&
MI.isCopyLike())
4130bool RegisterCoalescer::applyTerminalRule(
const MachineInstr &Copy)
const {
4135 unsigned SrcSubReg = 0, DstSubReg = 0;
4136 if (!
isMoveInstr(*
TRI, &Copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
4157 if (&
MI == &Copy || !
MI.isCopyLike() ||
MI.getParent() != OrigBB)
4160 unsigned OtherSrcSubReg = 0, OtherSubReg = 0;
4164 if (OtherReg == SrcReg)
4165 OtherReg = OtherSrcReg;
4184 const unsigned PrevSize = WorkList.
size();
4185 if (JoinGlobalCopies) {
4191 if (!
MI.isCopyLike())
4193 bool ApplyTerminalRule = applyTerminalRule(
MI);
4195 if (ApplyTerminalRule)
4200 if (ApplyTerminalRule)
4207 LocalWorkList.
append(LocalTerminals.
begin(), LocalTerminals.
end());
4214 if (MII.isCopyLike()) {
4215 if (applyTerminalRule(MII))
4228 if (copyCoalesceWorkList(CurrList))
4230 std::remove(WorkList.
begin() + PrevSize, WorkList.
end(),
nullptr),
4234void RegisterCoalescer::coalesceLocals() {
4235 copyCoalesceWorkList(LocalWorkList);
4240 LocalWorkList.clear();
4243void RegisterCoalescer::joinAllIntervals() {
4244 LLVM_DEBUG(
dbgs() <<
"********** JOINING INTERVALS ***********\n");
4245 assert(WorkList.
empty() && LocalWorkList.empty() &&
"Old data still around.");
4247 std::vector<MBBPriorityInfo> MBBs;
4248 MBBs.reserve(MF->size());
4250 MBBs.push_back(MBBPriorityInfo(&
MBB,
Loops->getLoopDepth(&
MBB),
4256 unsigned CurrDepth = std::numeric_limits<unsigned>::max();
4257 for (MBBPriorityInfo &
MBB : MBBs) {
4259 if (JoinGlobalCopies &&
MBB.Depth < CurrDepth) {
4261 CurrDepth =
MBB.Depth;
4263 copyCoalesceInMBB(
MBB.MBB);
4265 lateLiveIntervalUpdate();
4270 while (copyCoalesceWorkList(WorkList))
4272 lateLiveIntervalUpdate();
4282 RegisterCoalescer Impl(&LIS,
SI, &
Loops);
4294bool RegisterCoalescerLegacy::runOnMachineFunction(
MachineFunction &MF) {
4295 auto *LIS = &getAnalysis<LiveIntervalsWrapperPass>().getLIS();
4296 auto *
Loops = &getAnalysis<MachineLoopInfoWrapperPass>().getLI();
4297 auto *SIWrapper = getAnalysisIfAvailable<SlotIndexesWrapperPass>();
4298 SlotIndexes *
SI = SIWrapper ? &SIWrapper->getSI() :
nullptr;
4299 RegisterCoalescer Impl(LIS,
SI,
Loops);
4300 return Impl.run(MF);
4304 LLVM_DEBUG(
dbgs() <<
"********** REGISTER COALESCER **********\n"
4305 <<
"********** Function: " << fn.
getName() <<
'\n');
4317 dbgs() <<
"* Skipped as it exposes functions that returns twice.\n");
4337 unsigned SubReg = DebugPHI.second.SubReg;
4340 PHIValToPos.
insert(std::make_pair(DebugPHI.first,
P));
4341 RegToPHIIdx[
Reg].push_back(DebugPHI.first);
4350 MF->
verify(LIS,
SI,
"Before register coalescing", &
errs());
4352 DbgVRegToValues.
clear();
4369 if (
MRI->reg_nodbg_empty(
Reg))
4371 if (
MRI->recomputeRegClass(
Reg)) {
4373 <<
TRI->getRegClassName(
MRI->getRegClass(
Reg)) <<
'\n');
4380 if (!
MRI->shouldTrackSubRegLiveness(
Reg)) {
4388 assert((S.LaneMask & ~MaxMask).none());
4399 auto it = PHIValToPos.
find(
p.first);
4401 p.second.Reg = it->second.Reg;
4402 p.second.SubReg = it->second.SubReg;
4405 PHIValToPos.
clear();
4406 RegToPHIIdx.
clear();
4411 MF->
verify(LIS,
SI,
"After register coalescing", &
errs());
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements the BitVector class.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file defines the DenseSet and SmallDenseSet classes.
const HexagonInstrInfo * TII
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
A common definition of LaneBitmask for use in TableGen and CodeGen.
Register const TargetRegisterInfo * TRI
Promote Memory to Register
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
static cl::opt< bool > UseTerminalRule("terminal-rule", cl::desc("Apply the terminal rule"), cl::init(true), cl::Hidden)
static cl::opt< cl::boolOrDefault > EnableGlobalCopies("join-globalcopies", cl::desc("Coalesce copies that span blocks (default=subtarget)"), cl::init(cl::BOU_UNSET), cl::Hidden)
Temporary flag to test global copy optimization.
static bool isLocalCopy(MachineInstr *Copy, const LiveIntervals *LIS)
static bool isSplitEdge(const MachineBasicBlock *MBB)
Return true if this block should be vacated by the coalescer to eliminate branches.
static int compareMBBPriority(const MBBPriorityInfo *LHS, const MBBPriorityInfo *RHS)
C-style comparator that sorts first based on the loop depth of the basic block (the unsigned),...
static cl::opt< unsigned > LargeIntervalSizeThreshold("large-interval-size-threshold", cl::Hidden, cl::desc("If the valnos size of an interval is larger than the threshold, " "it is regarded as a large interval. "), cl::init(100))
static bool isDefInSubRange(LiveInterval &LI, SlotIndex Def)
Check if any of the subranges of LI contain a definition at Def.
static std::pair< bool, bool > addSegmentsWithValNo(LiveRange &Dst, VNInfo *DstValNo, const LiveRange &Src, const VNInfo *SrcValNo)
Copy segments with value number SrcValNo from liverange Src to live range @Dst and use value number D...
static bool isLiveThrough(const LiveQueryResult Q)
static bool isTerminalReg(Register DstReg, const MachineInstr &Copy, const MachineRegisterInfo *MRI)
Check if DstReg is a terminal node.
static cl::opt< bool > VerifyCoalescing("verify-coalescing", cl::desc("Verify machine instrs before and after register coalescing"), cl::Hidden)
register Register static false bool isMoveInstr(const TargetRegisterInfo &tri, const MachineInstr *MI, Register &Src, Register &Dst, unsigned &SrcSub, unsigned &DstSub)
static cl::opt< bool > EnableJoinSplits("join-splitedges", cl::desc("Coalesce copies on split edges (default=subtarget)"), cl::Hidden)
Temporary flag to test critical edge unsplitting.
static cl::opt< bool > EnableJoining("join-liveintervals", cl::desc("Coalesce copies (default=true)"), cl::init(true), cl::Hidden)
static cl::opt< unsigned > LargeIntervalFreqThreshold("large-interval-freq-threshold", cl::Hidden, cl::desc("For a large interval, if it is coalesced with other live " "intervals many times more than the threshold, stop its " "coalescing to control the compile time. "), cl::init(256))
static bool definesFullReg(const MachineInstr &MI, Register Reg)
Returns true if MI defines the full vreg Reg, as opposed to just defining a subregister.
static cl::opt< unsigned > LateRematUpdateThreshold("late-remat-update-threshold", cl::Hidden, cl::desc("During rematerialization for a copy, if the def instruction has " "many other copy uses to be rematerialized, delay the multiple " "separate live interval update work and do them all at once after " "all those rematerialization are done. It will save a lot of " "repeated work. "), cl::init(100))
SI Optimize VGPR LiveRange
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static DenseMap< Register, std::vector< std::pair< SlotIndex, MachineInstr * > > > buildVRegToDbgValueMap(MachineFunction &MF, const LiveIntervals *Liveness)
static void shrinkToUses(LiveInterval &LI, LiveIntervals &LIS)
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addPreservedID(const void *ID)
AnalysisUsage & addUsedIfAvailable()
Add the specified Pass class to the set of analyses used by this pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
bool test(unsigned Idx) const
Represents analyses that only rely on functions' control flow.
A helper class for register coalescers.
bool flip()
Swap SrcReg and DstReg.
bool isCoalescable(const MachineInstr *) const
Return true if MI is a copy instruction that will become an identity copy after coalescing.
bool setRegisters(const MachineInstr *)
Set registers to match the copy instruction MI.
iterator find(const_arg_type_t< KeyT > Val)
bool erase(const KeyT &Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
bool isAsCheapAsAMove(const MachineInstr &MI) const override
A live range for subregisters.
LiveInterval - This class represents the liveness of a register, or stack slot.
LLVM_ABI void removeEmptySubRanges()
Removes all subranges without any segments (subranges without segments are not considered valid and s...
bool hasSubRanges() const
Returns true if subregister liveness information is available.
SubRange * createSubRangeFrom(BumpPtrAllocator &Allocator, LaneBitmask LaneMask, const LiveRange &CopyFrom)
Like createSubRange() but the new range is filled with a copy of the liveness information in CopyFrom...
iterator_range< subrange_iterator > subranges()
LLVM_ABI void refineSubRanges(BumpPtrAllocator &Allocator, LaneBitmask LaneMask, std::function< void(LiveInterval::SubRange &)> Apply, const SlotIndexes &Indexes, const TargetRegisterInfo &TRI, unsigned ComposeSubRegIdx=0)
Refines the subranges to support LaneMask.
LLVM_ABI void computeSubRangeUndefs(SmallVectorImpl< SlotIndex > &Undefs, LaneBitmask LaneMask, const MachineRegisterInfo &MRI, const SlotIndexes &Indexes) const
For a given lane mask LaneMask, compute indexes at which the lane is marked undefined by subregister ...
SubRange * createSubRange(BumpPtrAllocator &Allocator, LaneBitmask LaneMask)
Creates a new empty subregister live range.
LLVM_ABI void clearSubRanges()
Removes all subregister liveness information.
bool hasInterval(Register Reg) const
SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const
Return the first index in the given basic block.
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction associated with the given index.
LLVM_ABI bool hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const
Returns true if VNI is killed by any PHI-def values in LI.
SlotIndex InsertMachineInstrInMaps(MachineInstr &MI)
LLVM_ABI bool checkRegMaskInterference(const LiveInterval &LI, BitVector &UsableRegs)
Test if LI is live across any register mask instructions, and compute a bit mask of physical register...
SlotIndexes * getSlotIndexes() const
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
void RemoveMachineInstrFromMaps(MachineInstr &MI)
VNInfo::Allocator & getVNInfoAllocator()
SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const
Return the last index in the given basic block.
LiveInterval & getInterval(Register Reg)
LLVM_ABI void pruneValue(LiveRange &LR, SlotIndex Kill, SmallVectorImpl< SlotIndex > *EndPoints)
If LR has a live value at Kill, prune its live range by removing any liveness reachable from Kill.
void removeInterval(Register Reg)
Interval removal.
LiveRange & getRegUnit(MCRegUnit Unit)
Return the live range for register unit Unit.
LLVM_ABI MachineBasicBlock * intervalIsInOneMBB(const LiveInterval &LI) const
If LI is confined to a single basic block, return a pointer to that block.
LiveRange * getCachedRegUnit(MCRegUnit Unit)
Return the live range for register unit Unit if it has already been computed, or nullptr if it hasn't...
LLVM_ABI void removeVRegDefAt(LiveInterval &LI, SlotIndex Pos)
Remove value number and related live segments of LI and its subranges that start at position Pos.
LLVM_ABI bool shrinkToUses(LiveInterval *li, SmallVectorImpl< MachineInstr * > *dead=nullptr)
After removing some uses of a register, shrink its live range to just the remaining uses.
LLVM_ABI void extendToIndices(LiveRange &LR, ArrayRef< SlotIndex > Indices, ArrayRef< SlotIndex > Undefs)
Extend the live range LR to reach all points in Indices.
LLVM_ABI void dump() const
LLVM_ABI void removePhysRegDefAt(MCRegister Reg, SlotIndex Pos)
Remove value numbers and related live segments starting at position Pos that are part of any liverang...
MachineBasicBlock * getMBBFromIndex(SlotIndex index) const
bool isLiveInToMBB(const LiveRange &LR, const MachineBasicBlock *mbb) const
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
Result of a LiveRange query.
VNInfo * valueOutOrDead() const
Returns the value alive at the end of the instruction, if any.
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
VNInfo * valueOut() const
Return the value leaving the instruction, if any.
VNInfo * valueDefined() const
Return the value defined by this instruction, if any.
SlotIndex endPoint() const
Return the end point of the last live range segment to interact with the instruction,...
bool isKill() const
Return true if the live-in value is killed by this instruction.
Callback methods for LiveRangeEdit owners.
SlotIndex rematerializeAt(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, const Remat &RM, const TargetRegisterInfo &, bool Late=false, unsigned SubIdx=0, MachineInstr *ReplaceIndexMI=nullptr)
rematerializeAt - Rematerialize RM.ParentVNI into DestReg by inserting an instruction into MBB before...
void eliminateDeadDefs(SmallVectorImpl< MachineInstr * > &Dead, ArrayRef< Register > RegsBeingSpilled={})
eliminateDeadDefs - Try to delete machine instructions that are now dead (allDefsAreDead returns true...
This class represents the liveness of a register, stack slot, etc.
VNInfo * getValNumInfo(unsigned ValNo)
getValNumInfo - Returns pointer to the specified val#.
LLVM_ABI iterator addSegment(Segment S)
Add the specified Segment to this range, merging segments as appropriate.
Segments::iterator iterator
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
LLVM_ABI void join(LiveRange &Other, const int *ValNoAssignments, const int *RHSValNoAssignments, SmallVectorImpl< VNInfo * > &NewVNInfo)
join - Join two live ranges (this, and other) together.
bool liveAt(SlotIndex index) const
LLVM_ABI VNInfo * createDeadDef(SlotIndex Def, VNInfo::Allocator &VNIAlloc)
createDeadDef - Make sure the range has a value defined at Def.
LLVM_ABI void removeValNo(VNInfo *ValNo)
removeValNo - Remove all the segments defined by the specified value#.
bool overlaps(const LiveRange &other) const
overlaps - Return true if the intersection of the two live ranges is not empty.
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarily including Idx,...
bool verify() const
Walk the range and assert if any invariants fail to hold.
LLVM_ABI VNInfo * MergeValueNumberInto(VNInfo *V1, VNInfo *V2)
MergeValueNumberInto - This method is called when two value numbers are found to be equivalent.
unsigned getNumValNums() const
bool containsOneValue() const
iterator FindSegmentContaining(SlotIndex Idx)
Return an iterator to the segment that contains the specified index, or end() if there is none.
void assign(const LiveRange &Other, BumpPtrAllocator &Allocator)
Copies values numbers and live segments from Other into this range.
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
LLVM_ABI iterator find(SlotIndex Pos)
find - Return an iterator pointing to the first segment that ends after Pos, or end().
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
MCRegUnitRootIterator enumerates the root registers of a register unit.
bool isValid() const
Check if the iterator is at the end of the list.
Wrapper class representing physical registers. Should be passed by value.
An RAII based helper class to modify MachineFunctionProperties when running pass.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
unsigned pred_size() const
LLVM_ABI bool hasEHPadSuccessor() const
bool isEHPad() const
Returns true if the block is a landing pad.
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
unsigned succ_size() const
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
iterator_range< pred_iterator > predecessors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
LLVM_ABI StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
Analysis pass which computes a MachineDominatorTree.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
bool exposesReturnsTwice() const
exposesReturnsTwice - Returns true if the function calls setjmp or any other similar functions with a...
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
bool verify(Pass *p=nullptr, const char *Banner=nullptr, raw_ostream *OS=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
DenseMap< unsigned, DebugPHIRegallocPos > DebugPHIPositions
Map of debug instruction numbers to the position of their PHI instructions during register allocation...
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
LLVM_ABI void setRegisterDefReadUndef(Register Reg, bool IsUndef=true)
Mark all subregister defs of register Reg with the undef flag.
bool isImplicitDef() const
const MachineBasicBlock * getParent() const
bool isCopyLike() const
Return true if the instruction behaves like a copy.
filtered_mop_range all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
LLVM_ABI std::pair< bool, bool > readsWritesVirtualRegister(Register Reg, SmallVectorImpl< unsigned > *Ops=nullptr) const
Return a pair of bools (reads, writes) indicating if this instruction reads or writes Reg.
bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx=nullptr) const
Return true if the use operand of the specified index is tied to a def operand.
LLVM_ABI bool isSafeToMove(bool &SawStore) const
Return true if it is safe to move this instruction.
bool isDebugInstr() const
unsigned getNumOperands() const
Retuns the total number of operands.
LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
bool isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx=nullptr) const
Given the index of a register def operand, check if the register def is tied to a source operand,...
LLVM_ABI int findRegisterUseOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isKill=false) const
Returns the operand index that is a use of the specific register or -1 if it is not found.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool isCommutable(QueryType Type=IgnoreBundle) const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z,...
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
LLVM_ABI void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI int findRegisterDefOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false) const
Returns the operand index that is a def of the specified register or -1 if it is not found.
void setDebugLoc(DebugLoc DL)
Replace current source information with new such.
LLVM_ABI bool allDefsAreDead() const
Return true if all the defs of this instruction are dead.
Analysis pass that exposes the MachineLoopInfo for a machine function.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
LLVM_ABI void substVirtReg(Register Reg, unsigned SubIdx, const TargetRegisterInfo &)
substVirtReg - Substitute the current register with the virtual subregister Reg:SubReg.
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setIsDead(bool Val=true)
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
void setIsKill(bool Val=true)
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
LLVM_ABI void substPhysReg(MCRegister Reg, const TargetRegisterInfo &)
substPhysReg - Substitute the current register with the physical register Reg, taking any existing Su...
void setIsUndef(bool Val=true)
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
defusechain_instr_iterator< true, true, false, true > reg_instr_iterator
reg_instr_iterator/reg_instr_begin/reg_instr_end - Walk all defs and uses of the specified register,...
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
bool isProperSubClass(const TargetRegisterClass *RC) const
isProperSubClass - Returns true if RC has a legal super-class with more allocatable registers.
unsigned getNumAllocatableRegs(const TargetRegisterClass *RC) const
getNumAllocatableRegs - Returns the number of actually allocatable registers in RC in the current fun...
LLVM_ABI void runOnMachineFunction(const MachineFunction &MF, bool Rev=false)
runOnFunction - Prepare to answer questions about MF.
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
Wrapper class representing virtual and physical registers.
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr unsigned id() const
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
SlotIndex - An opaque wrapper around machine indexes.
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
bool isEarlyClobber() const
isEarlyClobber - Returns true if this is an early-clobber slot.
bool isValid() const
Returns true if this is a valid index.
SlotIndex getBaseIndex() const
Returns the base index for associated with this index.
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
bool isDead() const
isDead - Returns true if this is a dead def kill slot.
MachineBasicBlock * getMBBFromIndex(SlotIndex index) const
Returns the basic block which the given index falls in.
SlotIndex getMBBEndIdx(unsigned Num) const
Returns the index past the last valid index in the given basic block.
SlotIndex getNextNonNullIndex(SlotIndex Index)
Returns the next non-null index, if one exists.
SlotIndex getInstructionIndex(const MachineInstr &MI, bool IgnoreBundle=false) const
Returns the base index for the given instruction.
SlotIndex getMBBStartIdx(unsigned Num) const
Returns the first index in the given basic block number.
SlotIndex getIndexBefore(const MachineInstr &MI) const
getIndexBefore - Returns the index of the last indexed instruction before MI, or the start index of i...
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction for the given index, or null if the given index has no instruction associated...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
TargetInstrInfo - Interface to description of machine instruction set.
static const unsigned CommuteAnyOperandIndex
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual bool enableJoinGlobalCopies() const
True if the subtarget should enable joining global copies.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
VNInfo - Value Number Information.
void markUnused()
Mark this value as unused.
BumpPtrAllocator Allocator
bool isUnused() const
Returns true if this value is unused.
unsigned id
The ID number of this value.
SlotIndex def
The index of the defining instruction.
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been eliminated).
static bool allUsesAvailableAt(const MachineInstr *MI, SlotIndex UseIdx, const LiveIntervals &LIS, const MachineRegisterInfo &MRI, const TargetInstrInfo &TII)
std::pair< iterator, bool > insert(const ValueT &V)
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
This namespace contains all of the command line option processing machinery.
initializer< Ty > init(const Ty &Val)
PointerTypeMap run(const Module &M)
Compute the PointerTypeMap for the module M.
NodeAddr< DefNode * > Def
This is an optimization pass for GlobalISel generic memory operations.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI char & RegisterCoalescerID
RegisterCoalescer - This pass merges live ranges to eliminate copies.
LLVM_ABI char & MachineDominatorsID
MachineDominators - This pass is a machine dominators analysis pass.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iterators.
Printable PrintLaneMask(LaneBitmask LaneMask)
Create Printable object to print LaneBitmasks on a raw_ostream.
LLVM_ABI Printable printRegUnit(MCRegUnit Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
auto unique(Range &&R, Predicate P)
auto upper_bound(R &&Range, T &&Value)
Provide wrappers to std::upper_bound which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI void initializeRegisterCoalescerLegacyPass(PassRegistry &)
LLVM_ABI PreservedAnalyses getMachineFunctionPassPreservedAnalyses()
Returns the minimum set of Analyses that all machine function passes must preserve.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector&lt;T, 0&gt;).
@ Success
The lock was released successfully.
MutableArrayRef(T &OneElt) -> MutableArrayRef< T >
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
DWARFExpression::Operation Op
auto make_second_range(ContainerTy &&c)
Given a container of pairs, return a range over the second elements.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
LLVM_ABI void eraseInstrs(ArrayRef< MachineInstr * > DeadInstrs, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
void array_pod_sort(IteratorTy Start, IteratorTy End)
array_pod_sort - This sorts an array with the specified start and end extent.
BumpPtrAllocatorImpl<> BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
LLVM_ABI Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static constexpr LaneBitmask getLane(unsigned Lane)
static constexpr LaneBitmask getAll()
constexpr bool any() const
static constexpr LaneBitmask getNone()
Remat - Information needed to rematerialize at a specific location.
This represents a simple continuous liveness interval for a value.