63#define DEBUG_TYPE "regalloc"
65STATISTIC(numJoins ,
"Number of interval joins performed");
66STATISTIC(numCrossRCs ,
"Number of cross class joins performed");
67STATISTIC(numCommutes ,
"Number of instruction commuting performed");
69STATISTIC(NumReMats ,
"Number of instructions re-materialized");
70STATISTIC(NumInflated ,
"Number of register classes inflated");
71STATISTIC(NumLaneConflicts,
"Number of dead lane conflicts tested");
72STATISTIC(NumLaneResolves,
"Number of dead lane conflicts resolved");
73STATISTIC(NumShrinkToUses,
"Number of shrinkToUses called");
76 cl::desc(
"Coalesce copies (default=true)"),
91 cl::desc(
"Coalesce copies that span blocks (default=subtarget)"),
96 cl::desc(
"Verify machine instrs before and after register coalescing"),
101 cl::desc(
"During rematerialization for a copy, if the def instruction has "
102 "many other copy uses to be rematerialized, delay the multiple "
103 "separate live interval update work and do them all at once after "
104 "all those rematerialization are done. It will save a lot of "
110 cl::desc(
"If the valnos size of an interval is larger than the threshold, "
111 "it is regarded as a large interval. "),
116 cl::desc(
"For a large interval, if it is coalesed with other live "
117 "intervals many times more than the threshold, stop its "
118 "coalescing to control the compile time. "),
153 using DbgValueLoc = std::pair<SlotIndex, MachineInstr*>;
162 bool ShrinkMainRange =
false;
166 bool JoinGlobalCopies =
false;
170 bool JoinSplitEdges =
false;
202 void coalesceLocals();
205 void joinAllIntervals();
220 void lateLiveIntervalUpdate();
225 bool copyValueUndefInPredecessors(
LiveRange &S,
291 std::pair<bool,bool> removeCopyByCommutingDef(
const CoalescerPair &CP,
362 MI->eraseFromParent();
389 MachineFunctionProperties::Property::IsSSA);
403char RegisterCoalescer::ID = 0;
408 "Register Coalescer",
false,
false)
421 Dst =
MI->getOperand(0).getReg();
422 DstSub =
MI->getOperand(0).getSubReg();
423 Src =
MI->getOperand(1).getReg();
424 SrcSub =
MI->getOperand(1).getSubReg();
425 }
else if (
MI->isSubregToReg()) {
426 Dst =
MI->getOperand(0).getReg();
427 DstSub = tri.composeSubRegIndices(
MI->getOperand(0).getSubReg(),
428 MI->getOperand(3).getImm());
429 Src =
MI->getOperand(2).getReg();
430 SrcSub =
MI->getOperand(2).getSubReg();
445 for (
const auto &
MI : *
MBB) {
446 if (!
MI.isCopyLike() && !
MI.isUnconditionalBranch())
456 Flipped = CrossClass =
false;
459 unsigned SrcSub = 0, DstSub = 0;
462 Partial = SrcSub || DstSub;
465 if (Src.isPhysical()) {
466 if (Dst.isPhysical())
475 if (Dst.isPhysical()) {
478 Dst =
TRI.getSubReg(Dst, DstSub);
479 if (!Dst)
return false;
485 Dst =
TRI.getMatchingSuperReg(Dst, SrcSub,
MRI.getRegClass(Src));
486 if (!Dst)
return false;
487 }
else if (!
MRI.getRegClass(Src)->contains(Dst)) {
496 if (SrcSub && DstSub) {
498 if (Src == Dst && SrcSub != DstSub)
501 NewRC =
TRI.getCommonSuperRegClass(SrcRC, SrcSub, DstRC, DstSub,
508 NewRC =
TRI.getMatchingSuperRegClass(DstRC, SrcRC, DstSub);
512 NewRC =
TRI.getMatchingSuperRegClass(SrcRC, DstRC, SrcSub);
515 NewRC =
TRI.getCommonSubClass(DstRC, SrcRC);
524 if (DstIdx && !SrcIdx) {
530 CrossClass = NewRC != DstRC || NewRC != SrcRC;
533 assert(Src.isVirtual() &&
"Src must be virtual");
534 assert(!(Dst.isPhysical() && DstSub) &&
"Cannot have a physical SubIdx");
553 unsigned SrcSub = 0, DstSub = 0;
561 }
else if (Src != SrcReg) {
567 if (!Dst.isPhysical())
569 assert(!DstIdx && !SrcIdx &&
"Inconsistent CoalescerPair state.");
572 Dst =
TRI.getSubReg(Dst, DstSub);
575 return DstReg == Dst;
577 return Register(
TRI.getSubReg(DstReg, SrcSub)) == Dst;
583 return TRI.composeSubRegIndices(SrcIdx, SrcSub) ==
584 TRI.composeSubRegIndices(DstIdx, DstSub);
588void RegisterCoalescer::getAnalysisUsage(
AnalysisUsage &AU)
const {
600void RegisterCoalescer::eliminateDeadDefs(
LiveRangeEdit *Edit) {
610void RegisterCoalescer::LRE_WillEraseInstruction(
MachineInstr *
MI) {
615bool RegisterCoalescer::adjustCopiesBackFrom(
const CoalescerPair &CP,
617 assert(!
CP.isPartial() &&
"This doesn't work for partial copies.");
618 assert(!
CP.isPhys() &&
"This doesn't work for physreg copies.");
643 if (BS == IntB.
end())
return false;
644 VNInfo *BValNo = BS->valno;
649 if (BValNo->
def != CopyIdx)
return false;
655 if (AS == IntA.
end())
return false;
656 VNInfo *AValNo = AS->valno;
662 if (!
CP.isCoalescable(ACopyMI) || !ACopyMI->
isFullCopy())
668 if (ValS == IntB.
end())
681 if (ValS+1 != BS)
return false;
685 SlotIndex FillerStart = ValS->end, FillerEnd = BS->start;
689 BValNo->
def = FillerStart;
697 if (BValNo != ValS->valno)
706 S.removeSegment(*SS,
true);
710 if (!S.getVNInfoAt(FillerStart)) {
713 S.extendInBlock(BBStart, FillerStart);
715 VNInfo *SubBValNo = S.getVNInfoAt(CopyIdx);
718 if (SubBValNo != SubValSNo)
719 S.MergeValueNumberInto(SubBValNo, SubValSNo);
735 bool RecomputeLiveRange = AS->end == CopyIdx;
736 if (!RecomputeLiveRange) {
739 if (SS != S.end() &&
SS->end == CopyIdx) {
740 RecomputeLiveRange =
true;
745 if (RecomputeLiveRange)
752bool RegisterCoalescer::hasOtherReachingDefs(
LiveInterval &IntA,
762 if (ASeg.
valno != AValNo)
continue;
764 if (BI != IntB.
begin())
766 for (; BI != IntB.
end() && ASeg.
end >= BI->start; ++BI) {
767 if (BI->valno == BValNo)
769 if (BI->start <= ASeg.
start && BI->end > ASeg.
start)
771 if (BI->start > ASeg.
start && BI->start < ASeg.
end)
780static std::pair<bool,bool>
783 bool Changed =
false;
784 bool MergedWithDead =
false;
786 if (S.
valno != SrcValNo)
797 MergedWithDead =
true;
800 return std::make_pair(Changed, MergedWithDead);
804RegisterCoalescer::removeCopyByCommutingDef(
const CoalescerPair &CP,
837 assert(BValNo !=
nullptr && BValNo->
def == CopyIdx);
843 return {
false,
false };
846 return {
false,
false };
848 return {
false,
false };
855 return {
false,
false };
867 if (!
TII->findCommutedOpIndices(*
DefMI, UseOpIdx, NewDstIdx))
868 return {
false,
false };
873 return {
false,
false };
877 if (hasOtherReachingDefs(IntA, IntB, AValNo, BValNo))
878 return {
false,
false };
887 if (US == IntA.
end() || US->valno != AValNo)
891 return {
false,
false };
901 TII->commuteInstruction(*
DefMI,
false, UseOpIdx, NewDstIdx);
903 return {
false,
false };
905 !
MRI->constrainRegClass(IntB.
reg(),
MRI->getRegClass(IntA.
reg())))
906 return {
false,
false };
907 if (NewMI !=
DefMI) {
932 UseMO.setReg(NewReg);
937 assert(US != IntA.
end() &&
"Use must be live");
938 if (US->valno != AValNo)
941 UseMO.setIsKill(
false);
943 UseMO.substPhysReg(NewReg, *
TRI);
945 UseMO.setReg(NewReg);
964 VNInfo *SubDVNI = S.getVNInfoAt(DefIdx);
967 VNInfo *SubBValNo = S.getVNInfoAt(CopyIdx);
969 S.MergeValueNumberInto(SubDVNI, SubBValNo);
977 bool ShrinkB =
false;
991 VNInfo *ASubValNo = SA.getVNInfoAt(AIdx);
1000 MaskA |= SA.LaneMask;
1003 Allocator, SA.LaneMask,
1004 [&Allocator, &SA, CopyIdx, ASubValNo,
1006 VNInfo *BSubValNo = SR.empty() ? SR.getNextValue(CopyIdx, Allocator)
1007 : SR.getVNInfoAt(CopyIdx);
1008 assert(BSubValNo != nullptr);
1009 auto P = addSegmentsWithValNo(SR, BSubValNo, SA, ASubValNo);
1010 ShrinkB |= P.second;
1012 BSubValNo->def = ASubValNo->def;
1020 if ((SB.LaneMask & MaskA).any())
1024 SB.removeSegment(*S,
true);
1028 BValNo->
def = AValNo->
def;
1030 ShrinkB |=
P.second;
1037 return {
true, ShrinkB };
1087bool RegisterCoalescer::removePartialRedundancy(
const CoalescerPair &CP,
1120 bool FoundReverseCopy =
false;
1139 bool ValB_Changed =
false;
1140 for (
auto *VNI : IntB.
valnos) {
1141 if (VNI->isUnused())
1144 ValB_Changed =
true;
1152 FoundReverseCopy =
true;
1156 if (!FoundReverseCopy)
1166 if (CopyLeftBB && CopyLeftBB->
succ_size() > 1)
1177 if (InsPos != CopyLeftBB->
end()) {
1183 LLVM_DEBUG(
dbgs() <<
"\tremovePartialRedundancy: Move the copy to "
1188 TII->get(TargetOpcode::COPY), IntB.
reg())
1199 ErasedInstrs.
erase(NewCopyMI);
1201 LLVM_DEBUG(
dbgs() <<
"\tremovePartialRedundancy: Remove the copy from "
1212 deleteInstr(&CopyMI);
1228 if (!IntB.
liveAt(UseIdx))
1229 MO.setIsUndef(
true);
1239 VNInfo *BValNo = SR.Query(CopyIdx).valueOutOrDead();
1240 assert(BValNo &&
"All sublanes should be live");
1249 for (
unsigned I = 0;
I != EndPoints.
size(); ) {
1251 EndPoints[
I] = EndPoints.
back();
1273 assert(!Reg.isPhysical() &&
"This code cannot handle physreg aliasing");
1276 if (
Op.getReg() != Reg)
1280 if (
Op.getSubReg() == 0 ||
Op.isUndef())
1286bool RegisterCoalescer::reMaterializeTrivialDef(
const CoalescerPair &CP,
1290 Register SrcReg =
CP.isFlipped() ?
CP.getDstReg() :
CP.getSrcReg();
1291 unsigned SrcIdx =
CP.isFlipped() ?
CP.getDstIdx() :
CP.getSrcIdx();
1292 Register DstReg =
CP.isFlipped() ?
CP.getSrcReg() :
CP.getDstReg();
1293 unsigned DstIdx =
CP.isFlipped() ?
CP.getSrcIdx() :
CP.getDstIdx();
1315 LiveRangeEdit Edit(&SrcInt, NewRegs, *MF, *LIS,
nullptr,
this);
1321 bool SawStore =
false;
1338 if (SrcIdx && DstIdx)
1347 unsigned NewDstIdx =
TRI->composeSubRegIndices(
CP.getSrcIdx(),
1350 NewDstReg =
TRI->getSubReg(DstReg, NewDstIdx);
1360 "Only expect to deal with virtual or physical registers");
1386 assert(SrcIdx == 0 &&
CP.isFlipped()
1387 &&
"Shouldn't have SrcIdx+DstIdx at this point");
1390 TRI->getCommonSubClass(DefRC, DstRC);
1391 if (CommonRC !=
nullptr) {
1399 if (MO.isReg() && MO.getReg() == DstReg && MO.getSubReg() == DstIdx) {
1420 assert(MO.
isImplicit() &&
"No explicit operands after implicit operands.");
1423 "unexpected implicit virtual register def");
1429 ErasedInstrs.
insert(CopyMI);
1443 bool NewMIDefinesFullReg =
false;
1453 if (MO.
getReg() == DstReg)
1454 NewMIDefinesFullReg =
true;
1459 ((
TRI->getSubReg(MO.
getReg(), DefSubIdx) ==
1472 assert(!
MRI->shouldTrackSubRegLiveness(DstReg) &&
1473 "subrange update for implicit-def of super register may not be "
1474 "properly handled");
1482 if (DefRC !=
nullptr) {
1484 NewRC =
TRI->getMatchingSuperRegClass(NewRC, DefRC, NewIdx);
1486 NewRC =
TRI->getCommonSubClass(NewRC, DefRC);
1487 assert(NewRC &&
"subreg chosen for remat incompatible with instruction");
1492 SR.LaneMask =
TRI->composeSubRegIndexLaneMask(DstIdx, SR.LaneMask);
1494 MRI->setRegClass(DstReg, NewRC);
1497 updateRegDefsUses(DstReg, DstReg, DstIdx);
1525 if (!SR.liveAt(DefIndex))
1526 SR.createDeadDef(DefIndex,
Alloc);
1527 MaxMask &= ~SR.LaneMask;
1529 if (MaxMask.
any()) {
1547 bool UpdatedSubRanges =
false;
1552 if ((SR.
LaneMask & DstMask).none()) {
1554 <<
"Removing undefined SubRange "
1567 UpdatedSubRanges =
true;
1578 if (UpdatedSubRanges)
1585 "Only expect virtual or physical registers in remat");
1588 if (!NewMIDefinesFullReg) {
1590 CopyDstReg,
true ,
true ,
false ));
1633 if (
MRI->use_nodbg_empty(SrcReg)) {
1639 UseMO.substPhysReg(DstReg, *
TRI);
1641 UseMO.setReg(DstReg);
1650 if (ToBeUpdated.
count(SrcReg))
1653 unsigned NumCopyUses = 0;
1655 if (UseMO.getParent()->isCopyLike())
1661 if (!DeadDefs.
empty())
1662 eliminateDeadDefs(&Edit);
1664 ToBeUpdated.
insert(SrcReg);
1682 unsigned SrcSubIdx = 0, DstSubIdx = 0;
1683 if(!
isMoveInstr(*
TRI, CopyMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
1692 if ((SR.
LaneMask & SrcMask).none())
1705 assert(Seg !=
nullptr &&
"No segment for defining instruction");
1710 if (((V &&
V->isPHIDef()) || (!V && !DstLI.
liveAt(
Idx)))) {
1718 CopyMI->
getOpcode() == TargetOpcode::SUBREG_TO_REG);
1723 CopyMI->
setDesc(
TII->get(TargetOpcode::IMPLICIT_DEF));
1740 if ((SR.
LaneMask & DstMask).none())
1762 if ((SR.
LaneMask & UseMask).none())
1770 isLive = DstLI.
liveAt(UseIdx);
1783 if (MO.
getReg() == DstReg)
1795 bool IsUndef =
true;
1797 if ((S.LaneMask & Mask).none())
1799 if (S.liveAt(UseIdx)) {
1812 ShrinkMainRange =
true;
1821 if (DstInt && DstInt->
hasSubRanges() && DstReg != SrcReg) {
1827 if (
MI.isDebugInstr())
1830 addUndefFlag(*DstInt, UseIdx, MO,
SubReg);
1836 I =
MRI->reg_instr_begin(SrcReg), E =
MRI->reg_instr_end();
1845 if (SrcReg == DstReg && !Visited.
insert(
UseMI).second)
1858 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i) {
1864 if (SubIdx && MO.
isDef())
1869 if (MO.
isUse() && !DstIsPhys) {
1870 unsigned SubUseIdx =
TRI->composeSubRegIndices(SubIdx, MO.
getSubReg());
1871 if (SubUseIdx != 0 &&
MRI->shouldTrackSubRegLiveness(DstReg)) {
1888 addUndefFlag(*DstInt, UseIdx, MO, SubUseIdx);
1899 dbgs() <<
"\t\tupdated: ";
1907bool RegisterCoalescer::canJoinPhys(
const CoalescerPair &CP) {
1911 if (!
MRI->isReserved(
CP.getDstReg())) {
1912 LLVM_DEBUG(
dbgs() <<
"\tCan only merge into reserved registers.\n");
1921 dbgs() <<
"\tCannot join complex intervals into reserved register.\n");
1925bool RegisterCoalescer::copyValueUndefInPredecessors(
1939void RegisterCoalescer::setUndefOnPrunedSubRegUses(
LiveInterval &LI,
1946 if (SubRegIdx == 0 || MO.
isUndef())
1952 if (!S.
liveAt(Pos) && (PrunedLanes & SubRegMask).any()) {
1968bool RegisterCoalescer::joinCopy(
1975 if (!
CP.setRegisters(CopyMI)) {
1980 if (
CP.getNewRC()) {
1981 auto SrcRC =
MRI->getRegClass(
CP.getSrcReg());
1982 auto DstRC =
MRI->getRegClass(
CP.getDstReg());
1983 unsigned SrcIdx =
CP.getSrcIdx();
1984 unsigned DstIdx =
CP.getDstIdx();
1985 if (
CP.isFlipped()) {
1989 if (!
TRI->shouldCoalesce(CopyMI, SrcRC, SrcIdx, DstRC, DstIdx,
1990 CP.getNewRC(), *LIS)) {
2002 eliminateDeadDefs();
2009 if (
MachineInstr *UndefMI = eliminateUndefCopy(CopyMI)) {
2010 if (UndefMI->isImplicitDef())
2012 deleteInstr(CopyMI);
2020 if (
CP.getSrcReg() ==
CP.getDstReg()) {
2022 LLVM_DEBUG(
dbgs() <<
"\tCopy already coalesced: " << LI <<
'\n');
2027 assert(ReadVNI &&
"No value before copy and no <undef> flag.");
2028 assert(ReadVNI != DefVNI &&
"Cannot read and define the same value.");
2043 if (copyValueUndefInPredecessors(S,
MBB, SLRQ)) {
2044 LLVM_DEBUG(
dbgs() <<
"Incoming sublane value is undef at copy\n");
2045 PrunedLanes |= S.LaneMask;
2052 if (PrunedLanes.
any()) {
2054 << PrunedLanes <<
'\n');
2055 setUndefOnPrunedSubRegUses(LI,
CP.getSrcReg(), PrunedLanes);
2060 deleteInstr(CopyMI);
2069 if (!canJoinPhys(CP)) {
2072 bool IsDefCopy =
false;
2073 if (reMaterializeTrivialDef(CP, CopyMI, IsDefCopy))
2086 dbgs() <<
"\tConsidering merging to "
2087 <<
TRI->getRegClassName(
CP.getNewRC()) <<
" with ";
2088 if (
CP.getDstIdx() &&
CP.getSrcIdx())
2090 <<
TRI->getSubRegIndexName(
CP.getDstIdx()) <<
" and "
2092 <<
TRI->getSubRegIndexName(
CP.getSrcIdx()) <<
'\n';
2100 ShrinkMainRange =
false;
2106 if (!joinIntervals(CP)) {
2111 bool IsDefCopy =
false;
2112 if (reMaterializeTrivialDef(CP, CopyMI, IsDefCopy))
2117 if (!
CP.isPartial() && !
CP.isPhys()) {
2118 bool Changed = adjustCopiesBackFrom(CP, CopyMI);
2119 bool Shrink =
false;
2121 std::tie(Changed, Shrink) = removeCopyByCommutingDef(CP, CopyMI);
2123 deleteInstr(CopyMI);
2125 Register DstReg =
CP.isFlipped() ?
CP.getSrcReg() :
CP.getDstReg();
2137 if (!
CP.isPartial() && !
CP.isPhys())
2138 if (removePartialRedundancy(CP, *CopyMI))
2149 if (
CP.isCrossClass()) {
2151 MRI->setRegClass(
CP.getDstReg(),
CP.getNewRC());
2162 if (ErasedInstrs.
erase(CopyMI))
2164 CurrentErasedInstrs.
insert(CopyMI);
2169 updateRegDefsUses(
CP.getDstReg(),
CP.getDstReg(),
CP.getDstIdx());
2170 updateRegDefsUses(
CP.getSrcReg(),
CP.getDstReg(),
CP.getSrcIdx());
2173 if (ShrinkMask.
any()) {
2176 if ((S.LaneMask & ShrinkMask).none())
2181 ShrinkMainRange =
true;
2189 if (ToBeUpdated.
count(
CP.getSrcReg()))
2190 ShrinkMainRange =
true;
2192 if (ShrinkMainRange) {
2202 TRI->updateRegAllocHint(
CP.getSrcReg(),
CP.getDstReg(), *MF);
2207 dbgs() <<
"\tResult = ";
2219bool RegisterCoalescer::joinReservedPhysReg(
CoalescerPair &CP) {
2222 assert(
CP.isPhys() &&
"Must be a physreg copy");
2223 assert(
MRI->isReserved(DstReg) &&
"Not a reserved register");
2227 assert(
RHS.containsOneValue() &&
"Invalid join with reserved register");
2236 if (!
MRI->isConstantPhysReg(DstReg)) {
2240 if (!
MRI->isReserved(*RI))
2253 !RegMaskUsable.
test(DstReg)) {
2266 if (
CP.isFlipped()) {
2274 CopyMI =
MRI->getVRegDef(SrcReg);
2275 deleteInstr(CopyMI);
2284 if (!
MRI->hasOneNonDBGUse(SrcReg)) {
2295 CopyMI = &*
MRI->use_instr_nodbg_begin(SrcReg);
2299 if (!
MRI->isConstantPhysReg(DstReg)) {
2307 if (
MI->readsRegister(DstReg,
TRI)) {
2317 <<
printReg(DstReg,
TRI) <<
" at " << CopyRegIdx <<
"\n");
2320 deleteInstr(CopyMI);
2330 MRI->clearKillFlags(
CP.getSrcReg());
2415 const unsigned SubIdx;
2423 const bool SubRangeJoin;
2426 const bool TrackSubRegLiveness;
2442 enum ConflictResolution {
2474 ConflictResolution Resolution = CR_Keep;
2484 VNInfo *RedefVNI =
nullptr;
2487 VNInfo *OtherVNI =
nullptr;
2500 bool ErasableImplicitDef =
false;
2504 bool Pruned =
false;
2507 bool PrunedComputed =
false;
2514 bool Identical =
false;
2518 bool isAnalyzed()
const {
return WriteLanes.
any(); }
2525 ErasableImplicitDef =
false;
2539 std::pair<const VNInfo *, Register> followCopyChain(
const VNInfo *VNI)
const;
2541 bool valuesIdentical(
VNInfo *Value0,
VNInfo *Value1,
const JoinVals &
Other)
const;
2550 ConflictResolution analyzeValue(
unsigned ValNo, JoinVals &
Other);
2555 void computeAssignment(
unsigned ValNo, JoinVals &
Other);
2586 bool isPrunedValue(
unsigned ValNo, JoinVals &
Other);
2592 bool TrackSubRegLiveness)
2593 : LR(LR),
Reg(
Reg), SubIdx(SubIdx), LaneMask(LaneMask),
2594 SubRangeJoin(SubRangeJoin), TrackSubRegLiveness(TrackSubRegLiveness),
2595 NewVNInfo(newVNInfo),
CP(cp), LIS(lis), Indexes(LIS->getSlotIndexes()),
2596 TRI(
TRI), Assignments(LR.getNumValNums(), -1),
2597 Vals(LR.getNumValNums()) {}
2601 bool mapValues(JoinVals &
Other);
2605 bool resolveConflicts(JoinVals &
Other);
2625 void pruneMainSegments(
LiveInterval &LI,
bool &ShrinkMainRange);
2636 void removeImplicitDefs();
2639 const int *getAssignments()
const {
return Assignments.
data(); }
2642 ConflictResolution getResolution(
unsigned Num)
const {
2643 return Vals[Num].Resolution;
2655 L |=
TRI->getSubRegIndexLaneMask(
2663std::pair<const VNInfo *, Register>
2664JoinVals::followCopyChain(
const VNInfo *VNI)
const {
2670 assert(
MI &&
"No defining instruction");
2671 if (!
MI->isFullCopy())
2672 return std::make_pair(VNI, TrackReg);
2673 Register SrcReg =
MI->getOperand(1).getReg();
2675 return std::make_pair(VNI, TrackReg);
2689 LaneBitmask SMask =
TRI->composeSubRegIndexLaneMask(SubIdx, S.LaneMask);
2690 if ((SMask & LaneMask).
none())
2698 return std::make_pair(VNI, TrackReg);
2701 if (ValueIn ==
nullptr) {
2708 return std::make_pair(
nullptr, SrcReg);
2713 return std::make_pair(VNI, TrackReg);
2716bool JoinVals::valuesIdentical(
VNInfo *Value0,
VNInfo *Value1,
2717 const JoinVals &
Other)
const {
2720 std::tie(Orig0, Reg0) = followCopyChain(Value0);
2721 if (Orig0 == Value1 && Reg0 ==
Other.Reg)
2726 std::tie(Orig1, Reg1) =
Other.followCopyChain(Value1);
2730 if (Orig0 ==
nullptr || Orig1 ==
nullptr)
2731 return Orig0 == Orig1 && Reg0 == Reg1;
2737 return Orig0->
def == Orig1->
def && Reg0 == Reg1;
2740JoinVals::ConflictResolution
2741JoinVals::analyzeValue(
unsigned ValNo, JoinVals &
Other) {
2742 Val &
V = Vals[ValNo];
2743 assert(!
V.isAnalyzed() &&
"Value has already been analyzed!");
2755 :
TRI->getSubRegIndexLaneMask(SubIdx);
2756 V.ValidLanes =
V.WriteLanes = Lanes;
2765 V.ErasableImplicitDef =
true;
2769 V.ValidLanes =
V.WriteLanes = computeWriteLanes(
DefMI, Redef);
2788 assert((TrackSubRegLiveness ||
V.RedefVNI) &&
2789 "Instruction is reading nonexistent value");
2790 if (
V.RedefVNI !=
nullptr) {
2791 computeAssignment(
V.RedefVNI->id,
Other);
2792 V.ValidLanes |= Vals[
V.RedefVNI->id].ValidLanes;
2804 V.ErasableImplicitDef =
true;
2821 if (OtherVNI->
def < VNI->
def)
2822 Other.computeAssignment(OtherVNI->
id, *
this);
2827 return CR_Impossible;
2829 V.OtherVNI = OtherVNI;
2830 Val &OtherV =
Other.Vals[OtherVNI->
id];
2834 if (!OtherV.isAnalyzed() ||
Other.Assignments[OtherVNI->
id] == -1)
2841 if ((
V.ValidLanes & OtherV.ValidLanes).any())
2843 return CR_Impossible;
2858 Other.computeAssignment(
V.OtherVNI->id, *
this);
2859 Val &OtherV =
Other.Vals[
V.OtherVNI->id];
2861 if (OtherV.ErasableImplicitDef) {
2881 <<
", keeping it.\n");
2882 OtherV.mustKeepImplicitDef(*
TRI, *OtherImpDef);
2889 dbgs() <<
"IMPLICIT_DEF defined at " <<
V.OtherVNI->def
2890 <<
" may be live into EH pad successors, keeping it.\n");
2891 OtherV.mustKeepImplicitDef(*
TRI, *OtherImpDef);
2894 OtherV.ValidLanes &= ~OtherV.WriteLanes;
2909 if (
CP.isCoalescable(
DefMI)) {
2912 V.ValidLanes &= ~V.WriteLanes | OtherV.ValidLanes;
2927 valuesIdentical(VNI,
V.OtherVNI,
Other)) {
2950 if ((
V.WriteLanes & OtherV.ValidLanes).none())
2963 "Only early clobber defs can overlap a kill");
2964 return CR_Impossible;
2971 if ((
TRI->getSubRegIndexLaneMask(
Other.SubIdx) & ~
V.WriteLanes).none())
2972 return CR_Impossible;
2974 if (TrackSubRegLiveness) {
2979 if (!OtherLI.hasSubRanges()) {
2981 return (OtherMask &
V.WriteLanes).none() ? CR_Replace : CR_Impossible;
2989 TRI->composeSubRegIndexLaneMask(
Other.SubIdx, OtherSR.LaneMask);
2990 if ((OtherMask &
V.WriteLanes).none())
2993 auto OtherSRQ = OtherSR.Query(VNI->
def);
2994 if (OtherSRQ.valueIn() && OtherSRQ.endPoint() > VNI->
def) {
2996 return CR_Impossible;
3009 return CR_Impossible;
3018 return CR_Unresolved;
3021void JoinVals::computeAssignment(
unsigned ValNo, JoinVals &
Other) {
3022 Val &
V = Vals[ValNo];
3023 if (
V.isAnalyzed()) {
3026 assert(Assignments[ValNo] != -1 &&
"Bad recursion?");
3029 switch ((
V.Resolution = analyzeValue(ValNo,
Other))) {
3033 assert(
V.OtherVNI &&
"OtherVNI not assigned, can't merge.");
3034 assert(
Other.Vals[
V.OtherVNI->id].isAnalyzed() &&
"Missing recursion");
3035 Assignments[ValNo] =
Other.Assignments[
V.OtherVNI->id];
3039 <<
V.OtherVNI->def <<
" --> @"
3040 << NewVNInfo[Assignments[ValNo]]->def <<
'\n');
3043 case CR_Unresolved: {
3045 assert(
V.OtherVNI &&
"OtherVNI not assigned, can't prune");
3046 Val &OtherV =
Other.Vals[
V.OtherVNI->id];
3047 OtherV.Pruned =
true;
3052 Assignments[ValNo] = NewVNInfo.
size();
3058bool JoinVals::mapValues(JoinVals &
Other) {
3060 computeAssignment(i,
Other);
3061 if (Vals[i].Resolution == CR_Impossible) {
3079 assert(OtherI !=
Other.LR.end() &&
"No conflict?");
3084 if (
End >= MBBEnd) {
3086 << OtherI->valno->id <<
'@' << OtherI->start <<
'\n');
3090 << OtherI->valno->id <<
'@' << OtherI->start <<
" to "
3095 TaintExtent.push_back(std::make_pair(
End, TaintedLanes));
3098 if (++OtherI ==
Other.LR.end() || OtherI->start >= MBBEnd)
3102 const Val &OV =
Other.Vals[OtherI->valno->id];
3103 TaintedLanes &= ~OV.WriteLanes;
3106 }
while (TaintedLanes.
any());
3112 if (
MI.isDebugOrPseudoInstr())
3119 unsigned S =
TRI->composeSubRegIndices(SubIdx, MO.
getSubReg());
3120 if ((Lanes &
TRI->getSubRegIndexLaneMask(S)).any())
3126bool JoinVals::resolveConflicts(JoinVals &
Other) {
3129 assert(
V.Resolution != CR_Impossible &&
"Unresolvable conflict");
3130 if (
V.Resolution != CR_Unresolved)
3139 assert(
V.OtherVNI &&
"Inconsistent conflict resolution.");
3141 const Val &OtherV =
Other.Vals[
V.OtherVNI->id];
3146 LaneBitmask TaintedLanes =
V.WriteLanes & OtherV.ValidLanes;
3148 if (!taintExtent(i, TaintedLanes,
Other, TaintExtent))
3152 assert(!TaintExtent.
empty() &&
"There should be at least one conflict.");
3165 "Interference ends on VNI->def. Should have been handled earlier");
3168 assert(LastMI &&
"Range must end at a proper instruction");
3169 unsigned TaintNum = 0;
3172 if (usesLanes(*
MI,
Other.Reg,
Other.SubIdx, TaintedLanes)) {
3177 if (&*
MI == LastMI) {
3178 if (++TaintNum == TaintExtent.
size())
3181 assert(LastMI &&
"Range must end at a proper instruction");
3182 TaintedLanes = TaintExtent[TaintNum].second;
3188 V.Resolution = CR_Replace;
3194bool JoinVals::isPrunedValue(
unsigned ValNo, JoinVals &
Other) {
3195 Val &
V = Vals[ValNo];
3196 if (
V.Pruned ||
V.PrunedComputed)
3199 if (
V.Resolution != CR_Erase &&
V.Resolution != CR_Merge)
3204 V.PrunedComputed =
true;
3205 V.Pruned =
Other.isPrunedValue(
V.OtherVNI->id, *
this);
3209void JoinVals::pruneValues(JoinVals &
Other,
3211 bool changeInstrs) {
3214 switch (Vals[i].Resolution) {
3224 Val &OtherV =
Other.Vals[Vals[i].OtherVNI->id];
3225 bool EraseImpDef = OtherV.ErasableImplicitDef &&
3226 OtherV.Resolution == CR_Keep;
3227 if (!
Def.isBlock()) {
3247 <<
": " <<
Other.LR <<
'\n');
3252 if (isPrunedValue(i,
Other)) {
3259 << Def <<
": " << LR <<
'\n');
3317 bool DidPrune =
false;
3322 if (
V.Resolution != CR_Erase &&
3323 (
V.Resolution != CR_Keep || !
V.ErasableImplicitDef || !
V.Pruned))
3330 OtherDef =
V.OtherVNI->def;
3333 LLVM_DEBUG(
dbgs() <<
"\t\tExpecting instruction removal at " << Def
3341 if (ValueOut !=
nullptr && (Q.
valueIn() ==
nullptr ||
3342 (
V.Identical &&
V.Resolution == CR_Erase &&
3343 ValueOut->
def == Def))) {
3345 <<
" at " << Def <<
"\n");
3352 if (
V.Identical && S.Query(OtherDef).valueOutOrDead()) {
3362 ShrinkMask |= S.LaneMask;
3376 ShrinkMask |= S.LaneMask;
3388 if (VNI->
def == Def)
3394void JoinVals::pruneMainSegments(
LiveInterval &LI,
bool &ShrinkMainRange) {
3398 if (Vals[i].Resolution != CR_Keep)
3403 Vals[i].Pruned =
true;
3404 ShrinkMainRange =
true;
3408void JoinVals::removeImplicitDefs() {
3411 if (
V.Resolution != CR_Keep || !
V.ErasableImplicitDef || !
V.Pruned)
3427 switch (Vals[i].Resolution) {
3432 if (!Vals[i].ErasableImplicitDef || !Vals[i].Pruned)
3444 if (LI !=
nullptr) {
3469 ED = ED.
isValid() ? std::min(ED,
I->start) :
I->start;
3471 LE =
LE.isValid() ? std::max(LE,
I->end) :
I->
end;
3474 NewEnd = std::min(NewEnd, LE);
3476 NewEnd = std::min(NewEnd, ED);
3482 if (S != LR.
begin())
3483 std::prev(S)->end = NewEnd;
3487 dbgs() <<
"\t\tremoved " << i <<
'@' <<
Def <<
": " << LR <<
'\n';
3489 dbgs() <<
"\t\t LHS = " << *LI <<
'\n';
3496 assert(
MI &&
"No instruction to erase");
3499 if (
Reg.isVirtual() && Reg !=
CP.getSrcReg() && Reg !=
CP.getDstReg())
3505 MI->eraseFromParent();
3518 JoinVals RHSVals(RRange,
CP.getSrcReg(),
CP.getSrcIdx(), LaneMask,
3519 NewVNInfo, CP, LIS,
TRI,
true,
true);
3520 JoinVals LHSVals(LRange,
CP.getDstReg(),
CP.getDstIdx(), LaneMask,
3521 NewVNInfo, CP, LIS,
TRI,
true,
true);
3528 if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals)) {
3533 if (!LHSVals.resolveConflicts(RHSVals) ||
3534 !RHSVals.resolveConflicts(LHSVals)) {
3545 LHSVals.pruneValues(RHSVals, EndPoints,
false);
3546 RHSVals.pruneValues(LHSVals, EndPoints,
false);
3548 LHSVals.removeImplicitDefs();
3549 RHSVals.removeImplicitDefs();
3555 LRange.
join(RRange, LHSVals.getAssignments(), RHSVals.getAssignments(),
3559 <<
' ' << LRange <<
"\n");
3560 if (EndPoints.
empty())
3566 dbgs() <<
"\t\trestoring liveness to " << EndPoints.
size() <<
" points: ";
3567 for (
unsigned i = 0, n = EndPoints.
size(); i != n; ++i) {
3568 dbgs() << EndPoints[i];
3572 dbgs() <<
": " << LRange <<
'\n';
3577void RegisterCoalescer::mergeSubRangeInto(
LiveInterval &LI,
3581 unsigned ComposeSubRegIdx) {
3584 Allocator, LaneMask,
3587 SR.assign(ToMerge, Allocator);
3590 LiveRange RangeCopy(ToMerge, Allocator);
3591 joinSubRegRanges(SR, RangeCopy, SR.LaneMask, CP);
3597bool RegisterCoalescer::isHighCostLiveInterval(
LiveInterval &LI) {
3600 auto &Counter = LargeLIVisitCounter[LI.
reg()];
3612 bool TrackSubRegLiveness =
MRI->shouldTrackSubRegLiveness(*
CP.getNewRC());
3614 NewVNInfo, CP, LIS,
TRI,
false, TrackSubRegLiveness);
3616 NewVNInfo, CP, LIS,
TRI,
false, TrackSubRegLiveness);
3618 LLVM_DEBUG(
dbgs() <<
"\t\tRHS = " << RHS <<
"\n\t\tLHS = " << LHS <<
'\n');
3620 if (isHighCostLiveInterval(LHS) || isHighCostLiveInterval(RHS))
3625 if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals))
3629 if (!LHSVals.resolveConflicts(RHSVals) || !RHSVals.resolveConflicts(LHSVals))
3633 if (
RHS.hasSubRanges() ||
LHS.hasSubRanges()) {
3638 unsigned DstIdx =
CP.getDstIdx();
3639 if (!
LHS.hasSubRanges()) {
3641 :
TRI->getSubRegIndexLaneMask(DstIdx);
3644 LHS.createSubRangeFrom(Allocator, Mask, LHS);
3645 }
else if (DstIdx != 0) {
3656 unsigned SrcIdx =
CP.getSrcIdx();
3657 if (!
RHS.hasSubRanges()) {
3659 :
TRI->getSubRegIndexLaneMask(SrcIdx);
3660 mergeSubRangeInto(LHS, RHS, Mask, CP, DstIdx);
3665 mergeSubRangeInto(LHS, R, Mask, CP, DstIdx);
3672 LHSVals.pruneMainSegments(LHS, ShrinkMainRange);
3674 LHSVals.pruneSubRegValues(LHS, ShrinkMask);
3675 RHSVals.pruneSubRegValues(LHS, ShrinkMask);
3683 LHSVals.pruneValues(RHSVals, EndPoints,
true);
3684 RHSVals.pruneValues(LHSVals, EndPoints,
true);
3689 LHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs, &LHS);
3690 RHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs);
3691 while (!ShrinkRegs.
empty())
3695 checkMergingChangesDbgValues(CP, LHS, LHSVals, RHS, RHSVals);
3699 auto RegIt = RegToPHIIdx.
find(
CP.getSrcReg());
3700 if (RegIt != RegToPHIIdx.
end()) {
3702 for (
unsigned InstID : RegIt->second) {
3703 auto PHIIt = PHIValToPos.
find(InstID);
3708 auto LII =
RHS.find(SI);
3709 if (LII ==
RHS.end() || LII->start > SI)
3724 if (
CP.getSrcIdx() != 0 ||
CP.getDstIdx() != 0)
3727 if (PHIIt->second.SubReg && PHIIt->second.SubReg !=
CP.getSrcIdx())
3731 PHIIt->second.Reg =
CP.getDstReg();
3735 if (
CP.getSrcIdx() != 0)
3736 PHIIt->second.SubReg =
CP.getSrcIdx();
3742 auto InstrNums = RegIt->second;
3743 RegToPHIIdx.
erase(RegIt);
3747 RegIt = RegToPHIIdx.
find(
CP.getDstReg());
3748 if (RegIt != RegToPHIIdx.
end())
3749 RegIt->second.insert(RegIt->second.end(), InstrNums.begin(),
3752 RegToPHIIdx.
insert({
CP.getDstReg(), InstrNums});
3756 LHS.join(RHS, LHSVals.getAssignments(), RHSVals.getAssignments(), NewVNInfo);
3761 MRI->clearKillFlags(
LHS.reg());
3762 MRI->clearKillFlags(
RHS.reg());
3764 if (!EndPoints.
empty()) {
3768 dbgs() <<
"\t\trestoring liveness to " << EndPoints.
size() <<
" points: ";
3769 for (
unsigned i = 0, n = EndPoints.
size(); i != n; ++i) {
3770 dbgs() << EndPoints[i];
3774 dbgs() <<
": " <<
LHS <<
'\n';
3783 return CP.isPhys() ? joinReservedPhysReg(CP) : joinVirtRegs(
CP);
3794 for (
auto *
X : ToInsert) {
3795 for (
const auto &
Op :
X->debug_operands()) {
3796 if (
Op.isReg() &&
Op.getReg().isVirtual())
3807 for (
auto &
MBB : MF) {
3810 for (
auto &
MI :
MBB) {
3811 if (
MI.isDebugValue()) {
3813 return MO.isReg() && MO.getReg().isVirtual();
3815 ToInsert.push_back(&
MI);
3816 }
else if (!
MI.isDebugOrPseudoInstr()) {
3818 CloseNewDVRange(CurrentSlot);
3827 for (
auto &Pair : DbgVRegToValues)
3831void RegisterCoalescer::checkMergingChangesDbgValues(
CoalescerPair &CP,
3835 JoinVals &RHSVals) {
3837 checkMergingChangesDbgValuesImpl(Reg, RHS, LHS, LHSVals);
3841 checkMergingChangesDbgValuesImpl(Reg, LHS, RHS, RHSVals);
3845 ScanForSrcReg(
CP.getSrcReg());
3846 ScanForDstReg(
CP.getDstReg());
3849void RegisterCoalescer::checkMergingChangesDbgValuesImpl(
Register Reg,
3852 JoinVals &RegVals) {
3854 auto VRegMapIt = DbgVRegToValues.
find(Reg);
3855 if (VRegMapIt == DbgVRegToValues.
end())
3858 auto &DbgValueSet = VRegMapIt->second;
3859 auto DbgValueSetIt = DbgValueSet.begin();
3860 auto SegmentIt = OtherLR.
begin();
3862 bool LastUndefResult =
false;
3867 auto ShouldUndef = [&RegVals, &
RegLR, &LastUndefResult,
3872 if (LastUndefIdx ==
Idx)
3873 return LastUndefResult;
3880 if (OtherIt ==
RegLR.end())
3889 auto Resolution = RegVals.getResolution(OtherIt->valno->id);
3890 LastUndefResult = Resolution != JoinVals::CR_Keep &&
3891 Resolution != JoinVals::CR_Erase;
3893 return LastUndefResult;
3899 while (DbgValueSetIt != DbgValueSet.end() && SegmentIt != OtherLR.
end()) {
3900 if (DbgValueSetIt->first < SegmentIt->end) {
3903 if (DbgValueSetIt->first >= SegmentIt->start) {
3904 bool HasReg = DbgValueSetIt->second->hasDebugOperandForReg(Reg);
3905 bool ShouldUndefReg = ShouldUndef(DbgValueSetIt->first);
3906 if (HasReg && ShouldUndefReg) {
3908 DbgValueSetIt->second->setDebugValueUndef();
3922struct MBBPriorityInfo {
3928 :
MBB(mbb),
Depth(depth), IsSplit(issplit) {}
3938 const MBBPriorityInfo *RHS) {
3940 if (
LHS->Depth !=
RHS->Depth)
3941 return LHS->Depth >
RHS->Depth ? -1 : 1;
3944 if (
LHS->IsSplit !=
RHS->IsSplit)
3945 return LHS->IsSplit ? -1 : 1;
3949 unsigned cl =
LHS->MBB->pred_size() +
LHS->MBB->succ_size();
3950 unsigned cr =
RHS->MBB->pred_size() +
RHS->MBB->succ_size();
3952 return cl > cr ? -1 : 1;
3955 return LHS->MBB->getNumber() <
RHS->MBB->getNumber() ? -1 : 1;
3960 if (!Copy->isCopy())
3963 if (Copy->getOperand(1).isUndef())
3966 Register SrcReg = Copy->getOperand(1).getReg();
3967 Register DstReg = Copy->getOperand(0).getReg();
3975void RegisterCoalescer::lateLiveIntervalUpdate() {
3981 if (!DeadDefs.
empty())
3982 eliminateDeadDefs();
3984 ToBeUpdated.clear();
3987bool RegisterCoalescer::
3989 bool Progress =
false;
4001 bool Success = joinCopy(
MI, Again, CurrentErasedInstrs);
4007 if (!CurrentErasedInstrs.
empty()) {
4009 if (
MI && CurrentErasedInstrs.
count(
MI))
4013 if (
MI && CurrentErasedInstrs.
count(
MI))
4024 assert(Copy.isCopyLike());
4027 if (&
MI != &Copy &&
MI.isCopyLike())
4032bool RegisterCoalescer::applyTerminalRule(
const MachineInstr &Copy)
const {
4037 unsigned SrcSubReg = 0, DstSubReg = 0;
4038 if (!
isMoveInstr(*
TRI, &Copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
4059 if (&
MI == &Copy || !
MI.isCopyLike() ||
MI.getParent() != OrigBB)
4062 unsigned OtherSrcSubReg = 0, OtherSubReg = 0;
4063 if (!
isMoveInstr(*
TRI, &Copy, OtherSrcReg, OtherReg, OtherSrcSubReg,
4066 if (OtherReg == SrcReg)
4067 OtherReg = OtherSrcReg;
4087 const unsigned PrevSize = WorkList.
size();
4088 if (JoinGlobalCopies) {
4096 if (!
MI.isCopyLike())
4098 bool ApplyTerminalRule = applyTerminalRule(
MI);
4100 if (ApplyTerminalRule)
4105 if (ApplyTerminalRule)
4112 LocalWorkList.
append(LocalTerminals.
begin(), LocalTerminals.
end());
4118 if (MII.isCopyLike()) {
4119 if (applyTerminalRule(MII))
4131 CurrList(WorkList.
begin() + PrevSize, WorkList.
end());
4132 if (copyCoalesceWorkList(CurrList))
4133 WorkList.
erase(std::remove(WorkList.
begin() + PrevSize, WorkList.
end(),
4134 nullptr), WorkList.
end());
4137void RegisterCoalescer::coalesceLocals() {
4138 copyCoalesceWorkList(LocalWorkList);
4139 for (
unsigned j = 0, je = LocalWorkList.
size(); j != je; ++j) {
4140 if (LocalWorkList[j])
4143 LocalWorkList.
clear();
4146void RegisterCoalescer::joinAllIntervals() {
4147 LLVM_DEBUG(
dbgs() <<
"********** JOINING INTERVALS ***********\n");
4148 assert(WorkList.
empty() && LocalWorkList.
empty() &&
"Old data still around.");
4150 std::vector<MBBPriorityInfo> MBBs;
4151 MBBs.reserve(MF->size());
4153 MBBs.push_back(MBBPriorityInfo(&
MBB,
Loops->getLoopDepth(&
MBB),
4159 unsigned CurrDepth = std::numeric_limits<unsigned>::max();
4160 for (MBBPriorityInfo &
MBB : MBBs) {
4162 if (JoinGlobalCopies &&
MBB.Depth < CurrDepth) {
4164 CurrDepth =
MBB.Depth;
4166 copyCoalesceInMBB(
MBB.MBB);
4168 lateLiveIntervalUpdate();
4173 while (copyCoalesceWorkList(WorkList))
4175 lateLiveIntervalUpdate();
4178void RegisterCoalescer::releaseMemory() {
4179 ErasedInstrs.
clear();
4182 InflateRegs.
clear();
4183 LargeLIVisitCounter.
clear();
4187 LLVM_DEBUG(
dbgs() <<
"********** REGISTER COALESCER **********\n"
4188 <<
"********** Function: " << fn.
getName() <<
'\n');
4200 dbgs() <<
"* Skipped as it exposes functions that returns twice.\n");
4209 LIS = &getAnalysis<LiveIntervals>();
4210 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
4211 Loops = &getAnalysis<MachineLoopInfo>();
4220 for (
const auto &DebugPHI : MF->DebugPHIPositions) {
4223 unsigned SubReg = DebugPHI.second.SubReg;
4226 PHIValToPos.
insert(std::make_pair(DebugPHI.first,
P));
4227 RegToPHIIdx[
Reg].push_back(DebugPHI.first);
4236 MF->verify(
this,
"Before register coalescing");
4238 DbgVRegToValues.
clear();
4251 InflateRegs.
erase(std::unique(InflateRegs.
begin(), InflateRegs.
end()),
4256 if (
MRI->reg_nodbg_empty(Reg))
4258 if (
MRI->recomputeRegClass(Reg)) {
4260 <<
TRI->getRegClassName(
MRI->getRegClass(Reg)) <<
'\n');
4267 if (!
MRI->shouldTrackSubRegLiveness(Reg)) {
4275 assert((S.LaneMask & ~MaxMask).none());
4285 for (
auto &p : MF->DebugPHIPositions) {
4286 auto it = PHIValToPos.
find(
p.first);
4288 p.second.Reg = it->second.Reg;
4289 p.second.SubReg = it->second.SubReg;
4292 PHIValToPos.
clear();
4293 RegToPHIIdx.
clear();
4297 MF->verify(
this,
"After register coalescing");
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements the BitVector class.
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseSet and SmallDenseSet classes.
std::optional< std::vector< StOtherPiece > > Other
SmallVector< uint32_t, 0 > Writes
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
A common definition of LaneBitmask for use in TableGen and CodeGen.
unsigned const TargetRegisterInfo * TRI
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
static cl::opt< cl::boolOrDefault > EnableGlobalCopies("join-globalcopies", cl::desc("Coalesce copies that span blocks (default=subtarget)"), cl::init(cl::BOU_UNSET), cl::Hidden)
Temporary flag to test global copy optimization.
static bool isLocalCopy(MachineInstr *Copy, const LiveIntervals *LIS)
static bool isSplitEdge(const MachineBasicBlock *MBB)
Return true if this block should be vacated by the coalescer to eliminate branches.
static int compareMBBPriority(const MBBPriorityInfo *LHS, const MBBPriorityInfo *RHS)
C-style comparator that sorts first based on the loop depth of the basic block (the unsigned),...
register Register Coalescer
static cl::opt< unsigned > LargeIntervalSizeThreshold("large-interval-size-threshold", cl::Hidden, cl::desc("If the valnos size of an interval is larger than the threshold, " "it is regarded as a large interval. "), cl::init(100))
static bool isDefInSubRange(LiveInterval &LI, SlotIndex Def)
Check if any of the subranges of LI contain a definition at Def.
static cl::opt< unsigned > LargeIntervalFreqThreshold("large-interval-freq-threshold", cl::Hidden, cl::desc("For a large interval, if it is coalesed with other live " "intervals many times more than the threshold, stop its " "coalescing to control the compile time. "), cl::init(256))
static std::pair< bool, bool > addSegmentsWithValNo(LiveRange &Dst, VNInfo *DstValNo, const LiveRange &Src, const VNInfo *SrcValNo)
Copy segments with value number SrcValNo from liverange Src to live range @Dst and use value number D...
static bool isLiveThrough(const LiveQueryResult Q)
static bool isTerminalReg(Register DstReg, const MachineInstr &Copy, const MachineRegisterInfo *MRI)
Check if DstReg is a terminal node.
static cl::opt< bool > VerifyCoalescing("verify-coalescing", cl::desc("Verify machine instrs before and after register coalescing"), cl::Hidden)
register Register static false bool isMoveInstr(const TargetRegisterInfo &tri, const MachineInstr *MI, Register &Src, Register &Dst, unsigned &SrcSub, unsigned &DstSub)
static cl::opt< bool > EnableJoinSplits("join-splitedges", cl::desc("Coalesce copies on split edges (default=subtarget)"), cl::Hidden)
Temporary flag to test critical edge unsplitting.
static cl::opt< bool > EnableJoining("join-liveintervals", cl::desc("Coalesce copies (default=true)"), cl::init(true), cl::Hidden)
static bool definesFullReg(const MachineInstr &MI, Register Reg)
Returns true if MI defines the full vreg Reg, as opposed to just defining a subregister.
static cl::opt< unsigned > LateRematUpdateThreshold("late-remat-update-threshold", cl::Hidden, cl::desc("During rematerialization for a copy, if the def instruction has " "many other copy uses to be rematerialized, delay the multiple " "separate live interval update work and do them all at once after " "all those rematerialization are done. It will save a lot of " "repeated work. "), cl::init(100))
static cl::opt< bool > UseTerminalRule("terminal-rule", cl::desc("Apply the terminal rule"), cl::init(false), cl::Hidden)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static DenseMap< Register, std::vector< std::pair< SlotIndex, MachineInstr * > > > buildVRegToDbgValueMap(MachineFunction &MF, const LiveIntervals *Liveness)
static void shrinkToUses(LiveInterval &LI, LiveIntervals &LIS)
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Represent the analysis usage information of a pass.
AnalysisUsage & addPreservedID(const void *ID)
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
void setPreservesCFG()
This function should be called by the pass, iff they do not:
bool test(unsigned Idx) const
Allocate memory in an ever growing pool, as if by bump-pointer.
A helper class for register coalescers.
bool flip()
Swap SrcReg and DstReg.
bool isCoalescable(const MachineInstr *) const
Return true if MI is a copy instruction that will become an identity copy after coalescing.
bool setRegisters(const MachineInstr *)
Set registers to match the copy instruction MI.
This class represents an Operation in the Expression.
The location of a single variable, composed of an expression and 0 or more DbgValueLocEntries.
iterator find(const_arg_type_t< KeyT > Val)
bool erase(const KeyT &Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Implements a dense probed hash-table based set.
bool isAsCheapAsAMove(const MachineInstr &MI) const override
A live range for subregisters.
LiveInterval - This class represents the liveness of a register, or stack slot.
void removeEmptySubRanges()
Removes all subranges without any segments (subranges without segments are not considered valid and s...
bool hasSubRanges() const
Returns true if subregister liveness information is available.
SubRange * createSubRangeFrom(BumpPtrAllocator &Allocator, LaneBitmask LaneMask, const LiveRange &CopyFrom)
Like createSubRange() but the new range is filled with a copy of the liveness information in CopyFrom...
iterator_range< subrange_iterator > subranges()
void refineSubRanges(BumpPtrAllocator &Allocator, LaneBitmask LaneMask, std::function< void(LiveInterval::SubRange &)> Apply, const SlotIndexes &Indexes, const TargetRegisterInfo &TRI, unsigned ComposeSubRegIdx=0)
Refines the subranges to support LaneMask.
void computeSubRangeUndefs(SmallVectorImpl< SlotIndex > &Undefs, LaneBitmask LaneMask, const MachineRegisterInfo &MRI, const SlotIndexes &Indexes) const
For a given lane mask LaneMask, compute indexes at which the lane is marked undefined by subregister ...
SubRange * createSubRange(BumpPtrAllocator &Allocator, LaneBitmask LaneMask)
Creates a new empty subregister live range.
void clearSubRanges()
Removes all subregister liveness information.
bool hasInterval(Register Reg) const
SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const
Return the first index in the given basic block.
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction associated with the given index.
bool hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const
Returns true if VNI is killed by any PHI-def values in LI.
SlotIndex InsertMachineInstrInMaps(MachineInstr &MI)
bool checkRegMaskInterference(const LiveInterval &LI, BitVector &UsableRegs)
Test if LI is live across any register mask instructions, and compute a bit mask of physical register...
SlotIndexes * getSlotIndexes() const
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
void RemoveMachineInstrFromMaps(MachineInstr &MI)
VNInfo::Allocator & getVNInfoAllocator()
SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const
Return the last index in the given basic block.
LiveRange & getRegUnit(unsigned Unit)
Return the live range for register unit Unit.
LiveRange * getCachedRegUnit(unsigned Unit)
Return the live range for register unit Unit if it has already been computed, or nullptr if it hasn't...
LiveInterval & getInterval(Register Reg)
void pruneValue(LiveRange &LR, SlotIndex Kill, SmallVectorImpl< SlotIndex > *EndPoints)
If LR has a live value at Kill, prune its live range by removing any liveness reachable from Kill.
void removeInterval(Register Reg)
Interval removal.
MachineBasicBlock * intervalIsInOneMBB(const LiveInterval &LI) const
If LI is confined to a single basic block, return a pointer to that block.
void removeVRegDefAt(LiveInterval &LI, SlotIndex Pos)
Remove value number and related live segments of LI and its subranges that start at position Pos.
bool shrinkToUses(LiveInterval *li, SmallVectorImpl< MachineInstr * > *dead=nullptr)
After removing some uses of a register, shrink its live range to just the remaining uses.
void extendToIndices(LiveRange &LR, ArrayRef< SlotIndex > Indices, ArrayRef< SlotIndex > Undefs)
Extend the live range LR to reach all points in Indices.
void removePhysRegDefAt(MCRegister Reg, SlotIndex Pos)
Remove value numbers and related live segments starting at position Pos that are part of any liverang...
void splitSeparateComponents(LiveInterval &LI, SmallVectorImpl< LiveInterval * > &SplitLIs)
Split separate components in LiveInterval LI into separate intervals.
MachineBasicBlock * getMBBFromIndex(SlotIndex index) const
bool isLiveInToMBB(const LiveRange &LR, const MachineBasicBlock *mbb) const
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
void print(raw_ostream &O, const Module *=nullptr) const override
Implement the dump method.
Result of a LiveRange query.
VNInfo * valueOutOrDead() const
Returns the value alive at the end of the instruction, if any.
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
VNInfo * valueOut() const
Return the value leaving the instruction, if any.
VNInfo * valueDefined() const
Return the value defined by this instruction, if any.
SlotIndex endPoint() const
Return the end point of the last live range segment to interact with the instruction,...
bool isKill() const
Return true if the live-in value is killed by this instruction.
Callback methods for LiveRangeEdit owners.
virtual void LRE_WillEraseInstruction(MachineInstr *MI)
Called immediately before erasing a dead machine instruction.
void eliminateDeadDefs(SmallVectorImpl< MachineInstr * > &Dead, ArrayRef< Register > RegsBeingSpilled=std::nullopt)
eliminateDeadDefs - Try to delete machine instructions that are now dead (allDefsAreDead returns true...
SlotIndex rematerializeAt(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, const Remat &RM, const TargetRegisterInfo &, bool Late=false, unsigned SubIdx=0, MachineInstr *ReplaceIndexMI=nullptr)
rematerializeAt - Rematerialize RM.ParentVNI into DestReg by inserting an instruction into MBB before...
bool checkRematerializable(VNInfo *VNI, const MachineInstr *DefMI)
checkRematerializable - Manually add VNI to the list of rematerializable values if DefMI may be remat...
bool canRematerializeAt(Remat &RM, VNInfo *OrigVNI, SlotIndex UseIdx, bool cheapAsAMove)
canRematerializeAt - Determine if ParentVNI can be rematerialized at UseIdx.
This class represents the liveness of a register, stack slot, etc.
VNInfo * getValNumInfo(unsigned ValNo)
getValNumInfo - Returns pointer to the specified val#.
iterator addSegment(Segment S)
Add the specified Segment to this range, merging segments as appropriate.
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
void join(LiveRange &Other, const int *ValNoAssignments, const int *RHSValNoAssignments, SmallVectorImpl< VNInfo * > &NewVNInfo)
join - Join two live ranges (this, and other) together.
bool liveAt(SlotIndex index) const
VNInfo * createDeadDef(SlotIndex Def, VNInfo::Allocator &VNIAlloc)
createDeadDef - Make sure the range has a value defined at Def.
void removeValNo(VNInfo *ValNo)
removeValNo - Remove all the segments defined by the specified value#.
void verify() const
Walk the range and assert if any invariants fail to hold.
bool overlaps(const LiveRange &other) const
overlaps - Return true if the intersection of the two live ranges is not empty.
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarily including Idx,...
VNInfo * MergeValueNumberInto(VNInfo *V1, VNInfo *V2)
MergeValueNumberInto - This method is called when two value numbers are found to be equivalent.
unsigned getNumValNums() const
bool containsOneValue() const
iterator FindSegmentContaining(SlotIndex Idx)
Return an iterator to the segment that contains the specified index, or end() if there is none.
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
iterator find(SlotIndex Pos)
find - Return an iterator pointing to the first segment that ends after Pos, or end().
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
MCRegUnitRootIterator enumerates the root registers of a register unit.
bool isValid() const
Check if the iterator is at the end of the list.
Wrapper class representing physical registers. Should be passed by value.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
unsigned pred_size() const
bool hasEHPadSuccessor() const
bool isEHPad() const
Returns true if the block is a landing pad.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
unsigned succ_size() const
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
iterator_range< pred_iterator > predecessors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
virtual MachineFunctionProperties getClearedProperties() const
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
Properties which a MachineFunction may have at a given point in time.
MachineFunctionProperties & set(Property P)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
bool exposesReturnsTwice() const
exposesReturnsTwice - Returns true if the function calls setjmp or any other similar functions with a...
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
int findRegisterUseOperandIdx(Register Reg, bool isKill=false, const TargetRegisterInfo *TRI=nullptr) const
Returns the operand index that is a use of the specific register or -1 if it is not found.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
void setRegisterDefReadUndef(Register Reg, bool IsUndef=true)
Mark all subregister defs of register Reg with the undef flag.
bool isSafeToMove(AAResults *AA, bool &SawStore) const
Return true if it is safe to move this instruction.
bool isImplicitDef() const
const MachineBasicBlock * getParent() const
bool isCopyLike() const
Return true if the instruction behaves like a copy.
int findRegisterDefOperandIdx(Register Reg, bool isDead=false, bool Overlap=false, const TargetRegisterInfo *TRI=nullptr) const
Returns the operand index that is a def of the specified register or -1 if it is not found.
std::pair< bool, bool > readsWritesVirtualRegister(Register Reg, SmallVectorImpl< unsigned > *Ops=nullptr) const
Return a pair of bools (reads, writes) indicating if this instruction reads or writes Reg.
bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx=nullptr) const
Return true if the use operand of the specified index is tied to a def operand.
bool isDebugInstr() const
unsigned getNumOperands() const
Returns the total number of operands.
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
bool isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx=nullptr) const
Given the index of a register def operand, check if the register def is tied to a source operand,...
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool isCommutable(QueryType Type=IgnoreBundle) const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z,...
void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
iterator_range< mop_iterator > operands()
void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
const MachineOperand & getOperand(unsigned i) const
iterator_range< filtered_mop_iterator > all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
void setDebugLoc(DebugLoc DL)
Replace current source information with new such.
bool allDefsAreDead() const
Return true if all the defs of this instruction are dead.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
void substVirtReg(Register Reg, unsigned SubIdx, const TargetRegisterInfo &)
substVirtReg - Substitute the current register with the virtual subregister Reg:SubReg.
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setIsDead(bool Val=true)
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
void setIsKill(bool Val=true)
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
void substPhysReg(MCRegister Reg, const TargetRegisterInfo &)
substPhysReg - Substitute the current register with the physical register Reg, taking any existing Su...
void setIsUndef(bool Val=true)
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
defusechain_iterator - This class provides iterator support for machine operands in the function that...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
virtual void releaseMemory()
releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...
bool isProperSubClass(const TargetRegisterClass *RC) const
isProperSubClass - Returns true if RC has a legal super-class with more allocatable registers.
void runOnMachineFunction(const MachineFunction &MF)
runOnFunction - Prepare to answer questions about MF.
Wrapper class representing virtual and physical registers.
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
SlotIndex - An opaque wrapper around machine indexes.
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
bool isEarlyClobber() const
isEarlyClobber - Returns true if this is an early-clobber slot.
bool isValid() const
Returns true if this is a valid index.
SlotIndex getBaseIndex() const
Returns the base index for associated with this index.
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
bool isDead() const
isDead - Returns true if this is a dead def kill slot.
MachineBasicBlock * getMBBFromIndex(SlotIndex index) const
Returns the basic block which the given index falls in.
SlotIndex getMBBEndIdx(unsigned Num) const
Returns the last index in the given basic block number.
SlotIndex getNextNonNullIndex(SlotIndex Index)
Returns the next non-null index, if one exists.
SlotIndex getInstructionIndex(const MachineInstr &MI, bool IgnoreBundle=false) const
Returns the base index for the given instruction.
SlotIndex getMBBStartIdx(unsigned Num) const
Returns the first index in the given basic block number.
SlotIndex getIndexBefore(const MachineInstr &MI) const
getIndexBefore - Returns the index of the last indexed instruction before MI, or the start index of i...
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction for the given index, or null if the given index has no instruction associated...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
TargetInstrInfo - Interface to description of machine instruction set.
static const unsigned CommuteAnyOperandIndex
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual bool enableJoinGlobalCopies() const
True if the subtarget should enable joining global copies.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
VNInfo - Value Number Information.
void markUnused()
Mark this value as unused.
bool isUnused() const
Returns true if this value is unused.
unsigned id
The ID number of this value.
SlotIndex def
The index of the defining instruction.
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
std::pair< iterator, bool > insert(const ValueT &V)
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
NodeAddr< DefNode * > Def
const_iterator end(StringRef path)
Get end iterator over path.
This is an optimization pass for GlobalISel generic memory operations.
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
char & RegisterCoalescerID
RegisterCoalescer - This pass merges live ranges to eliminate copies.
void initializeRegisterCoalescerPass(PassRegistry &)
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Printable PrintLaneMask(LaneBitmask LaneMask)
Create Printable object to print LaneBitmasks on a raw_ostream.
auto upper_bound(R &&Range, T &&Value)
Provide wrappers to std::upper_bound which take ranges instead of having to pass begin/end explicitly...
char & MachineDominatorsID
MachineDominators - This pass is a machine dominators analysis pass.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
void sort(IteratorTy Start, IteratorTy End)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void eraseInstrs(ArrayRef< MachineInstr * > DeadInstrs, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
void array_pod_sort(IteratorTy Start, IteratorTy End)
array_pod_sort - This sorts an array with the specified start and end extent.
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static constexpr LaneBitmask getLane(unsigned Lane)
static constexpr LaneBitmask getAll()
constexpr bool any() const
static constexpr LaneBitmask getNone()
Remat - Information needed to rematerialize at a specific location.
This represents a simple continuous liveness interval for a value.