// DEBUG_TYPE gates LLVM_DEBUG output and names the statistic group for the
// STATISTIC counters declared below.
// NOTE(review): the numeric prefixes fused onto these lines (63, 65, ...)
// appear to be original-file line numbers from an extraction artifact and
// are preserved verbatim here; the gaps in that numbering (e.g. 68) show
// some source lines were elided from this fragment.
63#define DEBUG_TYPE "regalloc"
// Counters reported with -stats, incremented as the coalescer runs.
65STATISTIC(numJoins,
"Number of interval joins performed");
66STATISTIC(numCrossRCs,
"Number of cross class joins performed");
67STATISTIC(numCommutes,
"Number of instruction commuting performed");
69STATISTIC(NumReMats,
"Number of instructions re-materialized");
70STATISTIC(NumInflated,
"Number of register classes inflated");
71STATISTIC(NumLaneConflicts,
"Number of dead lane conflicts tested");
72STATISTIC(NumLaneResolves,
"Number of dead lane conflicts resolved");
73STATISTIC(NumShrinkToUses,
"Number of shrinkToUses called");
76 cl::desc(
"Coalesce copies (default=true)"),
91 cl::desc(
"Coalesce copies that span blocks (default=subtarget)"),
96 cl::desc(
"Verify machine instrs before and after register coalescing"),
101 cl::desc(
"During rematerialization for a copy, if the def instruction has "
102 "many other copy uses to be rematerialized, delay the multiple "
103 "separate live interval update work and do them all at once after "
104 "all those rematerialization are done. It will save a lot of "
110 cl::desc(
"If the valnos size of an interval is larger than the threshold, "
111 "it is regarded as a large interval. "),
116 cl::desc(
"For a large interval, if it is coalesced with other live "
117 "intervals many times more than the threshold, stop its "
118 "coalescing to control the compile time. "),
// A (SlotIndex, DBG_VALUE instruction) pair — presumably the program point
// at which the tracked debug value applies. TODO(review): confirm against
// the uses of this alias; they are not visible in this fragment.
153 using DbgValueLoc = std::pair<SlotIndex, MachineInstr *>;
// Set while coalescing (e.g. when a use becomes fully undef) to request
// that the main live range be shrunk once the current join finishes; it is
// reset to false at the start of each joinCopy attempt and tested
// afterwards.
162 bool ShrinkMainRange =
false;
// When true, copies whose source and destination live in different basic
// blocks are queued separately and coalesced in a dedicated pass over the
// blocks (checked in copyCoalesceInMBB and joinAllIntervals).
166 bool JoinGlobalCopies =
false;
// NOTE(review): no use of this flag is visible in this fragment; by its
// name it presumably enables coalescing of copies placed on split edges —
// confirm against the full file before relying on this description.
170 bool JoinSplitEdges =
false;
// Coalesce the copies accumulated in LocalWorkList, then clear the list.
202 void coalesceLocals();
// Visit all basic blocks (prioritized by loop depth) and join the copy
// intervals found in each.
205 void joinAllIntervals();
// Deferred cleanup after a batch of joins: eliminate any accumulated dead
// defs and clear the ToBeUpdated set.
220 void lateLiveIntervalUpdate();
290 std::pair<bool, bool> removeCopyByCommutingDef(
const CoalescerPair &CP,
361 MI->eraseFromParent();
388 MachineFunctionProperties::Property::IsSSA);
// Pass identification: the address of ID uniquely identifies this pass to
// the legacy pass manager (the registration arguments appear just below).
402char RegisterCoalescer::ID = 0;
407 "Register Coalescer",
false,
false)
420 Dst =
MI->getOperand(0).getReg();
421 DstSub =
MI->getOperand(0).getSubReg();
422 Src =
MI->getOperand(1).getReg();
423 SrcSub =
MI->getOperand(1).getSubReg();
424 }
else if (
MI->isSubregToReg()) {
425 Dst =
MI->getOperand(0).getReg();
426 DstSub = tri.composeSubRegIndices(
MI->getOperand(0).getSubReg(),
427 MI->getOperand(3).getImm());
428 Src =
MI->getOperand(2).getReg();
429 SrcSub =
MI->getOperand(2).getSubReg();
444 for (
const auto &
MI : *
MBB) {
445 if (!
MI.isCopyLike() && !
MI.isUnconditionalBranch())
455 Flipped = CrossClass =
false;
458 unsigned SrcSub = 0, DstSub = 0;
461 Partial = SrcSub || DstSub;
464 if (Src.isPhysical()) {
465 if (Dst.isPhysical())
474 if (Dst.isPhysical()) {
477 Dst =
TRI.getSubReg(Dst, DstSub);
485 Dst =
TRI.getMatchingSuperReg(Dst, SrcSub,
MRI.getRegClass(Src));
488 }
else if (!
MRI.getRegClass(Src)->contains(Dst)) {
497 if (SrcSub && DstSub) {
499 if (Src == Dst && SrcSub != DstSub)
502 NewRC =
TRI.getCommonSuperRegClass(SrcRC, SrcSub, DstRC, DstSub, SrcIdx,
509 NewRC =
TRI.getMatchingSuperRegClass(DstRC, SrcRC, DstSub);
513 NewRC =
TRI.getMatchingSuperRegClass(SrcRC, DstRC, SrcSub);
516 NewRC =
TRI.getCommonSubClass(DstRC, SrcRC);
525 if (DstIdx && !SrcIdx) {
531 CrossClass = NewRC != DstRC || NewRC != SrcRC;
534 assert(Src.isVirtual() &&
"Src must be virtual");
535 assert(!(Dst.isPhysical() && DstSub) &&
"Cannot have a physical SubIdx");
554 unsigned SrcSub = 0, DstSub = 0;
562 }
else if (Src != SrcReg) {
568 if (!Dst.isPhysical())
570 assert(!DstIdx && !SrcIdx &&
"Inconsistent CoalescerPair state.");
573 Dst =
TRI.getSubReg(Dst, DstSub);
576 return DstReg == Dst;
578 return Register(
TRI.getSubReg(DstReg, SrcSub)) == Dst;
584 return TRI.composeSubRegIndices(SrcIdx, SrcSub) ==
585 TRI.composeSubRegIndices(DstIdx, DstSub);
589void RegisterCoalescer::getAnalysisUsage(
AnalysisUsage &AU)
const {
601void RegisterCoalescer::eliminateDeadDefs(
LiveRangeEdit *Edit) {
611void RegisterCoalescer::LRE_WillEraseInstruction(
MachineInstr *
MI) {
616bool RegisterCoalescer::adjustCopiesBackFrom(
const CoalescerPair &CP,
618 assert(!
CP.isPartial() &&
"This doesn't work for partial copies.");
619 assert(!
CP.isPhys() &&
"This doesn't work for physreg copies.");
644 if (BS == IntB.
end())
646 VNInfo *BValNo = BS->valno;
651 if (BValNo->
def != CopyIdx)
658 if (AS == IntA.
end())
660 VNInfo *AValNo = AS->valno;
666 if (!
CP.isCoalescable(ACopyMI) || !ACopyMI->
isFullCopy())
672 if (ValS == IntB.
end())
690 SlotIndex FillerStart = ValS->end, FillerEnd = BS->start;
694 BValNo->
def = FillerStart;
702 if (BValNo != ValS->valno)
711 S.removeSegment(*SS,
true);
715 if (!S.getVNInfoAt(FillerStart)) {
718 S.extendInBlock(BBStart, FillerStart);
720 VNInfo *SubBValNo = S.getVNInfoAt(CopyIdx);
723 if (SubBValNo != SubValSNo)
724 S.MergeValueNumberInto(SubBValNo, SubValSNo);
741 bool RecomputeLiveRange = AS->end == CopyIdx;
742 if (!RecomputeLiveRange) {
745 if (SS != S.end() &&
SS->end == CopyIdx) {
746 RecomputeLiveRange =
true;
751 if (RecomputeLiveRange)
758bool RegisterCoalescer::hasOtherReachingDefs(
LiveInterval &IntA,
767 if (ASeg.
valno != AValNo)
770 if (BI != IntB.
begin())
772 for (; BI != IntB.
end() && ASeg.
end >= BI->start; ++BI) {
773 if (BI->valno == BValNo)
775 if (BI->start <= ASeg.
start && BI->end > ASeg.
start)
777 if (BI->start > ASeg.
start && BI->start < ASeg.
end)
790 bool Changed =
false;
791 bool MergedWithDead =
false;
793 if (S.
valno != SrcValNo)
804 MergedWithDead =
true;
807 return std::make_pair(Changed, MergedWithDead);
811RegisterCoalescer::removeCopyByCommutingDef(
const CoalescerPair &CP,
844 assert(BValNo !=
nullptr && BValNo->
def == CopyIdx);
850 return {
false,
false};
853 return {
false,
false};
855 return {
false,
false};
862 return {
false,
false};
874 if (!
TII->findCommutedOpIndices(*
DefMI, UseOpIdx, NewDstIdx))
875 return {
false,
false};
880 return {
false,
false};
884 if (hasOtherReachingDefs(IntA, IntB, AValNo, BValNo))
885 return {
false,
false};
894 if (US == IntA.
end() || US->valno != AValNo)
898 return {
false,
false};
908 TII->commuteInstruction(*
DefMI,
false, UseOpIdx, NewDstIdx);
910 return {
false,
false};
912 !
MRI->constrainRegClass(IntB.
reg(),
MRI->getRegClass(IntA.
reg())))
913 return {
false,
false};
914 if (NewMI !=
DefMI) {
939 UseMO.setReg(NewReg);
944 assert(US != IntA.
end() &&
"Use must be live");
945 if (US->valno != AValNo)
948 UseMO.setIsKill(
false);
950 UseMO.substPhysReg(NewReg, *
TRI);
952 UseMO.setReg(NewReg);
971 VNInfo *SubDVNI = S.getVNInfoAt(DefIdx);
974 VNInfo *SubBValNo = S.getVNInfoAt(CopyIdx);
976 S.MergeValueNumberInto(SubDVNI, SubBValNo);
984 bool ShrinkB =
false;
998 VNInfo *ASubValNo = SA.getVNInfoAt(AIdx);
1007 MaskA |= SA.LaneMask;
1010 Allocator, SA.LaneMask,
1011 [&Allocator, &SA, CopyIdx, ASubValNo,
1013 VNInfo *BSubValNo = SR.empty() ? SR.getNextValue(CopyIdx, Allocator)
1014 : SR.getVNInfoAt(CopyIdx);
1015 assert(BSubValNo != nullptr);
1016 auto P = addSegmentsWithValNo(SR, BSubValNo, SA, ASubValNo);
1017 ShrinkB |= P.second;
1019 BSubValNo->def = ASubValNo->def;
1027 if ((SB.LaneMask & MaskA).any())
1031 SB.removeSegment(*S,
true);
1035 BValNo->
def = AValNo->
def;
1037 ShrinkB |=
P.second;
1044 return {
true, ShrinkB};
1094bool RegisterCoalescer::removePartialRedundancy(
const CoalescerPair &CP,
1127 bool FoundReverseCopy =
false;
1146 bool ValB_Changed =
false;
1147 for (
auto *VNI : IntB.
valnos) {
1148 if (VNI->isUnused())
1151 ValB_Changed =
true;
1159 FoundReverseCopy =
true;
1163 if (!FoundReverseCopy)
1173 if (CopyLeftBB && CopyLeftBB->
succ_size() > 1)
1184 if (InsPos != CopyLeftBB->
end()) {
1190 LLVM_DEBUG(
dbgs() <<
"\tremovePartialRedundancy: Move the copy to "
1195 TII->get(TargetOpcode::COPY), IntB.
reg())
1206 ErasedInstrs.
erase(NewCopyMI);
1208 LLVM_DEBUG(
dbgs() <<
"\tremovePartialRedundancy: Remove the copy from "
1219 deleteInstr(&CopyMI);
1235 if (!IntB.
liveAt(UseIdx))
1236 MO.setIsUndef(
true);
1246 VNInfo *BValNo = SR.Query(CopyIdx).valueOutOrDead();
1247 assert(BValNo &&
"All sublanes should be live");
1256 for (
unsigned I = 0;
I != EndPoints.
size();) {
1258 EndPoints[
I] = EndPoints.
back();
1280 assert(!Reg.isPhysical() &&
"This code cannot handle physreg aliasing");
1283 if (
Op.getReg() != Reg)
1287 if (
Op.getSubReg() == 0 ||
Op.isUndef())
1293bool RegisterCoalescer::reMaterializeTrivialDef(
const CoalescerPair &CP,
1297 Register SrcReg =
CP.isFlipped() ?
CP.getDstReg() :
CP.getSrcReg();
1298 unsigned SrcIdx =
CP.isFlipped() ?
CP.getDstIdx() :
CP.getSrcIdx();
1299 Register DstReg =
CP.isFlipped() ?
CP.getSrcReg() :
CP.getDstReg();
1300 unsigned DstIdx =
CP.isFlipped() ?
CP.getSrcIdx() :
CP.getDstIdx();
1322 LiveRangeEdit Edit(&SrcInt, NewRegs, *MF, *LIS,
nullptr,
this);
1328 bool SawStore =
false;
1340 if (SrcIdx && DstIdx)
1375 unsigned NewDstIdx =
TRI->composeSubRegIndices(
CP.getSrcIdx(), DefSubIdx);
1377 NewDstReg =
TRI->getSubReg(DstReg, NewDstIdx);
1387 "Only expect to deal with virtual or physical registers");
1413 assert(SrcIdx == 0 &&
CP.isFlipped() &&
1414 "Shouldn't have SrcIdx+DstIdx at this point");
1417 TRI->getCommonSubClass(DefRC, DstRC);
1418 if (CommonRC !=
nullptr) {
1426 if (MO.isReg() && MO.getReg() == DstReg && MO.getSubReg() == DstIdx) {
1448 "No explicit operands after implicit operands.");
1451 "unexpected implicit virtual register def");
1457 ErasedInstrs.
insert(CopyMI);
1471 bool NewMIDefinesFullReg =
false;
1481 if (MO.
getReg() == DstReg)
1482 NewMIDefinesFullReg =
true;
1487 ((
TRI->getSubReg(MO.
getReg(), DefSubIdx) ==
1500 assert(!
MRI->shouldTrackSubRegLiveness(DstReg) &&
1501 "subrange update for implicit-def of super register may not be "
1502 "properly handled");
1510 if (DefRC !=
nullptr) {
1512 NewRC =
TRI->getMatchingSuperRegClass(NewRC, DefRC, NewIdx);
1514 NewRC =
TRI->getCommonSubClass(NewRC, DefRC);
1515 assert(NewRC &&
"subreg chosen for remat incompatible with instruction");
1521 SR.LaneMask =
TRI->composeSubRegIndexLaneMask(DstIdx, SR.LaneMask);
1523 MRI->setRegClass(DstReg, NewRC);
1526 updateRegDefsUses(DstReg, DstReg, DstIdx);
1545 MRI->shouldTrackSubRegLiveness(DstReg)) {
1575 if (!SR.liveAt(DefIndex))
1576 SR.createDeadDef(DefIndex,
Alloc);
1577 MaxMask &= ~SR.LaneMask;
1579 if (MaxMask.
any()) {
1597 bool UpdatedSubRanges =
false;
1602 if ((SR.
LaneMask & DstMask).none()) {
1604 <<
"Removing undefined SubRange "
1617 UpdatedSubRanges =
true;
1628 if (UpdatedSubRanges)
1635 "Only expect virtual or physical registers in remat");
1638 if (!NewMIDefinesFullReg) {
1640 CopyDstReg,
true ,
true ,
false ));
1683 if (
MRI->use_nodbg_empty(SrcReg)) {
1689 UseMO.substPhysReg(DstReg, *
TRI);
1691 UseMO.setReg(DstReg);
1700 if (ToBeUpdated.
count(SrcReg))
1703 unsigned NumCopyUses = 0;
1705 if (UseMO.getParent()->isCopyLike())
1711 if (!DeadDefs.
empty())
1712 eliminateDeadDefs(&Edit);
1714 ToBeUpdated.
insert(SrcReg);
1732 unsigned SrcSubIdx = 0, DstSubIdx = 0;
1733 if (!
isMoveInstr(*
TRI, CopyMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
1742 if ((SR.
LaneMask & SrcMask).none())
1755 assert(Seg !=
nullptr &&
"No segment for defining instruction");
1760 if (((V &&
V->isPHIDef()) || (!V && !DstLI.
liveAt(
Idx)))) {
1768 CopyMI->
getOpcode() == TargetOpcode::SUBREG_TO_REG);
1773 CopyMI->
setDesc(
TII->get(TargetOpcode::IMPLICIT_DEF));
1790 if ((SR.
LaneMask & DstMask).none())
1812 if ((SR.
LaneMask & UseMask).none())
1820 isLive = DstLI.
liveAt(UseIdx);
1833 if (MO.
getReg() == DstReg)
1845 bool IsUndef =
true;
1847 if ((S.LaneMask & Mask).none())
1849 if (S.liveAt(UseIdx)) {
1862 ShrinkMainRange =
true;
1871 if (DstInt && DstInt->
hasSubRanges() && DstReg != SrcReg) {
1880 if (
MI.isDebugInstr())
1883 addUndefFlag(*DstInt, UseIdx, MO,
SubReg);
1889 E =
MRI->reg_instr_end();
1898 if (SrcReg == DstReg && !Visited.
insert(
UseMI).second)
1911 for (
unsigned Op : Ops) {
1917 if (SubIdx && MO.
isDef())
1923 unsigned SubUseIdx =
TRI->composeSubRegIndices(SubIdx, MO.
getSubReg());
1924 if (SubUseIdx != 0 &&
MRI->shouldTrackSubRegLiveness(DstReg)) {
1941 addUndefFlag(*DstInt, UseIdx, MO, SubUseIdx);
1952 dbgs() <<
"\t\tupdated: ";
1960bool RegisterCoalescer::canJoinPhys(
const CoalescerPair &CP) {
1964 if (!
MRI->isReserved(
CP.getDstReg())) {
1965 LLVM_DEBUG(
dbgs() <<
"\tCan only merge into reserved registers.\n");
1974 dbgs() <<
"\tCannot join complex intervals into reserved register.\n");
1978bool RegisterCoalescer::copyValueUndefInPredecessors(
1992void RegisterCoalescer::setUndefOnPrunedSubRegUses(
LiveInterval &LI,
1999 if (SubRegIdx == 0 || MO.
isUndef())
2005 if (!S.
liveAt(Pos) && (PrunedLanes & SubRegMask).any()) {
2021bool RegisterCoalescer::joinCopy(
2028 if (!
CP.setRegisters(CopyMI)) {
2033 if (
CP.getNewRC()) {
2034 auto SrcRC =
MRI->getRegClass(
CP.getSrcReg());
2035 auto DstRC =
MRI->getRegClass(
CP.getDstReg());
2036 unsigned SrcIdx =
CP.getSrcIdx();
2037 unsigned DstIdx =
CP.getDstIdx();
2038 if (
CP.isFlipped()) {
2042 if (!
TRI->shouldCoalesce(CopyMI, SrcRC, SrcIdx, DstRC, DstIdx,
2043 CP.getNewRC(), *LIS)) {
2055 eliminateDeadDefs();
2062 if (
MachineInstr *UndefMI = eliminateUndefCopy(CopyMI)) {
2063 if (UndefMI->isImplicitDef())
2065 deleteInstr(CopyMI);
2073 if (
CP.getSrcReg() ==
CP.getDstReg()) {
2075 LLVM_DEBUG(
dbgs() <<
"\tCopy already coalesced: " << LI <<
'\n');
2080 assert(ReadVNI &&
"No value before copy and no <undef> flag.");
2081 assert(ReadVNI != DefVNI &&
"Cannot read and define the same value.");
2096 if (copyValueUndefInPredecessors(S,
MBB, SLRQ)) {
2097 LLVM_DEBUG(
dbgs() <<
"Incoming sublane value is undef at copy\n");
2098 PrunedLanes |= S.LaneMask;
2105 if (PrunedLanes.
any()) {
2106 LLVM_DEBUG(
dbgs() <<
"Pruning undef incoming lanes: " << PrunedLanes
2108 setUndefOnPrunedSubRegUses(LI,
CP.getSrcReg(), PrunedLanes);
2113 deleteInstr(CopyMI);
2122 if (!canJoinPhys(CP)) {
2125 bool IsDefCopy =
false;
2126 if (reMaterializeTrivialDef(CP, CopyMI, IsDefCopy))
2139 dbgs() <<
"\tConsidering merging to "
2140 <<
TRI->getRegClassName(
CP.getNewRC()) <<
" with ";
2141 if (
CP.getDstIdx() &&
CP.getSrcIdx())
2143 <<
TRI->getSubRegIndexName(
CP.getDstIdx()) <<
" and "
2145 <<
TRI->getSubRegIndexName(
CP.getSrcIdx()) <<
'\n';
2153 ShrinkMainRange =
false;
2159 if (!joinIntervals(CP)) {
2164 bool IsDefCopy =
false;
2165 if (reMaterializeTrivialDef(CP, CopyMI, IsDefCopy))
2170 if (!
CP.isPartial() && !
CP.isPhys()) {
2171 bool Changed = adjustCopiesBackFrom(CP, CopyMI);
2172 bool Shrink =
false;
2174 std::tie(Changed, Shrink) = removeCopyByCommutingDef(CP, CopyMI);
2176 deleteInstr(CopyMI);
2178 Register DstReg =
CP.isFlipped() ?
CP.getSrcReg() :
CP.getDstReg();
2190 if (!
CP.isPartial() && !
CP.isPhys())
2191 if (removePartialRedundancy(CP, *CopyMI))
2202 if (
CP.isCrossClass()) {
2204 MRI->setRegClass(
CP.getDstReg(),
CP.getNewRC());
2215 if (ErasedInstrs.
erase(CopyMI))
2217 CurrentErasedInstrs.
insert(CopyMI);
2222 updateRegDefsUses(
CP.getDstReg(),
CP.getDstReg(),
CP.getDstIdx());
2223 updateRegDefsUses(
CP.getSrcReg(),
CP.getDstReg(),
CP.getSrcIdx());
2226 if (ShrinkMask.
any()) {
2229 if ((S.LaneMask & ShrinkMask).none())
2234 ShrinkMainRange =
true;
2242 if (ToBeUpdated.
count(
CP.getSrcReg()))
2243 ShrinkMainRange =
true;
2245 if (ShrinkMainRange) {
2255 TRI->updateRegAllocHint(
CP.getSrcReg(),
CP.getDstReg(), *MF);
2260 dbgs() <<
"\tResult = ";
2272bool RegisterCoalescer::joinReservedPhysReg(
CoalescerPair &CP) {
2275 assert(
CP.isPhys() &&
"Must be a physreg copy");
2276 assert(
MRI->isReserved(DstReg) &&
"Not a reserved register");
2280 assert(
RHS.containsOneValue() &&
"Invalid join with reserved register");
2289 if (!
MRI->isConstantPhysReg(DstReg)) {
2293 if (!
MRI->isReserved(*RI))
2306 !RegMaskUsable.
test(DstReg)) {
2319 if (
CP.isFlipped()) {
2327 CopyMI =
MRI->getVRegDef(SrcReg);
2328 deleteInstr(CopyMI);
2337 if (!
MRI->hasOneNonDBGUse(SrcReg)) {
2348 CopyMI = &*
MRI->use_instr_nodbg_begin(SrcReg);
2352 if (!
MRI->isConstantPhysReg(DstReg)) {
2360 if (
MI->readsRegister(DstReg,
TRI)) {
2370 <<
printReg(DstReg,
TRI) <<
" at " << CopyRegIdx <<
"\n");
2373 deleteInstr(CopyMI);
2383 MRI->clearKillFlags(
CP.getSrcReg());
2468 const unsigned SubIdx;
2476 const bool SubRangeJoin;
2479 const bool TrackSubRegLiveness;
2495 enum ConflictResolution {
2527 ConflictResolution Resolution = CR_Keep;
2537 VNInfo *RedefVNI =
nullptr;
2540 VNInfo *OtherVNI =
nullptr;
2553 bool ErasableImplicitDef =
false;
2557 bool Pruned =
false;
2560 bool PrunedComputed =
false;
2567 bool Identical =
false;
2571 bool isAnalyzed()
const {
return WriteLanes.
any(); }
2578 ErasableImplicitDef =
false;
2592 std::pair<const VNInfo *, Register> followCopyChain(
const VNInfo *VNI)
const;
2595 const JoinVals &
Other)
const;
2604 ConflictResolution analyzeValue(
unsigned ValNo, JoinVals &
Other);
2609 void computeAssignment(
unsigned ValNo, JoinVals &
Other);
2640 bool isPrunedValue(
unsigned ValNo, JoinVals &
Other);
2646 bool TrackSubRegLiveness)
2647 : LR(LR),
Reg(
Reg), SubIdx(SubIdx), LaneMask(LaneMask),
2648 SubRangeJoin(SubRangeJoin), TrackSubRegLiveness(TrackSubRegLiveness),
2649 NewVNInfo(newVNInfo),
CP(cp), LIS(lis), Indexes(LIS->getSlotIndexes()),
2650 TRI(
TRI), Assignments(LR.getNumValNums(), -1),
2651 Vals(LR.getNumValNums()) {}
2655 bool mapValues(JoinVals &
Other);
2659 bool resolveConflicts(JoinVals &
Other);
2679 void pruneMainSegments(
LiveInterval &LI,
bool &ShrinkMainRange);
2690 void removeImplicitDefs();
2693 const int *getAssignments()
const {
return Assignments.
data(); }
2696 ConflictResolution getResolution(
unsigned Num)
const {
2697 return Vals[Num].Resolution;
2704 bool &Redef)
const {
2709 L |=
TRI->getSubRegIndexLaneMask(
2717std::pair<const VNInfo *, Register>
2718JoinVals::followCopyChain(
const VNInfo *VNI)
const {
2724 assert(
MI &&
"No defining instruction");
2725 if (!
MI->isFullCopy())
2726 return std::make_pair(VNI, TrackReg);
2727 Register SrcReg =
MI->getOperand(1).getReg();
2729 return std::make_pair(VNI, TrackReg);
2743 LaneBitmask SMask =
TRI->composeSubRegIndexLaneMask(SubIdx, S.LaneMask);
2744 if ((SMask & LaneMask).
none())
2752 return std::make_pair(VNI, TrackReg);
2755 if (ValueIn ==
nullptr) {
2762 return std::make_pair(
nullptr, SrcReg);
2767 return std::make_pair(VNI, TrackReg);
2770bool JoinVals::valuesIdentical(
VNInfo *Value0,
VNInfo *Value1,
2771 const JoinVals &
Other)
const {
2774 std::tie(Orig0, Reg0) = followCopyChain(Value0);
2775 if (Orig0 == Value1 && Reg0 ==
Other.Reg)
2780 std::tie(Orig1, Reg1) =
Other.followCopyChain(Value1);
2784 if (Orig0 ==
nullptr || Orig1 ==
nullptr)
2785 return Orig0 == Orig1 && Reg0 == Reg1;
2791 return Orig0->
def == Orig1->
def && Reg0 == Reg1;
2794JoinVals::ConflictResolution JoinVals::analyzeValue(
unsigned ValNo,
2796 Val &
V = Vals[ValNo];
2797 assert(!
V.isAnalyzed() &&
"Value has already been analyzed!");
2809 :
TRI->getSubRegIndexLaneMask(SubIdx);
2810 V.ValidLanes =
V.WriteLanes = Lanes;
2819 V.ErasableImplicitDef =
true;
2823 V.ValidLanes =
V.WriteLanes = computeWriteLanes(
DefMI, Redef);
2842 assert((TrackSubRegLiveness ||
V.RedefVNI) &&
2843 "Instruction is reading nonexistent value");
2844 if (
V.RedefVNI !=
nullptr) {
2845 computeAssignment(
V.RedefVNI->id,
Other);
2846 V.ValidLanes |= Vals[
V.RedefVNI->id].ValidLanes;
2858 V.ErasableImplicitDef =
true;
2875 if (OtherVNI->
def < VNI->
def)
2876 Other.computeAssignment(OtherVNI->
id, *
this);
2881 return CR_Impossible;
2883 V.OtherVNI = OtherVNI;
2884 Val &OtherV =
Other.Vals[OtherVNI->
id];
2888 if (!OtherV.isAnalyzed() ||
Other.Assignments[OtherVNI->
id] == -1)
2895 if ((
V.ValidLanes & OtherV.ValidLanes).any())
2897 return CR_Impossible;
2912 Other.computeAssignment(
V.OtherVNI->id, *
this);
2913 Val &OtherV =
Other.Vals[
V.OtherVNI->id];
2915 if (OtherV.ErasableImplicitDef) {
2935 <<
", keeping it.\n");
2936 OtherV.mustKeepImplicitDef(*
TRI, *OtherImpDef);
2943 dbgs() <<
"IMPLICIT_DEF defined at " <<
V.OtherVNI->def
2944 <<
" may be live into EH pad successors, keeping it.\n");
2945 OtherV.mustKeepImplicitDef(*
TRI, *OtherImpDef);
2948 OtherV.ValidLanes &= ~OtherV.WriteLanes;
2963 if (
CP.isCoalescable(
DefMI)) {
2966 V.ValidLanes &= ~V.WriteLanes | OtherV.ValidLanes;
2981 valuesIdentical(VNI,
V.OtherVNI,
Other)) {
3004 if ((
V.WriteLanes & OtherV.ValidLanes).none())
3017 "Only early clobber defs can overlap a kill");
3018 return CR_Impossible;
3025 if ((
TRI->getSubRegIndexLaneMask(
Other.SubIdx) & ~
V.WriteLanes).none())
3026 return CR_Impossible;
3028 if (TrackSubRegLiveness) {
3033 if (!OtherLI.hasSubRanges()) {
3035 return (OtherMask &
V.WriteLanes).none() ? CR_Replace : CR_Impossible;
3043 TRI->composeSubRegIndexLaneMask(
Other.SubIdx, OtherSR.LaneMask);
3044 if ((OtherMask &
V.WriteLanes).none())
3047 auto OtherSRQ = OtherSR.Query(VNI->
def);
3048 if (OtherSRQ.valueIn() && OtherSRQ.endPoint() > VNI->
def) {
3050 return CR_Impossible;
3063 return CR_Impossible;
3072 return CR_Unresolved;
3075void JoinVals::computeAssignment(
unsigned ValNo, JoinVals &
Other) {
3076 Val &
V = Vals[ValNo];
3077 if (
V.isAnalyzed()) {
3080 assert(Assignments[ValNo] != -1 &&
"Bad recursion?");
3083 switch ((
V.Resolution = analyzeValue(ValNo,
Other))) {
3087 assert(
V.OtherVNI &&
"OtherVNI not assigned, can't merge.");
3088 assert(
Other.Vals[
V.OtherVNI->id].isAnalyzed() &&
"Missing recursion");
3089 Assignments[ValNo] =
Other.Assignments[
V.OtherVNI->id];
3093 <<
V.OtherVNI->def <<
" --> @"
3094 << NewVNInfo[Assignments[ValNo]]->def <<
'\n');
3097 case CR_Unresolved: {
3099 assert(
V.OtherVNI &&
"OtherVNI not assigned, can't prune");
3100 Val &OtherV =
Other.Vals[
V.OtherVNI->id];
3101 OtherV.Pruned =
true;
3106 Assignments[ValNo] = NewVNInfo.
size();
3112bool JoinVals::mapValues(JoinVals &
Other) {
3114 computeAssignment(i,
Other);
3115 if (Vals[i].Resolution == CR_Impossible) {
3124bool JoinVals::taintExtent(
3133 assert(OtherI !=
Other.LR.end() &&
"No conflict?");
3138 if (
End >= MBBEnd) {
3140 << OtherI->valno->id <<
'@' << OtherI->start <<
'\n');
3144 << OtherI->valno->id <<
'@' << OtherI->start <<
" to "
3149 TaintExtent.push_back(std::make_pair(
End, TaintedLanes));
3152 if (++OtherI ==
Other.LR.end() || OtherI->start >= MBBEnd)
3156 const Val &OV =
Other.Vals[OtherI->valno->id];
3157 TaintedLanes &= ~OV.WriteLanes;
3160 }
while (TaintedLanes.
any());
3166 if (
MI.isDebugOrPseudoInstr())
3173 unsigned S =
TRI->composeSubRegIndices(SubIdx, MO.
getSubReg());
3174 if ((Lanes &
TRI->getSubRegIndexLaneMask(S)).any())
3180bool JoinVals::resolveConflicts(JoinVals &
Other) {
3183 assert(
V.Resolution != CR_Impossible &&
"Unresolvable conflict");
3184 if (
V.Resolution != CR_Unresolved)
3193 assert(
V.OtherVNI &&
"Inconsistent conflict resolution.");
3195 const Val &OtherV =
Other.Vals[
V.OtherVNI->id];
3200 LaneBitmask TaintedLanes =
V.WriteLanes & OtherV.ValidLanes;
3202 if (!taintExtent(i, TaintedLanes,
Other, TaintExtent))
3206 assert(!TaintExtent.
empty() &&
"There should be at least one conflict.");
3219 "Interference ends on VNI->def. Should have been handled earlier");
3222 assert(LastMI &&
"Range must end at a proper instruction");
3223 unsigned TaintNum = 0;
3226 if (usesLanes(*
MI,
Other.Reg,
Other.SubIdx, TaintedLanes)) {
3231 if (&*
MI == LastMI) {
3232 if (++TaintNum == TaintExtent.
size())
3235 assert(LastMI &&
"Range must end at a proper instruction");
3236 TaintedLanes = TaintExtent[TaintNum].second;
3242 V.Resolution = CR_Replace;
3248bool JoinVals::isPrunedValue(
unsigned ValNo, JoinVals &
Other) {
3249 Val &
V = Vals[ValNo];
3250 if (
V.Pruned ||
V.PrunedComputed)
3253 if (
V.Resolution != CR_Erase &&
V.Resolution != CR_Merge)
3258 V.PrunedComputed =
true;
3259 V.Pruned =
Other.isPrunedValue(
V.OtherVNI->id, *
this);
3263void JoinVals::pruneValues(JoinVals &
Other,
3265 bool changeInstrs) {
3268 switch (Vals[i].Resolution) {
3278 Val &OtherV =
Other.Vals[Vals[i].OtherVNI->id];
3280 OtherV.ErasableImplicitDef && OtherV.Resolution == CR_Keep;
3281 if (!
Def.isBlock()) {
3288 if (MO.
getReg() == Reg) {
3301 <<
": " <<
Other.LR <<
'\n');
3306 if (isPrunedValue(i,
Other)) {
3313 << Def <<
": " << LR <<
'\n');
3371 bool DidPrune =
false;
3376 if (
V.Resolution != CR_Erase &&
3377 (
V.Resolution != CR_Keep || !
V.ErasableImplicitDef || !
V.Pruned))
3384 OtherDef =
V.OtherVNI->def;
3387 LLVM_DEBUG(
dbgs() <<
"\t\tExpecting instruction removal at " << Def
3395 if (ValueOut !=
nullptr &&
3397 (
V.Identical &&
V.Resolution == CR_Erase && ValueOut->
def == Def))) {
3399 <<
" at " << Def <<
"\n");
3406 if (
V.Identical && S.Query(OtherDef).valueOutOrDead()) {
3416 ShrinkMask |= S.LaneMask;
3430 ShrinkMask |= S.LaneMask;
3442 if (VNI->
def == Def)
3448void JoinVals::pruneMainSegments(
LiveInterval &LI,
bool &ShrinkMainRange) {
3452 if (Vals[i].Resolution != CR_Keep)
3457 Vals[i].Pruned =
true;
3458 ShrinkMainRange =
true;
3462void JoinVals::removeImplicitDefs() {
3465 if (
V.Resolution != CR_Keep || !
V.ErasableImplicitDef || !
V.Pruned)
3481 switch (Vals[i].Resolution) {
3486 if (!Vals[i].ErasableImplicitDef || !Vals[i].Pruned)
3498 if (LI !=
nullptr) {
3523 ED = ED.
isValid() ? std::min(ED,
I->start) :
I->start;
3525 LE =
LE.isValid() ? std::max(LE,
I->end) :
I->
end;
3528 NewEnd = std::min(NewEnd, LE);
3530 NewEnd = std::min(NewEnd, ED);
3536 if (S != LR.
begin())
3537 std::prev(S)->end = NewEnd;
3541 dbgs() <<
"\t\tremoved " << i <<
'@' <<
Def <<
": " << LR <<
'\n';
3543 dbgs() <<
"\t\t LHS = " << *LI <<
'\n';
3550 assert(
MI &&
"No instruction to erase");
3553 if (
Reg.isVirtual() && Reg !=
CP.getSrcReg() && Reg !=
CP.getDstReg())
3559 MI->eraseFromParent();
3572 JoinVals RHSVals(RRange,
CP.getSrcReg(),
CP.getSrcIdx(), LaneMask, NewVNInfo,
3573 CP, LIS,
TRI,
true,
true);
3574 JoinVals LHSVals(LRange,
CP.getDstReg(),
CP.getDstIdx(), LaneMask, NewVNInfo,
3575 CP, LIS,
TRI,
true,
true);
3582 if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals)) {
3587 if (!LHSVals.resolveConflicts(RHSVals) ||
3588 !RHSVals.resolveConflicts(LHSVals)) {
3599 LHSVals.pruneValues(RHSVals, EndPoints,
false);
3600 RHSVals.pruneValues(LHSVals, EndPoints,
false);
3602 LHSVals.removeImplicitDefs();
3603 RHSVals.removeImplicitDefs();
3608 LRange.
join(RRange, LHSVals.getAssignments(), RHSVals.getAssignments(),
3613 if (EndPoints.
empty())
3619 dbgs() <<
"\t\trestoring liveness to " << EndPoints.
size() <<
" points: ";
3620 for (
unsigned i = 0, n = EndPoints.
size(); i != n; ++i) {
3621 dbgs() << EndPoints[i];
3625 dbgs() <<
": " << LRange <<
'\n';
3630void RegisterCoalescer::mergeSubRangeInto(
LiveInterval &LI,
3634 unsigned ComposeSubRegIdx) {
3637 Allocator, LaneMask,
3640 SR.assign(ToMerge, Allocator);
3643 LiveRange RangeCopy(ToMerge, Allocator);
3644 joinSubRegRanges(SR, RangeCopy, SR.LaneMask, CP);
3650bool RegisterCoalescer::isHighCostLiveInterval(
LiveInterval &LI) {
3653 auto &Counter = LargeLIVisitCounter[LI.
reg()];
3665 bool TrackSubRegLiveness =
MRI->shouldTrackSubRegLiveness(*
CP.getNewRC());
3667 NewVNInfo, CP, LIS,
TRI,
false, TrackSubRegLiveness);
3669 NewVNInfo, CP, LIS,
TRI,
false, TrackSubRegLiveness);
3671 LLVM_DEBUG(
dbgs() <<
"\t\tRHS = " << RHS <<
"\n\t\tLHS = " << LHS <<
'\n');
3673 if (isHighCostLiveInterval(LHS) || isHighCostLiveInterval(RHS))
3678 if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals))
3682 if (!LHSVals.resolveConflicts(RHSVals) || !RHSVals.resolveConflicts(LHSVals))
3686 if (
RHS.hasSubRanges() ||
LHS.hasSubRanges()) {
3691 unsigned DstIdx =
CP.getDstIdx();
3692 if (!
LHS.hasSubRanges()) {
3694 :
TRI->getSubRegIndexLaneMask(DstIdx);
3697 LHS.createSubRangeFrom(Allocator, Mask, LHS);
3698 }
else if (DstIdx != 0) {
3709 unsigned SrcIdx =
CP.getSrcIdx();
3710 if (!
RHS.hasSubRanges()) {
3712 :
TRI->getSubRegIndexLaneMask(SrcIdx);
3713 mergeSubRangeInto(LHS, RHS, Mask, CP, DstIdx);
3718 mergeSubRangeInto(LHS, R, Mask, CP, DstIdx);
3725 LHSVals.pruneMainSegments(LHS, ShrinkMainRange);
3727 LHSVals.pruneSubRegValues(LHS, ShrinkMask);
3728 RHSVals.pruneSubRegValues(LHS, ShrinkMask);
3729 }
else if (TrackSubRegLiveness && !
CP.getDstIdx() &&
CP.getSrcIdx()) {
3731 CP.getNewRC()->getLaneMask(), LHS);
3732 mergeSubRangeInto(LHS, RHS,
TRI->getSubRegIndexLaneMask(
CP.getSrcIdx()), CP,
3734 LHSVals.pruneMainSegments(LHS, ShrinkMainRange);
3735 LHSVals.pruneSubRegValues(LHS, ShrinkMask);
3743 LHSVals.pruneValues(RHSVals, EndPoints,
true);
3744 RHSVals.pruneValues(LHSVals, EndPoints,
true);
3749 LHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs, &LHS);
3750 RHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs);
3751 while (!ShrinkRegs.
empty())
3755 checkMergingChangesDbgValues(CP, LHS, LHSVals, RHS, RHSVals);
3759 auto RegIt = RegToPHIIdx.
find(
CP.getSrcReg());
3760 if (RegIt != RegToPHIIdx.
end()) {
3762 for (
unsigned InstID : RegIt->second) {
3763 auto PHIIt = PHIValToPos.
find(InstID);
3768 auto LII =
RHS.find(SI);
3769 if (LII ==
RHS.end() || LII->start > SI)
3784 if (
CP.getSrcIdx() != 0 ||
CP.getDstIdx() != 0)
3787 if (PHIIt->second.SubReg && PHIIt->second.SubReg !=
CP.getSrcIdx())
3791 PHIIt->second.Reg =
CP.getDstReg();
3795 if (
CP.getSrcIdx() != 0)
3796 PHIIt->second.SubReg =
CP.getSrcIdx();
3802 auto InstrNums = RegIt->second;
3803 RegToPHIIdx.
erase(RegIt);
3807 RegIt = RegToPHIIdx.
find(
CP.getDstReg());
3808 if (RegIt != RegToPHIIdx.
end())
3809 RegIt->second.insert(RegIt->second.end(), InstrNums.begin(),
3812 RegToPHIIdx.
insert({
CP.getDstReg(), InstrNums});
3816 LHS.join(RHS, LHSVals.getAssignments(), RHSVals.getAssignments(), NewVNInfo);
3821 MRI->clearKillFlags(
LHS.reg());
3822 MRI->clearKillFlags(
RHS.reg());
3824 if (!EndPoints.
empty()) {
3828 dbgs() <<
"\t\trestoring liveness to " << EndPoints.
size() <<
" points: ";
3829 for (
unsigned i = 0, n = EndPoints.
size(); i != n; ++i) {
3830 dbgs() << EndPoints[i];
3834 dbgs() <<
": " <<
LHS <<
'\n';
3843 return CP.isPhys() ? joinReservedPhysReg(CP) : joinVirtRegs(
CP);
3853 for (
auto *
X : ToInsert) {
3854 for (
const auto &
Op :
X->debug_operands()) {
3855 if (
Op.isReg() &&
Op.getReg().isVirtual())
3866 for (
auto &
MBB : MF) {
3869 for (
auto &
MI :
MBB) {
3870 if (
MI.isDebugValue()) {
3872 return MO.isReg() && MO.getReg().isVirtual();
3874 ToInsert.push_back(&
MI);
3875 }
else if (!
MI.isDebugOrPseudoInstr()) {
3877 CloseNewDVRange(CurrentSlot);
3886 for (
auto &Pair : DbgVRegToValues)
3890void RegisterCoalescer::checkMergingChangesDbgValues(
CoalescerPair &CP,
3894 JoinVals &RHSVals) {
3896 checkMergingChangesDbgValuesImpl(Reg, RHS, LHS, LHSVals);
3900 checkMergingChangesDbgValuesImpl(Reg, LHS, RHS, RHSVals);
3904 ScanForSrcReg(
CP.getSrcReg());
3905 ScanForDstReg(
CP.getDstReg());
3908void RegisterCoalescer::checkMergingChangesDbgValuesImpl(
Register Reg,
3911 JoinVals &RegVals) {
3913 auto VRegMapIt = DbgVRegToValues.
find(Reg);
3914 if (VRegMapIt == DbgVRegToValues.
end())
3917 auto &DbgValueSet = VRegMapIt->second;
3918 auto DbgValueSetIt = DbgValueSet.begin();
3919 auto SegmentIt = OtherLR.
begin();
3921 bool LastUndefResult =
false;
3926 auto ShouldUndef = [&RegVals, &
RegLR, &LastUndefResult,
3931 if (LastUndefIdx ==
Idx)
3932 return LastUndefResult;
3939 if (OtherIt ==
RegLR.end())
3948 auto Resolution = RegVals.getResolution(OtherIt->valno->id);
3950 Resolution != JoinVals::CR_Keep && Resolution != JoinVals::CR_Erase;
3952 return LastUndefResult;
3958 while (DbgValueSetIt != DbgValueSet.end() && SegmentIt != OtherLR.
end()) {
3959 if (DbgValueSetIt->first < SegmentIt->end) {
3962 if (DbgValueSetIt->first >= SegmentIt->start) {
3963 bool HasReg = DbgValueSetIt->second->hasDebugOperandForReg(Reg);
3964 bool ShouldUndefReg = ShouldUndef(DbgValueSetIt->first);
3965 if (HasReg && ShouldUndefReg) {
3967 DbgValueSetIt->second->setDebugValueUndef();
3981struct MBBPriorityInfo {
3987 :
MBB(mbb),
Depth(depth), IsSplit(issplit) {}
3997 const MBBPriorityInfo *RHS) {
3999 if (
LHS->Depth !=
RHS->Depth)
4000 return LHS->Depth >
RHS->Depth ? -1 : 1;
4003 if (
LHS->IsSplit !=
RHS->IsSplit)
4004 return LHS->IsSplit ? -1 : 1;
4008 unsigned cl =
LHS->MBB->pred_size() +
LHS->MBB->succ_size();
4009 unsigned cr =
RHS->MBB->pred_size() +
RHS->MBB->succ_size();
4011 return cl > cr ? -1 : 1;
4014 return LHS->MBB->getNumber() <
RHS->MBB->getNumber() ? -1 : 1;
4019 if (!Copy->isCopy())
4022 if (Copy->getOperand(1).isUndef())
4025 Register SrcReg = Copy->getOperand(1).getReg();
4026 Register DstReg = Copy->getOperand(0).getReg();
4034void RegisterCoalescer::lateLiveIntervalUpdate() {
4040 if (!DeadDefs.
empty())
4041 eliminateDeadDefs();
4043 ToBeUpdated.clear();
4046bool RegisterCoalescer::copyCoalesceWorkList(
4048 bool Progress =
false;
4060 bool Success = joinCopy(
MI, Again, CurrentErasedInstrs);
4066 if (!CurrentErasedInstrs.
empty()) {
4068 if (
MI && CurrentErasedInstrs.
count(
MI))
4072 if (
MI && CurrentErasedInstrs.
count(
MI))
4083 assert(Copy.isCopyLike());
4086 if (&
MI != &Copy &&
MI.isCopyLike())
4091bool RegisterCoalescer::applyTerminalRule(
const MachineInstr &Copy)
const {
4096 unsigned SrcSubReg = 0, DstSubReg = 0;
4097 if (!
isMoveInstr(*
TRI, &Copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
4118 if (&
MI == &Copy || !
MI.isCopyLike() ||
MI.getParent() != OrigBB)
4121 unsigned OtherSrcSubReg = 0, OtherSubReg = 0;
4122 if (!
isMoveInstr(*
TRI, &Copy, OtherSrcReg, OtherReg, OtherSrcSubReg,
4125 if (OtherReg == SrcReg)
4126 OtherReg = OtherSrcReg;
4145 const unsigned PrevSize = WorkList.
size();
4146 if (JoinGlobalCopies) {
4154 if (!
MI.isCopyLike())
4156 bool ApplyTerminalRule = applyTerminalRule(
MI);
4158 if (ApplyTerminalRule)
4163 if (ApplyTerminalRule)
4170 LocalWorkList.
append(LocalTerminals.
begin(), LocalTerminals.
end());
4175 if (MII.isCopyLike()) {
4176 if (applyTerminalRule(MII))
4189 if (copyCoalesceWorkList(CurrList))
4191 std::remove(WorkList.
begin() + PrevSize, WorkList.
end(),
nullptr),
4195void RegisterCoalescer::coalesceLocals() {
4196 copyCoalesceWorkList(LocalWorkList);
4201 LocalWorkList.clear();
4204void RegisterCoalescer::joinAllIntervals() {
4205 LLVM_DEBUG(
dbgs() <<
"********** JOINING INTERVALS ***********\n");
4206 assert(WorkList.
empty() && LocalWorkList.empty() &&
"Old data still around.");
4208 std::vector<MBBPriorityInfo> MBBs;
4209 MBBs.reserve(MF->size());
4211 MBBs.push_back(MBBPriorityInfo(&
MBB,
Loops->getLoopDepth(&
MBB),
4217 unsigned CurrDepth = std::numeric_limits<unsigned>::max();
4218 for (MBBPriorityInfo &
MBB : MBBs) {
4220 if (JoinGlobalCopies &&
MBB.Depth < CurrDepth) {
4222 CurrDepth =
MBB.Depth;
4224 copyCoalesceInMBB(
MBB.MBB);
4226 lateLiveIntervalUpdate();
4231 while (copyCoalesceWorkList(WorkList))
4233 lateLiveIntervalUpdate();
4236void RegisterCoalescer::releaseMemory() {
4237 ErasedInstrs.
clear();
4240 InflateRegs.
clear();
4241 LargeLIVisitCounter.
clear();
4245 LLVM_DEBUG(
dbgs() <<
"********** REGISTER COALESCER **********\n"
4246 <<
"********** Function: " << fn.
getName() <<
'\n');
4258 dbgs() <<
"* Skipped as it exposes functions that returns twice.\n");
4267 LIS = &getAnalysis<LiveIntervalsWrapperPass>().getLIS();
4268 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
4269 Loops = &getAnalysis<MachineLoopInfoWrapperPass>().getLI();
4278 for (
const auto &DebugPHI : MF->DebugPHIPositions) {
4281 unsigned SubReg = DebugPHI.second.SubReg;
4284 PHIValToPos.
insert(std::make_pair(DebugPHI.first,
P));
4285 RegToPHIIdx[
Reg].push_back(DebugPHI.first);
4294 MF->verify(
this,
"Before register coalescing", &
errs());
4296 DbgVRegToValues.
clear();
4313 if (
MRI->reg_nodbg_empty(Reg))
4315 if (
MRI->recomputeRegClass(Reg)) {
4317 <<
TRI->getRegClassName(
MRI->getRegClass(Reg)) <<
'\n');
4324 if (!
MRI->shouldTrackSubRegLiveness(Reg)) {
4332 assert((S.LaneMask & ~MaxMask).none());
4342 for (
auto &p : MF->DebugPHIPositions) {
4343 auto it = PHIValToPos.
find(
p.first);
4345 p.second.Reg = it->second.Reg;
4346 p.second.SubReg = it->second.SubReg;
4349 PHIValToPos.
clear();
4350 RegToPHIIdx.
clear();
4354 MF->verify(
this,
"After register coalescing", &
errs());
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements the BitVector class.
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseSet and SmallDenseSet classes.
std::optional< std::vector< StOtherPiece > > Other
SmallVector< uint32_t, 0 > Writes
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
A common definition of LaneBitmask for use in TableGen and CodeGen.
unsigned const TargetRegisterInfo * TRI
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
static cl::opt< cl::boolOrDefault > EnableGlobalCopies("join-globalcopies", cl::desc("Coalesce copies that span blocks (default=subtarget)"), cl::init(cl::BOU_UNSET), cl::Hidden)
Temporary flag to test global copy optimization.
static bool isLocalCopy(MachineInstr *Copy, const LiveIntervals *LIS)
static bool isSplitEdge(const MachineBasicBlock *MBB)
Return true if this block should be vacated by the coalescer to eliminate branches.
static int compareMBBPriority(const MBBPriorityInfo *LHS, const MBBPriorityInfo *RHS)
C-style comparator that sorts first based on the loop depth of the basic block (the unsigned),...
register Register Coalescer
static cl::opt< unsigned > LargeIntervalSizeThreshold("large-interval-size-threshold", cl::Hidden, cl::desc("If the valnos size of an interval is larger than the threshold, " "it is regarded as a large interval. "), cl::init(100))
static bool isDefInSubRange(LiveInterval &LI, SlotIndex Def)
Check if any of the subranges of LI contain a definition at Def.
static std::pair< bool, bool > addSegmentsWithValNo(LiveRange &Dst, VNInfo *DstValNo, const LiveRange &Src, const VNInfo *SrcValNo)
Copy segments with value number SrcValNo from liverange Src to live range @Dst and use value number D...
static bool isLiveThrough(const LiveQueryResult Q)
static bool isTerminalReg(Register DstReg, const MachineInstr &Copy, const MachineRegisterInfo *MRI)
Check if DstReg is a terminal node.
static cl::opt< bool > VerifyCoalescing("verify-coalescing", cl::desc("Verify machine instrs before and after register coalescing"), cl::Hidden)
register Register static false bool isMoveInstr(const TargetRegisterInfo &tri, const MachineInstr *MI, Register &Src, Register &Dst, unsigned &SrcSub, unsigned &DstSub)
static cl::opt< bool > EnableJoinSplits("join-splitedges", cl::desc("Coalesce copies on split edges (default=subtarget)"), cl::Hidden)
Temporary flag to test critical edge unsplitting.
static cl::opt< bool > EnableJoining("join-liveintervals", cl::desc("Coalesce copies (default=true)"), cl::init(true), cl::Hidden)
static cl::opt< unsigned > LargeIntervalFreqThreshold("large-interval-freq-threshold", cl::Hidden, cl::desc("For a large interval, if it is coalesced with other live " "intervals many times more than the threshold, stop its " "coalescing to control the compile time. "), cl::init(256))
static bool definesFullReg(const MachineInstr &MI, Register Reg)
Returns true if MI defines the full vreg Reg, as opposed to just defining a subregister.
static cl::opt< unsigned > LateRematUpdateThreshold("late-remat-update-threshold", cl::Hidden, cl::desc("During rematerialization for a copy, if the def instruction has " "many other copy uses to be rematerialized, delay the multiple " "separate live interval update work and do them all at once after " "all those rematerialization are done. It will save a lot of " "repeated work. "), cl::init(100))
static cl::opt< bool > UseTerminalRule("terminal-rule", cl::desc("Apply the terminal rule"), cl::init(false), cl::Hidden)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static DenseMap< Register, std::vector< std::pair< SlotIndex, MachineInstr * > > > buildVRegToDbgValueMap(MachineFunction &MF, const LiveIntervals *Liveness)
static void shrinkToUses(LiveInterval &LI, LiveIntervals &LIS)
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Represent the analysis usage information of a pass.
AnalysisUsage & addPreservedID(const void *ID)
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
void setPreservesCFG()
This function should be called by the pass, iff they do not:
bool test(unsigned Idx) const
Allocate memory in an ever growing pool, as if by bump-pointer.
A helper class for register coalescers.
bool flip()
Swap SrcReg and DstReg.
bool isCoalescable(const MachineInstr *) const
Return true if MI is a copy instruction that will become an identity copy after coalescing.
bool setRegisters(const MachineInstr *)
Set registers to match the copy instruction MI.
This class represents an Operation in the Expression.
The location of a single variable, composed of an expression and 0 or more DbgValueLocEntries.
iterator find(const_arg_type_t< KeyT > Val)
bool erase(const KeyT &Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Implements a dense probed hash-table based set.
bool isAsCheapAsAMove(const MachineInstr &MI) const override
A live range for subregisters.
LiveInterval - This class represents the liveness of a register, or stack slot.
void removeEmptySubRanges()
Removes all subranges without any segments (subranges without segments are not considered valid and s...
bool hasSubRanges() const
Returns true if subregister liveness information is available.
SubRange * createSubRangeFrom(BumpPtrAllocator &Allocator, LaneBitmask LaneMask, const LiveRange &CopyFrom)
Like createSubRange() but the new range is filled with a copy of the liveness information in CopyFrom...
iterator_range< subrange_iterator > subranges()
void refineSubRanges(BumpPtrAllocator &Allocator, LaneBitmask LaneMask, std::function< void(LiveInterval::SubRange &)> Apply, const SlotIndexes &Indexes, const TargetRegisterInfo &TRI, unsigned ComposeSubRegIdx=0)
Refines the subranges to support LaneMask.
void computeSubRangeUndefs(SmallVectorImpl< SlotIndex > &Undefs, LaneBitmask LaneMask, const MachineRegisterInfo &MRI, const SlotIndexes &Indexes) const
For a given lane mask LaneMask, compute indexes at which the lane is marked undefined by subregister ...
SubRange * createSubRange(BumpPtrAllocator &Allocator, LaneBitmask LaneMask)
Creates a new empty subregister live range.
void clearSubRanges()
Removes all subregister liveness information.
bool hasInterval(Register Reg) const
SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const
Return the first index in the given basic block.
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction associated with the given index.
bool hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const
Returns true if VNI is killed by any PHI-def values in LI.
SlotIndex InsertMachineInstrInMaps(MachineInstr &MI)
bool checkRegMaskInterference(const LiveInterval &LI, BitVector &UsableRegs)
Test if LI is live across any register mask instructions, and compute a bit mask of physical register...
SlotIndexes * getSlotIndexes() const
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
void RemoveMachineInstrFromMaps(MachineInstr &MI)
VNInfo::Allocator & getVNInfoAllocator()
SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const
Return the last index in the given basic block.
LiveRange & getRegUnit(unsigned Unit)
Return the live range for register unit Unit.
LiveRange * getCachedRegUnit(unsigned Unit)
Return the live range for register unit Unit if it has already been computed, or nullptr if it hasn't...
LiveInterval & getInterval(Register Reg)
void pruneValue(LiveRange &LR, SlotIndex Kill, SmallVectorImpl< SlotIndex > *EndPoints)
If LR has a live value at Kill, prune its live range by removing any liveness reachable from Kill.
void removeInterval(Register Reg)
Interval removal.
MachineBasicBlock * intervalIsInOneMBB(const LiveInterval &LI) const
If LI is confined to a single basic block, return a pointer to that block.
void removeVRegDefAt(LiveInterval &LI, SlotIndex Pos)
Remove value number and related live segments of LI and its subranges that start at position Pos.
bool shrinkToUses(LiveInterval *li, SmallVectorImpl< MachineInstr * > *dead=nullptr)
After removing some uses of a register, shrink its live range to just the remaining uses.
void extendToIndices(LiveRange &LR, ArrayRef< SlotIndex > Indices, ArrayRef< SlotIndex > Undefs)
Extend the live range LR to reach all points in Indices.
void print(raw_ostream &O) const
Implement the dump method.
void removePhysRegDefAt(MCRegister Reg, SlotIndex Pos)
Remove value numbers and related live segments starting at position Pos that are part of any liverang...
void splitSeparateComponents(LiveInterval &LI, SmallVectorImpl< LiveInterval * > &SplitLIs)
Split separate components in LiveInterval LI into separate intervals.
MachineBasicBlock * getMBBFromIndex(SlotIndex index) const
bool isLiveInToMBB(const LiveRange &LR, const MachineBasicBlock *mbb) const
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
Result of a LiveRange query.
VNInfo * valueOutOrDead() const
Returns the value alive at the end of the instruction, if any.
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
VNInfo * valueOut() const
Return the value leaving the instruction, if any.
VNInfo * valueDefined() const
Return the value defined by this instruction, if any.
SlotIndex endPoint() const
Return the end point of the last live range segment to interact with the instruction,...
bool isKill() const
Return true if the live-in value is killed by this instruction.
Callback methods for LiveRangeEdit owners.
virtual void LRE_WillEraseInstruction(MachineInstr *MI)
Called immediately before erasing a dead machine instruction.
SlotIndex rematerializeAt(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, const Remat &RM, const TargetRegisterInfo &, bool Late=false, unsigned SubIdx=0, MachineInstr *ReplaceIndexMI=nullptr)
rematerializeAt - Rematerialize RM.ParentVNI into DestReg by inserting an instruction into MBB before...
void eliminateDeadDefs(SmallVectorImpl< MachineInstr * > &Dead, ArrayRef< Register > RegsBeingSpilled={})
eliminateDeadDefs - Try to delete machine instructions that are now dead (allDefsAreDead returns true...
bool checkRematerializable(VNInfo *VNI, const MachineInstr *DefMI)
checkRematerializable - Manually add VNI to the list of rematerializable values if DefMI may be remat...
bool canRematerializeAt(Remat &RM, VNInfo *OrigVNI, SlotIndex UseIdx, bool cheapAsAMove)
canRematerializeAt - Determine if ParentVNI can be rematerialized at UseIdx.
This class represents the liveness of a register, stack slot, etc.
VNInfo * getValNumInfo(unsigned ValNo)
getValNumInfo - Returns pointer to the specified val#.
iterator addSegment(Segment S)
Add the specified Segment to this range, merging segments as appropriate.
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
void join(LiveRange &Other, const int *ValNoAssignments, const int *RHSValNoAssignments, SmallVectorImpl< VNInfo * > &NewVNInfo)
join - Join two live ranges (this, and other) together.
bool liveAt(SlotIndex index) const
VNInfo * createDeadDef(SlotIndex Def, VNInfo::Allocator &VNIAlloc)
createDeadDef - Make sure the range has a value defined at Def.
void removeValNo(VNInfo *ValNo)
removeValNo - Remove all the segments defined by the specified value#.
bool overlaps(const LiveRange &other) const
overlaps - Return true if the intersection of the two live ranges is not empty.
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarily including Idx,...
bool verify() const
Walk the range and assert if any invariants fail to hold.
VNInfo * MergeValueNumberInto(VNInfo *V1, VNInfo *V2)
MergeValueNumberInto - This method is called when two value numbers are found to be equivalent.
unsigned getNumValNums() const
bool containsOneValue() const
iterator FindSegmentContaining(SlotIndex Idx)
Return an iterator to the segment that contains the specified index, or end() if there is none.
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
iterator find(SlotIndex Pos)
find - Return an iterator pointing to the first segment that ends after Pos, or end().
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
MCRegUnitRootIterator enumerates the root registers of a register unit.
bool isValid() const
Check if the iterator is at the end of the list.
Wrapper class representing physical registers. Should be passed by value.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
unsigned pred_size() const
bool hasEHPadSuccessor() const
bool isEHPad() const
Returns true if the block is a landing pad.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
unsigned succ_size() const
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
iterator_range< pred_iterator > predecessors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
virtual MachineFunctionProperties getClearedProperties() const
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
Properties which a MachineFunction may have at a given point in time.
MachineFunctionProperties & set(Property P)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
bool exposesReturnsTwice() const
exposesReturnsTwice - Returns true if the function calls setjmp or any other similar functions with a...
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
void setRegisterDefReadUndef(Register Reg, bool IsUndef=true)
Mark all subregister defs of register Reg with the undef flag.
bool isImplicitDef() const
const MachineBasicBlock * getParent() const
bool isCopyLike() const
Return true if the instruction behaves like a copy.
std::pair< bool, bool > readsWritesVirtualRegister(Register Reg, SmallVectorImpl< unsigned > *Ops=nullptr) const
Return a pair of bools (reads, writes) indicating if this instruction reads or writes Reg.
bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx=nullptr) const
Return true if the use operand of the specified index is tied to a def operand.
bool isSafeToMove(bool &SawStore) const
Return true if it is safe to move this instruction.
bool isDebugInstr() const
unsigned getNumOperands() const
Retuns the total number of operands.
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
bool isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx=nullptr) const
Given the index of a register def operand, check if the register def is tied to a source operand,...
int findRegisterUseOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isKill=false) const
Returns the operand index that is a use of the specific register or -1 if it is not found.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool isCommutable(QueryType Type=IgnoreBundle) const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z,...
void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
iterator_range< mop_iterator > operands()
void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
const MachineOperand & getOperand(unsigned i) const
iterator_range< filtered_mop_iterator > all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
int findRegisterDefOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false) const
Returns the operand index that is a def of the specified register or -1 if it is not found.
void setDebugLoc(DebugLoc DL)
Replace current source information with new such.
bool allDefsAreDead() const
Return true if all the defs of this instruction are dead.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
void substVirtReg(Register Reg, unsigned SubIdx, const TargetRegisterInfo &)
substVirtReg - Substitute the current register with the virtual subregister Reg:SubReg.
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setIsDead(bool Val=true)
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
void setIsKill(bool Val=true)
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
void substPhysReg(MCRegister Reg, const TargetRegisterInfo &)
substPhysReg - Substitute the current register with the physical register Reg, taking any existing Su...
void setIsUndef(bool Val=true)
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
defusechain_iterator - This class provides iterator support for machine operands in the function that...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
virtual void releaseMemory()
releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...
bool isProperSubClass(const TargetRegisterClass *RC) const
isProperSubClass - Returns true if RC has a legal super-class with more allocatable registers.
void runOnMachineFunction(const MachineFunction &MF)
runOnFunction - Prepare to answer questions about MF.
Wrapper class representing virtual and physical registers.
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
SlotIndex - An opaque wrapper around machine indexes.
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
bool isEarlyClobber() const
isEarlyClobber - Returns true if this is an early-clobber slot.
bool isValid() const
Returns true if this is a valid index.
SlotIndex getBaseIndex() const
Returns the base index for associated with this index.
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
bool isDead() const
isDead - Returns true if this is a dead def kill slot.
MachineBasicBlock * getMBBFromIndex(SlotIndex index) const
Returns the basic block which the given index falls in.
SlotIndex getMBBEndIdx(unsigned Num) const
Returns the last index in the given basic block number.
SlotIndex getNextNonNullIndex(SlotIndex Index)
Returns the next non-null index, if one exists.
SlotIndex getInstructionIndex(const MachineInstr &MI, bool IgnoreBundle=false) const
Returns the base index for the given instruction.
SlotIndex getMBBStartIdx(unsigned Num) const
Returns the first index in the given basic block number.
SlotIndex getIndexBefore(const MachineInstr &MI) const
getIndexBefore - Returns the index of the last indexed instruction before MI, or the start index of i...
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction for the given index, or null if the given index has no instruction associated...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
TargetInstrInfo - Interface to description of machine instruction set.
static const unsigned CommuteAnyOperandIndex
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual bool enableJoinGlobalCopies() const
True if the subtarget should enable joining global copies.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
VNInfo - Value Number Information.
void markUnused()
Mark this value as unused.
bool isUnused() const
Returns true if this value is unused.
unsigned id
The ID number of this value.
SlotIndex def
The index of the defining instruction.
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
std::pair< iterator, bool > insert(const ValueT &V)
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
NodeAddr< DefNode * > Def
const_iterator end(StringRef path LLVM_LIFETIME_BOUND)
Get end iterator over path.
This is an optimization pass for GlobalISel generic memory operations.
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
char & RegisterCoalescerID
RegisterCoalescer - This pass merges live ranges to eliminate copies.
void initializeRegisterCoalescerPass(PassRegistry &)
char & MachineDominatorsID
MachineDominators - This pass is a machine dominators analysis pass.
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Printable PrintLaneMask(LaneBitmask LaneMask)
Create Printable object to print LaneBitmasks on a raw_ostream.
auto unique(Range &&R, Predicate P)
auto upper_bound(R &&Range, T &&Value)
Provide wrappers to std::upper_bound which take ranges instead of having to pass begin/end explicitly...
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
void sort(IteratorTy Start, IteratorTy End)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
void eraseInstrs(ArrayRef< MachineInstr * > DeadInstrs, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
void array_pod_sort(IteratorTy Start, IteratorTy End)
array_pod_sort - This sorts an array with the specified start and end extent.
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static constexpr LaneBitmask getLane(unsigned Lane)
static constexpr LaneBitmask getAll()
constexpr bool any() const
static constexpr LaneBitmask getNone()
Remat - Information needed to rematerialize at a specific location.
This represents a simple continuous liveness interval for a value.