Go to the documentation of this file.
63 #define DEBUG_TYPE "regalloc"
65 STATISTIC(numJoins ,
"Number of interval joins performed");
66 STATISTIC(numCrossRCs ,
"Number of cross class joins performed");
67 STATISTIC(numCommutes ,
"Number of instruction commuting performed");
68 STATISTIC(numExtends ,
"Number of copies extended");
69 STATISTIC(NumReMats ,
"Number of instructions re-materialized");
70 STATISTIC(NumInflated ,
"Number of register classes inflated");
71 STATISTIC(NumLaneConflicts,
"Number of dead lane conflicts tested");
72 STATISTIC(NumLaneResolves,
"Number of dead lane conflicts resolved");
73 STATISTIC(NumShrinkToUses,
"Number of shrinkToUses called");
76 cl::desc(
"Coalesce copies (default=true)"),
91 cl::desc(
"Coalesce copies that span blocks (default=subtarget)"),
96 cl::desc(
"Verify machine instrs before and after register coalescing"),
101 cl::desc(
"During rematerialization for a copy, if the def instruction has "
102 "many other copy uses to be rematerialized, delay the multiple "
103 "separate live interval update work and do them all at once after "
104 "all those rematerialization are done. It will save a lot of "
110 cl::desc(
"If the valnos size of an interval is larger than the threshold, "
111 "it is regarded as a large interval. "),
116 cl::desc(
"For a large interval, if it is coalesed with other live "
117 "intervals many times more than the threshold, stop its "
118 "coalescing to control the compile time. "),
153 using DbgValueLoc = std::pair<SlotIndex, MachineInstr*>;
168 bool ShrinkMainRange =
false;
172 bool JoinGlobalCopies =
false;
176 bool JoinSplitEdges =
false;
202 void eliminateDeadDefs();
213 void coalesceLocals();
216 void joinAllIntervals();
231 void lateLiveIntervalUpdate();
236 bool copyValueUndefInPredecessors(
LiveRange &
S,
372 MI->eraseFromParent();
397 void releaseMemory()
override;
413 "Simple Register Coalescing",
false,
false)
426 Dst =
MI->getOperand(0).getReg();
427 DstSub =
MI->getOperand(0).getSubReg();
428 Src =
MI->getOperand(1).getReg();
429 SrcSub =
MI->getOperand(1).getSubReg();
430 }
else if (
MI->isSubregToReg()) {
431 Dst =
MI->getOperand(0).getReg();
432 DstSub = tri.composeSubRegIndices(
MI->getOperand(0).getSubReg(),
433 MI->getOperand(3).getImm());
434 Src =
MI->getOperand(2).getReg();
435 SrcSub =
MI->getOperand(2).getSubReg();
450 for (
const auto &
MI : *
MBB) {
451 if (!
MI.isCopyLike() && !
MI.isUnconditionalBranch())
461 Flipped = CrossClass =
false;
464 unsigned SrcSub = 0, DstSub = 0;
467 Partial = SrcSub || DstSub;
484 if (!Dst)
return false;
491 if (!Dst)
return false;
501 if (SrcSub && DstSub) {
503 if (Src == Dst && SrcSub != DstSub)
529 if (DstIdx && !SrcIdx) {
535 CrossClass = NewRC != DstRC || NewRC != SrcRC;
540 "Cannot have a physical SubIdx");
559 unsigned SrcSub = 0, DstSub = 0;
567 }
else if (Src != SrcReg) {
573 if (!Dst.isPhysical())
575 assert(!DstIdx && !SrcIdx &&
"Inconsistent CoalescerPair state.");
581 return DstReg == Dst;
594 void RegisterCoalescer::getAnalysisUsage(
AnalysisUsage &AU)
const {
606 void RegisterCoalescer::eliminateDeadDefs() {
612 bool RegisterCoalescer::allUsesAvailableAt(
const MachineInstr *OrigMI,
616 return LiveRangeEdit(
nullptr, NewRegs, *MF, *LIS,
nullptr,
this)
620 void RegisterCoalescer::LRE_WillEraseInstruction(
MachineInstr *
MI) {
627 assert(!
CP.isPartial() &&
"This doesn't work for partial copies.");
628 assert(!
CP.isPhys() &&
"This doesn't work for physreg copies.");
653 if (BS == IntB.
end())
return false;
654 VNInfo *BValNo = BS->valno;
659 if (BValNo->
def != CopyIdx)
return false;
665 if (AS == IntA.
end())
return false;
666 VNInfo *AValNo = AS->valno;
672 if (!
CP.isCoalescable(ACopyMI) || !ACopyMI->
isFullCopy())
678 if (ValS == IntB.
end())
691 if (ValS+1 != BS)
return false;
695 SlotIndex FillerStart = ValS->end, FillerEnd = BS->start;
699 BValNo->
def = FillerStart;
704 IntB.
addSegment(LiveInterval::Segment(FillerStart, FillerEnd, BValNo));
707 if (BValNo != ValS->valno)
716 S.removeSegment(*
SS,
true);
720 if (!
S.getVNInfoAt(FillerStart)) {
723 S.extendInBlock(BBStart, FillerStart);
725 VNInfo *SubBValNo =
S.getVNInfoAt(CopyIdx);
726 S.addSegment(LiveInterval::Segment(FillerStart, FillerEnd, SubBValNo));
728 if (SubBValNo != SubValSNo)
729 S.MergeValueNumberInto(SubBValNo, SubValSNo);
745 bool RecomputeLiveRange = AS->end == CopyIdx;
746 if (!RecomputeLiveRange) {
749 if (
SS !=
S.end() &&
SS->end == CopyIdx) {
750 RecomputeLiveRange =
true;
755 if (RecomputeLiveRange)
762 bool RegisterCoalescer::hasOtherReachingDefs(
LiveInterval &IntA,
772 if (ASeg.
valno != AValNo)
continue;
774 if (BI != IntB.
begin())
776 for (; BI != IntB.
end() && ASeg.
end >= BI->start; ++BI) {
777 if (BI->valno == BValNo)
779 if (BI->start <= ASeg.
start && BI->end > ASeg.
start)
781 if (BI->start > ASeg.
start && BI->start < ASeg.
end)
790 static std::pair<bool,bool>
793 bool Changed =
false;
794 bool MergedWithDead =
false;
796 if (
S.valno != SrcValNo)
807 MergedWithDead =
true;
810 return std::make_pair(Changed, MergedWithDead);
847 assert(BValNo !=
nullptr && BValNo->
def == CopyIdx);
853 return {
false,
false };
856 return {
false,
false };
858 return {
false,
false };
865 return {
false,
false };
877 if (!
TII->findCommutedOpIndices(*
DefMI, UseOpIdx, NewDstIdx))
878 return {
false,
false };
883 return {
false,
false };
887 if (hasOtherReachingDefs(IntA, IntB, AValNo, BValNo))
888 return {
false,
false };
897 if (US == IntA.
end() || US->valno != AValNo)
901 return {
false,
false };
911 TII->commuteInstruction(*
DefMI,
false, UseOpIdx, NewDstIdx);
913 return {
false,
false };
917 return {
false,
false };
918 if (NewMI !=
DefMI) {
943 UseMO.setReg(NewReg);
948 assert(US != IntA.
end() &&
"Use must be live");
949 if (US->valno != AValNo)
952 UseMO.setIsKill(
false);
954 UseMO.substPhysReg(NewReg, *
TRI);
956 UseMO.setReg(NewReg);
975 VNInfo *SubDVNI =
S.getVNInfoAt(DefIdx);
978 VNInfo *SubBValNo =
S.getVNInfoAt(CopyIdx);
980 S.MergeValueNumberInto(SubDVNI, SubBValNo);
988 bool ShrinkB =
false;
1002 VNInfo *ASubValNo = SA.getVNInfoAt(AIdx);
1011 MaskA |= SA.LaneMask;
1017 VNInfo *BSubValNo = SR.empty() ? SR.getNextValue(CopyIdx, Allocator)
1018 : SR.getVNInfoAt(CopyIdx);
1019 assert(BSubValNo != nullptr);
1020 auto P = addSegmentsWithValNo(SR, BSubValNo, SA, ASubValNo);
1021 ShrinkB |= P.second;
1023 BSubValNo->def = ASubValNo->def;
1031 if ((SB.LaneMask & MaskA).any())
1035 SB.removeSegment(*
S,
true);
1039 BValNo->
def = AValNo->
def;
1041 ShrinkB |=
P.second;
1048 return {
true, ShrinkB };
1098 bool RegisterCoalescer::removePartialRedundancy(
const CoalescerPair &
CP,
1131 bool FoundReverseCopy =
false;
1150 bool ValB_Changed =
false;
1151 for (
auto VNI : IntB.
valnos) {
1152 if (VNI->isUnused())
1155 ValB_Changed =
true;
1163 FoundReverseCopy =
true;
1167 if (!FoundReverseCopy)
1177 if (CopyLeftBB && CopyLeftBB->
succ_size() > 1)
1188 if (InsPos != CopyLeftBB->
end()) {
1194 LLVM_DEBUG(
dbgs() <<
"\tremovePartialRedundancy: Move the copy to "
1199 TII->get(TargetOpcode::COPY), IntB.
reg())
1210 ErasedInstrs.
erase(NewCopyMI);
1212 LLVM_DEBUG(
dbgs() <<
"\tremovePartialRedundancy: Remove the copy from "
1221 deleteInstr(&CopyMI);
1235 VNInfo *BValNo = SR.Query(CopyIdx).valueOutOrDead();
1236 assert(BValNo &&
"All sublanes should be live");
1245 for (
unsigned I = 0;
I != EndPoints.size(); ) {
1247 EndPoints[
I] = EndPoints.back();
1248 EndPoints.pop_back();
1269 assert(!
Reg.isPhysical() &&
"This code cannot handle physreg aliasing");
1272 if (!
Op.isReg() || !
Op.isDef() ||
Op.getReg() !=
Reg)
1276 if (
Op.getSubReg() == 0 ||
Op.isUndef())
1282 bool RegisterCoalescer::reMaterializeTrivialDef(
const CoalescerPair &
CP,
1286 Register SrcReg =
CP.isFlipped() ?
CP.getDstReg() :
CP.getSrcReg();
1287 unsigned SrcIdx =
CP.isFlipped() ?
CP.getDstIdx() :
CP.getSrcIdx();
1288 Register DstReg =
CP.isFlipped() ?
CP.getSrcReg() :
CP.getDstReg();
1289 unsigned DstIdx =
CP.isFlipped() ?
CP.getSrcIdx() :
CP.getDstIdx();
1309 if (!
TII->isTriviallyReMaterializable(*
DefMI,
AA))
1313 bool SawStore =
false;
1330 if (SrcIdx && DstIdx)
1351 "Only expect to deal with virtual or physical registers");
1355 if (!allUsesAvailableAt(
DefMI, ValNo->
def, CopyIdx))
1375 assert(SrcIdx == 0 &&
CP.isFlipped()
1376 &&
"Shouldn't have SrcIdx+DstIdx at this point");
1380 if (CommonRC !=
nullptr) {
1399 assert(MO.
isImplicit() &&
"No explicit operands after implicit operands.");
1402 ImplicitOps.push_back(MO);
1408 ErasedInstrs.
insert(CopyMI);
1428 if (DefRC !=
nullptr) {
1433 assert(NewRC &&
"subreg chosen for remat incompatible with instruction");
1443 updateRegDefsUses(DstReg, DstReg, DstIdx);
1471 if (!SR.liveAt(DefIndex))
1472 SR.createDeadDef(DefIndex, Alloc);
1473 MaxMask &= ~SR.LaneMask;
1475 if (MaxMask.
any()) {
1493 bool UpdatedSubRanges =
false;
1498 if ((SR.
LaneMask & DstMask).none()) {
1500 <<
"Removing undefined SubRange "
1505 UpdatedSubRanges =
true;
1517 if (UpdatedSubRanges)
1524 "Only expect virtual or physical registers in remat");
1527 CopyDstReg,
true ,
true ,
false ));
1559 for (
unsigned i = 0,
e = NewMIImplDefs.size();
i !=
e; ++
i) {
1577 UseMO.substPhysReg(DstReg, *
TRI);
1579 UseMO.setReg(DstReg);
1588 if (ToBeUpdated.
count(SrcReg))
1591 unsigned NumCopyUses = 0;
1593 if (UseMO.getParent()->isCopyLike())
1599 if (!DeadDefs.empty())
1600 eliminateDeadDefs();
1602 ToBeUpdated.
insert(SrcReg);
1620 unsigned SrcSubIdx = 0, DstSubIdx = 0;
1621 if(!
isMoveInstr(*
TRI, CopyMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
1630 if ((SR.
LaneMask & SrcMask).none())
1635 }
else if (SrcLI.
liveAt(Idx))
1643 assert(Seg !=
nullptr &&
"No segment for defining instruction");
1645 if (V->isPHIDef()) {
1646 CopyMI->
setDesc(
TII->get(TargetOpcode::IMPLICIT_DEF));
1669 if ((SR.
LaneMask & DstMask).none())
1691 if ((SR.
LaneMask & UseMask).none())
1699 isLive = DstLI.
liveAt(UseIdx);
1724 bool IsUndef =
true;
1726 if ((
S.LaneMask &
Mask).none())
1728 if (
S.liveAt(UseIdx)) {
1741 ShrinkMainRange =
true;
1745 void RegisterCoalescer::updateRegDefsUses(
Register SrcReg,
Register DstReg,
1750 if (DstInt && DstInt->
hasSubRanges() && DstReg != SrcReg) {
1756 if (
MI.isDebugInstr())
1759 addUndefFlag(*DstInt, UseIdx, MO,
SubReg);
1774 if (SrcReg == DstReg && !Visited.
insert(
UseMI).second)
1787 for (
unsigned i = 0,
e = Ops.size();
i !=
e; ++
i) {
1793 if (SubIdx && MO.
isDef())
1798 if (MO.
isUse() && !DstIsPhys) {
1817 addUndefFlag(*DstInt, UseIdx, MO, SubUseIdx);
1828 dbgs() <<
"\t\tupdated: ";
1841 LLVM_DEBUG(
dbgs() <<
"\tCan only merge into reserved registers.\n");
1850 dbgs() <<
"\tCannot join complex intervals into reserved register.\n");
1854 bool RegisterCoalescer::copyValueUndefInPredecessors(
1868 void RegisterCoalescer::setUndefOnPrunedSubRegUses(
LiveInterval &LI,
1875 if (SubRegIdx == 0 || MO.
isUndef())
1881 if (!
S.liveAt(Pos) && (PrunedLanes & SubRegMask).any()) {
1897 bool RegisterCoalescer::joinCopy(
MachineInstr *CopyMI,
bool &Again) {
1902 if (!
CP.setRegisters(CopyMI)) {
1907 if (
CP.getNewRC()) {
1910 unsigned SrcIdx =
CP.getSrcIdx();
1911 unsigned DstIdx =
CP.getDstIdx();
1912 if (
CP.isFlipped()) {
1917 CP.getNewRC(), *LIS)) {
1928 DeadDefs.push_back(CopyMI);
1929 eliminateDeadDefs();
1936 if (
MachineInstr *UndefMI = eliminateUndefCopy(CopyMI)) {
1937 if (UndefMI->isImplicitDef())
1939 deleteInstr(CopyMI);
1947 if (
CP.getSrcReg() ==
CP.getDstReg()) {
1949 LLVM_DEBUG(
dbgs() <<
"\tCopy already coalesced: " << LI <<
'\n');
1954 assert(ReadVNI &&
"No value before copy and no <undef> flag.");
1955 assert(ReadVNI != DefVNI &&
"Cannot read and define the same value.");
1966 SDefVNI =
S.MergeValueNumberInto(SDefVNI, SReadVNI);
1970 if (copyValueUndefInPredecessors(
S,
MBB, SLRQ)) {
1971 LLVM_DEBUG(
dbgs() <<
"Incoming sublane value is undef at copy\n");
1972 PrunedLanes |=
S.LaneMask;
1973 S.removeValNo(SDefVNI);
1979 if (PrunedLanes.
any()) {
1981 << PrunedLanes <<
'\n');
1982 setUndefOnPrunedSubRegUses(LI,
CP.getSrcReg(), PrunedLanes);
1987 deleteInstr(CopyMI);
1996 if (!canJoinPhys(
CP)) {
1999 bool IsDefCopy =
false;
2000 if (reMaterializeTrivialDef(
CP, CopyMI, IsDefCopy))
2013 dbgs() <<
"\tConsidering merging to "
2015 if (
CP.getDstIdx() &&
CP.getSrcIdx())
2027 ShrinkMainRange =
false;
2033 if (!joinIntervals(
CP)) {
2038 bool IsDefCopy =
false;
2039 if (reMaterializeTrivialDef(
CP, CopyMI, IsDefCopy))
2044 if (!
CP.isPartial() && !
CP.isPhys()) {
2045 bool Changed = adjustCopiesBackFrom(
CP, CopyMI);
2048 std::tie(Changed,
Shrink) = removeCopyByCommutingDef(
CP, CopyMI);
2050 deleteInstr(CopyMI);
2052 Register DstReg =
CP.isFlipped() ?
CP.getSrcReg() :
CP.getDstReg();
2064 if (!
CP.isPartial() && !
CP.isPhys())
2065 if (removePartialRedundancy(
CP, *CopyMI))
2076 if (
CP.isCrossClass()) {
2084 InflateRegs.push_back(
CP.getDstReg());
2089 ErasedInstrs.
erase(CopyMI);
2094 updateRegDefsUses(
CP.getDstReg(),
CP.getDstReg(),
CP.getDstIdx());
2095 updateRegDefsUses(
CP.getSrcReg(),
CP.getDstReg(),
CP.getSrcIdx());
2098 if (ShrinkMask.
any()) {
2101 if ((
S.LaneMask & ShrinkMask).none())
2113 if (ToBeUpdated.
count(
CP.getSrcReg()))
2114 ShrinkMainRange =
true;
2116 if (ShrinkMainRange) {
2131 dbgs() <<
"\tResult = ";
2146 assert(
CP.isPhys() &&
"Must be a physreg copy");
2151 assert(
RHS.containsOneValue() &&
"Invalid join with reserved register");
2177 !RegMaskUsable.
test(DstReg)) {
2190 if (
CP.isFlipped()) {
2230 if (
MI->readsRegister(DstReg,
TRI)) {
2240 <<
printReg(DstReg,
TRI) <<
" at " << CopyRegIdx <<
"\n");
2250 deleteInstr(CopyMI);
2338 const unsigned SubIdx;
2346 const bool SubRangeJoin;
2349 const bool TrackSubRegLiveness;
2365 enum ConflictResolution {
2397 ConflictResolution Resolution = CR_Keep;
2407 VNInfo *RedefVNI =
nullptr;
2410 VNInfo *OtherVNI =
nullptr;
2423 bool ErasableImplicitDef =
false;
2427 bool Pruned =
false;
2430 bool PrunedComputed =
false;
2437 bool Identical =
false;
2441 bool isAnalyzed()
const {
return WriteLanes.
any(); }
2453 std::pair<const VNInfo *, Register> followCopyChain(
const VNInfo *VNI)
const;
2455 bool valuesIdentical(
VNInfo *Value0,
VNInfo *Value1,
const JoinVals &Other)
const;
2464 ConflictResolution analyzeValue(
unsigned ValNo, JoinVals &Other);
2469 void computeAssignment(
unsigned ValNo, JoinVals &Other);
2487 taintExtent(
unsigned ValNo,
LaneBitmask TaintedLanes, JoinVals &Other,
2500 bool isPrunedValue(
unsigned ValNo, JoinVals &Other);
2506 bool TrackSubRegLiveness)
2507 : LR(LR),
Reg(
Reg), SubIdx(SubIdx), LaneMask(LaneMask),
2508 SubRangeJoin(SubRangeJoin), TrackSubRegLiveness(TrackSubRegLiveness),
2509 NewVNInfo(newVNInfo),
CP(cp), LIS(lis), Indexes(LIS->getSlotIndexes()),
2510 TRI(
TRI), Assignments(LR.getNumValNums(), -1),
2511 Vals(LR.getNumValNums()) {}
2515 bool mapValues(JoinVals &Other);
2519 bool resolveConflicts(JoinVals &Other);
2539 void pruneMainSegments(
LiveInterval &LI,
bool &ShrinkMainRange);
2550 void removeImplicitDefs();
2553 const int *getAssignments()
const {
return Assignments.data(); }
2556 ConflictResolution getResolution(
unsigned Num)
const {
2557 return Vals[Num].Resolution;
2577 std::pair<const VNInfo *, Register>
2578 JoinVals::followCopyChain(
const VNInfo *VNI)
const {
2584 assert(
MI &&
"No defining instruction");
2585 if (!
MI->isFullCopy())
2586 return std::make_pair(VNI, TrackReg);
2587 Register SrcReg =
MI->getOperand(1).getReg();
2589 return std::make_pair(VNI, TrackReg);
2604 if ((SMask & LaneMask).none())
2612 return std::make_pair(VNI, TrackReg);
2615 if (ValueIn ==
nullptr) {
2622 return std::make_pair(
nullptr, SrcReg);
2627 return std::make_pair(VNI, TrackReg);
2630 bool JoinVals::valuesIdentical(
VNInfo *Value0,
VNInfo *Value1,
2631 const JoinVals &Other)
const {
2634 std::tie(Orig0, Reg0) = followCopyChain(Value0);
2635 if (Orig0 == Value1 && Reg0 ==
Other.Reg)
2640 std::tie(Orig1, Reg1) =
Other.followCopyChain(Value1);
2644 if (Orig0 ==
nullptr || Orig1 ==
nullptr)
2645 return Orig0 == Orig1 && Reg0 == Reg1;
2651 return Orig0->
def == Orig1->
def && Reg0 == Reg1;
2654 JoinVals::ConflictResolution
2655 JoinVals::analyzeValue(
unsigned ValNo, JoinVals &Other) {
2656 Val &V = Vals[ValNo];
2657 assert(!V.isAnalyzed() &&
"Value has already been analyzed!");
2669 :
TRI->getSubRegIndexLaneMask(SubIdx);
2670 V.ValidLanes = V.WriteLanes = Lanes;
2679 V.ErasableImplicitDef =
true;
2683 V.ValidLanes = V.WriteLanes = computeWriteLanes(
DefMI, Redef);
2702 assert((TrackSubRegLiveness || V.RedefVNI) &&
2703 "Instruction is reading nonexistent value");
2704 if (V.RedefVNI !=
nullptr) {
2705 computeAssignment(V.RedefVNI->id, Other);
2706 V.ValidLanes |= Vals[V.RedefVNI->id].ValidLanes;
2718 V.ErasableImplicitDef =
true;
2735 if (OtherVNI->def < VNI->
def)
2736 Other.computeAssignment(OtherVNI->id, *
this);
2737 else if (VNI->
def < OtherVNI->def && OtherLRQ.
valueIn()) {
2740 V.OtherVNI = OtherLRQ.
valueIn();
2741 return CR_Impossible;
2743 V.OtherVNI = OtherVNI;
2744 Val &OtherV =
Other.Vals[OtherVNI->id];
2746 if (!OtherV.isAnalyzed())
2753 if ((V.ValidLanes & OtherV.ValidLanes).any())
2755 return CR_Impossible;
2761 V.OtherVNI = OtherLRQ.
valueIn();
2770 Other.computeAssignment(V.OtherVNI->id, *
this);
2771 Val &OtherV =
Other.Vals[V.OtherVNI->id];
2773 if (OtherV.ErasableImplicitDef) {
2783 LLVM_DEBUG(
dbgs() <<
"IMPLICIT_DEF defined at " << V.OtherVNI->def
2786 <<
", keeping it.\n");
2787 OtherV.ErasableImplicitDef =
false;
2790 OtherV.ValidLanes &= ~OtherV.WriteLanes;
2805 if (
CP.isCoalescable(
DefMI)) {
2808 V.ValidLanes &= ~V.WriteLanes | OtherV.ValidLanes;
2823 valuesIdentical(VNI, V.OtherVNI, Other)) {
2846 if ((V.WriteLanes & OtherV.ValidLanes).none())
2859 "Only early clobber defs can overlap a kill");
2860 return CR_Impossible;
2868 return CR_Impossible;
2870 if (TrackSubRegLiveness) {
2875 if (!OtherLI.hasSubRanges()) {
2877 return (OtherMask & V.WriteLanes).
none() ? CR_Replace : CR_Impossible;
2886 if ((OtherMask & V.WriteLanes).none())
2889 auto OtherSRQ = OtherSR.Query(VNI->
def);
2890 if (OtherSRQ.valueIn() && OtherSRQ.endPoint() > VNI->
def) {
2892 return CR_Impossible;
2905 return CR_Impossible;
2914 return CR_Unresolved;
2917 void JoinVals::computeAssignment(
unsigned ValNo, JoinVals &Other) {
2918 Val &V = Vals[ValNo];
2919 if (V.isAnalyzed()) {
2922 assert(Assignments[ValNo] != -1 &&
"Bad recursion?");
2925 switch ((V.Resolution = analyzeValue(ValNo, Other))) {
2929 assert(V.OtherVNI &&
"OtherVNI not assigned, can't merge.");
2930 assert(
Other.Vals[V.OtherVNI->id].isAnalyzed() &&
"Missing recursion");
2931 Assignments[ValNo] =
Other.Assignments[V.OtherVNI->id];
2935 << V.OtherVNI->def <<
" --> @"
2936 << NewVNInfo[Assignments[ValNo]]->def <<
'\n');
2939 case CR_Unresolved: {
2941 assert(V.OtherVNI &&
"OtherVNI not assigned, can't prune");
2942 Val &OtherV =
Other.Vals[V.OtherVNI->id];
2945 if (OtherV.ErasableImplicitDef &&
2946 TrackSubRegLiveness &&
2947 (OtherV.WriteLanes & ~V.ValidLanes).any()) {
2948 LLVM_DEBUG(
dbgs() <<
"Cannot erase implicit_def with missing values\n");
2950 OtherV.ErasableImplicitDef =
false;
2957 OtherV.Pruned =
true;
2962 Assignments[ValNo] = NewVNInfo.size();
2968 bool JoinVals::mapValues(JoinVals &Other) {
2970 computeAssignment(
i, Other);
2971 if (Vals[
i].Resolution == CR_Impossible) {
2981 taintExtent(
unsigned ValNo,
LaneBitmask TaintedLanes, JoinVals &Other,
2989 assert(OtherI !=
Other.LR.end() &&
"No conflict?");
2994 if (End >= MBBEnd) {
2996 << OtherI->valno->id <<
'@' << OtherI->start <<
'\n');
3000 << OtherI->valno->id <<
'@' << OtherI->start <<
" to "
3005 TaintExtent.push_back(std::make_pair(End, TaintedLanes));
3008 if (++OtherI ==
Other.LR.end() || OtherI->start >= MBBEnd)
3012 const Val &OV =
Other.Vals[OtherI->valno->id];
3013 TaintedLanes &= ~OV.WriteLanes;
3016 }
while (TaintedLanes.
any());
3022 if (
MI.isDebugOrPseudoInstr())
3036 bool JoinVals::resolveConflicts(JoinVals &Other) {
3039 assert(V.Resolution != CR_Impossible &&
"Unresolvable conflict");
3040 if (V.Resolution != CR_Unresolved)
3049 assert(V.OtherVNI &&
"Inconsistent conflict resolution.");
3051 const Val &OtherV =
Other.Vals[V.OtherVNI->id];
3056 LaneBitmask TaintedLanes = V.WriteLanes & OtherV.ValidLanes;
3058 if (!taintExtent(
i, TaintedLanes, Other, TaintExtent))
3062 assert(!TaintExtent.empty() &&
"There should be at least one conflict.");
3075 "Interference ends on VNI->def. Should have been handled earlier");
3078 assert(LastMI &&
"Range must end at a proper instruction");
3079 unsigned TaintNum = 0;
3082 if (usesLanes(*
MI,
Other.Reg,
Other.SubIdx, TaintedLanes)) {
3087 if (&*
MI == LastMI) {
3088 if (++TaintNum == TaintExtent.size())
3091 assert(LastMI &&
"Range must end at a proper instruction");
3092 TaintedLanes = TaintExtent[TaintNum].second;
3098 V.Resolution = CR_Replace;
3104 bool JoinVals::isPrunedValue(
unsigned ValNo, JoinVals &Other) {
3105 Val &V = Vals[ValNo];
3106 if (V.Pruned || V.PrunedComputed)
3109 if (V.Resolution != CR_Erase && V.Resolution != CR_Merge)
3114 V.PrunedComputed =
true;
3115 V.Pruned =
Other.isPrunedValue(V.OtherVNI->id, *
this);
3119 void JoinVals::pruneValues(JoinVals &Other,
3121 bool changeInstrs) {
3124 switch (Vals[
i].Resolution) {
3134 Val &OtherV =
Other.Vals[Vals[
i].OtherVNI->id];
3135 bool EraseImpDef = OtherV.ErasableImplicitDef &&
3136 OtherV.Resolution == CR_Keep;
3137 if (!
Def.isBlock()) {
3154 EndPoints.push_back(
Def);
3157 <<
": " <<
Other.LR <<
'\n');
3162 if (isPrunedValue(
i, Other)) {
3169 <<
Def <<
": " << LR <<
'\n');
3227 bool DidPrune =
false;
3232 if (V.Resolution != CR_Erase &&
3233 (V.Resolution != CR_Keep || !V.ErasableImplicitDef || !V.Pruned))
3240 OtherDef = V.OtherVNI->def;
3251 if (ValueOut !=
nullptr && (Q.
valueIn() ==
nullptr ||
3252 (V.Identical && V.Resolution == CR_Erase &&
3253 ValueOut->
def ==
Def))) {
3255 <<
" at " <<
Def <<
"\n");
3262 if (V.Identical &&
S.Query(OtherDef).valueOutOrDead()) {
3272 ShrinkMask |=
S.LaneMask;
3286 ShrinkMask |=
S.LaneMask;
3304 void JoinVals::pruneMainSegments(
LiveInterval &LI,
bool &ShrinkMainRange) {
3308 if (Vals[
i].Resolution != CR_Keep)
3313 Vals[
i].Pruned =
true;
3314 ShrinkMainRange =
true;
3318 void JoinVals::removeImplicitDefs() {
3321 if (V.Resolution != CR_Keep || !V.ErasableImplicitDef || !V.Pruned)
3337 switch (Vals[
i].Resolution) {
3342 if (!Vals[
i].ErasableImplicitDef || !Vals[
i].Pruned)
3354 if (LI !=
nullptr) {
3393 std::prev(
S)->end = NewEnd;
3397 dbgs() <<
"\t\tremoved " <<
i <<
'@' <<
Def <<
": " << LR <<
'\n';
3399 dbgs() <<
"\t\t LHS = " << *LI <<
'\n';
3406 assert(
MI &&
"No instruction to erase");
3410 Reg !=
CP.getDstReg())
3411 ShrinkRegs.push_back(
Reg);
3416 MI->eraseFromParent();
3429 JoinVals RHSVals(RRange,
CP.getSrcReg(),
CP.getSrcIdx(), LaneMask,
3430 NewVNInfo,
CP, LIS,
TRI,
true,
true);
3431 JoinVals LHSVals(LRange,
CP.getDstReg(),
CP.getDstIdx(), LaneMask,
3432 NewVNInfo,
CP, LIS,
TRI,
true,
true);
3439 if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals)) {
3444 if (!LHSVals.resolveConflicts(RHSVals) ||
3445 !RHSVals.resolveConflicts(LHSVals)) {
3456 LHSVals.pruneValues(RHSVals, EndPoints,
false);
3457 RHSVals.pruneValues(LHSVals, EndPoints,
false);
3459 LHSVals.removeImplicitDefs();
3460 RHSVals.removeImplicitDefs();
3466 LRange.
join(RRange, LHSVals.getAssignments(), RHSVals.getAssignments(),
3470 <<
' ' << LRange <<
"\n");
3471 if (EndPoints.empty())
3477 dbgs() <<
"\t\trestoring liveness to " << EndPoints.size() <<
" points: ";
3478 for (
unsigned i = 0,
n = EndPoints.size();
i !=
n; ++
i) {
3479 dbgs() << EndPoints[
i];
3483 dbgs() <<
": " << LRange <<
'\n';
3488 void RegisterCoalescer::mergeSubRangeInto(
LiveInterval &LI,
3492 unsigned ComposeSubRegIdx) {
3498 SR.assign(ToMerge, Allocator);
3501 LiveRange RangeCopy(ToMerge, Allocator);
3502 joinSubRegRanges(SR, RangeCopy, SR.LaneMask, CP);
3508 bool RegisterCoalescer::isHighCostLiveInterval(
LiveInterval &LI) {
3511 auto &Counter = LargeLIVisitCounter[LI.
reg()];
3525 NewVNInfo,
CP, LIS,
TRI,
false, TrackSubRegLiveness);
3527 NewVNInfo,
CP, LIS,
TRI,
false, TrackSubRegLiveness);
3531 if (isHighCostLiveInterval(
LHS) || isHighCostLiveInterval(
RHS))
3536 if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals))
3540 if (!LHSVals.resolveConflicts(RHSVals) || !RHSVals.resolveConflicts(LHSVals))
3544 if (
RHS.hasSubRanges() ||
LHS.hasSubRanges()) {
3549 unsigned DstIdx =
CP.getDstIdx();
3550 if (!
LHS.hasSubRanges()) {
3556 }
else if (DstIdx != 0) {
3567 unsigned SrcIdx =
CP.getSrcIdx();
3568 if (!
RHS.hasSubRanges()) {
3576 mergeSubRangeInto(
LHS, R,
Mask,
CP, DstIdx);
3583 LHSVals.pruneMainSegments(
LHS, ShrinkMainRange);
3585 LHSVals.pruneSubRegValues(
LHS, ShrinkMask);
3586 RHSVals.pruneSubRegValues(
LHS, ShrinkMask);
3594 LHSVals.pruneValues(RHSVals, EndPoints,
true);
3595 RHSVals.pruneValues(LHSVals, EndPoints,
true);
3600 LHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs, &
LHS);
3601 RHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs);
3602 while (!ShrinkRegs.empty())
3606 checkMergingChangesDbgValues(
CP,
LHS, LHSVals,
RHS, RHSVals);
3610 auto RegIt = RegToPHIIdx.
find(
CP.getSrcReg());
3611 if (RegIt != RegToPHIIdx.
end()) {
3613 for (
unsigned InstID : RegIt->second) {
3614 auto PHIIt = PHIValToPos.
find(InstID);
3619 auto LII =
RHS.find(
SI);
3620 if (LII ==
RHS.end() || LII->start >
SI)
3635 if (
CP.getSrcIdx() != 0 ||
CP.getDstIdx() != 0)
3638 if (PHIIt->second.SubReg && PHIIt->second.SubReg !=
CP.getSrcIdx())
3642 PHIIt->second.Reg =
CP.getDstReg();
3646 if (
CP.getSrcIdx() != 0)
3647 PHIIt->second.SubReg =
CP.getSrcIdx();
3653 auto InstrNums = RegIt->second;
3654 RegToPHIIdx.
erase(RegIt);
3658 RegIt = RegToPHIIdx.
find(
CP.getDstReg());
3659 if (RegIt != RegToPHIIdx.
end())
3660 RegIt->second.insert(RegIt->second.end(), InstrNums.begin(),
3663 RegToPHIIdx.
insert({
CP.getDstReg(), InstrNums});
3667 LHS.join(
RHS, LHSVals.getAssignments(), RHSVals.getAssignments(), NewVNInfo);
3675 if (!EndPoints.empty()) {
3679 dbgs() <<
"\t\trestoring liveness to " << EndPoints.size() <<
" points: ";
3680 for (
unsigned i = 0,
n = EndPoints.size();
i !=
n; ++
i) {
3681 dbgs() << EndPoints[
i];
3685 dbgs() <<
": " <<
LHS <<
'\n';
3694 return CP.isPhys() ? joinReservedPhysReg(
CP) : joinVirtRegs(
CP);
3705 for (
auto *
X : ToInsert) {
3706 for (
const auto &
Op :
X->debug_operands()) {
3707 if (
Op.isReg() &&
Op.getReg().isVirtual())
3708 DbgVRegToValues[
Op.getReg()].push_back({
Slot,
X});
3718 for (
auto &
MBB : MF) {
3721 for (
auto &
MI :
MBB) {
3722 if (
MI.isDebugValue()) {
3724 return MO.isReg() && MO.getReg().isVirtual();
3726 ToInsert.push_back(&
MI);
3727 }
else if (!
MI.isDebugOrPseudoInstr()) {
3729 CloseNewDVRange(CurrentSlot);
3738 for (
auto &Pair : DbgVRegToValues)
3742 void RegisterCoalescer::checkMergingChangesDbgValues(
CoalescerPair &
CP,
3746 JoinVals &RHSVals) {
3748 checkMergingChangesDbgValuesImpl(
Reg,
RHS,
LHS, LHSVals);
3752 checkMergingChangesDbgValuesImpl(
Reg,
LHS,
RHS, RHSVals);
3765 PerformScan(
CP.getSrcReg(), ScanForSrcReg);
3766 PerformScan(
CP.getDstReg(), ScanForDstReg);
3769 void RegisterCoalescer::checkMergingChangesDbgValuesImpl(
Register Reg,
3772 JoinVals &RegVals) {
3774 auto VRegMapIt = DbgVRegToValues.
find(
Reg);
3775 if (VRegMapIt == DbgVRegToValues.
end())
3778 auto &DbgValueSet = VRegMapIt->second;
3779 auto DbgValueSetIt = DbgValueSet.begin();
3780 auto SegmentIt = OtherLR.
begin();
3782 bool LastUndefResult =
false;
3787 auto ShouldUndef = [&RegVals, &
RegLR, &LastUndefResult,
3792 if (LastUndefIdx == Idx)
3793 return LastUndefResult;
3799 auto OtherIt =
RegLR.find(Idx);
3800 if (OtherIt ==
RegLR.end())
3809 auto Resolution = RegVals.getResolution(OtherIt->valno->id);
3810 LastUndefResult = Resolution != JoinVals::CR_Keep &&
3811 Resolution != JoinVals::CR_Erase;
3813 return LastUndefResult;
3819 while (DbgValueSetIt != DbgValueSet.end() && SegmentIt != OtherLR.
end()) {
3820 if (DbgValueSetIt->first < SegmentIt->end) {
3823 if (DbgValueSetIt->first >= SegmentIt->start) {
3824 bool HasReg = DbgValueSetIt->second->hasDebugOperandForReg(
Reg);
3825 bool ShouldUndefReg = ShouldUndef(DbgValueSetIt->first);
3826 if (HasReg && ShouldUndefReg) {
3828 DbgValueSetIt->second->setDebugValueUndef();
3842 struct MBBPriorityInfo {
3848 :
MBB(mbb),
Depth(depth), IsSplit(issplit) {}
3858 const MBBPriorityInfo *
RHS) {
3860 if (
LHS->Depth !=
RHS->Depth)
3861 return LHS->Depth >
RHS->Depth ? -1 : 1;
3864 if (
LHS->IsSplit !=
RHS->IsSplit)
3865 return LHS->IsSplit ? -1 : 1;
3869 unsigned cl =
LHS->MBB->pred_size() +
LHS->MBB->succ_size();
3870 unsigned cr =
RHS->MBB->pred_size() +
RHS->MBB->succ_size();
3872 return cl > cr ? -1 : 1;
3875 return LHS->MBB->getNumber() <
RHS->MBB->getNumber() ? -1 : 1;
3880 if (!Copy->isCopy())
3883 if (Copy->getOperand(1).isUndef())
3886 Register SrcReg = Copy->getOperand(1).getReg();
3887 Register DstReg = Copy->getOperand(0).getReg();
3896 void RegisterCoalescer::lateLiveIntervalUpdate() {
3902 if (!DeadDefs.empty())
3903 eliminateDeadDefs();
3905 ToBeUpdated.clear();
3908 bool RegisterCoalescer::
3910 bool Progress =
false;
3933 assert(Copy.isCopyLike());
3936 if (&
MI != &Copy &&
MI.isCopyLike())
3941 bool RegisterCoalescer::applyTerminalRule(
const MachineInstr &Copy)
const {
3946 unsigned SrcSubReg = 0, DstSubReg = 0;
3947 if (!
isMoveInstr(*
TRI, &Copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
3968 if (&
MI == &Copy || !
MI.isCopyLike() ||
MI.getParent() != OrigBB)
3971 unsigned OtherSrcSubReg = 0, OtherSubReg = 0;
3972 if (!
isMoveInstr(*
TRI, &Copy, OtherSrcReg, OtherReg, OtherSrcSubReg,
3975 if (OtherReg == SrcReg)
3976 OtherReg = OtherSrcReg;
3997 const unsigned PrevSize = WorkList.size();
3998 if (JoinGlobalCopies) {
4006 if (!
MI.isCopyLike())
4008 bool ApplyTerminalRule = applyTerminalRule(
MI);
4010 if (ApplyTerminalRule)
4011 LocalTerminals.push_back(&
MI);
4013 LocalWorkList.push_back(&
MI);
4015 if (ApplyTerminalRule)
4016 GlobalTerminals.push_back(&
MI);
4018 WorkList.push_back(&
MI);
4022 LocalWorkList.
append(LocalTerminals.begin(), LocalTerminals.end());
4023 WorkList.
append(GlobalTerminals.begin(), GlobalTerminals.end());
4028 if (MII.isCopyLike()) {
4029 if (applyTerminalRule(MII))
4030 Terminals.push_back(&MII);
4032 WorkList.push_back(&MII);
4035 WorkList.
append(Terminals.begin(), Terminals.end());
4041 CurrList(WorkList.begin() + PrevSize, WorkList.end());
4042 if (copyCoalesceWorkList(CurrList))
4044 nullptr), WorkList.end());
4047 void RegisterCoalescer::coalesceLocals() {
4048 copyCoalesceWorkList(LocalWorkList);
4049 for (
unsigned j = 0, je = LocalWorkList.size();
j != je; ++
j) {
4050 if (LocalWorkList[
j])
4051 WorkList.push_back(LocalWorkList[
j]);
4053 LocalWorkList.
clear();
4056 void RegisterCoalescer::joinAllIntervals() {
4057 LLVM_DEBUG(
dbgs() <<
"********** JOINING INTERVALS ***********\n");
4058 assert(WorkList.empty() && LocalWorkList.empty() &&
"Old data still around.");
4060 std::vector<MBBPriorityInfo> MBBs;
4061 MBBs.reserve(MF->size());
4063 MBBs.push_back(MBBPriorityInfo(&
MBB,
Loops->getLoopDepth(&
MBB),
4070 for (MBBPriorityInfo &
MBB : MBBs) {
4072 if (JoinGlobalCopies &&
MBB.Depth < CurrDepth) {
4074 CurrDepth =
MBB.Depth;
4076 copyCoalesceInMBB(
MBB.MBB);
4078 lateLiveIntervalUpdate();
4083 while (copyCoalesceWorkList(WorkList))
4085 lateLiveIntervalUpdate();
4088 void RegisterCoalescer::releaseMemory() {
4089 ErasedInstrs.
clear();
4092 InflateRegs.
clear();
4093 LargeLIVisitCounter.
clear();
4097 LLVM_DEBUG(
dbgs() <<
"********** SIMPLE REGISTER COALESCING **********\n"
4098 <<
"********** Function: " << fn.
getName() <<
'\n');
4110 dbgs() <<
"* Skipped as it exposes funcions that returns twice.\n");
4119 LIS = &getAnalysis<LiveIntervals>();
4120 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
4121 Loops = &getAnalysis<MachineLoopInfo>();
4130 for (
const auto &DebugPHI : MF->DebugPHIPositions) {
4133 unsigned SubReg = DebugPHI.second.SubReg;
4136 PHIValToPos.
insert(std::make_pair(DebugPHI.first,
P));
4137 RegToPHIIdx[
Reg].push_back(DebugPHI.first);
4146 MF->verify(
this,
"Before register coalescing");
4148 DbgVRegToValues.
clear();
4149 DbgMergedVRegNums.
clear();
4150 buildVRegToDbgValueMap(fn);
4166 for (
unsigned i = 0,
e = InflateRegs.size();
i !=
e; ++
i) {
4187 assert((
S.LaneMask & ~MaxMask).none());
4197 for (
auto &
p : MF->DebugPHIPositions) {
4198 auto it = PHIValToPos.
find(
p.first);
4200 p.second.Reg =
it->second.Reg;
4201 p.second.SubReg =
it->second.SubReg;
4204 PHIValToPos.
clear();
4205 RegToPHIIdx.
clear();
4209 MF->verify(
this,
"After register coalescing");
iterator find(SlotIndex Pos)
find - Return an iterator pointing to the first segment that ends after Pos, or end().
VNInfo * valueDefined() const
Return the value defined by this instruction, if any.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
void array_pod_sort(IteratorTy Start, IteratorTy End)
array_pod_sort - This sorts an array with the specified start and end extent.
unsigned succ_size() const
void computeSubRangeUndefs(SmallVectorImpl< SlotIndex > &Undefs, LaneBitmask LaneMask, const MachineRegisterInfo &MRI, const SlotIndexes &Indexes) const
For a given lane mask LaneMask, compute indexes at which the lane is marked undefined by subregister ...
void join(LiveRange &Other, const int *ValNoAssignments, const int *RHSValNoAssignments, SmallVectorImpl< VNInfo * > &NewVNInfo)
join - Join two live ranges (this, and other) together.
This is an optimization pass for GlobalISel generic memory operations.
iterator erase(const_iterator CI)
bool isImplicitDef() const
auto upper_bound(R &&Range, T &&Value)
Provide wrappers to std::upper_bound which take ranges instead of having to pass begin/end explicitly...
into xmm2 addss xmm2 xmm1 xmm3 addss xmm3 movaps xmm0 unpcklps xmm0 ret seems silly when it could just be one addps Expand libm rounding functions main should enable SSE DAZ mode and other fast SSE modes Think about doing i64 math in SSE regs on x86 This testcase should have no SSE instructions in it
MachineInstrBuilder & UseMI
bool shouldTrackSubRegLiveness(const TargetRegisterClass &RC) const
Returns true if liveness for register class RC should be tracked at the subregister level.
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
SlotIndexes * getSlotIndexes() const
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
bool isValid() const
Returns true if this is a valid index.
SubRange * createSubRange(BumpPtrAllocator &Allocator, LaneBitmask LaneMask)
Creates a new empty subregister live range.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
bool isSafeToMove(AAResults *AA, bool &SawStore) const
Return true if it is safe to move this instruction.
bool recomputeRegClass(Register Reg)
recomputeRegClass - Try to find a legal super-class of Reg's register class that still satisfies the ...
This currently compiles esp xmm0 movsd esp eax eax esp ret We should use not the dag combiner This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper which DAGCombine can t really do The code for turning x load into a single vector load is target independent and should be moved to the dag combiner The code for turning x load into a vector load can only handle a direct load from a global or a direct load from the stack It should be generalized to handle any load from P
virtual const TargetInstrInfo * getInstrInfo() const
void setIsKill(bool Val=true)
VNInfo * valueOutOrDead() const
Returns the value alive at the end of the instruction, if any.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
simple register Simple Register false static LLVM_NODISCARD bool isMoveInstr(const TargetRegisterInfo &tri, const MachineInstr *MI, Register &Src, Register &Dst, unsigned &SrcSub, unsigned &DstSub)
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction associated with the given index.
bool exposesReturnsTwice() const
exposesReturnsTwice - Returns true if the function calls setjmp or any other similar functions with a...
Reg
All possible values of the reg field in the ModR/M byte.
This represents a simple continuous liveness interval for a value.
MachineBasicBlock * intervalIsInOneMBB(const LiveInterval &LI) const
If LI is confined to a single basic block, return a pointer to that block.
defusechain_iterator - This class provides iterator support for machine operands in the function that...
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
virtual void updateRegAllocHint(Register Reg, Register NewReg, MachineFunction &MF) const
A callback to allow target a chance to update register allocation hints when a register is "changed" ...
use_instr_nodbg_iterator use_instr_nodbg_begin(Register RegNo) const
bool allDefsAreDead() const
Return true if all the defs of this instruction are dead.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
SlotIndex def
The index of the defining instruction.
bool erase(const KeyT &Val)
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
void removeInterval(Register Reg)
Interval removal.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
static bool isSplitEdge(const MachineBasicBlock *MBB)
Return true if this block should be vacated by the coalescer to eliminate branches.
const_iterator end(StringRef path)
Get end iterator over path.
void eliminateDeadDefs(SmallVectorImpl< MachineInstr * > &Dead, ArrayRef< Register > RegsBeingSpilled=None, AAResults *AA=nullptr)
eliminateDeadDefs - Try to delete machine instructions that are now dead (allDefsAreDead returns true...
void eraseInstrs(ArrayRef< MachineInstr * > DeadInstrs, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
static bool isLiveThrough(const LiveQueryResult Q)
iterator_range< use_iterator > use_operands(Register Reg) const
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
bool isEarlyClobber() const
isEarlyClobber - Returns true if this is an early-clobber slot.
reg_instr_iterator reg_instr_begin(Register RegNo) const
SlotIndex getMBBEndIdx(unsigned Num) const
Returns the last index in the given basic block number.
Expected< ExpressionValue > max(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
LLVM_NODISCARD T pop_back_val()
Result of a LiveRange query.
std::pair< iterator, bool > insert(const ValueT &V)
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
unsigned const TargetRegisterInfo * TRI
the resulting code requires compare and branches when and if * p
MachineBasicBlock * getMBBFromIndex(SlotIndex index) const
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx=nullptr) const
Return true if the use operand of the specified index is tied to a def operand.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
void print(raw_ostream &O, const Module *=nullptr) const override
Implement the dump method.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isAsCheapAsAMove(const MachineInstr &MI) const override
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
iterator_range< reg_instr_nodbg_iterator > reg_nodbg_instructions(Register Reg) const
bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
unsigned pred_size() const
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
TargetInstrInfo - Interface to description of machine instruction set.
iterator_range< use_nodbg_iterator > use_nodbg_operands(Register Reg) const
bool liveAt(SlotIndex index) const
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
static int compareMBBPriority(const MBBPriorityInfo *LHS, const MBBPriorityInfo *RHS)
C-style comparator that sorts first based on the loop depth of the basic block (the unsigned),...
LaneBitmask getSubRegIndexLaneMask(unsigned SubIdx) const
Return a bitmask representing the parts of a register that are covered by SubIdx.
SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const
Return the first index in the given basic block.
LiveRange * getCachedRegUnit(unsigned Unit)
Return the live range for register unit Unit if it has already been computed, or nullptr if it hasn't...
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
bool hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const
Returns true if VNI is killed by any PHI-def values in LI.
const MachineOperand & getOperand(unsigned i) const
bool isReserved(MCRegister PhysReg) const
isReserved - Returns true when PhysReg is a reserved register.
static cl::opt< unsigned > LargeIntervalFreqThreshold("large-interval-freq-threshold", cl::Hidden, cl::desc("For a large interval, if it is coalesced with other live " "intervals many times more than the threshold, stop its " "coalescing to control the compile time. "), cl::init(100))
iterator addSegment(Segment S)
Add the specified Segment to this range, merging segments as appropriate.
void initializeRegisterCoalescerPass(PassRegistry &)
SlotIndex InsertMachineInstrInMaps(MachineInstr &MI)
void setSubReg(unsigned subReg)
static bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
static constexpr LaneBitmask getNone()
Represent the analysis usage information of a pass.
bool isDebugInstr() const
iterator_range< reg_nodbg_iterator > reg_nodbg_operands(Register Reg) const
static bool isDefInSubRange(LiveInterval &LI, SlotIndex Def)
Check if any of the subranges of LI contain a definition at Def.
void pruneValue(LiveRange &LR, SlotIndex Kill, SmallVectorImpl< SlotIndex > *EndPoints)
If LR has a live value at Kill, prune its live range by removing any liveness reachable from Kill.
void clearSubRanges()
Removes all subregister liveness information.
char & RegisterCoalescerID
RegisterCoalescer - This pass merges live ranges to eliminate copies.
const HexagonInstrInfo * TII
Describe properties that are true of each instruction in the target description file.
MachineOperand class - Representation of each machine instruction operand.
void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
STATISTIC(NumFunctions, "Total number of functions")
SlotIndex getInstructionIndex(const MachineInstr &MI, bool IgnoreBundle=false) const
Returns the base index for the given instruction.
This class implements an extremely fast bulk output stream that can only output to a stream.
Printable PrintLaneMask(LaneBitmask LaneMask)
Create Printable object to print LaneBitmasks on a raw_ostream.
bool flip()
Swap SrcReg and DstReg.
virtual const TargetRegisterClass * getMatchingSuperRegClass(const TargetRegisterClass *A, const TargetRegisterClass *B, unsigned Idx) const
Return a subclass of the specified register class A so that each register in it has a sub-register of...
LiveInterval - This class represents the liveness of a register, or stack slot.
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction for the given index, or null if the given index has no instruction associated...
static cl::opt< bool > EnableJoinSplits("join-splitedges", cl::desc("Coalesce copies on split edges (default=subtarget)"), cl::Hidden)
Temporary flag to test critical edge unsplitting.
MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
SlotIndex - An opaque wrapper around machine indexes.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
SlotIndex endPoint() const
Return the end point of the last live range segment to interact with the instruction,...
VNInfo * valueOut() const
Return the value leaving the instruction, if any.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
bool isCoalescable(const MachineInstr *) const
Return true if MI is a copy instruction that will become an identity copy after coalescing.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static reg_instr_iterator reg_instr_end()
INITIALIZE_PASS_BEGIN(RegisterCoalescer, "simple-register-coalescing", "Simple Register Coalescing", false, false) INITIALIZE_PASS_END(RegisterCoalescer
bool isValid() const
Check if the iterator is at the end of the list.
bool isProperSubClass(const TargetRegisterClass *RC) const
isProperSubClass - Returns true if RC has a legal super-class with more allocatable registers.
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
SlotIndex getIndexBefore(const MachineInstr &MI) const
getIndexBefore - Returns the index of the last indexed instruction before MI, or the start index of i...
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
static cl::opt< unsigned > LateRematUpdateThreshold("late-remat-update-threshold", cl::Hidden, cl::desc("During rematerialization for a copy, if the def instruction has " "many other copy uses to be rematerialized, delay the multiple " "separate live interval update work and do them all at once after " "all those rematerialization are done. It will save a lot of " "repeated work. "), cl::init(100))
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
void clearKillFlags(Register Reg) const
clearKillFlags - Iterate over all the uses of the given register and clear the kill flag from the Mac...
void removePhysRegDefAt(MCRegister Reg, SlotIndex Pos)
Remove value numbers and related live segments starting at position Pos that are part of any liverang...
Implements a dense probed hash-table based set.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
SlotIndex getBaseIndex() const
Returns the base index for associated with this index.
void runOnMachineFunction(const MachineFunction &MF)
runOnFunction - Prepare to answer questions about MF.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarily including Idx,...
SlotIndex getMBBStartIdx(unsigned Num) const
Returns the first index in the given basic block number.
void setIsDead(bool Val=true)
void removeEmptySubRanges()
Removes all subranges without any segments (subranges without segments are not considered valid and s...
void refineSubRanges(BumpPtrAllocator &Allocator, LaneBitmask LaneMask, std::function< void(LiveInterval::SubRange &)> Apply, const SlotIndexes &Indexes, const TargetRegisterInfo &TRI, unsigned ComposeSubRegIdx=0)
Refines the subranges to support LaneMask.
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
void substVirtReg(Register Reg, unsigned SubIdx, const TargetRegisterInfo &)
substVirtReg - Substitute the current register with the virtual subregister Reg:SubReg.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Representation of each machine instruction.
This class represents the liveness of a register, stack slot, etc.
SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const
Return the last index in the given basic block.
unsigned id
The ID number of this value.
const char * getRegClassName(const TargetRegisterClass *Class) const
Returns the name of the register class.
bool isCommutable(QueryType Type=IgnoreBundle) const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z,...
const TargetRegisterClass * getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA, const TargetRegisterClass *RCB, unsigned SubB, unsigned &PreA, unsigned &PreB) const
Find a common super-register class if it exists.
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
Allocate memory in an ever growing pool, as if by bump-pointer.
MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx, const TargetRegisterClass *RC) const
Return a super-register of the specified register Reg so its sub-register of index SubIdx is Reg.
unsigned getNumValNums() const
static cl::opt< cl::boolOrDefault > EnableGlobalCopies("join-globalcopies", cl::desc("Coalesce copies that span blocks (default=subtarget)"), cl::init(cl::BOU_UNSET), cl::Hidden)
Temporary flag to test global copy optimization.
constexpr bool any() const
initializer< Ty > init(const Ty &Val)
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
bool overlaps(const LiveRange &other) const
overlaps - Return true if the intersection of the two live ranges is not empty.
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
iterator_range< reg_iterator > reg_operands(Register Reg) const
iterator find(const_arg_type_t< KeyT > Val)
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
std::pair< bool, bool > readsWritesVirtualRegister(Register Reg, SmallVectorImpl< unsigned > *Ops=nullptr) const
Return a pair of bools (reads, writes) indicating if this instruction reads or writes Reg.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
bool isEarlyClobber() const
StandardInstrumentations SI(Debug, VerifyEach)
const TargetRegisterClass * getCommonSubClass(const TargetRegisterClass *A, const TargetRegisterClass *B) const
Find the largest common subclass of A and B.
MachineBasicBlock * getMBBFromIndex(SlotIndex index) const
Returns the basic block which the given index falls in.
bool shrinkToUses(LiveInterval *li, SmallVectorImpl< MachineInstr * > *dead=nullptr)
After removing some uses of a register, shrink its live range to just the remaining uses.
print Print MemDeps of function
bool checkRegMaskInterference(const LiveInterval &LI, BitVector &UsableRegs)
Test if LI is live across any register mask instructions, and compute a bit mask of physical register...
bool containsOneValue() const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Register getReg() const
getReg - Returns the register number.
A Module instance is used to store all the information related to an LLVM module.
iterator_range< pred_iterator > predecessors()
should just be implemented with a CLZ instruction Since there are other e that share this it would be best to implement this in a target independent as zero is the default value for the binary encoder e add r0 add r5 Register operands should be distinct That when the encoding does not require two syntactical operands to refer to the same register
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
LiveInterval & getInterval(Register Reg)
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
VNInfo * MergeValueNumberInto(VNInfo *V1, VNInfo *V2)
MergeValueNumberInto - This method is called when two value numbers are found to be equivalent.
iterator FindSegmentContaining(SlotIndex Idx)
Return an iterator to the segment that contains the specified index, or end() if there is none.
bool use_nodbg_empty(Register RegNo) const
use_nodbg_empty - Return true if there are no non-Debug instructions using the specified register.
bool isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx=nullptr) const
Given the index of a register def operand, check if the register def is tied to a source operand,...
std::error_code remove(const Twine &path, bool IgnoreNonExisting=true)
Remove path.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
VNInfo * createDeadDef(SlotIndex Def, VNInfo::Allocator &VNIAlloc)
createDeadDef - Make sure the range has a value defined at Def.
virtual bool shouldCoalesce(MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg, const TargetRegisterClass *DstRC, unsigned DstSubReg, const TargetRegisterClass *NewRC, LiveIntervals &LIS) const
Subtarget Hooks.
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
static bool definesFullReg(const MachineInstr &MI, Register Reg)
Returns true if MI defines the full vreg Reg, as opposed to just defining a subregister.
bool isEHPad() const
Returns true if the block is a landing pad.
void setPreservesCFG()
This function should be called by the pass, iff they do not:
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
constexpr bool none() const
AnalysisUsage & addPreservedID(const void *ID)
bool hasOneNonDBGUse(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register.
http eax xorl edx cl sete al setne dl sall cl
This would be a win on but not x86 or ppc64 Shrink
unsigned composeSubRegIndices(unsigned a, unsigned b) const
Return the subregister index you get from composing two subregister indices.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
void setIsUndef(bool Val=true)
A helper class for register coalescers.
self_iterator getIterator()
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
add sub stmia L5 ldr r0 bl L_printf $stub Instead of a and a wouldn t it be better to do three moves *Return an aggregate type is even return S
virtual bool enableJoinGlobalCopies() const
True if the subtarget should enable joining global copies.
const MachineBasicBlock * getParent() const
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
TargetSubtargetInfo - Generic base class for all target subtargets.
A live range for subregisters.
unsigned const MachineRegisterInfo * MRI
Wrapper class representing virtual and physical registers.
unsigned getSubReg() const
static bool isTerminalReg(Register DstReg, const MachineInstr &Copy, const MachineRegisterInfo *MRI)
Check if DstReg is a terminal node.
bool isConstantPhysReg(MCRegister PhysReg) const
Returns true if PhysReg is unallocatable and constant throughout the function.
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx, SlotIndex UseIdx) const
allUsesAvailableAt - Return true if all registers used by OrigMI at OrigIdx are also available with t...
bool test(unsigned Idx) const
static constexpr LaneBitmask getLane(unsigned Lane)
Segments::iterator iterator
LaneBitmask getMaxLaneMaskForVReg(Register Reg) const
Returns a mask covering all bits that can appear in lane masks of subregisters of the virtual registe...
simple register Simple Register Coalescing
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isCopyLike() const
Return true if the instruction behaves like a copy.
void extendToIndices(LiveRange &LR, ArrayRef< SlotIndex > Indices, ArrayRef< SlotIndex > Undefs)
Extend the live range LR to reach all points in Indices.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
constexpr bool all() const
LaneBitmask composeSubRegIndexLaneMask(unsigned IdxA, LaneBitmask Mask) const
Transforms a LaneMask computed for one subregister to the lanemask that would have been computed when...
#define LLVM_NODISCARD
LLVM_NODISCARD - Warn if a type or return value is discarded.
void sort(IteratorTy Start, IteratorTy End)
bool setRegisters(const MachineInstr *)
Set registers to match the copy instruction MI.
VNInfo * getValNumInfo(unsigned ValNo)
getValNumInfo - Returns pointer to the specified val#.
auto unique(Range &&R, Predicate P)
VNInfo - Value Number Information.
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
void setDebugLoc(DebugLoc DL)
Replace current source information with new such.
static cl::opt< unsigned > LargeIntervalSizeThreshold("large-interval-size-threshold", cl::Hidden, cl::desc("If the valnos size of an interval is larger than the threshold, " "it is regarded as a large interval. "), cl::init(100))
simple register coalescing
void verify() const
Walk the range and assert if any invariants fail to hold.
void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
static std::pair< bool, bool > addSegmentsWithValNo(LiveRange &Dst, VNInfo *DstValNo, const LiveRange &Src, const VNInfo *SrcValNo)
Copy segments with value number SrcValNo from liverange Src to live range @Dst and use value number D...
void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
SlotIndex getNextNonNullIndex(SlotIndex Index)
Returns the next non-null index, if one exists.
The location of a single variable, composed of an expression and 0 or more DbgValueLocEntries.
MCRegUnitRootIterator enumerates the root registers of a register unit.
int findRegisterDefOperandIdx(Register Reg, bool isDead=false, bool Overlap=false, const TargetRegisterInfo *TRI=nullptr) const
Returns the operand index that is a def of the specified register or -1 if it is not found.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isValid() const
isValid - returns true if this iterator is not yet at the end.
void RemoveMachineInstrFromMaps(MachineInstr &MI)
void splitSeparateComponents(LiveInterval &LI, SmallVectorImpl< LiveInterval * > &SplitLIs)
Split separate components in LiveInterval LI into separate intervals.
MachineInstrBuilder MachineInstrBuilder & DefMI
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
unsigned getNumOperands() const
Retuns the total number of operands.
char & MachineDominatorsID
MachineDominators - This pass is a machine dominators analysis pass.
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
bool hasSubRanges() const
Returns true if subregister liveness information is available.
const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
MCRegister getSubReg(MCRegister Reg, unsigned Idx) const
Returns the physical register number of sub-register "Index" for physical register RegNo.
void substPhysReg(MCRegister Reg, const TargetRegisterInfo &)
substPhysReg - Substitute the current register with the physical register Reg, taking any existing Su...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Callback methods for LiveRangeEdit owners.
bool isUnused() const
Returns true if this value is unused.
AnalysisUsage & addRequired()
bool hasInterval(Register Reg) const
void removeValNo(VNInfo *ValNo)
removeValNo - Remove all the segments defined by the specified value#.
const char * getSubRegIndexName(unsigned SubIdx) const
Return the human-readable symbolic target-specific name for the specified SubRegIndex.