#define DEBUG_TYPE "pre-RA-sched"

STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical register copies");
47 "Bottom-up register reduction list scheduling",
51 "Similar to list-burr but schedules in source "
52 "order when possible",
57 "Bottom-up register pressure aware list scheduling "
58 "which tries to balance latency and register pressure",
63 "Bottom-up register pressure aware list scheduling "
64 "which tries to balance ILP and register pressure",
static cl::opt<bool> DisableSchedCycles(
    "disable-sched-cycles", cl::Hidden, cl::init(false),
    cl::desc("Disable cycle-level precision during preRA scheduling"));
static cl::opt<bool> DisableSchedRegPressure(
    "disable-sched-reg-pressure", cl::Hidden, cl::init(false),
    cl::desc("Disable regpressure priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedLiveUses(
    "disable-sched-live-uses", cl::Hidden, cl::init(true),
    cl::desc("Disable live use priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedVRegCycle(
    "disable-sched-vrcycle", cl::Hidden, cl::init(false),
    cl::desc("Disable virtual register cycle interference checks"));
static cl::opt<bool> DisableSchedPhysRegJoin(
    "disable-sched-physreg-join", cl::Hidden, cl::init(false),
    cl::desc("Disable physreg def-use affinity"));
static cl::opt<bool> DisableSchedStalls(
    "disable-sched-stalls", cl::Hidden, cl::init(true),
    cl::desc("Disable no-stall priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedCriticalPath(
    "disable-sched-critical-path", cl::Hidden, cl::init(false),
    cl::desc("Disable critical path priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedHeight(
    "disable-sched-height", cl::Hidden, cl::init(false),
    cl::desc("Disable scheduled-height priority in sched=list-ilp"));
static cl::opt<bool> Disable2AddrHack(
    "disable-2addr-hack", cl::Hidden, cl::init(true),
    cl::desc("Disable scheduler's two-address hack"));
static cl::opt<int> MaxReorderWindow(
    "max-sched-reorder", cl::Hidden, cl::init(6),
    cl::desc("Number of instructions to allow ahead of the critical path "
             "in sched=list-ilp"));
static cl::opt<unsigned> AvgIPC(
    "sched-avg-ipc", cl::Hidden, cl::init(1),
    cl::desc("Average inst/cycle when no target itinerary exists."));
  std::vector<SUnit*> PendingQueue;

  unsigned MinAvailableCycle;

  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<SUnit*> LiveRegGens;

  ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
                    SchedulingPriorityQueue *availqueue,
                    CodeGenOpt::Level OptLevel)
    : ScheduleDAGSDNodes(mf),
      NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
      Topo(SUnits, nullptr) {

  ~ScheduleDAGRRList() override {
    delete AvailableQueue;
  }

  void Schedule() override;
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }

  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
    SU->removePred(D);
  }

  bool isReady(SUnit *SU) {
    return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
           AvailableQueue->isReady(SU);
  }
  void ReleasePred(SUnit *SU, const SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU);
  void ReleasePending();
  void AdvanceToCycle(unsigned NextCycle);
  void AdvancePastStalls(SUnit *SU);
  void EmitNode(SUnit *SU);
  void ScheduleNodeBottomUp(SUnit*);
  void CapturePred(SDep *PredEdge);
  void UnscheduleNodeBottomUp(SUnit*);
  void RestoreHazardCheckerBottomUp();

  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVectorImpl<SUnit*>&);
  void releaseInterferences(unsigned Reg = 0);

  SUnit *PickNodeToScheduleBottomUp();
  void ListScheduleBottomUp();
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = newSUnit(N);
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }
  bool forceUnitLatencies() const override {
    return !NeedLatency;
  }

static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
                          const TargetLowering *TLI,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          unsigned &RegClass, unsigned &Cost,
                          const MachineFunction &MF) {

    RegClass = RC->getID();

    unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();

    RegClass = RC->getID();

    unsigned Idx = RegDefPos.GetIdx();

    RegClass = RC->getID();
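/// Schedule - Schedule the DAG using bottom-up list scheduling.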
void ScheduleDAGRRList::Schedule() {
  DEBUG(dbgs() << "********** List Scheduling BB#" << BB->getNumber()
               << " '" << BB->getName() << "' **********\n");

  LiveRegDefs.resize(TRI->getNumRegs() + 1, nullptr);
  LiveRegGens.resize(TRI->getNumRegs() + 1, nullptr);
  CallSeqEndForStart.clear();
  assert(Interferences.empty() && LRegsMap.empty() && "stale Interferences");

  BuildSchedGraph(nullptr);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  ListScheduleBottomUp();

  AvailableQueue->releaseState();

      dbgs() << "*** Final schedule ***\n";
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
    dbgs() << "*** Scheduling failed! ***\n";
    dbgs() << " has been released too many times!\n";

  if (!forceUnitLatencies()) {

    if (Height < MinAvailableCycle)
      MinAvailableCycle = Height;

    if (isReady(PredSU)) {
      AvailableQueue->push(PredSU);

      PendingQueue.push_back(PredSU);
        goto found_chain_operand;
    found_chain_operand:;

    unsigned BestMaxNest = MaxNest;
      unsigned MyNestLevel = NestLevel;
      unsigned MyMaxNest = MaxNest;
                          MyNestLevel, MyMaxNest, TII))
        if (!Best || (MyMaxNest > BestMaxNest)) {
          BestMaxNest = MyMaxNest;
    MaxNest = BestMaxNest;

      MaxNest = std::max(MaxNest, NestLevel);
      assert(NestLevel != 0);

        goto found_chain_operand;
    found_chain_operand:;
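/// ReleasePredecessors - Call ReleasePred for each predecessor, then update
/// the register live-def/live-gen maps. LiveRegDefs is updated for a register
/// dependence even if the current SU also defines that register, so a chain of
/// two-address nodes forms one live range that must be scheduled together.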
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
      assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
             "interference on register dependence");
      LiveRegDefs[I->getReg()] = I->getSUnit();
      if (!LiveRegGens[I->getReg()]) {
        LiveRegGens[I->getReg()] = SU;

  unsigned CallResource = TRI->getNumRegs();
  if (!LiveRegDefs[CallResource])
      if (Node->isMachineOpcode() &&
          Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        unsigned NestLevel = 0;
        unsigned MaxNest = 0;

        CallSeqEndForStart[Def] = SU;

        LiveRegDefs[CallResource] = Def;
        LiveRegGens[CallResource] = SU;
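/// ReleasePending - Check to see if any of the pending instructions are ready
/// to issue. If so, add them to the available queue.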
void ScheduleDAGRRList::ReleasePending() {
    assert(PendingQueue.empty() && "pending instrs not allowed in this mode");

  if (AvailableQueue->empty())
    MinAvailableCycle = UINT_MAX;

  for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
    unsigned ReadyCycle = PendingQueue[i]->getHeight();
    if (ReadyCycle < MinAvailableCycle)
      MinAvailableCycle = ReadyCycle;

    if (!isReady(PendingQueue[i]))

    AvailableQueue->push(PendingQueue[i]);

    PendingQueue[i]->isPending = false;
    PendingQueue[i] = PendingQueue.back();
    PendingQueue.pop_back();
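/// AdvanceToCycle - Advance the current cycle to NextCycle, recording each
/// skipped cycle in the hazard recognizer (RecedeCycle) when it is enabled.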
void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
  if (NextCycle <= CurCycle)

  AvailableQueue->setCurCycle(NextCycle);
  if (!HazardRec->isEnabled()) {
    CurCycle = NextCycle;

    for (; CurCycle != NextCycle; ++CurCycle) {
      HazardRec->RecedeCycle();
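/// AdvancePastStalls - Move the current cycle up to the node's ready cycle,
/// then keep advancing while the hazard recognizer reports a stall, so the
/// node can actually be issued.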
void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {

  AdvanceToCycle(ReadyCycle);

    HazardRec->getHazardType(SU, -Stalls);

  AdvanceToCycle(CurCycle + Stalls);
void ScheduleDAGRRList::EmitNode(SUnit *SU) {
  if (!HazardRec->isEnabled())

           "This target-independent node should not be scheduled.");

  HazardRec->EmitInstruction(SU);
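/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it
/// to the Available queue.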
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");

  if (CurCycle < SU->getHeight())
          << "] pipeline stall!\n");

  AvailableQueue->scheduledNode(SU);

  if (!HazardRec->isEnabled() && AvgIPC < 2)
    AdvanceToCycle(CurCycle + 1);

  ReleasePredecessors(SU);

    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      LiveRegDefs[I->getReg()] = nullptr;
      LiveRegGens[I->getReg()] = nullptr;
      releaseInterferences(I->getReg());

  unsigned CallResource = TRI->getNumRegs();
  if (LiveRegDefs[CallResource] == SU)
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        LiveRegDefs[CallResource] = nullptr;
        LiveRegGens[CallResource] = nullptr;
        releaseInterferences(CallResource);

  if (HazardRec->isEnabled() || AvgIPC > 1) {
    if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
        || (!HazardRec->isEnabled() && IssueCount == AvgIPC))
      AdvanceToCycle(CurCycle + 1);
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
    AvailableQueue->remove(PredSU);

  assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
    if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      LiveRegDefs[I->getReg()] = nullptr;
      LiveRegGens[I->getReg()] = nullptr;
      releaseInterferences(I->getReg());

  unsigned CallResource = TRI->getNumRegs();
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
        LiveRegDefs[CallResource] = SU;
        LiveRegGens[CallResource] = CallSeqEndForStart[SU];

  if (LiveRegGens[CallResource] == SU)
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        LiveRegDefs[CallResource] = nullptr;
        LiveRegGens[CallResource] = nullptr;
        releaseInterferences(CallResource);

  for (auto &Succ : SU->Succs) {
    if (Succ.isAssignedRegDep()) {
      auto Reg = Succ.getReg();
      if (!LiveRegDefs[Reg])

      LiveRegDefs[Reg] = SU;

      if (!LiveRegGens[Reg]) {
        LiveRegGens[Reg] = Succ.getSUnit();
        for (auto &Succ2 : SU->Succs) {
          if (Succ2.isAssignedRegDep() && Succ2.getReg() == Reg &&
              Succ2.getSUnit()->getHeight() < LiveRegGens[Reg]->getHeight())
            LiveRegGens[Reg] = Succ2.getSUnit();

    PendingQueue.push_back(SU);

    AvailableQueue->push(SU);

  AvailableQueue->unscheduledNode(SU);
void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {

                            HazardRec->getMaxLookAhead());

  std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
  unsigned HazardCycle = (*I)->getHeight();
  for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
    for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
      HazardRec->RecedeCycle();
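/// BacktrackBottomUp - Unschedule previously scheduled nodes one at a time
/// until the interfering live-register generator BtSU is unscheduled, then
/// restore the hazard checker so SU can be scheduled without interference.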
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
    UnscheduleNodeBottomUp(OldSU);
    AvailableQueue->setCurCycle(CurCycle);

  assert(!SU->isSucc(OldSU) && "Something is wrong!");

  RestoreHazardCheckerBottomUp();

    if (SUNode->isOperandOf(N))
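/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node. If the node folds a load, try
/// unfolding it into a separate load SUnit first.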
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {

  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {

    MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());

    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))

    if (NewNodes.size() == 3)

    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();

    for (unsigned i = 0; i != NumVals; ++i)

    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),

    bool isNewLoad = true;

      LoadSU = CreateNewSUnit(LoadNode);

      InitNumRegDefsLeft(LoadSU);
      computeLatency(LoadSU);

    SUnit *NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");

    InitNumRegDefsLeft(NewSU);
    computeLatency(NewSU);

    for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
      const SDep &Pred = ChainPreds[i];
      RemovePred(SU, Pred);
        AddPred(LoadSU, Pred);
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
        AddPred(LoadSU, Pred);
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      RemovePred(SuccDep, D);
      AddPred(SuccDep, D);
      if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      RemovePred(SuccDep, D);
        AddPred(SuccDep, D);

    AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

  NewSU = CreateClone(SU);

    if (!I->isArtificial())

    if (I->isArtificial())
    SUnit *SuccSU = I->getSUnit();
      DelDeps.push_back(std::make_pair(SuccSU, D));

  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);
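/// InsertCopiesAndMoveSuccs - Insert register copies (CopyFromSU/CopyToSU) and
/// move all scheduled successors of SU onto the copies, freeing the physical
/// register Reg for the interfering use.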
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                                 const TargetRegisterClass *DestRC,
                                                 const TargetRegisterClass *SrcRC,
                                                 SmallVectorImpl<SUnit*> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(nullptr);

  SUnit *CopyToSU = CreateNewSUnit(nullptr);

    if (I->isArtificial())
    SUnit *SuccSU = I->getSUnit();
      DelDeps.push_back(std::make_pair(SuccSU, *I));

  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  FromDep.setLatency(SU->Latency);
  AddPred(CopyFromSU, FromDep);

  ToDep.setLatency(CopyFromSU->Latency);
  AddPred(CopyToSU, ToDep);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");

  for (const uint16_t *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {

                               std::vector<SUnit*> &LiveRegDefs,

    if (!LiveRegDefs[*AliasI]) continue;

    if (LiveRegDefs[*AliasI] == SU) continue;

    if (RegAdded.insert(*AliasI).second) {

                                     std::vector<SUnit*> &LiveRegDefs,

  for (unsigned i = 1, e = LiveRegDefs.size()-1; i != e; ++i) {
    if (!LiveRegDefs[i]) continue;
    if (LiveRegDefs[i] == SU) continue;

    if (RegAdded.insert(i).second)

  if (const auto *RegOp = dyn_cast<RegisterMaskSDNode>(Op.getNode()))
    return RegOp->getRegMask();
bool ScheduleDAGRRList::
DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl<unsigned> &LRegs) {
  if (NumLiveRegs == 0)

    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
                         RegAdded, LRegs, TRI);

      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)

          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();

        for (; NumVals; --NumVals, ++i) {
          unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();

    if (!Node->isMachineOpcode())

    if (Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
      unsigned CallResource = TRI->getNumRegs();
      if (LiveRegDefs[CallResource]) {
        SDNode *Gen = LiveRegGens[CallResource]->getNode();

            RegAdded.insert(CallResource).second)

  return !LRegs.empty();
void ScheduleDAGRRList::releaseInterferences(unsigned Reg) {
  for (unsigned i = Interferences.size(); i > 0; --i) {
    SUnit *SU = Interferences[i-1];
    LRegsMapT::iterator LRegsPos = LRegsMap.find(SU);

      AvailableQueue->push(SU);

    if (i < Interferences.size())
      Interferences[i-1] = Interferences.back();
    Interferences.pop_back();
    LRegsMap.erase(LRegsPos);
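/// PickNodeToScheduleBottomUp - Return the next ready node, resolving live
/// physical register interferences along the way: first by recording the
/// interference, then by backtracking, and as a last resort by duplicating the
/// defining node or inserting cross-class copies.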
SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
  SUnit *CurSU = AvailableQueue->empty() ? nullptr : AvailableQueue->pop();

    if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
          (LRegs[0] == TRI->getNumRegs() ? "CallResource"
           : TRI->getName(LRegs[0]))
          << " SU #" << CurSU->NodeNum << '\n');
    std::pair<LRegsMapT::iterator, bool> LRegsPair =
      LRegsMap.insert(std::make_pair(CurSU, LRegs));
    if (LRegsPair.second) {
      Interferences.push_back(CurSU);

      assert(CurSU->isPending && "Interferences are pending");
      LRegsPair.first->second = LRegs;

    CurSU = AvailableQueue->pop();

  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    SUnit *TrySU = Interferences[i];

    SUnit *BtSU = nullptr;
    unsigned LiveCycle = UINT_MAX;
    for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
      unsigned Reg = LRegs[j];
      if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
        BtSU = LiveRegGens[Reg];

    if (!WillCreateCycle(TrySU, BtSU)) {
      BacktrackBottomUp(TrySU, BtSU);

        AvailableQueue->remove(BtSU);

      CurSU = AvailableQueue->pop();

    AvailableQueue->remove(TrySU);

  SUnit *TrySU = Interferences[0];

  assert(LRegs.size() == 1 && "Can't handle this yet!");
  unsigned Reg = LRegs[0];

    TRI->getMinimalPhysRegClass(Reg, VT);

  SUnit *NewDef = nullptr;
    NewDef = CopyAndMoveSuccessors(LRDef);
  if (!DestRC && !NewDef)

    InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);

    NewDef = Copies.back();

        << " to SU #" << TrySU->NodeNum << "\n");
  LiveRegDefs[Reg] = NewDef;

  assert(CurSU && "Unable to resolve live physical register dependencies!");
void ScheduleDAGRRList::ListScheduleBottomUp() {
  ReleasePredecessors(&ExitSU);

  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    AvailableQueue->push(RootSU);

  while (!AvailableQueue->empty() || !Interferences.empty()) {
    DEBUG(dbgs() << "\nExamining Available:\n";
          AvailableQueue->dump(this));

    SUnit *SU = PickNodeToScheduleBottomUp();

    AdvancePastStalls(SU);

    ScheduleNodeBottomUp(SU);

    while (AvailableQueue->empty() && !PendingQueue.empty()) {
      assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
      AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));

  VerifyScheduledSequence(true);
class RegReductionPQBase;

struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
  bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }

struct reverse_sort : public queue_sort {
  reverse_sort(SF &sf) : SortFunc(sf) {}

  bool operator()(SUnit* left, SUnit* right) const {
    return SortFunc(right, left);

struct bu_ls_rr_sort : public queue_sort {
    HasReadyFilter = false

  RegReductionPQBase *SPQ;
  bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}

  bool operator()(SUnit* left, SUnit* right) const;

struct src_ls_rr_sort : public queue_sort {
    HasReadyFilter = false

  RegReductionPQBase *SPQ;
  src_ls_rr_sort(RegReductionPQBase *spq)

  bool operator()(SUnit* left, SUnit* right) const;

struct hybrid_ls_rr_sort : public queue_sort {
    HasReadyFilter = false

  RegReductionPQBase *SPQ;
  hybrid_ls_rr_sort(RegReductionPQBase *spq)

  bool isReady(SUnit *SU, unsigned CurCycle) const;
  bool operator()(SUnit* left, SUnit* right) const;

struct ilp_ls_rr_sort : public queue_sort {
    HasReadyFilter = false

  RegReductionPQBase *SPQ;
  ilp_ls_rr_sort(RegReductionPQBase *spq)

  bool isReady(SUnit *SU, unsigned CurCycle) const;
  bool operator()(SUnit* left, SUnit* right) const;
  std::vector<SUnit*> Queue;
  unsigned CurQueueId;
  bool TracksRegPressure;

  std::vector<SUnit> *SUnits;

  ScheduleDAGRRList *scheduleDAG;

  std::vector<unsigned> SethiUllmanNumbers;

  std::vector<unsigned> RegLimit;

                     bool hasReadyFilter,

      CurQueueId(0), TracksRegPressure(tracksrp), SrcOrder(srcorder),
      MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(nullptr) {
    if (TracksRegPressure) {
      unsigned NumRC = TRI->getNumRegClasses();
      RegLimit.resize(NumRC);
      std::fill(RegLimit.begin(), RegLimit.end(), 0);

           E = TRI->regclass_end(); I != E; ++I)

  void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
    scheduleDAG = scheduleDag;

    return scheduleDAG->getHazardRec();
  void initNodes(std::vector<SUnit> &sunits) override;

  void addNode(const SUnit *SU) override;

  void updateNode(const SUnit *SU) override;

  void releaseState() override {
    SethiUllmanNumbers.clear();

  unsigned getNodePriority(const SUnit *SU) const;

  unsigned getNodeOrdering(const SUnit *SU) const {

  bool empty() const override { return Queue.empty(); }

  void push(SUnit *U) override {
    assert(!U->NodeQueueId && "Node in the queue already");

  void remove(SUnit *SU) override {
    assert(!Queue.empty() && "Queue is empty!");
    std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
    if (I != std::prev(Queue.end()))

  bool tracksRegPressure() const override { return TracksRegPressure; }

  void dumpRegPressure() const;

  bool HighRegPressure(const SUnit *SU) const;

  bool MayReduceRegPressure(SUnit *SU) const;

  int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;

  void scheduledNode(SUnit *SU) override;

  void unscheduledNode(SUnit *SU) override;

  bool canClobber(const SUnit *SU, const SUnit *Op);
  void AddPseudoTwoAddrDeps();
  void PrescheduleNodesWithMultipleUses();
  void CalculateSethiUllmanNumbers();
static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
  std::vector<SUnit *>::iterator Best = Q.begin();
  for (std::vector<SUnit *>::iterator I = std::next(Q.begin()),
         E = Q.end(); I != E; ++I)
    if (Picker(*Best, *I))

  if (Best != std::prev(Q.end()))

SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker, ScheduleDAG *DAG) {
    reverse_sort<SF> RPicker(Picker);
    return popFromQueueImpl(Q, RPicker);

  return popFromQueueImpl(Q, Picker);
class RegReductionPriorityQueue : public RegReductionPQBase {

    : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, srcorder,

  bool isBottomUp() const override { return SF::IsBottomUp; }

  bool isReady(SUnit *U) const override {
    return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());

  SUnit *pop() override {
    if (Queue.empty()) return nullptr;
    SUnit *V = popFromQueue(Queue, Picker, scheduleDAG);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    std::vector<SUnit*> DumpQueue = Queue;
    SF DumpPicker = Picker;
    while (!DumpQueue.empty()) {
      SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG);

typedef RegReductionPriorityQueue<bu_ls_rr_sort>
        BURegReductionPriorityQueue;

typedef RegReductionPriorityQueue<src_ls_rr_sort>
        SrcRegReductionPriorityQueue;

typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
        HybridBURRPriorityQueue;

typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
        ILPBURRPriorityQueue;
  if (LSchedLow != RSchedLow)
    return LSchedLow < RSchedLow ? 1 : -1;
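/// CalcNodeSethiUllmanNumber - Compute the Sethi-Ullman number for SU and
/// memoize it in SUNumbers. The number approximates the register need of the
/// subtree rooted at the node and serves as the register-reduction priority.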
  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

    if (I->isCtrl()) continue;
    SUnit *PredSU = I->getSUnit();

    if (PredSethiUllman > SethiUllmanNumber) {
      SethiUllmanNumber = PredSethiUllman;
    } else if (PredSethiUllman == SethiUllmanNumber)

  SethiUllmanNumber += Extra;

  if (SethiUllmanNumber == 0)
    SethiUllmanNumber = 1;

  return SethiUllmanNumber;
void RegReductionPQBase::CalculateSethiUllmanNumbers() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)

void RegReductionPQBase::addNode(const SUnit *SU) {
  unsigned SUSize = SethiUllmanNumbers.size();
  if (SUnits->size() > SUSize)
    SethiUllmanNumbers.resize(SUSize*2, 0);

void RegReductionPQBase::updateNode(const SUnit *SU) {
  SethiUllmanNumbers[SU->NodeNum] = 0;
unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
  assert(SU->NodeNum < SethiUllmanNumbers.size());

    return SethiUllmanNumbers[SU->NodeNum];

  unsigned Priority = SethiUllmanNumbers[SU->NodeNum];

    return (NP > 0) ? NP : 0;
void RegReductionPQBase::dumpRegPressure() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
       E = TRI->regclass_end(); I != E; ++I) {

    DEBUG(dbgs() << TRI->getRegClassName(RC) << ": " << RP << " / "
                 << RegLimit[Id] << '\n');
bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
    SUnit *PredSU = I->getSUnit();

         RegDefPos.IsValid(); RegDefPos.Advance()) {
      unsigned RCId, Cost;

bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const {
  for (unsigned i = 0; i != NumDefs; ++i) {
    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
    SUnit *PredSU = I->getSUnit();

         RegDefPos.IsValid(); RegDefPos.Advance()) {
      MVT VT = RegDefPos.GetValue();
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();

  for (unsigned i = 0; i != NumDefs; ++i) {
    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
void RegReductionPQBase::scheduledNode(SUnit *SU) {
  if (!TracksRegPressure)

    SUnit *PredSU = I->getSUnit();

         RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
      unsigned RCId, Cost;

       RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
    if (SkipRegDefs > 0)
    unsigned RCId, Cost;
void RegReductionPQBase::unscheduledNode(SUnit *SU) {
  if (!TracksRegPressure)

    SUnit *PredSU = I->getSUnit();

        unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
        RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);

        unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
        RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);

  for (unsigned i = 0; i != NumDefs; ++i) {
    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
    if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))

      RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);

  for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
    RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
  unsigned MaxHeight = 0;
    if (I->isCtrl()) continue;
    unsigned Height = I->getSUnit()->getHeight();

    if (I->getSUnit()->getNode() &&

    if (Height > MaxHeight)

  unsigned Scratches = 0;
    if (I->isCtrl()) continue;
  bool RetVal = false;
    if (I->isCtrl()) continue;
    const SUnit *PredSU = I->getSUnit();

  bool RetVal = false;
    if (I->isCtrl()) continue;
    const SUnit *SuccSU = I->getSUnit();

    if (I->isCtrl()) continue;
    I->getSUnit()->isVRegCycle = true;

    if (I->isCtrl()) continue;
    SUnit *PredSU = I->getSUnit();
           "VRegCycle def must be CopyFromReg");

    if (I->isCtrl()) continue;
    if (I->getSUnit()->isVRegCycle &&
  if ((int)SPQ->getCurCycle() < Height) return true;
  if (SPQ->getHazardRec()->getHazardType(SU, 0)

                            RegReductionPQBase *SPQ) {

    if (LHeight != RHeight)
      return LHeight > RHeight ? 1 : -1;

  if (!SPQ->getHazardRec()->isEnabled()) {
    if (LHeight != RHeight)
      return LHeight > RHeight ? 1 : -1;

  int LDepth = left->getDepth() - LPenalty;
  int RDepth = right->getDepth() - RPenalty;
  if (LDepth != RDepth) {
                  << ") depth " << LDepth << " vs SU (" << right->NodeNum
                  << ") depth " << RDepth << "\n");
    return LDepth < RDepth ? 1 : -1;
  if (LHasPhysReg != RHasPhysReg) {
    static const char *const PhysRegMsg[] = { " has no physreg",
                                              " defines a physreg" };
          << PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum << ") "
          << PhysRegMsg[RHasPhysReg] << "\n");
    return LHasPhysReg < RHasPhysReg;

  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);

    RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0;

    LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0;

  if (LPriority != RPriority)
    return LPriority > RPriority;

  unsigned LOrder = SPQ->getNodeOrdering(left);
  unsigned ROrder = SPQ->getNodeOrdering(right);

  if ((LOrder || ROrder) && LOrder != ROrder)
    return LOrder != 0 && (LOrder < ROrder || ROrder == 0);

  return LDist < RDist;

  if (LScratch != RScratch)
    return LScratch > RScratch;

  if ((left->isCall && RPriority > 0) || (right->isCall && LPriority > 0))

         "NodeQueueId cannot be zero");
bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {

bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  unsigned LOrder = SPQ->getNodeOrdering(left);
  unsigned ROrder = SPQ->getNodeOrdering(right);

  if ((LOrder || ROrder) && LOrder != ROrder)
    return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
  static const unsigned ReadyDelay = 3;

  if (SPQ->MayReduceRegPressure(SU)) return true;

  if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;

  if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)

bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {

  bool LHigh = SPQ->HighRegPressure(left);
  bool RHigh = SPQ->HighRegPressure(right);

  if (LHigh && !RHigh) {

  else if (!LHigh && RHigh) {

  if (!LHigh && !RHigh) {
bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
  if (SU->getHeight() > CurCycle) return false;

  if (SPQ->getHazardRec()->getHazardType(SU, 0)

bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {

  unsigned LLiveUses = 0, RLiveUses = 0;
  int LPDiff = 0, RPDiff = 0;
    LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
    RPDiff = SPQ->RegPressureDiff(right, RLiveUses);

          << " != SU(" << right->NodeNum << "): " << RPDiff << "\n");
    return LPDiff > RPDiff;

    if (LReduce && !RReduce) return false;
    if (RReduce && !LReduce) return true;

          << " != SU(" << right->NodeNum << "): " << RLiveUses << "\n");
    return LLiveUses < RLiveUses;

    if (LStall != RStall)
void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {

    AddPseudoTwoAddrDeps();

  if (!TracksRegPressure && !SrcOrder)
    PrescheduleNodesWithMultipleUses();

  CalculateSethiUllmanNumbers();

  if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB)) {
    for (unsigned i = 0, e = sunits.size(); i != e; ++i) {
bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
    for (unsigned i = 0; i != NumOps; ++i) {
                                 ScheduleDAGRRList *scheduleDAG,

  const uint16_t *ImpDefs

  if (!ImpDefs && !RegMask)

    SUnit *SuccSU = SI->getSUnit();
           PE = SuccSU->Preds.end(); PI != PE; ++PI) {
      if (!PI->isAssignedRegDep())

          scheduleDAG->IsReachable(DepSU, PI->getSUnit()))

        for (const uint16_t *ImpDef = ImpDefs; *ImpDef; ++ImpDef)

              scheduleDAG->IsReachable(DepSU, PI->getSUnit()))

  assert(ImpDefs && "Caller should check hasPhysRegDefs");

    if (!SUNode->isMachineOpcode())

    const uint16_t *SUImpDefs =
      TII->get(SUNode->getMachineOpcode()).getImplicitDefs();

    if (!SUImpDefs && !SURegMask)

    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {

      unsigned Reg = ImpDefs[i - NumDefs];

      for (; *SUImpDefs; ++SUImpDefs) {
        unsigned SUReg = *SUImpDefs;
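/// PrescheduleNodesWithMultipleUses - Nodes with multiple uses are not handled
/// well by the register-reduction heuristics; when such a node has a single
/// data-flow predecessor, move it right next to that predecessor (rerouting
/// the predecessor's other successors through it) to guide scheduling.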
void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];

    SUnit *PredSU = nullptr;
         EE = SU->Preds.end(); II != EE; ++II)
      if (!II->isCtrl()) {
        PredSU = II->getSUnit();

         EE = PredSU->Succs.end(); II != EE; ++II) {
      SUnit *PredSuccSU = II->getSUnit();
      if (PredSuccSU == SU) continue;

        goto outer_loop_continue;

        goto outer_loop_continue;

      if (scheduleDAG->IsReachable(SU, PredSuccSU))
        goto outer_loop_continue;

          << " next to PredSU #" << PredSU->NodeNum
          << " to guide scheduling in the presence of multiple uses\n");
    for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {

      scheduleDAG->RemovePred(SuccSU, Edge);
      scheduleDAG->AddPred(SU, Edge);

        scheduleDAG->AddPred(SuccSU, Edge);

  outer_loop_continue:;
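/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use (two-address) operand, add an artificial scheduling edge
/// between them to improve the chance that the two-address copy gets
/// coalesced. This is the "two-address hack" controlled by -disable-2addr-hack.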
void RegReductionPQBase::AddPseudoTwoAddrDeps() {
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];

    for (unsigned j = 0; j != NumOps; ++j) {

      if (!DUSU) continue;

           E = DUSU->Succs.end(); I != E; ++I) {
        if (I->isCtrl()) continue;
        SUnit *SuccSU = I->getSUnit();

        while (SuccSU->Succs.size() == 1 &&
          SuccSU = SuccSU->Succs.front().getSUnit();

            (!canClobber(SuccSU, DUSU) ||
            !scheduleDAG->IsReachable(SuccSU, SU)) {
          DEBUG(dbgs() << " Adding a pseudo-two-addr edge from SU #"
ScheduleDAGSDNodes *
llvm::createBURRListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level OptLevel) {

  BURegReductionPriorityQueue *PQ =
    new BURegReductionPriorityQueue(*IS->MF, false, false, TII, TRI, nullptr);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

ScheduleDAGSDNodes *
llvm::createSourceListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level OptLevel) {

  SrcRegReductionPriorityQueue *PQ =
    new SrcRegReductionPriorityQueue(*IS->MF, false, true, TII, TRI, nullptr);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

ScheduleDAGSDNodes *
llvm::createHybridListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level OptLevel) {

  HybridBURRPriorityQueue *PQ =
    new HybridBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

ScheduleDAGSDNodes *
llvm::createILPListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level OptLevel) {

  ILPBURRPriorityQueue *PQ =
    new ILPBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}