39#include "llvm/Config/llvm-config.h"
62#define DEBUG_TYPE "pre-RA-sched"
// NOTE(review): this chunk is an elided extraction of LLVM's
// ScheduleDAGRRList.cpp — original source line numbers are fused into the
// text and many lines are missing. Code is left byte-identical; only
// comments are added.
// Statistics counters: backtracking events and inserted physreg copies.
64STATISTIC(NumBacktracks,
"Number of times scheduler backtracked");
67STATISTIC(NumPRCopies,
"Number of physical register copies");
// Fragments of the RegisterScheduler registrations for the four bottom-up
// list schedulers (burr, source-order, hybrid, ilp); the registration
// boilerplate itself is elided.
71 "Bottom-up register reduction list scheduling",
76 "Similar to list-burr but schedules in source "
77 "order when possible",
82 "Bottom-up register pressure aware list scheduling "
83 "which tries to balance latency and register pressure",
88 "Bottom-up register pressure aware list scheduling "
89 "which tries to balance ILP and register pressure",
// Fragments of cl::opt command-line flags that gate individual scheduler
// heuristics; the option identifiers and cl::init values are elided.
94 cl::desc(
"Disable cycle-level precision during preRA scheduling"));
100 cl::desc(
"Disable regpressure priority in sched=list-ilp"));
103 cl::desc(
"Disable live use priority in sched=list-ilp"));
106 cl::desc(
"Disable virtual register cycle interference checks"));
109 cl::desc(
"Disable physreg def-use affinity"));
112 cl::desc(
"Disable no-stall priority in sched=list-ilp"));
115 cl::desc(
"Disable critical path priority in sched=list-ilp"));
118 cl::desc(
"Disable scheduled-height priority in sched=list-ilp"));
121 cl::desc(
"Disable scheduler's two-address hack"));
125 cl::desc(
"Number of instructions to allow ahead of the critical path "
126 "in sched=list-ilp"));
130 cl::desc(
"Average inst/cycle when no target itinerary exists."));
// --- Interior of class ScheduleDAGRRList (class header elided) ---
// Scheduler state:
// PendingQueue: released SUnits not yet ready in the current cycle.
156 unsigned CurCycle = 0;
// Minimum height among pending nodes — next cycle worth advancing to.
159 unsigned MinAvailableCycle = ~0u;
163 unsigned IssueCount = 0u;
// Count of live physical registers plus the per-register def/gen maps
// (indexed by physreg number; one extra slot is allocated — see Schedule()).
168 unsigned NumLiveRegs = 0u;
150 std::vector<SUnit *> PendingQueue;
169 std::unique_ptr<SUnit*[]> LiveRegDefs;
170 std::unique_ptr<SUnit*[]> LiveRegGens;
// Constructor fragment: initializer list tail and hazard-recognizer setup.
193 AvailableQueue(availqueue), Topo(SUnits, nullptr) {
198 HazardRec = STI.
getInstrInfo()->CreateTargetHazardRecognizer(&STI,
this);
// Destructor owns the priority queue (raw pointer, deleted here).
201 ~ScheduleDAGRRList()
override {
203 delete AvailableQueue;
206 void Schedule()
override;
208 ScheduleHazardRecognizer *getHazardRec() {
return HazardRec; }
// Thin wrappers over the topological ordering (Topo) used for cycle checks
// when mutating the DAG during scheduling.
211 bool IsReachable(
const SUnit *SU,
const SUnit *TargetSU) {
212 return Topo.IsReachable(SU, TargetSU);
217 bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
218 return Topo.WillCreateCycle(SU, TargetSU);
// AddPredQueued defers the topology update; AddPred applies it eagerly.
// Both add the edge to SU; RemovePred removes it.
224 void AddPredQueued(SUnit *SU,
const SDep &
D) {
225 Topo.AddPredQueued(SU,
D.getSUnit());
232 void AddPred(SUnit *SU,
const SDep &
D) {
233 Topo.AddPred(SU,
D.getSUnit());
240 void RemovePred(SUnit *SU,
const SDep &
D) {
241 Topo.RemovePred(SU,
D.getSUnit());
// isReady fragment: delegates (at least in part) to the priority queue's
// ready filter; surrounding condition elided.
246 bool isReady(SUnit *SU) {
248 AvailableQueue->isReady(SU);
// Bottom-up scheduling primitives (defined out of line below).
251 void ReleasePred(SUnit *SU,
const SDep *PredEdge);
252 void ReleasePredecessors(SUnit *SU);
253 void ReleasePending();
254 void AdvanceToCycle(
unsigned NextCycle);
255 void AdvancePastStalls(SUnit *SU);
256 void EmitNode(SUnit *SU);
257 void ScheduleNodeBottomUp(SUnit*);
258 void CapturePred(SDep *PredEdge);
259 void UnscheduleNodeBottomUp(SUnit*);
260 void RestoreHazardCheckerBottomUp();
261 void BacktrackBottomUp(SUnit*, SUnit*);
262 SUnit *TryUnfoldSU(SUnit *);
263 SUnit *CopyAndMoveSuccessors(SUnit*);
264 void InsertCopiesAndMoveSuccs(SUnit*,
unsigned,
265 const TargetRegisterClass*,
266 const TargetRegisterClass*,
267 SmallVectorImpl<SUnit*>&);
268 bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);
270 void releaseInterferences(
unsigned Reg = 0);
272 SUnit *PickNodeToScheduleBottomUp();
273 void ListScheduleBottomUp();
// CreateNewSUnit/CreateClone: allocate a new SUnit and, if it grew the
// SUnits vector, register it with the topological ordering.
276 SUnit *CreateNewSUnit(SDNode *
N) {
277 unsigned NumSUnits = SUnits.size();
278 SUnit *NewNode = newSUnit(
N);
280 if (NewNode->
NodeNum >= NumSUnits)
281 Topo.AddSUnitWithoutPredecessors(NewNode);
286 SUnit *CreateClone(SUnit *
N) {
287 unsigned NumSUnits = SUnits.size();
288 SUnit *NewNode = Clone(
N);
290 if (NewNode->
NodeNum >= NumSUnits)
291 Topo.AddSUnitWithoutPredecessors(NewNode);
297 bool forceUnitLatencies()
const override {
// GetCostForDef fragment (free function; signature partially elided):
// resolves the register class and cost for a value defined at RegDefPos,
// with special cases for MVT::Untyped and REG_SEQUENCE.
314 unsigned &RegClass,
unsigned &Cost,
320 if (VT == MVT::Untyped) {
327 RegClass = RC->
getID();
332 unsigned Opcode =
Node->getMachineOpcode();
333 if (Opcode == TargetOpcode::REG_SEQUENCE) {
// REG_SEQUENCE operand 0 encodes the destination register-class index.
334 unsigned DstRCIdx =
Node->getConstantOperandVal(0);
336 RegClass = RC->
getID();
341 unsigned Idx = RegDefPos.
GetIdx();
344 assert(RC &&
"Not a valid register class");
345 RegClass = RC->
getID();
// Schedule - main entry point: reset per-run state, size the live-register
// maps (getNumRegs()+1: the extra slot models the "call resource" — see
// ReleasePredecessors), then run the bottom-up list scheduler.
356void ScheduleDAGRRList::Schedule() {
358 <<
" '" << BB->getName() <<
"' **********\n");
367 LiveRegDefs.reset(
new SUnit*[
TRI->getNumRegs() + 1]());
368 LiveRegGens.reset(
new SUnit*[
TRI->getNumRegs() + 1]());
369 CallSeqEndForStart.
clear();
370 assert(Interferences.
empty() && LRegsMap.empty() &&
"stale Interferences");
383 ListScheduleBottomUp();
388 dbgs() <<
"*** Final schedule ***\n";
// ReleasePred - a predecessor edge of a just-scheduled node has been
// satisfied; update PredSU's ready state. The NumSuccsLeft underflow check
// (debug dump lines below) and the height bookkeeping are partially elided.
400void ScheduleDAGRRList::ReleasePred(SUnit *SU,
const SDep *PredEdge) {
401 SUnit *PredSU = PredEdge->
getSUnit();
405 dbgs() <<
"*** Scheduling failed! ***\n";
407 dbgs() <<
" has been released too many times!\n";
413 if (!forceUnitLatencies()) {
// Track the earliest cycle at which anything becomes available so
// AdvanceToCycle can jump straight there.
425 if (Height < MinAvailableCycle)
426 MinAvailableCycle = Height;
428 if (isReady(PredSU)) {
429 AvailableQueue->
push(PredSU);
// Not ready this cycle: park it on PendingQueue for ReleasePending.
435 PendingQueue.push_back(PredSU);
// Fragment of the call-sequence-start walker: follows the chain operand
// backwards, counting CALLSEQ_END (destroy) / CALLSEQ_START (setup) nesting.
459 if (
N->isMachineOpcode()) {
460 if (
N->getMachineOpcode() ==
TII->getCallFrameDestroyOpcode()) {
462 }
else if (
N->getMachineOpcode() ==
TII->getCallFrameSetupOpcode()) {
// Jump to the node's chain operand (MVT::Other) and keep walking.
470 if (
Op.getValueType() == MVT::Other) {
472 goto found_chain_operand;
475 found_chain_operand:;
// Fragment of the multi-predecessor variant: recurse into each candidate
// chain and keep the path with the deepest call-frame nesting.
499 unsigned BestMaxNest = MaxNest;
501 unsigned MyNestLevel = NestLevel;
502 unsigned MyMaxNest = MaxNest;
504 MyNestLevel, MyMaxNest,
TII))
505 if (!Best || (MyMaxNest > BestMaxNest)) {
507 BestMaxNest = MyMaxNest;
511 MaxNest = BestMaxNest;
515 if (
N->isMachineOpcode()) {
516 if (
N->getMachineOpcode() ==
TII->getCallFrameDestroyOpcode()) {
518 MaxNest = std::max(MaxNest, NestLevel);
519 }
else if (
N->getMachineOpcode() ==
TII->getCallFrameSetupOpcode()) {
528 if (
Op.getValueType() == MVT::Other) {
530 goto found_chain_operand;
533 found_chain_operand:;
// ReleasePredecessors - release every predecessor edge of SU; for physreg
// dependences, record SU as the first "gen" (user) of the live register.
556void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
558 for (SDep &Pred : SU->
Preds) {
559 ReleasePred(SU, &Pred);
565 SUnit *RegDef = LiveRegDefs[Pred.
getReg()]; (void)RegDef;
567 "interference on register dependence");
569 if (!LiveRegGens[Pred.
getReg()]) {
571 LiveRegGens[Pred.
getReg()] = SU;
// Call sequences are modeled as a pseudo register numbered getNumRegs():
// scheduling a CALLSEQ_END (bottom-up) makes the whole call sequence live
// until its matching CALLSEQ_START is scheduled.
579 unsigned CallResource =
TRI->getNumRegs();
580 if (!LiveRegDefs[CallResource])
581 for (SDNode *Node = SU->
getNode(); Node; Node =
Node->getGluedNode())
582 if (
Node->isMachineOpcode() &&
583 Node->getMachineOpcode() ==
TII->getCallFrameDestroyOpcode()) {
584 unsigned NestLevel = 0;
585 unsigned MaxNest = 0;
587 assert(
N &&
"Must find call sequence start");
589 SUnit *
Def = &SUnits[
N->getNodeId()];
// Remember which CALLSEQ_END closes this CALLSEQ_START so
// UnscheduleNodeBottomUp can restore the pairing.
590 CallSeqEndForStart[
Def] = SU;
593 LiveRegDefs[CallResource] =
Def;
594 LiveRegGens[CallResource] = SU;
// ReleasePending - move nodes whose ready cycle has arrived from
// PendingQueue into the available queue (swap-with-back removal).
601void ScheduleDAGRRList::ReleasePending() {
603 assert(PendingQueue.empty() &&
"pending instrs not allowed in this mode");
608 if (AvailableQueue->
empty())
609 MinAvailableCycle = std::numeric_limits<unsigned>::max();
613 for (
unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
614 unsigned ReadyCycle = PendingQueue[i]->getHeight();
615 if (ReadyCycle < MinAvailableCycle)
616 MinAvailableCycle = ReadyCycle;
618 if (PendingQueue[i]->isAvailable) {
619 if (!isReady(PendingQueue[i]))
621 AvailableQueue->
push(PendingQueue[i]);
623 PendingQueue[i]->isPending =
false;
// O(1) removal: overwrite slot i with the last element and pop.
624 PendingQueue[i] = PendingQueue.back();
625 PendingQueue.pop_back();
// AdvanceToCycle - move the current cycle forward to NextCycle, stepping
// the hazard recognizer per cycle (loop body elided).
631void ScheduleDAGRRList::AdvanceToCycle(
unsigned NextCycle) {
632 if (NextCycle <= CurCycle)
639 CurCycle = NextCycle;
642 for (; CurCycle != NextCycle; ++CurCycle) {
// AdvancePastStalls - advance past SU's ready cycle and any hazard stalls
// reported by the hazard recognizer (stall computation elided).
653void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
670 AdvanceToCycle(ReadyCycle);
690 AdvanceToCycle(CurCycle + Stalls);
// EmitNode fragment - notifies the hazard recognizer that SU issues.
695void ScheduleDAGRRList::EmitNode(SUnit *SU) {
706 "This target-independent node should not be scheduled.");
// ScheduleNodeBottomUp - commit SU to the schedule at CurCycle, release its
// predecessors, and kill any live physical registers (and the call-sequence
// pseudo resource) that SU defines.
739void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
744 if (CurCycle < SU->getHeight())
746 <<
"] pipeline stall!\n");
766 AdvanceToCycle(CurCycle + 1);
770 ReleasePredecessors(SU);
// Scheduling SU's def ends the live range of each assigned-physreg succ
// dependence: clear the def/gen maps and re-try interfering nodes.
773 for (SDep &Succ : SU->
Succs) {
776 assert(NumLiveRegs > 0 &&
"NumLiveRegs is already zero!");
778 LiveRegDefs[Succ.
getReg()] =
nullptr;
779 LiveRegGens[Succ.
getReg()] =
nullptr;
780 releaseInterferences(Succ.
getReg());
// If SU is the CALLSEQ_START holding the call-sequence pseudo resource,
// release that resource too.
785 unsigned CallResource =
TRI->getNumRegs();
786 if (LiveRegDefs[CallResource] == SU)
787 for (
const SDNode *SUNode = SU->
getNode(); SUNode;
789 if (SUNode->isMachineOpcode() &&
790 SUNode->getMachineOpcode() ==
TII->getCallFrameSetupOpcode()) {
791 assert(NumLiveRegs > 0 &&
"NumLiveRegs is already zero!");
793 LiveRegDefs[CallResource] =
nullptr;
794 LiveRegGens[CallResource] =
nullptr;
795 releaseInterferences(CallResource);
816 AdvanceToCycle(CurCycle + 1);
// CapturePred - inverse of ReleasePred, used while backtracking: take the
// predecessor back out of the available queue and restore its succ count.
823void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
824 SUnit *PredSU = PredEdge->
getSUnit();
828 AvailableQueue->
remove(PredSU);
832 "NumSuccsLeft will overflow!");
// UnscheduleNodeBottomUp - undo ScheduleNodeBottomUp for SU: capture its
// predecessors, un-kill the physregs it had defined, and re-establish the
// live registers SU itself depends on.
838void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
842 for (SDep &Pred : SU->
Preds) {
845 assert(NumLiveRegs > 0 &&
"NumLiveRegs is already zero!");
847 "Physical register dependency violated?");
// Clear the liveness SU's scheduling had introduced via its pred edges.
849 LiveRegDefs[Pred.
getReg()] =
nullptr;
850 LiveRegGens[Pred.
getReg()] =
nullptr;
851 releaseInterferences(Pred.
getReg());
// Re-acquire the call-sequence pseudo resource if SU is a CALLSEQ_START
// whose matching end (from CallSeqEndForStart) is still scheduled.
857 unsigned CallResource =
TRI->getNumRegs();
858 for (
const SDNode *SUNode = SU->
getNode(); SUNode;
860 if (SUNode->isMachineOpcode() &&
861 SUNode->getMachineOpcode() ==
TII->getCallFrameSetupOpcode()) {
862 SUnit *SeqEnd = CallSeqEndForStart[SU];
863 assert(SeqEnd &&
"Call sequence start/end must be known");
864 assert(!LiveRegDefs[CallResource]);
865 assert(!LiveRegGens[CallResource]);
867 LiveRegDefs[CallResource] = SU;
868 LiveRegGens[CallResource] = SeqEnd;
// Conversely, release the pseudo resource if SU is the CALLSEQ_END that
// was holding it live.
874 if (LiveRegGens[CallResource] == SU)
875 for (
const SDNode *SUNode = SU->
getNode(); SUNode;
877 if (SUNode->isMachineOpcode() &&
878 SUNode->getMachineOpcode() ==
TII->getCallFrameDestroyOpcode()) {
879 assert(NumLiveRegs > 0 &&
"NumLiveRegs is already zero!");
880 assert(LiveRegDefs[CallResource]);
881 assert(LiveRegGens[CallResource]);
883 LiveRegDefs[CallResource] =
nullptr;
884 LiveRegGens[CallResource] =
nullptr;
885 releaseInterferences(CallResource);
// SU's physreg defs become live again; recompute the lowest-height user
// as the new "gen" for each such register.
889 for (
auto &Succ : SU->
Succs) {
892 if (!LiveRegDefs[
Reg])
896 LiveRegDefs[
Reg] = SU;
900 if (!LiveRegGens[
Reg]) {
903 for (
auto &Succ2 : SU->
Succs) {
904 if (Succ2.isAssignedRegDep() && Succ2.getReg() ==
Reg &&
905 Succ2.getSUnit()->getHeight() < LiveRegGens[
Reg]->getHeight())
906 LiveRegGens[
Reg] = Succ2.getSUnit();
// Put SU back on the appropriate queue (pending vs. available).
920 PendingQueue.push_back(SU);
923 AvailableQueue->
push(SU);
// RestoreHazardCheckerBottomUp - replay recent issued nodes into a reset
// hazard recognizer so its state matches the truncated schedule.
930void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
933 unsigned LookAhead = std::min((
unsigned)
Sequence.size(),
938 std::vector<SUnit *>::const_iterator
I = (
Sequence.end() - LookAhead);
939 unsigned HazardCycle = (*I)->getHeight();
942 for (; SU->
getHeight() > HazardCycle; ++HazardCycle) {
// BacktrackBottomUp fragment - unschedule back to BtSU so SU can go first.
951void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
957 UnscheduleNodeBottomUp(OldSU);
966 RestoreHazardCheckerBottomUp();
// Fragment of a glued-node operand check (enclosing function elided).
976 if (SUNode->isOperandOf(
N))
// TryUnfoldSU - split a load-folding SU into a separate load SUnit and a
// compute SUnit (via TII->unfoldMemoryOperand), rewiring all dependence
// edges; returns the new compute SUnit (failure paths elided).
983SUnit *ScheduleDAGRRList::TryUnfoldSU(SUnit *SU) {
987 if (!
TII->unfoldMemoryOperand(*DAG,
N, NewNodes))
990 assert(NewNodes.
size() == 2 &&
"Expected a load folding node!");
// NewNodes[0] is the unfolded load; NewNodes[1] (elided here) the op.
993 SDNode *LoadNode = NewNodes[0];
994 unsigned NumVals =
N->getNumValues();
1000 bool isNewLoad =
true;
// Reuse the load's existing SUnit if the node was already numbered.
1003 LoadSU = &SUnits[LoadNode->
getNodeId()];
1010 LoadSU = CreateNewSUnit(LoadNode);
1013 InitNumRegDefsLeft(LoadSU);
1014 computeLatency(LoadSU);
// Same reuse-or-create dance for the unfolded compute node.
1020 if (
N->getNodeId() != -1) {
1021 NewSU = &SUnits[
N->getNodeId()];
1029 NewSU = CreateNewSUnit(
N);
1032 const MCInstrDesc &MCID =
TII->get(
N->getMachineOpcode());
1042 InitNumRegDefsLeft(NewSU);
1043 computeLatency(NewSU);
// Redirect SDNode users of the old folded node's values.
1049 for (
unsigned i = 0; i != NumVals; ++i)
1051 DAG->ReplaceAllUsesOfValueWith(
SDValue(SU->
getNode(), OldNumVals - 1),
// Partition SU's edges (classification loops partially elided)…
1060 for (SDep &Pred : SU->
Preds) {
1068 for (SDep &Succ : SU->
Succs) {
// …then move chain preds and load preds onto LoadSU, node preds onto
// NewSU, and successors onto whichever new unit they now depend on.
1076 for (
const SDep &Pred : ChainPreds) {
1077 RemovePred(SU, Pred);
1079 AddPredQueued(LoadSU, Pred);
1081 for (
const SDep &Pred : LoadPreds) {
1082 RemovePred(SU, Pred);
1084 AddPredQueued(LoadSU, Pred);
1086 for (
const SDep &Pred : NodePreds) {
1087 RemovePred(SU, Pred);
1088 AddPredQueued(NewSU, Pred);
1090 for (SDep &
D : NodeSuccs) {
1091 SUnit *SuccDep =
D.getSUnit();
1093 RemovePred(SuccDep,
D);
1095 AddPredQueued(SuccDep,
D);
1101 for (SDep &
D : ChainSuccs) {
1102 SUnit *SuccDep =
D.getSUnit();
1104 RemovePred(SuccDep,
D);
1107 AddPredQueued(SuccDep,
D);
// Add the NewSU -> LoadSU data dependence (edge construction elided).
1115 AddPredQueued(NewSU,
D);
// Register the new units with the priority queue.
1118 AvailableQueue->
addNode(LoadSU);
1120 AvailableQueue->
addNode(NewSU);
// CopyAndMoveSuccessors - duplicate SU (possibly unfolding it first) and
// move its non-constraining successors to the clone, so the original can
// satisfy a physreg interference. Returns the clone, or null on failure.
1132SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
// Bail out when glue makes duplication unsafe for the target.
1140 if (
N->getGluedNode() &&
1141 !
TII->canCopyGluedNodeDuringSchedule(
N)) {
1144 <<
"Giving up because it has incoming glue and the target does not "
1145 "want to copy it\n");
1150 bool TryUnfold =
false;
1151 for (
unsigned i = 0, e =
N->getNumValues(); i != e; ++i) {
1152 MVT VT =
N->getSimpleValueType(i);
1153 if (VT == MVT::Glue) {
1154 LLVM_DEBUG(
dbgs() <<
"Giving up because it has outgoing glue\n");
1156 }
else if (VT == MVT::Other)
// Glue-typed operands are also a blocker unless the target allows it.
1160 MVT VT =
Op.getNode()->getSimpleValueType(
Op.getResNo());
1161 if (VT == MVT::Glue && !
TII->canCopyGluedNodeDuringSchedule(
N)) {
1163 dbgs() <<
"Giving up because it one of the operands is glue and "
1164 "the target does not want to copy it\n");
// If the node folds a load, unfold first and retry on the new unit.
1171 SUnit *UnfoldSU = TryUnfoldSU(SU);
1182 NewSU = CreateClone(SU);
// Clone keeps all of SU's (non-artificial) predecessor edges.
1185 for (SDep &Pred : SU->
Preds)
1187 AddPredQueued(NewSU, Pred);
// Retarget successors at the clone, remembering edges to delete.
1196 for (SDep &Succ : SU->
Succs) {
1203 AddPredQueued(SuccSU,
D);
1208 for (
const auto &[DelSU, DelD] : DelDeps)
1209 RemovePred(DelSU, DelD);
1212 AvailableQueue->
addNode(NewSU);
// InsertCopiesAndMoveSuccs - when cloning is impossible, insert a
// cross-class copy pair (value -> DestRC -> SrcRC) and move SU's
// successors onto the copy-back node; both copies are appended to Copies.
1220void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU,
unsigned Reg,
1221 const TargetRegisterClass *DestRC,
1222 const TargetRegisterClass *SrcRC,
1223 SmallVectorImpl<SUnit*> &
Copies) {
// Two synthetic SUnits with no SDNode: copy-from and copy-to.
1224 SUnit *CopyFromSU = CreateNewSUnit(
nullptr);
1228 SUnit *CopyToSU = CreateNewSUnit(
nullptr);
1235 for (SDep &Succ : SU->
Succs) {
1242 AddPredQueued(SuccSU,
D);
1252 for (
const auto &[DelSU, DelD] : DelDeps)
1253 RemovePred(DelSU, DelD);
// Wire SU -> CopyFromSU -> CopyToSU with the producers' latencies.
1256 FromDep.setLatency(SU->
Latency);
1257 AddPredQueued(CopyFromSU, FromDep);
1259 ToDep.setLatency(CopyFromSU->
Latency);
1260 AddPredQueued(CopyToSU, ToDep);
1263 AvailableQueue->
addNode(CopyFromSU);
1264 AvailableQueue->
addNode(CopyToSU);
1265 Copies.push_back(CopyFromSU);
1266 Copies.push_back(CopyToSU);
// Fragment of getPhysicalRegisterVT: look up which implicit def slot
// produces Reg and return the corresponding value type.
1283 "Physical reg def must be in implicit def list!");
1284 NumRes =
MCID.getNumDefs();
1291 return N->getSimpleValueType(NumRes);
// Fragment of CheckForLiveRegDef: for each alias of the def'd register,
// report an interference unless the existing def is SU itself; RegAdded
// dedupes entries pushed into LRegs (push elided).
1304 if (!LiveRegDefs[*AliasI])
continue;
1307 if (LiveRegDefs[*AliasI] == SU)
continue;
1314 if (RegAdded.
insert(*AliasI).second) {
// Fragment of CheckForLiveRegDefMasked: scan all registers against a
// clobber regmask (mask test elided).
1327 for (
unsigned i = 1, e = LiveRegDefs.
size()-1; i != e; ++i) {
1328 if (!LiveRegDefs[i])
continue;
1329 if (LiveRegDefs[i] == SU)
continue;
1331 if (RegAdded.
insert(i).second)
// Fragment of getNodeRegMask.
1340 return RegOp->getRegMask();
// DelayForLiveRegsBottomUp - true (with LRegs filled) if scheduling SU now
// would clobber a physical register that is still live; checks pred edges,
// inline-asm clobbers, calling-convention copies, call-sequence nesting,
// and the instruction's implicit defs.
1348bool ScheduleDAGRRList::
1349DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl<unsigned> &LRegs) {
1350 if (NumLiveRegs == 0)
1353 SmallSet<unsigned, 4> RegAdded;
1358 for (SDep &Pred : SU->
Preds) {
1361 RegAdded, LRegs,
TRI);
1364 for (SDNode *Node = SU->
getNode(); Node; Node =
Node->getGluedNode()) {
// INLINEASM operands: walk the flag words to find reg def/clobber
// groups (flag decoding partially elided).
1369 if (
Node->getOperand(
NumOps-1).getValueType() == MVT::Glue)
1373 unsigned Flags =
Node->getConstantOperandVal(i);
1374 const InlineAsm::Flag
F(Flags);
1375 unsigned NumVals =
F.getNumOperandRegisters();
1378 if (
F.isRegDefKind() ||
F.isRegDefEarlyClobberKind() ||
1379 F.isClobberKind()) {
1381 for (; NumVals; --NumVals, ++i) {
// CopyToReg-style glued source (context elided).
1395 SDNode *SrcNode =
Node->getOperand(2).getNode();
1401 if (!
Node->isMachineOpcode())
// Nested CALLSEQ_END: conflicts with an in-flight call sequence.
1406 if (
Node->getMachineOpcode() ==
TII->getCallFrameDestroyOpcode()) {
1408 unsigned CallResource =
TRI->getNumRegs();
1409 if (LiveRegDefs[CallResource]) {
1410 SDNode *Gen = LiveRegGens[CallResource]->getNode();
1414 RegAdded.
insert(CallResource).second)
1423 const MCInstrDesc &MCID =
TII->get(
Node->getMachineOpcode());
// Optional defs are checked individually (handling elided).
1429 for (
unsigned i = 0; i < MCID.
getNumDefs(); ++i)
1430 if (MCID.
operands()[i].isOptionalDef()) {
1440 return !LRegs.
empty();
// releaseInterferences - re-queue interfering SUnits that were blocked on
// Reg (or all of them when Reg==0); uses swap-with-back removal.
1443void ScheduleDAGRRList::releaseInterferences(
unsigned Reg) {
1445 for (
unsigned i = Interferences.
size(); i > 0; --i) {
1446 SUnit *SU = Interferences[i-1];
1447 LRegsMapT::iterator LRegsPos = LRegsMap.find(SU);
1449 SmallVectorImpl<unsigned> &LRegs = LRegsPos->second;
1459 AvailableQueue->
push(SU);
1461 if (i < Interferences.
size())
1462 Interferences[i-1] = Interferences.
back();
1464 LRegsMap.erase(LRegsPos);
// PickNodeToScheduleBottomUp - pop candidates until one has no live-reg
// interference; if everything interferes, resolve by backtracking, cloning
// (CopyAndMoveSuccessors), or inserting cross-class copies.
1472SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
1473 SUnit *CurSU = AvailableQueue->
empty() ? nullptr : AvailableQueue->
pop();
// Keeps popping while CurSU must be delayed; delayed nodes are parked on
// Interferences/LRegsMap keyed by the blocking registers.
1474 auto FindAvailableNode = [&]() {
1476 SmallVector<unsigned, 4> LRegs;
1477 if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
1480 if (LRegs[0] ==
TRI->getNumRegs())
dbgs() <<
"CallResource";
1483 auto [LRegsIter, LRegsInserted] = LRegsMap.try_emplace(CurSU, LRegs);
1484 if (LRegsInserted) {
// Already tracked: refresh the blocking set.
1491 LRegsIter->second = LRegs;
1493 CurSU = AvailableQueue->
pop();
1496 FindAvailableNode();
// All nodes interfere. Try backtracking: unschedule up to the earliest
// generator (lowest height) of a blocking register.
1508 for (SUnit *TrySU : Interferences) {
1509 SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
1513 SUnit *BtSU =
nullptr;
1514 unsigned LiveCycle = std::numeric_limits<unsigned>::max();
1515 for (
unsigned Reg : LRegs) {
1516 if (LiveRegGens[
Reg]->getHeight() < LiveCycle) {
1517 BtSU = LiveRegGens[
Reg];
1521 if (!WillCreateCycle(TrySU, BtSU)) {
1523 BacktrackBottomUp(TrySU, BtSU);
1530 AvailableQueue->
remove(BtSU);
1533 <<
") to SU(" << TrySU->NodeNum <<
")\n");
1538 if (!TrySU->isAvailable || !TrySU->NodeQueueId) {
1539 LLVM_DEBUG(
dbgs() <<
"TrySU not available; choosing node from queue\n");
1540 CurSU = AvailableQueue->
pop();
1544 AvailableQueue->
remove(TrySU);
1547 FindAvailableNode();
// Backtracking failed: duplicate the live-reg def, or as a last resort
// insert cross-RC copies (e.g. the x86 EFLAGS case handled here).
1559 SUnit *TrySU = Interferences[0];
1560 SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
1561 assert(LRegs.
size() == 1 &&
"Can't handle this yet!");
1562 unsigned Reg = LRegs[0];
1563 SUnit *LRDef = LiveRegDefs[
Reg];
1565 const TargetRegisterClass *RC =
1566 TRI->getMinimalPhysRegClass(
Reg, VT);
1567 const TargetRegisterClass *DestRC =
TRI->getCrossCopyRegClass(RC);
1576 SUnit *NewDef =
nullptr;
1578 NewDef = CopyAndMoveSuccessors(LRDef);
1579 if (!DestRC && !NewDef)
1585 InsertCopiesAndMoveSuccs(LRDef,
Reg, DestRC, RC,
Copies);
1587 <<
" to SU #" <<
Copies.front()->NodeNum <<
"\n");
1593 <<
" to SU #" << TrySU->
NodeNum <<
"\n");
1594 LiveRegDefs[
Reg] = NewDef;
1599 assert(CurSU &&
"Unable to resolve live physical register dependencies!");
// ListScheduleBottomUp - the main loop: seed with the DAG root, then
// repeatedly pick, stall-adjust, and schedule until both the available
// queue and the interference list drain.
1605void ScheduleDAGRRList::ListScheduleBottomUp() {
// Release ExitSU's preds first so real roots become available.
1607 ReleasePredecessors(&ExitSU);
1610 if (!SUnits.empty()) {
1611 SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
1612 assert(RootSU->
Succs.empty() &&
"Graph root shouldn't have successors!");
1614 AvailableQueue->
push(RootSU);
1620 while (!AvailableQueue->
empty() || !Interferences.empty()) {
1622 AvailableQueue->
dump(
this));
1626 SUnit *SU = PickNodeToScheduleBottomUp();
1628 AdvancePastStalls(SU);
1630 ScheduleNodeBottomUp(SU);
// Nothing available but work pending: jump to the next useful cycle.
1632 while (AvailableQueue->
empty() && !PendingQueue.empty()) {
1634 assert(MinAvailableCycle < std::numeric_limits<unsigned>::max() &&
1635 "MinAvailableCycle uninitialized");
1636 AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
1644 VerifyScheduledSequence(
true);
// --- Priority-queue comparison machinery (fragments) ---
1650class RegReductionPQBase;
// queue_sort fragment: default isReady accepts everything (no filter).
1653 bool isReady(SUnit* SU,
unsigned CurCycle)
const {
return true; }
// reverse_sort - adapter that inverts another comparator by swapping its
// arguments; used by popFromQueue for top-down-ordered picking.
1658struct reverse_sort :
public queue_sort {
1661 reverse_sort(SF &sf) : SortFunc(sf) {}
1663 bool operator()(SUnit* left, SUnit* right)
const {
1666 return SortFunc(right, left);
// bu_ls_rr_sort - Sethi-Ullman register-reduction order (sched=list-burr).
1673struct bu_ls_rr_sort :
public queue_sort {
1676 HasReadyFilter =
false
1679 RegReductionPQBase *SPQ;
1681 bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
1683 bool operator()(SUnit* left, SUnit* right)
const;
// src_ls_rr_sort - like burr but prefers source order (sched=source).
1687struct src_ls_rr_sort :
public queue_sort {
1690 HasReadyFilter =
false
1693 RegReductionPQBase *SPQ;
1695 src_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
1697 bool operator()(SUnit* left, SUnit* right)
const;
// hybrid_ls_rr_sort - balances latency and register pressure
// (sched=list-hybrid); has a cycle-aware isReady.
1701struct hybrid_ls_rr_sort :
public queue_sort {
1704 HasReadyFilter =
false
1707 RegReductionPQBase *SPQ;
1709 hybrid_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
1711 bool isReady(SUnit *SU,
unsigned CurCycle)
const;
1713 bool operator()(SUnit* left, SUnit* right)
const;
// ilp_ls_rr_sort - balances ILP and register pressure (sched=list-ilp).
1718struct ilp_ls_rr_sort :
public queue_sort {
1721 HasReadyFilter =
false
1724 RegReductionPQBase *SPQ;
1726 ilp_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
1728 bool isReady(SUnit *SU,
unsigned CurCycle)
const;
1730 bool operator()(SUnit* left, SUnit* right)
const;
// RegReductionPQBase - shared state/logic for all register-reduction
// priority queues: the queue storage, Sethi-Ullman numbers, and (when
// TracksRegPressure) per-register-class pressure tracking.
1733class RegReductionPQBase :
public SchedulingPriorityQueue {
1735 std::vector<SUnit *> Queue;
// Monotonic id used to break ties by insertion order (see push()).
1736 unsigned CurQueueId = 0;
1737 bool TracksRegPressure;
1741 std::vector<SUnit> *SUnits =
nullptr;
1743 MachineFunction &MF;
1744 const TargetInstrInfo *
TII =
nullptr;
1745 const TargetRegisterInfo *
TRI =
nullptr;
1746 const TargetLowering *TLI =
nullptr;
1747 ScheduleDAGRRList *scheduleDAG =
nullptr;
// SethiUllmanNumbers: per-SUnit register-reduction priority.
1750 std::vector<unsigned> SethiUllmanNumbers;
// RegPressure/RegLimit: current and maximum pressure per reg class id.
1753 std::vector<unsigned> RegPressure;
1757 std::vector<unsigned> RegLimit;
1760 RegReductionPQBase(MachineFunction &mf,
1761 bool hasReadyFilter,
1764 const TargetInstrInfo *tii,
1765 const TargetRegisterInfo *tri,
1766 const TargetLowering *tli)
1767 : SchedulingPriorityQueue(hasReadyFilter), TracksRegPressure(tracksrp),
1768 SrcOrder(srcorder), MF(mf),
TII(tii),
TRI(tri), TLI(tli) {
1769 if (TracksRegPressure) {
// Pre-size pressure limits from the target's register classes.
1770 unsigned NumRC =
TRI->getNumRegClasses();
1771 RegLimit.resize(NumRC);
1775 for (
const TargetRegisterClass *RC :
TRI->regclasses())
1780 void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
1781 scheduleDAG = scheduleDag;
1784 ScheduleHazardRecognizer* getHazardRec() {
1785 return scheduleDAG->getHazardRec();
1788 void initNodes(std::vector<SUnit> &sunits)
override;
1790 void addNode(
const SUnit *SU)
override;
1792 void updateNode(
const SUnit *SU)
override;
1794 void releaseState()
override {
1796 SethiUllmanNumbers.clear();
1800 unsigned getNodePriority(
const SUnit *SU)
const;
1802 unsigned getNodeOrdering(
const SUnit *SU)
const {
1808 bool empty()
const override {
return Queue.empty(); }
// push assigns a fresh nonzero NodeQueueId (0 means "not queued").
1810 void push(SUnit *U)
override {
1811 assert(!
U->NodeQueueId &&
"Node in the queue already");
1812 U->NodeQueueId = ++CurQueueId;
// remove: linear find, then swap-with-last erase (elided tail).
1816 void remove(SUnit *SU)
override {
1819 std::vector<SUnit *>::iterator
I =
llvm::find(Queue, SU);
1820 if (
I != std::prev(
Queue.end()))
1826 bool tracksRegPressure()
const override {
return TracksRegPressure; }
1828 void dumpRegPressure()
const;
1830 bool HighRegPressure(
const SUnit *SU)
const;
1832 bool MayReduceRegPressure(SUnit *SU)
const;
1834 int RegPressureDiff(SUnit *SU,
unsigned &LiveUses)
const;
1836 void scheduledNode(SUnit *SU)
override;
1838 void unscheduledNode(SUnit *SU)
override;
1841 bool canClobber(
const SUnit *SU,
const SUnit *
Op);
1842 void AddPseudoTwoAddrDeps();
1843 void PrescheduleNodesWithMultipleUses();
1844 void CalculateSethiUllmanNumbers();
// popFromQueueImpl - linear scan for the best element under Picker; the
// scan is capped at 1000 elements to bound compile time on huge queues.
1848static SUnit *popFromQueueImpl(std::vector<SUnit *> &Q, SF &Picker) {
1849 unsigned BestIdx = 0;
1852 for (
unsigned I = 1,
E = std::min(Q.size(), (
decltype(Q.size()))1000);
I !=
E;
1854 if (Picker(Q[BestIdx], Q[
I]))
// Swap-with-last removal of the chosen element (elided tail).
1856 SUnit *
V = Q[BestIdx];
1857 if (BestIdx + 1 != Q.size())
// popFromQueue - wraps popFromQueueImpl, reversing the comparator when the
// factor-reverse condition (elided) applies.
1864SUnit *popFromQueue(std::vector<SUnit *> &Q, SF &Picker, ScheduleDAG *DAG) {
1867 reverse_sort<SF> RPicker(Picker);
1868 return popFromQueueImpl(Q, RPicker);
1872 return popFromQueueImpl(Q, Picker);
// RegReductionPriorityQueue - concrete queue parameterized by one of the
// sort functors above; Picker holds the functor instance.
1883class RegReductionPriorityQueue :
public RegReductionPQBase {
1887 RegReductionPriorityQueue(MachineFunction &mf,
1890 const TargetInstrInfo *tii,
1891 const TargetRegisterInfo *tri,
1892 const TargetLowering *tli)
1893 : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, srcorder,
1897 bool isBottomUp()
const override {
return SF::IsBottomUp; }
1899 bool isReady(SUnit *U)
const override {
1900 return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
1903 SUnit *pop()
override {
1904 if (
Queue.empty())
return nullptr;
1906 SUnit *
V = popFromQueue(Queue, Picker, scheduleDAG);
// dump: pops a throwaway copy of the queue in priority order.
1911#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1914 std::vector<SUnit *> DumpQueue =
Queue;
1915 SF DumpPicker = Picker;
1916 while (!DumpQueue.empty()) {
1917 SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG);
// Convenience aliases for the four scheduler flavors.
1925using BURegReductionPriorityQueue = RegReductionPriorityQueue<bu_ls_rr_sort>;
1926using SrcRegReductionPriorityQueue = RegReductionPriorityQueue<src_ls_rr_sort>;
1927using HybridBURRPriorityQueue = RegReductionPriorityQueue<hybrid_ls_rr_sort>;
1928using ILPBURRPriorityQueue = RegReductionPriorityQueue<ilp_ls_rr_sort>;
// Fragment of checkSpecialNodes: schedule-low nodes sort first.
1945 if (LSchedLow != RSchedLow)
1946 return LSchedLow < RSchedLow ? 1 : -1;
// CalcNodeSethiUllmanNumber - iterative (explicit work-list) Sethi-Ullman
// computation; memoized in SUNumbers, 0 meaning "not yet computed".
1954 if (SUNumbers[SU->
NodeNum] != 0)
1955 return SUNumbers[SU->
NodeNum];
// WorkState pairs an SUnit with how many preds were already visited, so
// the DFS can resume after pushing an unevaluated predecessor.
1959 WorkState(
const SUnit *SU) : SU(SU) {}
1961 unsigned PredsProcessed = 0;
1966 while (!WorkList.
empty()) {
1967 auto &Temp = WorkList.
back();
1968 auto *TempSU = Temp.SU;
1969 bool AllPredsKnown =
true;
// First pass: ensure every non-control pred has a number; suspend and
// recurse (via the work list) on the first unknown one.
1971 for (
unsigned P = Temp.PredsProcessed;
P < TempSU->Preds.size(); ++
P) {
1972 auto &Pred = TempSU->Preds[
P];
1973 if (Pred.isCtrl())
continue;
1974 SUnit *PredSU = Pred.getSUnit();
1975 if (SUNumbers[PredSU->
NodeNum] == 0) {
// Debug-only cycle check: PredSU must not already be in flight.
1978 for (
auto It : WorkList)
1979 assert(It.SU != PredSU &&
"Trying to push an element twice?");
1982 Temp.PredsProcessed =
P + 1;
1983 WorkList.push_back(PredSU);
1984 AllPredsKnown =
false;
// Second pass (all preds known): classic Sethi-Ullman combine — take the
// max pred number, +1 ("Extra") per additional pred that ties it.
1993 unsigned SethiUllmanNumber = 0;
1995 for (
const SDep &Pred : TempSU->Preds) {
1996 if (Pred.isCtrl())
continue;
1997 SUnit *PredSU = Pred.getSUnit();
1998 unsigned PredSethiUllman = SUNumbers[PredSU->
NodeNum];
1999 assert(PredSethiUllman > 0 &&
"We should have evaluated this pred!");
2000 if (PredSethiUllman > SethiUllmanNumber) {
2001 SethiUllmanNumber = PredSethiUllman;
2003 }
else if (PredSethiUllman == SethiUllmanNumber)
2007 SethiUllmanNumber += Extra;
// Leaves get 1 so "computed" is distinguishable from "not computed".
2008 if (SethiUllmanNumber == 0)
2009 SethiUllmanNumber = 1;
2010 SUNumbers[TempSU->NodeNum] = SethiUllmanNumber;
2014 assert(SUNumbers[SU->
NodeNum] > 0 &&
"SethiUllman should never be zero!");
2015 return SUNumbers[SU->
NodeNum];
// CalculateSethiUllmanNumbers - compute numbers for every SUnit up front.
2020void RegReductionPQBase::CalculateSethiUllmanNumbers() {
2021 SethiUllmanNumbers.assign(SUnits->size(), 0);
2023 for (
const SUnit &SU : *SUnits)
// addNode - grow the memo table geometrically when new SUnits appear
// (cloning/unfolding during scheduling adds nodes).
2027void RegReductionPQBase::addNode(
const SUnit *SU) {
2028 unsigned SUSize = SethiUllmanNumbers.size();
2029 if (SUnits->size() > SUSize)
2030 SethiUllmanNumbers.resize(SUSize*2, 0);
// updateNode - invalidate the cached number (0 = recompute on demand).
2034void RegReductionPQBase::updateNode(
const SUnit *SU) {
2035 SethiUllmanNumbers[SU->
NodeNum] = 0;
// getNodePriority - Sethi-Ullman based priority with opcode special cases
// (subreg pseudos etc.; several branches elided).
2041unsigned RegReductionPQBase::getNodePriority(
const SUnit *SU)
const {
2048 if (
Opc == TargetOpcode::EXTRACT_SUBREG ||
2049 Opc == TargetOpcode::SUBREG_TO_REG ||
2050 Opc == TargetOpcode::INSERT_SUBREG)
2066 return SethiUllmanNumbers[SU->
NodeNum];
2068 unsigned Priority = SethiUllmanNumbers[SU->
NodeNum];
2072 return (NP > 0) ? NP : 0;
// dumpRegPressure fragment - debug print of pressure vs. limit per RC.
2082#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2084 for (
const TargetRegisterClass *RC :
TRI->regclasses()) {
2089 << RegLimit[Id] <<
'\n');
// HighRegPressure - would scheduling SU push any register class at or
// above its limit? Scans the reg defs of SU's data preds.
2094bool RegReductionPQBase::HighRegPressure(
const SUnit *SU)
const {
2098 for (
const SDep &Pred : SU->
Preds) {
2107 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
2108 RegDefPos.IsValid(); RegDefPos.Advance()) {
2109 unsigned RCId,
Cost;
2112 if ((RegPressure[RCId] +
Cost) >= RegLimit[RCId])
// MayReduceRegPressure - true if SU defines a value in a class that is
// currently at/over its limit (so scheduling it could help).
2119bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU)
const {
2122 if (!
N->isMachineOpcode() || !SU->
NumSuccs)
2125 unsigned NumDefs =
TII->get(
N->getMachineOpcode()).getNumDefs();
2126 for (
unsigned i = 0; i != NumDefs; ++i) {
2127 MVT VT =
N->getSimpleValueType(i);
2128 if (!
N->hasAnyUseOfValue(i))
2131 if (RegPressure[RCId] >= RegLimit[RCId])
// RegPressureDiff - estimate of SU's net pressure effect: preds' defs in
// over-limit classes count against, SU's own defs count for; LiveUses
// accumulates live-use count for the ilp heuristic.
2144int RegReductionPQBase::RegPressureDiff(SUnit *SU,
unsigned &LiveUses)
const {
2147 for (
const SDep &Pred : SU->
Preds) {
2158 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
2159 RegDefPos.IsValid(); RegDefPos.Advance()) {
2160 MVT VT = RegDefPos.GetValue();
2162 if (RegPressure[RCId] >= RegLimit[RCId])
2168 if (!
N || !
N->isMachineOpcode() || !SU->
NumSuccs)
2171 unsigned NumDefs =
TII->get(
N->getMachineOpcode()).getNumDefs();
2172 for (
unsigned i = 0; i != NumDefs; ++i) {
2173 MVT VT =
N->getSimpleValueType(i);
2174 if (!
N->hasAnyUseOfValue(i))
2177 if (RegPressure[RCId] >= RegLimit[RCId])
// scheduledNode - update RegPressure after SU issues: pressure rises for
// values SU's preds keep live, falls for values SU's defs retire
// (several bookkeeping branches elided).
2183void RegReductionPQBase::scheduledNode(SUnit *SU) {
2184 if (!TracksRegPressure)
2190 for (
const SDep &Pred : SU->
Preds) {
2216 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
2217 RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
2221 unsigned RCId,
Cost;
2232 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
2233 RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
2234 if (SkipRegDefs > 0)
2236 unsigned RCId,
Cost;
// Guard against pressure underflow — some nodes over-report defs.
2238 if (RegPressure[RCId] <
Cost) {
2242 <<
") has too many regdefs\n");
// unscheduledNode - inverse adjustment used when backtracking; pseudo ops
// (subreg/IMPLICIT_DEF/REG_SEQUENCE) don't affect pressure.
2252void RegReductionPQBase::unscheduledNode(SUnit *SU) {
2253 if (!TracksRegPressure)
2259 if (!
N->isMachineOpcode()) {
2263 unsigned Opc =
N->getMachineOpcode();
2264 if (
Opc == TargetOpcode::EXTRACT_SUBREG ||
2265 Opc == TargetOpcode::INSERT_SUBREG ||
2266 Opc == TargetOpcode::SUBREG_TO_REG ||
2267 Opc == TargetOpcode::REG_SEQUENCE ||
2268 Opc == TargetOpcode::IMPLICIT_DEF)
2272 for (
const SDep &Pred : SU->
Preds) {
2280 const SDNode *PN = PredSU->
getNode();
// Predecessor pseudo-op cases mirror the filter above.
2290 if (POpc == TargetOpcode::IMPLICIT_DEF)
2292 if (POpc == TargetOpcode::EXTRACT_SUBREG ||
2293 POpc == TargetOpcode::INSERT_SUBREG ||
2294 POpc == TargetOpcode::SUBREG_TO_REG) {
2300 if (POpc == TargetOpcode::REG_SEQUENCE) {
2302 const TargetRegisterClass *RC =
TRI->getRegClass(DstRCIdx);
2303 unsigned RCId = RC->
getID();
2310 for (
unsigned i = 0; i != NumDefs; ++i) {
// Re-add pressure for SU's own live defs (glue/chain results skipped).
2325 if (SU->
NumSuccs &&
N->isMachineOpcode()) {
2326 unsigned NumDefs =
TII->get(
N->getMachineOpcode()).getNumDefs();
2327 for (
unsigned i = NumDefs, e =
N->getNumValues(); i != e; ++i) {
2328 MVT VT =
N->getSimpleValueType(i);
2329 if (VT == MVT::Glue || VT == MVT::Other)
2331 if (!
N->hasAnyUseOfValue(i))
// Fragment of closestSucc: max successor height (ties/gaps elided).
2348 unsigned MaxHeight = 0;
2350 if (Succ.
isCtrl())
continue;
2357 if (Height > MaxHeight)
// Fragment of calcMaxScratches: count non-control pred/succ edges.
2366 unsigned Scratches = 0;
2368 if (Pred.isCtrl())
continue;
// Fragment of hasOnlyLiveInOpers: all data preds are CopyFromReg of a
// virtual register.
2377 bool RetVal =
false;
2379 if (Pred.isCtrl())
continue;
2380 const SUnit *PredSU = Pred.getSUnit();
2385 if (
Reg.isVirtual()) {
// Fragment of hasOnlyLiveOutUses: all data succs are CopyToReg of a
// virtual register.
2399 bool RetVal =
false;
2401 if (Succ.
isCtrl())
continue;
2406 if (
Reg.isVirtual()) {
// initVRegCycle fragment: mark preds of a vreg-cycle node.
2438 if (Pred.isCtrl())
continue;
2439 Pred.getSUnit()->isVRegCycle =
true;
// resetVRegCycle fragment: clear the flag on CopyFromReg preds.
2450 if (Pred.isCtrl())
continue;
2451 SUnit *PredSU = Pred.getSUnit();
2454 "VRegCycle def must be CopyFromReg");
2455 Pred.getSUnit()->isVRegCycle =
false;
// hasVRegCycleUse fragment.
2468 if (Pred.isCtrl())
continue;
2469 if (Pred.getSUnit()->isVRegCycle &&
// BUHasStall fragment: node not ready this cycle or hazard detected.
2482 if ((
int)SPQ->getCurCycle() < Height)
return true;
2483 if (SPQ->getHazardRec()->getHazardType(SU, 0)
// BUCompareLatency - compare by stall-adjusted height, then depth;
// returns <0 / 0 / >0 in the usual three-way-compare convention.
2492 RegReductionPQBase *SPQ) {
2497 int LHeight = (int)left->
getHeight() + LPenalty;
2498 int RHeight = (int)right->
getHeight() + RPenalty;
2511 if (LHeight != RHeight)
2512 return LHeight > RHeight ? 1 : -1;
2524 if (!SPQ->getHazardRec()->isEnabled()) {
2525 if (LHeight != RHeight)
2526 return LHeight > RHeight ? 1 : -1;
2528 int LDepth = left->
getDepth() - LPenalty;
2529 int RDepth = right->
getDepth() - RPenalty;
2530 if (LDepth != RDepth) {
2532 <<
") depth " << LDepth <<
" vs SU (" << right->
NodeNum
2533 <<
") depth " << RDepth <<
"\n");
2534 return LDepth < RDepth ? 1 : -1;
// BURRSort fragments: physreg-def affinity, Sethi-Ullman priority,
// source order, scratch count, call preference, queue-id tiebreak.
2550 if (LHasPhysReg != RHasPhysReg) {
2552 static const char *
const PhysRegMsg[] = {
" has no physreg",
2553 " defines a physreg" };
2556 << PhysRegMsg[LHasPhysReg] <<
" SU(" << right->
NodeNum
2557 <<
") " << PhysRegMsg[RHasPhysReg] <<
"\n");
2558 return LHasPhysReg < RHasPhysReg;
2563 unsigned LPriority = SPQ->getNodePriority(left);
2564 unsigned RPriority = SPQ->getNodePriority(right);
// Discount priorities by value counts (context elided).
2570 RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0;
2574 LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0;
2577 if (LPriority != RPriority)
2578 return LPriority > RPriority;
// Source order: a zero order means "no ordering info".
2583 unsigned LOrder = SPQ->getNodeOrdering(left);
2584 unsigned ROrder = SPQ->getNodeOrdering(right);
2588 if ((LOrder || ROrder) && LOrder != ROrder)
2589 return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
2612 return LDist < RDist;
2617 if (LScratch != RScratch)
2618 return LScratch > RScratch;
// Calls sort below nodes with real priority.
2622 if ((left->
isCall && RPriority > 0) || (right->
isCall && LPriority > 0))
2641 "NodeQueueId cannot be zero");
// bu_ls_rr_sort::operator() - plain BURRSort (body elided).
2646bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right)
const {
// src_ls_rr_sort::operator() - source order first, then BURRSort.
2654bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right)
const {
2658 unsigned LOrder = SPQ->getNodeOrdering(left);
2659 unsigned ROrder = SPQ->getNodeOrdering(right);
2663 if ((LOrder || ROrder) && LOrder != ROrder)
2664 return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
// hybrid_ls_rr_sort::isReady - allow pressure-reducing nodes early; others
// must be within ReadyDelay cycles and hazard-free.
2673bool hybrid_ls_rr_sort::isReady(SUnit *SU,
unsigned CurCycle)
const {
2674 static const unsigned ReadyDelay = 3;
2676 if (SPQ->MayReduceRegPressure(SU))
return true;
2678 if (SU->
getHeight() > (CurCycle + ReadyDelay))
return false;
2680 if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
// hybrid_ls_rr_sort::operator() - high register pressure wins; otherwise
// fall through to latency/BURR comparison (elided).
2688bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right)
const {
2696 bool LHigh = SPQ->HighRegPressure(left);
2697 bool RHigh = SPQ->HighRegPressure(right);
2700 if (LHigh && !RHigh) {
2705 }
else if (!LHigh && RHigh) {
2710 if (!LHigh && !RHigh) {
// ilp_ls_rr_sort::isReady - strict: in-cycle and hazard-free only.
2720bool ilp_ls_rr_sort::isReady(SUnit *SU,
unsigned CurCycle)
const {
2721 if (SU->
getHeight() > CurCycle)
return false;
2723 if (SPQ->getHazardRec()->getHazardType(SU, 0)
// canEnableCoalescing fragment: subreg pseudos can enable coalescing.
2737 if (
Opc == TargetOpcode::EXTRACT_SUBREG ||
2738 Opc == TargetOpcode::SUBREG_TO_REG ||
2739 Opc == TargetOpcode::INSERT_SUBREG)
// ilp_ls_rr_sort::operator() - pressure diff, then pressure-reduction,
// live uses, stalls, depth (gated by the Disable* flags above).
2754bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right)
const {
2762 unsigned LLiveUses = 0, RLiveUses = 0;
2763 int LPDiff = 0, RPDiff = 0;
2765 LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
2766 RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
2770 <<
"): " << LPDiff <<
" != SU(" << right->
NodeNum
2771 <<
"): " << RPDiff <<
"\n");
2772 return LPDiff > RPDiff;
2778 if (LReduce && !RReduce)
return false;
2779 if (RReduce && !LReduce)
return true;
2784 <<
" != SU(" << right->
NodeNum <<
"): " << RLiveUses
2786 return LLiveUses < RLiveUses;
2792 if (LStall != RStall)
2801 <<
"): " << right->
getDepth() <<
"\n");
// initNodes - record the SUnits, run the two-address and multiple-use
// preschedule passes, then compute Sethi-Ullman numbers.
2815void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
2819 AddPseudoTwoAddrDeps();
2821 if (!TracksRegPressure && !SrcOrder)
2822 PrescheduleNodesWithMultipleUses();
2824 CalculateSethiUllmanNumbers();
// Self-loop basic block check (context elided).
2827 if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB))
2828 for (SUnit &SU : sunits)
2836bool RegReductionPQBase::canClobber(
const SUnit *SU,
const SUnit *
Op) {
2839 const MCInstrDesc &MCID =
TII->get(
Opc);
2842 for (
unsigned i = 0; i !=
NumOps; ++i) {
2858 ScheduleDAGRRList *scheduleDAG,
2864 if (ImpDefs.
empty() && !RegMask)
2869 for (
const SDep &SuccPred : SuccSU->
Preds) {
2875 scheduleDAG->IsReachable(DepSU, SuccPred.
getSUnit()))
2882 if (
TRI->regsOverlap(ImpDef, SuccPred.
getReg()) &&
2883 scheduleDAG->IsReachable(DepSU, SuccPred.
getSUnit()))
2897 unsigned NumDefs =
TII->get(
N->getMachineOpcode()).getNumDefs();
2899 assert(!ImpDefs.
empty() &&
"Caller should check hasPhysRegDefs");
2902 if (!SUNode->isMachineOpcode())
2905 TII->get(SUNode->getMachineOpcode()).implicit_defs();
2907 if (SUImpDefs.
empty() && !SURegMask)
2909 for (
unsigned i = NumDefs, e =
N->getNumValues(); i != e; ++i) {
2910 MVT VT =
N->getSimpleValueType(i);
2911 if (VT == MVT::Glue || VT == MVT::Other)
2913 if (!
N->hasAnyUseOfValue(i))
2919 if (
TRI->regsOverlap(
Reg, SUReg))
2957void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
2959 for (SUnit &SU : *SUnits) {
2975 SDNode *PredFrameSetup =
nullptr;
2976 for (
const SDep &Pred : SU.
Preds)
2990 PredFrameSetup = PredND;
2995 if (PredFrameSetup !=
nullptr)
2999 SUnit *PredSU =
nullptr;
3000 for (
const SDep &Pred : SU.
Preds)
3022 for (
const SDep &PredSucc : PredSU->
Succs) {
3023 SUnit *PredSuccSU = PredSucc.
getSUnit();
3024 if (PredSuccSU == &SU)
continue;
3028 goto outer_loop_continue;
3032 goto outer_loop_continue;
3034 if (scheduleDAG->IsReachable(&SU, PredSuccSU))
3035 goto outer_loop_continue;
3041 dbgs() <<
" Prescheduling SU #" << SU.
NodeNum <<
" next to PredSU #"
3043 <<
" to guide scheduling in the presence of multiple uses\n");
3044 for (
unsigned i = 0; i != PredSU->
Succs.size(); ++i) {
3047 SUnit *SuccSU =
Edge.getSUnit();
3048 if (SuccSU != &SU) {
3049 Edge.setSUnit(PredSU);
3050 scheduleDAG->RemovePred(SuccSU,
Edge);
3051 scheduleDAG->AddPredQueued(&SU,
Edge);
3053 scheduleDAG->AddPredQueued(SuccSU,
Edge);
3057 outer_loop_continue:;
3068void RegReductionPQBase::AddPseudoTwoAddrDeps() {
3069 for (SUnit &SU : *SUnits) {
3078 unsigned Opc =
Node->getMachineOpcode();
3079 const MCInstrDesc &MCID =
TII->get(
Opc);
3082 for (
unsigned j = 0;
j !=
NumOps; ++
j) {
3088 const SUnit *DUSU = &(*SUnits)[DU->
getNodeId()];
3091 for (
const SDep &Succ : DUSU->
Succs) {
3106 while (SuccSU->
Succs.size() == 1 &&
3109 TargetOpcode::COPY_TO_REGCLASS)
3110 SuccSU = SuccSU->
Succs.front().getSUnit();
3123 if (SuccOpc == TargetOpcode::EXTRACT_SUBREG ||
3124 SuccOpc == TargetOpcode::INSERT_SUBREG ||
3125 SuccOpc == TargetOpcode::SUBREG_TO_REG)
3128 (!canClobber(SuccSU, DUSU) ||
3131 !scheduleDAG->IsReachable(SuccSU, &SU)) {
3133 <<
" Adding a pseudo-two-addr edge from SU #"
3152 BURegReductionPriorityQueue *PQ =
3153 new BURegReductionPriorityQueue(*IS->
MF,
false,
false,
TII,
TRI,
nullptr);
3154 ScheduleDAGRRList *SD =
new ScheduleDAGRRList(*IS->
MF,
false, PQ, OptLevel);
3155 PQ->setScheduleDAG(SD);
3166 SrcRegReductionPriorityQueue *PQ =
3167 new SrcRegReductionPriorityQueue(*IS->
MF,
false,
true,
TII,
TRI,
nullptr);
3168 ScheduleDAGRRList *SD =
new ScheduleDAGRRList(*IS->
MF,
false, PQ, OptLevel);
3169 PQ->setScheduleDAG(SD);
3181 HybridBURRPriorityQueue *PQ =
3182 new HybridBURRPriorityQueue(*IS->
MF,
true,
false,
TII,
TRI, TLI);
3184 ScheduleDAGRRList *SD =
new ScheduleDAGRRList(*IS->
MF,
true, PQ, OptLevel);
3185 PQ->setScheduleDAG(SD);
3196 ILPBURRPriorityQueue *PQ =
3197 new ILPBURRPriorityQueue(*IS->
MF,
true,
false,
TII,
TRI, TLI);
3198 ScheduleDAGRRList *SD =
new ScheduleDAGRRList(*IS->
MF,
true, PQ, OptLevel);
3199 PQ->setScheduleDAG(SD);
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
This file defines the DenseMap class.
const HexagonInstrInfo * TII
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg)
std::pair< BasicBlock *, BasicBlock * > Edge
static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg, const TargetInstrInfo *TII)
getPhysicalRegisterVT - Returns the ValueType of the physical register definition of the specified no...
static bool CheckForLiveRegDef(SUnit *SU, MCRegister Reg, std::vector< SUnit * > &LiveRegDefs, SmallSet< unsigned, 4 > &RegAdded, SmallVectorImpl< unsigned > &LRegs, const TargetRegisterInfo *TRI, const SDNode *Node=nullptr)
CheckForLiveRegDef - Return true and update live register vector if the specified register def of the...
static bool canEnableCoalescing(SUnit *SU)
static RegisterScheduler sourceListDAGScheduler("source", "Similar to list-burr but schedules in source " "order when possible", createSourceListDAGScheduler)
static cl::opt< bool > DisableSchedCycles("disable-sched-cycles", cl::Hidden, cl::init(false), cl::desc("Disable cycle-level precision during preRA scheduling"))
static cl::opt< bool > DisableSchedStalls("disable-sched-stalls", cl::Hidden, cl::init(true), cl::desc("Disable no-stall priority in sched=list-ilp"))
static bool hasOnlyLiveInOpers(const SUnit *SU)
hasOnlyLiveInOpers - Return true if SU has only value predecessors that are CopyFromReg from a virtua...
static bool IsChainDependent(SDNode *Outer, SDNode *Inner, unsigned NestLevel, const TargetInstrInfo *TII)
IsChainDependent - Test if Outer is reachable from Inner through chain dependencies.
static bool hasOnlyLiveOutUses(const SUnit *SU)
hasOnlyLiveOutUses - Return true if SU has only value successors that are CopyToReg to a virtual regi...
static cl::opt< bool > DisableSchedCriticalPath("disable-sched-critical-path", cl::Hidden, cl::init(false), cl::desc("Disable critical path priority in sched=list-ilp"))
static cl::opt< bool > Disable2AddrHack("disable-2addr-hack", cl::Hidden, cl::init(true), cl::desc("Disable scheduler's two-address hack"))
static RegisterScheduler ILPListDAGScheduler("list-ilp", "Bottom-up register pressure aware list scheduling " "which tries to balance ILP and register pressure", createILPListDAGScheduler)
static void resetVRegCycle(SUnit *SU)
static RegisterScheduler hybridListDAGScheduler("list-hybrid", "Bottom-up register pressure aware list scheduling " "which tries to balance latency and register pressure", createHybridListDAGScheduler)
static bool canClobberReachingPhysRegUse(const SUnit *DepSU, const SUnit *SU, ScheduleDAGRRList *scheduleDAG, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI)
canClobberReachingPhysRegUse - True if SU would clobber one of it's successor's explicit physregs who...
static cl::opt< bool > DisableSchedPhysRegJoin("disable-sched-physreg-join", cl::Hidden, cl::init(false), cl::desc("Disable physreg def-use affinity"))
static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI)
canClobberPhysRegDefs - True if SU would clobber one of SuccSU's physical register defs.
static cl::opt< unsigned > AvgIPC("sched-avg-ipc", cl::Hidden, cl::init(1), cl::desc("Average inst/cycle when no target itinerary exists."))
static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos, const TargetLowering *TLI, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, unsigned &RegClass, unsigned &Cost, const MachineFunction &MF)
GetCostForDef - Looks up the register class and cost for a given definition.
static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ)
static cl::opt< bool > DisableSchedRegPressure("disable-sched-reg-pressure", cl::Hidden, cl::init(false), cl::desc("Disable regpressure priority in sched=list-ilp"))
static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ)
static void initVRegCycle(SUnit *SU)
static constexpr unsigned RegSequenceCost
static cl::opt< int > MaxReorderWindow("max-sched-reorder", cl::Hidden, cl::init(6), cl::desc("Number of instructions to allow ahead of the critical path " "in sched=list-ilp"))
static SDNode * FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest, const TargetInstrInfo *TII)
FindCallSeqStart - Starting from the (lowered) CALLSEQ_END node, locate the corresponding (lowered) C...
static bool isOperandOf(const SUnit *SU, SDNode *N)
static cl::opt< bool > DisableSchedVRegCycle("disable-sched-vrcycle", cl::Hidden, cl::init(false), cl::desc("Disable virtual register cycle interference checks"))
static int checkSpecialNodes(const SUnit *left, const SUnit *right)
static cl::opt< bool > DisableSchedLiveUses("disable-sched-live-uses", cl::Hidden, cl::init(true), cl::desc("Disable live use priority in sched=list-ilp"))
static const uint32_t * getNodeRegMask(const SDNode *N)
getNodeRegMask - Returns the register mask attached to an SDNode, if any.
static unsigned closestSucc(const SUnit *SU)
closestSucc - Returns the scheduled cycle of the successor which is closest to the current cycle.
static bool hasVRegCycleUse(const SUnit *SU)
static cl::opt< bool > DisableSchedHeight("disable-sched-height", cl::Hidden, cl::init(false), cl::desc("Disable scheduled-height priority in sched=list-ilp"))
static RegisterScheduler burrListDAGScheduler("list-burr", "Bottom-up register reduction list scheduling", createBURRListDAGScheduler)
static unsigned calcMaxScratches(const SUnit *SU)
calcMaxScratches - Returns an cost estimate of the worse case requirement for scratch registers,...
static unsigned CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector< unsigned > &SUNumbers)
CalcNodeSethiUllmanNumber - Compute Sethi Ullman number.
static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref, RegReductionPQBase *SPQ)
static void CheckForLiveRegDefMasked(SUnit *SU, const uint32_t *RegMask, ArrayRef< SUnit * > LiveRegDefs, SmallSet< unsigned, 4 > &RegAdded, SmallVectorImpl< unsigned > &LRegs)
CheckForLiveRegDefMasked - Check for any live physregs that are clobbered by RegMask,...
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
This file describes how to lower LLVM code to machine code.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool hasOptionalDef() const
Set if this instruction has an optional definition, e.g.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
ArrayRef< MCPhysReg > implicit_defs() const
Return a list of registers that are potentially written by any instance of this machine instruction.
bool isCommutable() const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z,...
MCRegAliasIterator enumerates all registers aliasing Reg.
Wrapper class representing physical registers. Should be passed by value.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
Wrapper class representing virtual and physical registers.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
int getNodeId() const
Return the unique node id.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
unsigned getIROrder() const
Return the node ordering.
void setNodeId(int Id)
Set unique node id.
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
LLVM_ABI bool hasAnyUseOfValue(unsigned Value) const
Return true if there are any use of the indicated value.
SDNode * getGluedNode() const
If this node has a glue operand, return the node to which the glue operand points.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
@ Data
Regular data dependence (aka true-dependence).
@ Artificial
Arbitrary strong DAG edge (no real dependence).
unsigned getLatency() const
Returns the latency value for this edge, which roughly means the minimum number of cycles that must e...
bool isAssignedRegDep() const
Tests if this is a Data dependence that is associated with a register.
bool isArtificial() const
Tests if this is an Order dependence that is marked as "artificial", meaning it isn't necessary for c...
bool isCtrl() const
Shorthand for getKind() != SDep::Data.
Register getReg() const
Returns the register associated with this edge.
Scheduling unit. This is a node in the scheduling DAG.
bool isCall
Is a function call.
LLVM_ABI void setHeightToAtLeast(unsigned NewHeight)
If NewHeight is greater than this node's height value, set it to be the new height value.
unsigned NodeQueueId
Queue id of node.
unsigned NodeNum
Entry # of node in the node vector.
bool hasPhysRegClobbers
Has any physreg defs, used or not.
bool isCallOp
Is a function call operand.
const TargetRegisterClass * CopyDstRC
Is a special copy node if != nullptr.
unsigned getHeight() const
Returns the height of this node, which is the length of the maximum path down to any node which has n...
LLVM_ABI void setHeightDirty()
Sets a flag in this node to indicate that its stored Height value will require recomputation the next...
bool isSucc(const SUnit *N) const
Tests if node N is a successor of this node.
LLVM_ABI void removePred(const SDep &D)
Removes the specified edge as a pred of the current node if it exists.
unsigned short Latency
Node latency.
unsigned short NumRegDefsLeft
bool isPending
True once pending.
unsigned getDepth() const
Returns the depth of this node, which is the length of the maximum path up to any node which has no p...
bool isScheduled
True once scheduled.
bool isAvailable
True once available.
bool isScheduleLow
True if preferable to schedule low.
bool hasPhysRegDefs
Has physreg defs that are being used.
SmallVector< SDep, 4 > Succs
All sunit successors.
Sched::Preference SchedulingPref
Scheduling preference.
const TargetRegisterClass * CopySrcRC
SDNode * getNode() const
Returns the representative SDNode for this SUnit.
bool isTwoAddress
Is a two-address instruction.
bool isCommutable
Is a commutable instruction.
bool isVRegCycle
May use and def the same vreg.
SmallVector< SDep, 4 > Preds
All sunit predecessors.
LLVM_ABI bool addPred(const SDep &D, bool Required=true)
Adds the specified edge as a pred of the current node if not already.
RegDefIter - In place iteration over the values defined by an SUnit.
const SDNode * GetNode() const
ScheduleDAGSDNodes - A ScheduleDAG for scheduling SDNode-based DAGs.
This class can compute a topological ordering for SUnits and provides methods for dynamically updatin...
void MarkDirty()
Mark the ordering as temporarily broken, after a new node has been added.
virtual void dumpNode(const SUnit &SU) const =0
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
unsigned getMaxLookAhead() const
virtual void RecedeCycle()
RecedeCycle - This callback is invoked whenever the next bottom-up instruction to be scheduled cannot...
virtual void Reset()
Reset - This callback is invoked when a new block of instructions is about to be schedule.
virtual void EmitInstruction(SUnit *)
EmitInstruction - This callback is invoked when an instruction is emitted, to advance the hazard stat...
virtual bool atIssueLimit() const
atIssueLimit - Return true if no more instructions may be issued in this cycle.
virtual HazardType getHazardType(SUnit *, int Stalls=0)
getHazardType - Return the hazard type of emitting this node.
This interface is used to plug different priorities computation algorithms into the list scheduler.
void setCurCycle(unsigned Cycle)
virtual void remove(SUnit *SU)=0
virtual void releaseState()=0
virtual void scheduledNode(SUnit *)
As each node is scheduled, this method is invoked.
virtual bool tracksRegPressure() const
virtual void dump(ScheduleDAG *) const
bool hasReadyFilter() const
virtual void initNodes(std::vector< SUnit > &SUnits)=0
virtual bool empty() const =0
virtual void unscheduledNode(SUnit *)
virtual void addNode(const SUnit *SU)=0
virtual void updateNode(const SUnit *SU)=0
virtual void push(SUnit *U)=0
SelectionDAGISel - This is the common base class used for SelectionDAG-based pattern-matching instruc...
const TargetLowering * TLI
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
TargetInstrInfo - Interface to description of machine instruction set.
virtual uint8_t getRepRegClassCostFor(MVT VT) const
Return the cost of the 'representative' register class for the specified value type.
virtual const TargetRegisterClass * getRepRegClassFor(MVT VT) const
Return the 'representative' register class for the specified value type.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
unsigned getID() const
Return the register class ID number.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual unsigned getRegPressureLimit(const TargetRegisterClass *RC, MachineFunction &MF) const
Return the register pressure "high water mark" for the specific register class.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
@ ANNOTATION_LABEL
ANNOTATION_LABEL - Represents a mid basic block label used by annotations.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ LIFETIME_START
This corresponds to the llvm.lifetime.
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ INLINEASM
INLINEASM - Represents an inline asm block.
initializer< Ty > init(const Ty &Val)
Sequence
A sequence of states that a pointer may go through in which an objc_retain and objc_release are actua...
NodeAddr< DefNode * > Def
NodeAddr< NodeBase * > Node
LLVM_ABI std::error_code remove(const Twine &path, bool IgnoreNonExisting=true)
Remove path.
This is an optimization pass for GlobalISel generic memory operations.
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
void fill(R &&Range, T &&Value)
Provide wrappers to std::fill which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI ScheduleDAGSDNodes * createBURRListDAGScheduler(SelectionDAGISel *IS, CodeGenOptLevel OptLevel)
createBURRListDAGScheduler - This creates a bottom up register usage reduction list scheduler.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI ScheduleDAGSDNodes * createHybridListDAGScheduler(SelectionDAGISel *IS, CodeGenOptLevel)
createHybridListDAGScheduler - This creates a bottom up register pressure aware list scheduler that m...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
CodeGenOptLevel
Code generation optimization level.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
LLVM_ABI ScheduleDAGSDNodes * createSourceListDAGScheduler(SelectionDAGISel *IS, CodeGenOptLevel OptLevel)
createSourceListDAGScheduler - This creates a bottom up list scheduler that schedules nodes in source...
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI ScheduleDAGSDNodes * createILPListDAGScheduler(SelectionDAGISel *IS, CodeGenOptLevel)
createILPListDAGScheduler - This creates a bottom up register pressure aware list scheduler that trie...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
LLVM_ABI Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.