#define DEBUG_TYPE "pipeliner"

static cl::opt<bool> SwapBranchTargetsMVE(
    "pipeliner-swap-branch-targets-mve", cl::Hidden, cl::init(false),
    cl::desc("Swap target blocks of a conditional branch for MVE expander"));
static void getPhiRegs(MachineInstr &Phi, MachineBasicBlock *Loop,
                       unsigned &InitVal, unsigned &LoopVal) {
  assert(Phi.isPHI() && "Expecting a Phi.");

  InitVal = 0;
  LoopVal = 0;
  for (unsigned i = 1, e = Phi.getNumOperands(); i != e; i += 2)
    if (Phi.getOperand(i + 1).getMBB() != Loop)
      InitVal = Phi.getOperand(i).getReg();
    else
      LoopVal = Phi.getOperand(i).getReg();

  assert(InitVal != 0 && LoopVal != 0 && "Unexpected Phi structure.");
}
static unsigned getInitPhiReg(MachineInstr &Phi, MachineBasicBlock *LoopBB) {
  for (unsigned i = 1, e = Phi.getNumOperands(); i != e; i += 2)
    if (Phi.getOperand(i + 1).getMBB() != LoopBB)
      return Phi.getOperand(i).getReg();
  return 0;
}
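/// Return the Phi register value that comes from the loop block.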
static unsigned getLoopPhiReg(MachineInstr &Phi, MachineBasicBlock *LoopBB) {
  for (unsigned i = 1, e = Phi.getNumOperands(); i != e; i += 2)
    if (Phi.getOperand(i + 1).getMBB() == LoopBB)
      return Phi.getOperand(i).getReg();
  return 0;
}
bool PhiIsSwapped = false;

if (UseStage != -1 && UseStage >= DefStage)
  Diff = UseStage - DefStage;
if (MI->isPHI()) {
  if (isLoopCarried(*MI))
    ++Diff;
  else
    PhiIsSwapped = true;
}
MaxDiff = std::max(Diff, MaxDiff);

RegToStageDiff[Reg] = std::make_pair(MaxDiff, PhiIsSwapped);

generatePipelinedLoop();
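// generatePipelinedLoop drives the in-place expansion: it emits the prolog
// blocks, clones the scheduled instructions into the kernel block, creates
// the kernel Phis, emits the epilog blocks, and finally fixes up lifetimes,
// dead instructions and branches before erasing the original loop body.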
void ModuloScheduleExpander::generatePipelinedLoop() {
  ValueMapTy *VRMap = new ValueMapTy[(MaxStageCount + 1) * 2];
  ValueMapTy *VRMapPhi = new ValueMapTy[(MaxStageCount + 1) * 2];

  generateProlog(MaxStageCount, KernelBB, VRMap, PrologBBs);

  unsigned StageNum = Schedule.getStage(CI);
  MachineInstr *NewMI = cloneInstr(CI, MaxStageCount, StageNum);
  updateInstruction(NewMI, false, MaxStageCount, StageNum, VRMap);
  InstrMap[NewMI] = CI;

  updateInstruction(NewMI, false, MaxStageCount, 0, VRMap);
  InstrMap[NewMI] = &MI;

  NewKernel = KernelBB;
  generateExistingPhis(KernelBB, PrologBBs.back(), KernelBB, KernelBB, VRMap,
                       InstrMap, MaxStageCount, MaxStageCount, false);
  generatePhis(KernelBB, PrologBBs.back(), KernelBB, KernelBB, VRMap, VRMapPhi,
               InstrMap, MaxStageCount, MaxStageCount, false);

  generateEpilog(MaxStageCount, KernelBB, BB, VRMap, VRMapPhi, EpilogBBs,
                 PrologBBs);
  splitLifetimes(KernelBB, EpilogBBs);
  removeDeadInstructions(KernelBB, EpilogBBs);
  addBranches(*Preheader, PrologBBs, KernelBB, EpilogBBs, VRMap);

  BB->eraseFromParent();
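// Generate the pipeline prolog: one block per prolog stage, each containing
// copies of the kernel instructions whose stage is already live in that
// prolog iteration.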
void ModuloScheduleExpander::generateProlog(unsigned LastStage,
                                            MachineBasicBlock *KernelBB,
                                            ValueMapTy *VRMap,
                                            MBBVectorTy &PrologBBs) {
  for (unsigned i = 0; i < LastStage; ++i) {
    for (int StageNum = i; StageNum >= 0; --StageNum) {
      if (Schedule.getStage(&*BBI) == StageNum) {
        MachineInstr *NewMI =
            cloneAndChangeInstr(&*BBI, i, (unsigned)StageNum);
        updateInstruction(NewMI, false, i, (unsigned)StageNum, VRMap);
        InstrMap[NewMI] = &*BBI;
      }
    }
    rewritePhiValues(NewBB, i, VRMap, InstrMap);
    dbgs() << "prolog:\n";
  }
void ModuloScheduleExpander::generateEpilog(
    unsigned LastStage, MachineBasicBlock *KernelBB, MachineBasicBlock *OrigBB,
    ValueMapTy *VRMap, ValueMapTy *VRMapPhi, MBBVectorTy &EpilogBBs,
    MBBVectorTy &PrologBBs) {
  assert(!checkBranch && "generateEpilog must be able to analyze the branch");

  if (*LoopExitI == KernelBB)
    ++LoopExitI;
  assert(LoopExitI != KernelBB->succ_end() && "Expecting a successor");

  int EpilogStage = LastStage + 1;
  for (unsigned i = LastStage; i >= 1; --i, ++EpilogStage) {
    if (EpilogStart == LoopExitBB)
      EpilogStart = NewBB;

    for (unsigned StageNum = i; StageNum <= LastStage; ++StageNum) {
      for (auto &BBI : *BB) {
        if ((unsigned)Schedule.getStage(In) == StageNum) {
          updateInstruction(NewMI, i == 1, EpilogStage, 0, VRMap);
          InstrMap[NewMI] = In;
        }
      }
    }
    generateExistingPhis(NewBB, PrologBBs[i - 1], PredBB, KernelBB, VRMap,
                         InstrMap, LastStage, EpilogStage, i == 1);
    generatePhis(NewBB, PrologBBs[i - 1], PredBB, KernelBB, VRMap, VRMapPhi,
                 InstrMap, LastStage, EpilogStage, i == 1);
    dbgs() << "epilog:\n";
  }

  assert((OrigBB == TBB || OrigBB == FBB) &&
         "Unable to determine looping branch direction");

  if (EpilogBBs.size() > 0) {
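/// Replace all uses of FromReg that appear outside the specified basic
/// block with ToReg.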
static void replaceRegUsesAfterLoop(unsigned FromReg, unsigned ToReg,
                                    MachineBasicBlock *MBB,
                                    MachineRegisterInfo &MRI,
                                    LiveIntervals &LIS) {
    if (O.getParent()->getParent() != MBB)
      O.setReg(ToReg);
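/// Return true if the register has a use that occurs outside the
/// specified loop.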
static bool hasUseAfterLoop(unsigned Reg, MachineBasicBlock *BB,
                            MachineRegisterInfo &MRI) {
    if (MO.getParent()->getParent() != BB)
      return true;
  return false;
}
void ModuloScheduleExpander::generateExistingPhis(
    MachineBasicBlock *NewBB, MachineBasicBlock *BB1, MachineBasicBlock *BB2,
    MachineBasicBlock *KernelBB, ValueMapTy *VRMap, InstrMapTy &InstrMap,
    unsigned LastStageNum, unsigned CurStageNum, bool IsLast) {
  unsigned PrologStage = 0;
  unsigned PrevStage = 0;
  bool InKernel = (LastStageNum == CurStageNum);
  if (InKernel) {
    PrologStage = LastStageNum - 1;
    PrevStage = CurStageNum;
  } else {
    PrologStage = LastStageNum - (CurStageNum - LastStageNum);
    PrevStage = LastStageNum + (CurStageNum - LastStageNum) - 1;
  }

  for (MachineBasicBlock::iterator BBI = BB->instr_begin(),
                                   BBE = BB->getFirstNonPHI();
       BBI != BBE; ++BBI) {
    unsigned InitVal = 0;
    unsigned LoopVal = 0;

    unsigned PhiOp2 = LoopVal;
    if (auto It = VRMap[LastStageNum].find(LoopVal);
        It != VRMap[LastStageNum].end())
      PhiOp2 = It->second;

    int StageScheduled = Schedule.getStage(&*BBI);
    unsigned NumStages = getStagesForReg(Def, CurStageNum);
    if (NumStages == 0) {
      unsigned NewReg = VRMap[PrevStage][LoopVal];
      rewriteScheduledInstr(NewBB, InstrMap, CurStageNum, 0, &*BBI, Def,
                            NewReg);
      if (VRMap[CurStageNum].count(LoopVal))
        VRMap[CurStageNum][Def] = VRMap[CurStageNum][LoopVal];
    }

    unsigned MaxPhis = PrologStage + 2;
    if (!InKernel && (int)PrologStage <= LoopValStage)
      MaxPhis = std::max((int)MaxPhis - (int)LoopValStage, 1);
    unsigned NumPhis = std::min(NumStages, MaxPhis);

    unsigned AccessStage = (LoopValStage != -1) ? LoopValStage : StageScheduled;
    if (!InKernel && StageScheduled >= LoopValStage && AccessStage == 0 &&
    if (InKernel && LoopValStage != -1 && StageScheduled > LoopValStage)
      StageDiff = StageScheduled - LoopValStage;
    for (unsigned np = 0; np < NumPhis; ++np) {
      if (np > PrologStage || StageScheduled >= (int)LastStageNum)
        PhiOp1 = LoopVal;
      else if (PrologStage >= AccessStage + StageDiff + np &&
               VRMap[PrologStage - StageDiff - np].count(LoopVal) != 0)
        PhiOp1 = VRMap[PrologStage - StageDiff - np][LoopVal];
      else if (PrologStage >= AccessStage + StageDiff + np) {
        while (InstOp1 && InstOp1->isPHI() && InstOp1->getParent() == BB) {
          int PhiStage = Schedule.getStage(InstOp1);
          if ((int)(PrologStage - StageDiff - np) < PhiStage + Indirects)
        }
        int PhiOpStage = Schedule.getStage(InstOp1);
        int StageAdj = (PhiOpStage != -1 ? PhiStage - PhiOpStage : 0);
        if (PhiOpStage != -1 && PrologStage - StageAdj >= Indirects + np &&
            VRMap[PrologStage - StageAdj - Indirects - np].count(PhiOp1)) {
          PhiOp1 = VRMap[PrologStage - StageAdj - Indirects - np][PhiOp1];
        }
      }

      bool LoopDefIsPhi = PhiInst && PhiInst->isPHI();
      int StageDiffAdj = 0;
      if (LoopValStage != -1 && StageScheduled > LoopValStage)
        StageDiffAdj = StageScheduled - LoopValStage;
      if (np == 0 && PrevStage == LastStageNum &&
          (StageScheduled != 0 || LoopValStage != 0) &&
          VRMap[PrevStage - StageDiffAdj].count(LoopVal))
        PhiOp2 = VRMap[PrevStage - StageDiffAdj][LoopVal];
      else if (np > 0 && PrevStage == LastStageNum &&
               VRMap[PrevStage - np + 1].count(Def))
        PhiOp2 = VRMap[PrevStage - np + 1][Def];
      else if (static_cast<unsigned>(LoopValStage) > PrologStage + 1 &&
               VRMap[PrevStage - StageDiffAdj - np].count(LoopVal))
        PhiOp2 = VRMap[PrevStage - StageDiffAdj - np][LoopVal];
      else if (VRMap[PrevStage - np].count(Def) &&
               (!LoopDefIsPhi || (PrevStage != LastStageNum) ||
                (LoopValStage == StageScheduled)))
        PhiOp2 = VRMap[PrevStage - np][Def];

      if (static_cast<int>(PrologStage - np) >= StageScheduled) {
        int LVNumStages = getStagesForPhi(LoopVal);
        int StageDiff = (StageScheduled - LoopValStage);
        LVNumStages -= StageDiff;
        if (LVNumStages > (int)np && VRMap[CurStageNum].count(LoopVal)) {
          unsigned ReuseStage = CurStageNum;
          if (isLoopCarried(*PhiInst))
            ReuseStage -= LVNumStages;
          if (VRMap[ReuseStage - np].count(LoopVal)) {
            NewReg = VRMap[ReuseStage - np][LoopVal];
            rewriteScheduledInstr(NewBB, InstrMap, CurStageNum, np, &*BBI,
                                  Def, NewReg);
            VRMap[CurStageNum - np][Def] = NewReg;
            if (VRMap[LastStageNum - np - 1].count(LoopVal))
              PhiOp2 = VRMap[LastStageNum - np - 1][LoopVal];
          }
        }
        if (IsLast && np == NumPhis - 1)
        if (InKernel && StageDiff > 0 &&
            VRMap[CurStageNum - StageDiff - np].count(LoopVal))
          PhiOp2 = VRMap[CurStageNum - StageDiff - np][LoopVal];
      }

              TII->get(TargetOpcode::PHI), NewReg);
      InstrMap[NewPhi] = &*BBI;

      unsigned PrevReg = 0;
      if (InKernel && VRMap[PrevStage - np].count(LoopVal))
        PrevReg = VRMap[PrevStage - np][LoopVal];
      rewriteScheduledInstr(NewBB, InstrMap, CurStageNum, np, &*BBI, Def,
                            NewReg, PrevReg);
      if (VRMap[CurStageNum - np].count(Def)) {
        unsigned R = VRMap[CurStageNum - np][Def];
        rewriteScheduledInstr(NewBB, InstrMap, CurStageNum, np, &*BBI, R,
                              NewReg);
      }
      if (IsLast && np == NumPhis - 1)

      VRMap[CurStageNum - np][Def] = NewReg;
    }

    while (NumPhis++ < NumStages) {
      rewriteScheduledInstr(NewBB, InstrMap, CurStageNum, NumPhis, &*BBI, Def,
                            NewReg);
    }

    if (NumStages == 0 && IsLast && VRMap[CurStageNum].count(LoopVal))
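// generatePhis creates the additional Phis (beyond those copied from the
// original loop) that are needed when a definition is used in a later stage
// of the generated code.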
void ModuloScheduleExpander::generatePhis(
    MachineBasicBlock *NewBB, MachineBasicBlock *BB1, MachineBasicBlock *BB2,
    MachineBasicBlock *KernelBB, ValueMapTy *VRMap, ValueMapTy *VRMapPhi,
    InstrMapTy &InstrMap, unsigned LastStageNum, unsigned CurStageNum,
    bool IsLast) {
  unsigned PrologStage = 0;
  unsigned PrevStage = 0;
  unsigned StageDiff = CurStageNum - LastStageNum;
  bool InKernel = (StageDiff == 0);
  if (InKernel) {
    PrologStage = LastStageNum - 1;
    PrevStage = CurStageNum;
  } else {
    PrologStage = LastStageNum - StageDiff;
    PrevStage = LastStageNum + StageDiff - 1;
  }

  for (MachineBasicBlock::iterator BBI = BB->getFirstNonPHI(),
                                   BBE = BB->instr_end();
       BBI != BBE; ++BBI) {
    for (unsigned i = 0, e = BBI->getNumOperands(); i != e; ++i) {
      int StageScheduled = Schedule.getStage(&*BBI);
      assert(StageScheduled != -1 && "Expecting scheduled instruction.");
      unsigned NumPhis = getStagesForReg(Def, CurStageNum);
      if (!InKernel && NumPhis == 0 && StageScheduled == 0 &&
      if (!InKernel && (unsigned)StageScheduled > PrologStage)

      PhiOp2 = VRMap[PrevStage][Def];
      if (InstOp2->isPHI() && InstOp2->getParent() == NewBB)

      if (NumPhis > PrologStage + 1 - StageScheduled)
        NumPhis = PrologStage + 1 - StageScheduled;
      for (unsigned np = 0; np < NumPhis; ++np) {
        unsigned PhiOp1 = VRMap[PrologStage][Def];
        if (np <= PrologStage)
          PhiOp1 = VRMap[PrologStage - np][Def];
        if (PrevStage == LastStageNum && np == 0)
          PhiOp2 = VRMap[LastStageNum][Def];
        else
          PhiOp2 = VRMapPhi[PrevStage - np][Def];

                TII->get(TargetOpcode::PHI), NewReg);
        InstrMap[NewPhi] = &*BBI;

        rewriteScheduledInstr(NewBB, InstrMap, CurStageNum, np, &*BBI, PhiOp1,
                              NewReg);
        rewriteScheduledInstr(NewBB, InstrMap, CurStageNum, np, &*BBI, PhiOp2,
                              NewReg);

        VRMapPhi[PrevStage - np - 1][Def] = NewReg;
        VRMapPhi[CurStageNum - np][Def] = NewReg;
        if (np == NumPhis - 1)
          rewriteScheduledInstr(NewBB, InstrMap, CurStageNum, np, &*BBI, Def,
                                NewReg);
        if (IsLast && np == NumPhis - 1)
void ModuloScheduleExpander::removeDeadInstructions(MachineBasicBlock *KernelBB,
                                                    MBBVectorTy &EpilogBBs) {
    if (MI->isInlineAsm()) {
    bool SawStore = false;
    if (!MI->isSafeToMove(SawStore) && !MI->isPHI()) {
      unsigned realUses = 0;
      if (U.getParent()->getParent() != BB) {
    MI++->eraseFromParent();
    MI.eraseFromParent();
void ModuloScheduleExpander::splitLifetimes(MachineBasicBlock *KernelBB,
                                            MBBVectorTy &EpilogBBs) {
  for (auto &PHI : KernelBB->phis()) {
      if (I->isPHI() && I->getParent() == KernelBB) {
    if (!MI || MI->getParent() != KernelBB || MI->isPHI())
      continue;
    unsigned SplitReg = 0;
        if (BBJ.readsRegister(Def, nullptr)) {
                  TII->get(TargetOpcode::COPY), SplitReg)
          BBJ.substituteRegister(Def, SplitReg, 0, *TRI);
      for (auto &Epilog : EpilogBBs)
          if (I.readsRegister(Def, nullptr))
            I.substituteRegister(Def, SplitReg, 0, *TRI);
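/// Remove the incoming block from the Phis in a basic block.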
static void removePhis(MachineBasicBlock *BB, MachineBasicBlock *Incoming) {
    for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2)
      if (MI.getOperand(i + 1).getMBB() == Incoming) {
        MI.removeOperand(i + 1);
        MI.removeOperand(i);
        break;
      }
void ModuloScheduleExpander::addBranches(MachineBasicBlock &PreheaderBB,
                                         MBBVectorTy &PrologBBs,
                                         MachineBasicBlock *KernelBB,
                                         MBBVectorTy &EpilogBBs,
                                         ValueMapTy *VRMap) {
  assert(PrologBBs.size() == EpilogBBs.size() && "Prolog/Epilog mismatch");

  unsigned MaxIter = PrologBBs.size() - 1;
  for (unsigned i = 0, j = MaxIter; i <= MaxIter; ++i, --j) {
    std::optional<bool> StaticallyGreater =
    unsigned numAdded = 0;
    if (!StaticallyGreater) {
    } else if (*StaticallyGreater == false) {
      Prolog->removeSuccessor(LastPro);
      if (LastPro != LastEpi) {
      if (LastPro == KernelBB) {
         I != E && numAdded > 0; ++I, --numAdded)
      updateInstruction(&*I, false, j, 0, VRMap);
  }

  LoopInfo->setPreheader(PrologBBs[MaxIter]);
  LoopInfo->adjustTripCount(-(MaxIter + 1));
bool ModuloScheduleExpander::computeDelta(MachineInstr &MI, unsigned &Delta) {
  bool OffsetIsScalable;
  if (OffsetIsScalable)
    return false;

  if (!BaseOp->isReg())
    return false;

  if (BaseDef && BaseDef->isPHI()) {
    BaseDef = MRI.getVRegDef(BaseReg);
void ModuloScheduleExpander::updateMemOperands(MachineInstr &NewMI,
                                               MachineInstr &OldMI,
                                               unsigned Num) {
    if (MMO->isVolatile() || MMO->isAtomic() ||
        (MMO->isInvariant() && MMO->isDereferenceable()) ||
        (!MMO->getValue())) {
    if (Num != UINT_MAX && computeDelta(OldMI, Delta)) {
      int64_t AdjOffset = Delta * Num;
MachineInstr *ModuloScheduleExpander::cloneInstr(MachineInstr *OldMI,
                                                 unsigned CurStageNum,
                                                 unsigned InstStageNum) {
  updateMemOperands(*NewMI, *OldMI, CurStageNum - InstStageNum);
MachineInstr *ModuloScheduleExpander::cloneAndChangeInstr(
    MachineInstr *OldMI, unsigned CurStageNum, unsigned InstStageNum) {
  auto It = InstrChanges.find(OldMI);
  if (It != InstrChanges.end()) {
    std::pair<unsigned, int64_t> RegAndOffset = It->second;
    unsigned BasePos, OffsetPos;
    MachineInstr *LoopDef = findDefInLoop(RegAndOffset.first);
    if (Schedule.getStage(LoopDef) > (signed)InstStageNum)
      NewOffset += RegAndOffset.second * (CurStageNum - InstStageNum);
  updateMemOperands(*NewMI, *OldMI, CurStageNum - InstStageNum);
void ModuloScheduleExpander::updateInstruction(MachineInstr *NewMI,
                                               bool LastDef,
                                               unsigned CurStageNum,
                                               unsigned InstrStageNum,
                                               ValueMapTy *VRMap) {
      VRMap[CurStageNum][reg] = NewReg;
    } else if (MO.isUse()) {
      int DefStageNum = Schedule.getStage(Def);
      unsigned StageNum = CurStageNum;
      if (DefStageNum != -1 && (int)InstrStageNum > DefStageNum) {
        unsigned StageDiff = (InstrStageNum - DefStageNum);
        StageNum -= StageDiff;
      }
      if (auto It = VRMap[StageNum].find(reg); It != VRMap[StageNum].end())
        MO.setReg(It->second);
MachineInstr *ModuloScheduleExpander::findDefInLoop(unsigned Reg) {
  while (Def->isPHI()) {
    if (!Visited.insert(Def).second)
      break;
    for (unsigned i = 1, e = Def->getNumOperands(); i < e; i += 2)
      if (Def->getOperand(i + 1).getMBB() == BB) {
        Def = MRI.getVRegDef(Def->getOperand(i).getReg());
        break;
      }
  }
  return Def;
unsigned ModuloScheduleExpander::getPrevMapVal(unsigned StageNum,
                                               unsigned PhiStage,
                                               unsigned LoopVal,
                                               unsigned LoopStage,
                                               ValueMapTy *VRMap,
                                               MachineBasicBlock *BB) {
  unsigned PrevVal = 0;
  if (StageNum > PhiStage) {
    if (PhiStage == LoopStage && VRMap[StageNum - 1].count(LoopVal))
      PrevVal = VRMap[StageNum - 1][LoopVal];
    else if (VRMap[StageNum].count(LoopVal))
      PrevVal = VRMap[StageNum][LoopVal];
    else if (StageNum == PhiStage + 1)
    else if (StageNum > PhiStage + 1 && LoopInst->getParent() == BB)
      PrevVal =
          getPrevMapVal(StageNum - 1, PhiStage, getLoopPhiReg(*LoopInst, BB),
                        LoopStage, VRMap, BB);
  }
  return PrevVal;
void ModuloScheduleExpander::rewritePhiValues(MachineBasicBlock *NewBB,
                                              unsigned StageNum,
                                              ValueMapTy *VRMap,
                                              InstrMapTy &InstrMap) {
  for (auto &PHI : BB->phis()) {
    unsigned InitVal = 0;
    unsigned LoopVal = 0;

    unsigned NumPhis = getStagesForPhi(PhiDef);
    if (NumPhis > StageNum)
      NumPhis = StageNum;
    for (unsigned np = 0; np <= NumPhis; ++np) {
      unsigned NewVal =
          getPrevMapVal(StageNum - np, PhiStage, LoopVal, LoopStage, VRMap, BB);
      rewriteScheduledInstr(NewBB, InstrMap, StageNum - np, np, &PHI, PhiDef,
                            NewVal);
void ModuloScheduleExpander::rewriteScheduledInstr(
    MachineBasicBlock *BB, InstrMapTy &InstrMap, unsigned CurStageNum,
    unsigned PhiNum, MachineInstr *Phi, unsigned OldReg, unsigned NewReg,
    unsigned PrevReg) {
  int StagePhi = Schedule.getStage(Phi) + PhiNum;
    assert(OrigInstr != InstrMap.end() && "Instruction not scheduled.");
    int StageSched = Schedule.getStage(OrigMI);
    int CycleSched = Schedule.getCycle(OrigMI);
    unsigned ReplaceReg = 0;

    if (StagePhi == StageSched && Phi->isPHI()) {
      int CyclePhi = Schedule.getCycle(Phi);
      if (PrevReg && InProlog)
        ReplaceReg = PrevReg;
      else if (PrevReg && !isLoopCarried(*Phi) &&
               (CyclePhi <= CycleSched || OrigMI->isPHI()))
        ReplaceReg = PrevReg;
      else
        ReplaceReg = NewReg;
    }
    if (!InProlog && StagePhi + 1 == StageSched && !isLoopCarried(*Phi))
      ReplaceReg = NewReg;
    if (StagePhi > StageSched && Phi->isPHI())
      ReplaceReg = NewReg;
    if (!InProlog && !Phi->isPHI() && StagePhi < StageSched)
      ReplaceReg = NewReg;

      MRI.constrainRegClass(ReplaceReg, MRI.getRegClass(OldReg));
      UseOp.setReg(ReplaceReg);

      Register SplitReg = MRI.createVirtualRegister(MRI.getRegClass(OldReg));
      UseOp.setReg(SplitReg);
bool ModuloScheduleExpander::isLoopCarried(MachineInstr &Phi) {
  int DefCycle = Schedule.getCycle(&Phi);
  int DefStage = Schedule.getStage(&Phi);

  unsigned InitVal = 0;
  unsigned LoopVal = 0;
  if (!Use || Use->isPHI())
    return true;
  return (LoopCycle > DefCycle) || (LoopStage <= DefStage);
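// EliminateDeadPhis removes Phis whose result has no remaining uses and,
// unless KeepSingleSrcPhi is set, folds single-source Phis by replacing the
// result register with the incoming register.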
  bool Changed = true;
      if (MRI.use_empty(MI.getOperand(0).getReg())) {
        MI.eraseFromParent();
      } else if (!KeepSingleSrcPhi && MI.getNumExplicitOperands() == 3) {
        const TargetRegisterClass *ConstrainRegClass =
            MRI.constrainRegClass(MI.getOperand(1).getReg(),
                                  MRI.getRegClass(MI.getOperand(0).getReg()));
        assert(ConstrainRegClass &&
               "Expected a valid constrained register class!");
        (void)ConstrainRegClass;
        MRI.replaceRegWith(MI.getOperand(0).getReg(),
                           MI.getOperand(1).getReg());
        MI.eraseFromParent();
      }
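// KernelRewriter rewrites the original loop body in-place into the pipelined
// steady-state kernel, inserting Phis so that values defined in earlier
// stages reach their cross-stage consumers.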
class KernelRewriter {

KernelRewriter::KernelRewriter(MachineLoop &L, ModuloSchedule &S,
                               MachineBasicBlock *LoopBB, LiveIntervals *LIS)
    : S(S), BB(LoopBB), PreheaderBB(L.getLoopPreheader()),
      ExitBB(L.getExitBlock()), MRI(BB->getParent()->getRegInfo()),
      TII(BB->getParent()->getSubtarget().getInstrInfo()), LIS(LIS) {
  if (PreheaderBB == BB)
    PreheaderBB = *std::next(BB->pred_begin());
void KernelRewriter::rewrite() {
    if (MI->getParent())
      MI->removeFromParent();
  assert(FirstMI && "Failed to find first MI in schedule");

    (I++)->eraseFromParent();

    if (MI.isPHI() || MI.isTerminator())
      continue;

  EliminateDeadPhis(BB, MRI, LIS);
  for (auto MI = BB->getFirstNonPHI(); MI != BB->end(); ++MI) {
      if (MI.getParent() != BB) {
    int ProducerStage = S.getStage(Producer);
    assert(ConsumerStage != -1 &&
           "In-loop consumer should always be scheduled!");
    assert(ConsumerStage >= ProducerStage);
    unsigned StageDiff = ConsumerStage - ProducerStage;

    for (unsigned I = 0; I < StageDiff; ++I)
      R = phi(R);

  while (LoopProducer->isPHI() && LoopProducer->getParent() == BB) {
    LoopProducer = MRI.getUniqueVRegDef(LoopReg);
  }
  int LoopProducerStage = S.getStage(LoopProducer);

  std::optional<Register> IllegalPhiDefault;
  if (LoopProducerStage == -1) {
  } else if (LoopProducerStage > ConsumerStage) {
    int LoopProducerCycle = S.getCycle(LoopProducer);
    assert(LoopProducerCycle <= ConsumerCycle);
    assert(LoopProducerStage == ConsumerStage + 1);
    IllegalPhiDefault = Defaults.front();
  } else {
    assert(ConsumerStage >= LoopProducerStage);
    int StageDiff = ConsumerStage - LoopProducerStage;
    if (StageDiff > 0) {
              << " to " << (Defaults.size() + StageDiff) << "\n");
      Defaults.empty() ? std::optional<Register>()
                       : Defaults.back());
    }
  }

  auto DefaultI = Defaults.rbegin();
  while (DefaultI != Defaults.rend())
    LoopReg = phi(LoopReg, *DefaultI++, MRI.getRegClass(Reg));

  if (IllegalPhiDefault) {
    auto RC = MRI.getRegClass(Reg);
        .addReg(*IllegalPhiDefault)
    S.setStage(IllegalPhi, LoopProducerStage);
  }
Register KernelRewriter::phi(Register LoopReg, std::optional<Register> InitReg,
                             const TargetRegisterClass *RC) {
  auto I = Phis.find({LoopReg, *InitReg});
  if (I != Phis.end())
    return I->second;

  for (auto &KV : Phis) {
    if (KV.first.first == LoopReg)

  auto I = UndefPhis.find(LoopReg);
  if (I != UndefPhis.end()) {
    MI->getOperand(1).setReg(*InitReg);
    Phis.insert({{LoopReg, *InitReg}, R});
    const TargetRegisterClass *ConstrainRegClass =
        MRI.constrainRegClass(R, MRI.getRegClass(*InitReg));
    assert(ConstrainRegClass && "Expected a valid constrained register class!");
    (void)ConstrainRegClass;
  }

    RC = MRI.getRegClass(LoopReg);
    const TargetRegisterClass *ConstrainRegClass =
        MRI.constrainRegClass(R, MRI.getRegClass(*InitReg));
    assert(ConstrainRegClass && "Expected a valid constrained register class!");
    (void)ConstrainRegClass;

      .addReg(InitReg ? *InitReg : undef(RC))
  UndefPhis[LoopReg] = R;
  Phis[{LoopReg, *InitReg}] = R;

  R = MRI.createVirtualRegister(RC);
          TII->get(TargetOpcode::IMPLICIT_DEF), R);
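// KernelOperandInfo records, for a single operand, how many loop-carried Phi
// hops separate it from its defining instruction, so that a generated kernel
// can be compared operand by operand against the golden reference kernel.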
class KernelOperandInfo {
    while (isRegInLoop(MO)) {
      if (MI->isFullCopy()) {
        MO = &MI->getOperand(1);
      }
        MO = &MI->getOperand(3);
      MO = MI->getOperand(2).getMBB() == BB ? &MI->getOperand(1)
                                            : &MI->getOperand(3);
    }

  bool operator==(const KernelOperandInfo &Other) const {
    return PhiDefaults.size() == Other.PhiDefaults.size();
  }

  void print(raw_ostream &OS) const {
    OS << "use of " << *Source << ": distance(" << PhiDefaults.size()
       << ") in "
  }

    return MO->isReg() && MO->getReg().isVirtual() &&
           MRI.getVRegDef(MO->getReg())->getParent() == BB;
  for (auto I = BB->begin(), NI = NewBB->begin(); !I->isTerminator();
       ++I, ++NI) {

    if (Stage == -1 || Stage >= MinStage)
      continue;

    for (auto &Sub : Subs)
      Sub.first->substituteRegister(DefMO.getReg(), Sub.second, 0,
                                    *MRI.getTargetRegisterInfo());
    MI->eraseFromParent();
  MI.removeFromParent();
  BlockMIs.erase({SourceBB, KernelMI});

  assert(Def->findRegisterDefOperandIdx(MI.getOperand(1).getReg(),
                                        nullptr) != -1);
  MI.getOperand(0).setReg(PhiReg);

  for (auto *P : PhiToDelete)
    P->eraseFromParent();

  DestBB->insert(InsertPt, NewMI);
  Register OrigR = Phi->getOperand(0).getReg();

  if (Use && Use->isPHI() && Use->getParent() == SourceBB) {

  for (unsigned I = 0; I < distance; ++I) {
    unsigned LoopRegIdx = 3, InitRegIdx = 1;

  return CanonicalUseReg;
  EliminateDeadPhis(ExitingBB, MRI, LIS, true);

    EliminateDeadPhis(B, MRI, LIS, true);

  for (size_t I = 0; I < Epilogs.size(); I++) {
    for (size_t J = I; J < Epilogs.size(); J++) {
      for (size_t K = Iteration; K > I; K--)

  for (; PI != Prologs.end(); ++PI, ++EI) {
    (*PI)->addSuccessor(*EI);
      if (Use && Use->getParent() == Pred) {
        if (CanonicalUse->isPHI()) {

    for (auto I = B->instr_rbegin();
         I != std::next(B->getFirstNonPHI()->getReverseIterator());) {
      MI->eraseFromParent();
    }
    EliminateDeadPhis(B, MRI, LIS);
  EliminateDeadPhis(ExitingBB, MRI, LIS);
    if (Use.getParent() != BB)
      Use->substituteRegister(OldR, R, 0,
                              *MRI.getTargetRegisterInfo());
  Exit->replacePhiUsesWith(BB, NewBB);
  assert(CanAnalyzeBr && "Must be able to analyze the loop branch!");

  unsigned OpIdx = MI->findRegisterDefOperandIdx(Reg, nullptr);

    R = MI->getOperand(1).getReg();
  MI->getOperand(0).setReg(PhiR);

  if (Stage == -1 || LiveStages.count(MI->getParent()) == 0 ||

    for (auto &Sub : Subs)
      Sub.first->substituteRegister(DefMO.getReg(), Sub.second, 0,
                                    *MRI.getTargetRegisterInfo());
    MI->eraseFromParent();
  bool KernelDisposed = false;
    std::optional<bool> StaticallyGreater =
    if (!StaticallyGreater) {
    } else if (*StaticallyGreater == false) {
      Prolog->removeSuccessor(Fallthrough);
      KernelDisposed = true;
    }

  if (!KernelDisposed) {
  std::string ScheduleDump;

  assert(LIS && "Requires LiveIntervals!");

  if (!ExpandedKernel) {

    IllegalPhis.insert(&*NI);

  auto OI = ExpandedKernel->begin();
  for (; !OI->isTerminator() && !NI->isTerminator(); ++OI, ++NI) {
    while (OI->isPHI() || OI->isFullCopy())
      ++OI;
    while (NI->isPHI() || NI->isFullCopy())
      ++NI;
    assert(OI->getOpcode() == NI->getOpcode() && "Opcodes don't match?!");

    for (auto OOpI = OI->operands_begin(), NOpI = NI->operands_begin();
         OOpI != OI->operands_end(); ++OOpI, ++NOpI)
      KOIs.emplace_back(KernelOperandInfo(&*OOpI, MRI, IllegalPhis),
                        KernelOperandInfo(&*NOpI, MRI, IllegalPhis));
  }

  for (auto &OldAndNew : KOIs) {
    if (OldAndNew.first == OldAndNew.second)
      continue;
    errs() << "Modulo kernel validation error: [\n";
    errs() << "  [golden] ";
    OldAndNew.first.print(errs());
    OldAndNew.second.print(errs());
  }
  errs() << "Golden reference kernel:\n";
  errs() << "New kernel:\n";
  errs() << ScheduleDump;
  report_fatal_error(
      "Modulo kernel validation (-pipeliner-experimental-cg) failed");
static MachineBasicBlock *createDedicatedExit(MachineBasicBlock *Loop,
                                              MachineBasicBlock *Exit) {
  if (Exit->pred_size() == 1)
    return Exit;

  else if (FBB == Loop)

  Loop->replaceSuccessor(Exit, NewExit);
  TII->insertUnconditionalBranch(*NewExit, Exit, DebugLoc());
  Exit->replacePhiUsesWith(Loop, NewExit);
    InstrMapTy &LastStage0Insts,
  LoopInfo->createRemainingIterationsGreaterCondition(RequiredTC, MBB, Cond,
                                                      LastStage0Insts);
void ModuloScheduleExpanderMVE::generatePipelinedLoop() {
  Prolog->addSuccessor(NewKernel);
  Epilog->addSuccessor(NewPreheader);
  Epilog->addSuccessor(NewExit);

  InstrMapTy LastStage0Insts;
  insertCondBranch(*Check, Schedule.getNumStages() + NumUnroll - 2,
                   LastStage0Insts, *Prolog, *NewPreheader);

  generateProlog(PrologVRMap);
  generateKernel(PrologVRMap, KernelVRMap, LastStage0Insts);
  generateEpilog(KernelVRMap, EpilogVRMap, LastStage0Insts);
void ModuloScheduleExpanderMVE::updateInstrUse(
    if (!UseMO.isReg() || !UseMO.getReg().isVirtual())
      continue;

    if (!DefInst || DefInst->getParent() != OrigKernel)
      continue;

    unsigned InitReg = 0;
    unsigned DefReg = OrigReg;
    if (DefInst->isPHI()) {
      getPhiRegs(*DefInst, OrigKernel, InitReg, LoopReg);
      DefInst = MRI.getVRegDef(LoopReg);
    }
    unsigned DefStageNum = Schedule.getStage(DefInst);
    DiffStage += StageNum - DefStageNum;

    if (PhaseNum >= DiffStage && CurVRMap[PhaseNum - DiffStage].count(DefReg))
      NewReg = CurVRMap[PhaseNum - DiffStage][DefReg];
    else if (!PrevVRMap)
      NewReg = InitReg;
    else
      NewReg = (*PrevVRMap)[PrevVRMap->size() - (DiffStage - PhaseNum)][DefReg];

    MRI.constrainRegClass(NewReg, MRI.getRegClass(OrigReg));
      UseMO.setReg(NewReg);

      Register SplitReg = MRI.createVirtualRegister(MRI.getRegClass(OrigReg));
      BuildMI(*OrigKernel, MI, MI->getDebugLoc(), TII->get(TargetOpcode::COPY),
              SplitReg)
          .addReg(NewReg);
      UseMO.setReg(SplitReg);
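/// Return a phi if Reg is referenced by the phi. canApply() guarantees that
/// at most one such phi exists.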
static MachineInstr *getLoopPhiUser(Register Reg, MachineBasicBlock *Loop) {
  unsigned InitVal, LoopVal;
void ModuloScheduleExpanderMVE::generatePhi(
  int StageNum = Schedule.getStage(OrigMI);

  if (Schedule.getNumStages() - NumUnroll + UnrollNum - 1 >= StageNum)
    UsePrologReg = true;
  else if (Schedule.getNumStages() - NumUnroll + UnrollNum == StageNum)
    UsePrologReg = false;

    if (!DefMO.isReg() || DefMO.isDead())
      continue;

    auto NewReg = KernelVRMap[UnrollNum].find(OrigReg);
    if (NewReg == KernelVRMap[UnrollNum].end())
      continue;

      int PrologNum = Schedule.getNumStages() - NumUnroll + UnrollNum - 1;
      CorrespondReg = PrologVRMap[PrologNum][OrigReg];

    Register PhiReg = MRI.createVirtualRegister(MRI.getRegClass(OrigReg));
            TII->get(TargetOpcode::PHI), PhiReg)
    PhiVRMap[UnrollNum][OrigReg] = PhiReg;
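// Rewrite the operand of Phi that currently reads OrigReg so that it reads
// NewReg with NewMBB as the corresponding incoming block.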
static void replacePhiSrc(MachineInstr &Phi, Register OrigReg, Register NewReg,
                          MachineBasicBlock *NewMBB) {
  for (unsigned Idx = 1; Idx < Phi.getNumOperands(); Idx += 2) {
    if (Phi.getOperand(Idx).getReg() == OrigReg) {
      Phi.getOperand(Idx).setReg(NewReg);
      Phi.getOperand(Idx + 1).setMBB(NewMBB);
      break;
    }
  }
}
void ModuloScheduleExpanderMVE::mergeRegUsesAfterPipeline(Register OrigReg,
                                                          Register NewReg) {
    if (O.getParent()->getParent() != OrigKernel &&
        O.getParent()->getParent() != Prolog &&
        O.getParent()->getParent() != NewKernel &&
        O.getParent()->getParent() != Epilog)
      UsesAfterLoop.push_back(&O);
    if (O.getParent()->getParent() == OrigKernel && O.getParent()->isPHI())
      LoopPhis.push_back(O.getParent());

  if (!UsesAfterLoop.empty()) {
    Register PhiReg = MRI.createVirtualRegister(MRI.getRegClass(OrigReg));
            TII->get(TargetOpcode::PHI), PhiReg)
  }

  if (!LoopPhis.empty()) {
      unsigned InitReg, LoopReg;
      getPhiRegs(*Phi, OrigKernel, InitReg, LoopReg);
      Register NewInit = MRI.createVirtualRegister(MRI.getRegClass(InitReg));
              TII->get(TargetOpcode::PHI), NewInit)
void ModuloScheduleExpanderMVE::generateProlog(
  PrologVRMap.clear();

  for (int PrologNum = 0; PrologNum < Schedule.getNumStages() - 1;
       ++PrologNum) {
      if (StageNum > PrologNum)
        continue;
      updateInstrDef(NewMI, PrologVRMap[PrologNum], false);
      NewMIMap[NewMI] = {PrologNum, StageNum};
      Prolog->push_back(NewMI);
  }

  for (auto I : NewMIMap) {
    int PrologNum = I.second.first;
    int StageNum = I.second.second;
    updateInstrUse(MI, StageNum, PrologNum, PrologVRMap, nullptr);
  }

  dbgs() << "prolog:\n";
void ModuloScheduleExpanderMVE::generateKernel(
  KernelVRMap.clear();
  KernelVRMap.resize(NumUnroll);
  PhiVRMap.resize(NumUnroll);

  for (int UnrollNum = 0; UnrollNum < NumUnroll; ++UnrollNum) {
      if (UnrollNum == NumUnroll - 1)
        LastStage0Insts[MI] = NewMI;
      updateInstrDef(NewMI, KernelVRMap[UnrollNum],
                     (UnrollNum == NumUnroll - 1 && StageNum == 0));
      generatePhi(MI, UnrollNum, PrologVRMap, KernelVRMap, PhiVRMap);
      NewMIMap[NewMI] = {UnrollNum, StageNum};
  }

  for (auto I : NewMIMap) {
    int UnrollNum = I.second.first;
    int StageNum = I.second.second;
    updateInstrUse(MI, StageNum, UnrollNum, KernelVRMap, &PhiVRMap);
  }

  insertCondBranch(*NewKernel, NumUnroll - 1, LastStage0Insts, *NewKernel,
                   *Epilog);

  dbgs() << "kernel:\n";
void ModuloScheduleExpanderMVE::generateEpilog(
  EpilogVRMap.clear();

  for (int EpilogNum = 0; EpilogNum < Schedule.getNumStages() - 1;
       ++EpilogNum) {
      if (StageNum <= EpilogNum)
        continue;
      updateInstrDef(NewMI, EpilogVRMap[EpilogNum], StageNum - 1 == EpilogNum);
      NewMIMap[NewMI] = {EpilogNum, StageNum};
      Epilog->push_back(NewMI);
  }

  for (auto I : NewMIMap) {
    int EpilogNum = I.second.first;
    int StageNum = I.second.second;
    updateInstrUse(MI, StageNum, EpilogNum, EpilogVRMap, &KernelVRMap);
  }

  insertCondBranch(*Epilog, 0, LastStage0Insts, *NewPreheader, *NewExit);

  dbgs() << "epilog:\n";
void ModuloScheduleExpanderMVE::calcNumUnroll() {
      int NumUnrollLocal = 1;
        if (Inst2Idx[MI] <= Inst2Idx[DefMI])
          ++NumUnrollLocal;
      NumUnroll = std::max(NumUnroll, NumUnrollLocal);
void ModuloScheduleExpanderMVE::updateInstrDef(MachineInstr *NewMI,
                                               ValueMapTy &VRMap,
                                               bool LastDef) {
    VRMap[Reg] = NewReg;
    if (LastDef)
      mergeRegUsesAfterPipeline(Reg, NewReg);

  generatePipelinedLoop();
bool ModuloScheduleExpanderMVE::canApply(MachineLoop &L) {
  if (!L.getExitBlock()) {
    LLVM_DEBUG(
        dbgs() << "Can not apply MVE expander: No single exit block.\n");
    return false;
  }

      if (Ref.getParent() != BB || Ref.isPHI()) {
        LLVM_DEBUG(dbgs() << "Can not apply MVE expander: A phi result is "
                             "referenced outside of the loop or by phi.\n");
        return false;
      }

    unsigned InitVal, LoopVal;
    if (!Register(LoopVal).isVirtual() ||
        MRI.getVRegDef(LoopVal)->getParent() != BB) {
      LLVM_DEBUG(
          dbgs() << "Can not apply MVE expander: A phi source value coming "
                    "from the loop is not defined in the loop.\n");
      return false;
    }
    if (UsedByPhi.count(LoopVal)) {
      LLVM_DEBUG(dbgs() << "Can not apply MVE expander: A value defined in the "
                           "loop is referenced by two or more phis.\n");
      return false;
    }
    UsedByPhi.insert(LoopVal);
char ModuloScheduleTest::ID = 0;

INITIALIZE_PASS_BEGIN(ModuloScheduleTest, "modulo-schedule-test",
                      "Modulo Schedule test pass", false, false)

  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfoWrapperPass>().getLI();
  for (auto *L : MLI) {
    if (L->getTopBlock() != L->getBottomBlock())
      continue;
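// parseSymbolString decodes a post-instruction symbol of the form
// "Stage-<n>_Cycle-<m>" into its stage and cycle numbers.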
static void parseSymbolString(StringRef S, int &Cycle, int &Stage) {
  std::pair<StringRef, StringRef> StageAndCycle = getToken(S, "_");
  std::pair<StringRef, StringRef> StageTokenAndValue =
      getToken(StageAndCycle.first, "-");
  std::pair<StringRef, StringRef> CycleTokenAndValue =
      getToken(StageAndCycle.second, "-");
  if (StageTokenAndValue.first != "Stage" ||
      CycleTokenAndValue.first != "_Cycle") {
    report_fatal_error(
        "Bad post-instr symbol syntax: see comment in ModuloScheduleTest");
  }

  StageTokenAndValue.second.drop_front().getAsInteger(10, Stage);
  CycleTokenAndValue.second.drop_front().getAsInteger(10, Cycle);

  dbgs() << "  Stage=" << Stage << ", Cycle=" << Cycle << "\n";

  LiveIntervals &LIS = getAnalysis<LiveIntervalsWrapperPass>().getLIS();

  dbgs() << "--- ModuloScheduleTest running on BB#" << BB->getNumber() << "\n";

  std::vector<MachineInstr *> Instrs;
    if (MI.isTerminator())
      continue;
    Instrs.push_back(&MI);

    dbgs() << "Parsing post-instr symbol for " << MI;

  MI->setPostInstrSymbol(MF, Sym);