#define DEBUG_TYPE "si-wqm"
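// The pass tracks, per instruction and per block, which of four exec-mask
// states are required: WQM, StrictWWM, StrictWQM and Exact. StateStrict
// below groups the two strict flavors, which save and fully restore the
// original exec mask around a region.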
namespace {

enum {
  StateWQM = 0x1,
  StateStrictWWM = 0x2,
  StateStrictWQM = 0x4,
  StateExact = 0x8,
  StateStrict = StateStrictWWM | StateStrictWQM,
};
struct PrintState {
public:
  int State;

  explicit PrintState(int State) : State(State) {}
};
#ifndef NDEBUG
static raw_ostream &operator<<(raw_ostream &OS, const PrintState &PS) {
  static const std::pair<char, const char *> Mapping[] = {
      std::pair(StateWQM, "WQM"), std::pair(StateStrictWWM, "StrictWWM"),
      std::pair(StateStrictWQM, "StrictWQM"), std::pair(StateExact, "Exact")};
  char State = PS.State;
  for (auto M : Mapping) {
    if (State & M.first) {
      OS << M.second;
      State &= ~M.first;
      if (State)
        OS << '|';
    }
  }
  assert(State == 0);
  return OS;
}
#endif
struct InstrInfo {
  char Needs = 0;
  char Disabled = 0;
  char OutNeeds = 0;
};

struct BlockInfo {
  char Needs = 0;
  char InNeeds = 0;
  char OutNeeds = 0;
  char InitialState = 0;
  bool NeedsLowering = false;
};

struct WorkItem {
  MachineBasicBlock *MBB = nullptr;
  MachineInstr *MI = nullptr;

  WorkItem() = default;
  WorkItem(MachineBasicBlock *MBB) : MBB(MBB) {}
  WorkItem(MachineInstr *MI) : MI(MI) {}
};
class SIWholeQuadMode : public MachineFunctionPass {
public:
  static char ID;

private:
  const GCNSubtarget *ST;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  MachineRegisterInfo *MRI;
  LiveIntervals *LIS;
  MachineDominatorTree *MDT;
  MachinePostDominatorTree *PDT;

  // Opcodes and the exec register, selected for the wave size in
  // runOnMachineFunction().
  unsigned AndOpc;
  unsigned AndTermOpc;
  unsigned AndN2Opc;
  unsigned XorOpc;
  unsigned AndSaveExecOpc;
  unsigned AndSaveExecTermOpc;
  unsigned WQMOpc;
  Register Exec;
  Register LiveMaskReg;

  DenseMap<const MachineInstr *, InstrInfo> Instructions;
  MapVector<MachineBasicBlock *, BlockInfo> Blocks;

  // Tracks the exec-mask state after each state-changing instruction.
  DenseMap<const MachineInstr *, char> StateTransition;

  SmallVector<MachineInstr *, 2> LiveMaskQueries;
  SmallVector<MachineInstr *, 4> LowerToMovInstrs;
  SmallVector<MachineInstr *, 4> LowerToCopyInstrs;
  SmallVector<MachineInstr *, 4> KillInstrs;
  SmallVector<MachineInstr *, 4> InitExecInstrs;
  void markInstruction(MachineInstr &MI, char Flag,
                       std::vector<WorkItem> &Worklist);
  void markDefs(const MachineInstr &UseMI, LiveRange &LR, Register Reg,
                unsigned SubReg, char Flag, std::vector<WorkItem> &Worklist);
  void markOperand(const MachineInstr &MI, const MachineOperand &Op, char Flag,
                   std::vector<WorkItem> &Worklist);
  void markInstructionUses(const MachineInstr &MI, char Flag,
                           std::vector<WorkItem> &Worklist);
  char scanInstructions(MachineFunction &MF, std::vector<WorkItem> &Worklist);
  void propagateInstruction(MachineInstr &MI, std::vector<WorkItem> &Worklist);
  void propagateBlock(MachineBasicBlock &MBB, std::vector<WorkItem> &Worklist);
  void toStrictMode(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
                    Register SaveOrig, char StrictStateNeeded);
  void fromStrictMode(MachineBasicBlock &MBB,
                      MachineBasicBlock::iterator Before, Register SavedOrig,
                      char NonStrictState, char CurrentStrictState);
  bool lowerLiveMaskQueries();
  bool lowerCopyInstrs();
  bool lowerKillInstrs(bool IsWQM);
  MachineFunctionProperties getClearedProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::IsSSA);
  }
};

} // end anonymous namespace
char SIWholeQuadMode::ID = 0;
FunctionPass *llvm::createSIWholeQuadModePass() {
  return new SIWholeQuadMode;
}
#ifndef NDEBUG
LLVM_DUMP_METHOD void SIWholeQuadMode::printInfo() {
  for (const auto &BII : Blocks) {
    dbgs() << printMBBReference(*BII.first) << ":\n"
           << "  InNeeds = " << PrintState(BII.second.InNeeds)
           << ", Needs = " << PrintState(BII.second.Needs)
           << ", OutNeeds = " << PrintState(BII.second.OutNeeds) << "\n\n";

    for (const MachineInstr &MI : *BII.first) {
      auto III = Instructions.find(&MI);
      if (III != Instructions.end()) {
        dbgs() << "  " << MI << "    Needs = " << PrintState(III->second.Needs)
               << ", OutNeeds = " << PrintState(III->second.OutNeeds) << '\n';
      }
    }
  }
}
#endif
void SIWholeQuadMode::markInstruction(MachineInstr &MI, char Flag,
                                      std::vector<WorkItem> &Worklist) {
  InstrInfo &II = Instructions[&MI];

  assert(!(Flag & StateExact) && Flag != 0);

  // Remove any disabled states from the flag: the user that required it gets
  // an undefined value in the helper lanes.
  Flag &= ~II.Disabled;

  // Ignore if the flag is already encompassed by the existing needs, or we
  // just disabled everything.
  if ((II.Needs & Flag) == Flag)
    return;

  LLVM_DEBUG(dbgs() << "markInstruction " << PrintState(Flag) << ": " << MI);
  II.Needs |= Flag;
  Worklist.emplace_back(&MI);
}
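// markInstruction() records a per-instruction requirement and re-queues the
// instruction; the analysis is a worklist fixed point, so the order in which
// the mark* helpers below fire does not matter.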
/// Mark all relevant definitions of register \p Reg in usage \p UseMI.
void SIWholeQuadMode::markDefs(const MachineInstr &UseMI, LiveRange &LR,
                               Register Reg, unsigned SubReg, char Flag,
                               std::vector<WorkItem> &Worklist) {
  LLVM_DEBUG(dbgs() << "markDefs " << PrintState(Flag) << ": " << UseMI);

  LiveQueryResult UseLRQ = LR.Query(LIS->getInstructionIndex(UseMI));
  const VNInfo *Value = UseLRQ.valueIn();
  if (!Value)
    return;

  // Note: this code assumes that lane masks on AMDGPU completely cover
  // registers.
  const LaneBitmask UseLanes =
      SubReg ? TRI->getSubRegIndexLaneMask(SubReg)
             : (Reg.isVirtual() ? MRI->getMaxLaneMaskForVReg(Reg)
                                : LaneBitmask::getNone());

  // Perform a depth-first iteration of the LiveRange graph marking defs.
  // Stop processing of a given branch when all use lanes have been defined.
  // The first definition stops processing for a physical register.
  struct PhiEntry {
    const VNInfo *Phi;
    unsigned PredIdx;
    LaneBitmask DefinedLanes;

    PhiEntry(const VNInfo *Phi, unsigned PredIdx, LaneBitmask DefinedLanes)
        : Phi(Phi), PredIdx(PredIdx), DefinedLanes(DefinedLanes) {}
  };
  using VisitKey = std::pair<const VNInfo *, LaneBitmask>;
  SmallVector<PhiEntry, 2> PhiStack;
  SmallSet<VisitKey, 4> Visited;
  LaneBitmask DefinedLanes;
  unsigned NextPredIdx = 0; // Only used for processing phi nodes
  do {
    const VNInfo *NextValue = nullptr;
    const VisitKey Key(Value, DefinedLanes);

    if (Visited.insert(Key).second) {
      // On first visit to a phi start processing at the first predecessor.
      NextPredIdx = 0;
    }

    if (Value->isPHIDef()) {
      // Each predecessor node in the phi must be processed as a subgraph.
      const MachineBasicBlock *MBB = LIS->getMBBFromIndex(Value->def);
      assert(MBB && "Phi-def has no defining MBB");

      // Find next predecessor to process.
      unsigned Idx = NextPredIdx;
      auto PI = MBB->pred_begin() + Idx;
      auto PE = MBB->pred_end();
      for (; PI != PE && !NextValue; ++PI, ++Idx) {
        if (const VNInfo *VN = LR.getVNInfoBefore(LIS->getMBBEndIdx(*PI))) {
          if (!Visited.count(VisitKey(VN, DefinedLanes)))
            NextValue = VN;
        }
      }

      // If there are more predecessors to process, push the phi on the stack.
      if (PI != PE)
        PhiStack.emplace_back(Value, Idx, DefinedLanes);
    } else {
      MachineInstr *MI = LIS->getInstructionFromIndex(Value->def);
      assert(MI && "Def has no defining instruction");

      if (Reg.isVirtual()) {
        // Iterate over all operands to find relevant definitions.
        bool HasDef = false;
        for (const MachineOperand &Op : MI->all_defs()) {
          if (Op.getReg() != Reg)
            continue;

          // Compute lanes defined and overlap with use.
          LaneBitmask OpLanes =
              Op.isUndef() ? LaneBitmask::getAll()
                           : TRI->getSubRegIndexLaneMask(Op.getSubReg());
          LaneBitmask Overlap = (UseLanes & OpLanes);

          // Record if this instruction defined any lanes of the use.
          HasDef |= Overlap.any();

          // Mark any lanes defined.
          DefinedLanes |= OpLanes;
        }

        // Check if all lanes of the use have been defined.
        if ((DefinedLanes & UseLanes) != UseLanes) {
          // Definition not complete; need to process the input value.
          LiveQueryResult LRQ = LR.Query(LIS->getInstructionIndex(*MI));
          if (const VNInfo *VN = LRQ.valueIn()) {
            if (!Visited.count(VisitKey(VN, DefinedLanes)))
              NextValue = VN;
          }
        }

        // Only mark the instruction if it defines some part of the use.
        if (HasDef)
          markInstruction(*MI, Flag, Worklist);
      } else {
        // For physical registers simply mark the defining instruction.
        markInstruction(*MI, Flag, Worklist);
      }
    }

    if (!NextValue && !PhiStack.empty()) {
      // Revisit phi node.
      const PhiEntry &Entry = PhiStack.back();
      NextValue = Entry.Phi;
      NextPredIdx = Entry.PredIdx;
      DefinedLanes = Entry.DefinedLanes;
      PhiStack.pop_back();
    }

    Value = NextValue;
  } while (Value);
}
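// Note: the iterative DFS above exists because a use may be fed by a chain of
// partial subregister defs and phis; the (value, defined-lanes) visit set
// keeps re-converging paths from being walked repeatedly.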
void SIWholeQuadMode::markOperand(const MachineInstr &MI,
                                  const MachineOperand &Op, char Flag,
                                  std::vector<WorkItem> &Worklist) {
  assert(Op.isReg());
  Register Reg = Op.getReg();

  // Ignore the exec mask itself.
  switch (Reg) {
  case AMDGPU::EXEC:
  case AMDGPU::EXEC_LO:
    return;
  default:
    break;
  }

  if (Reg.isVirtual()) {
    LiveRange &LR = LIS->getInterval(Reg);
    markDefs(MI, LR, Reg, Op.getSubReg(), Flag, Worklist);
  } else {
    // Physical registers (e.g. VCC) are tracked per register unit.
    for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
      LiveRange &LR = LIS->getRegUnit(Unit);
      if (LR.Query(LIS->getInstructionIndex(MI)).valueIn())
        markDefs(MI, LR, Unit, AMDGPU::NoSubRegister, Flag, Worklist);
    }
  }
}
/// Mark all instructions defining the uses in \p MI with \p Flag.
void SIWholeQuadMode::markInstructionUses(const MachineInstr &MI, char Flag,
                                          std::vector<WorkItem> &Worklist) {
  LLVM_DEBUG(dbgs() << "markInstructionUses " << PrintState(Flag) << ": "
                    << MI);

  for (const MachineOperand &Use : MI.all_uses())
    markOperand(MI, Use, Flag, Worklist);
}
// Scan instructions to determine which ones require an Exact execmask and
// which ones seed WQM requirements.
char SIWholeQuadMode::scanInstructions(MachineFunction &MF,
                                       std::vector<WorkItem> &Worklist) {
  char GlobalFlags = 0;
  bool WQMOutputs = MF.getFunction().hasFnAttribute("amdgpu-ps-wqm-outputs");
  SmallVector<MachineInstr *, 4> SetInactiveInstrs;
  SmallVector<MachineInstr *, 4> SoftWQMInstrs;
  bool HasImplicitDerivatives =
      MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS;

  // We need to visit the basic blocks in reverse post-order so that we visit
  // defs before uses, in particular so that we don't accidentally mark an
  // instruction as needing e.g. WQM before visiting it and realizing it needs
  // WQM disabled.
  ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
  for (MachineBasicBlock *MBB : RPOT) {
    BlockInfo &BBI = Blocks[MBB];

    for (MachineInstr &MI : *MBB) {
      InstrInfo &III = Instructions[&MI];
      unsigned Opcode = MI.getOpcode();
      char Flags = 0;

      if (TII->isWQM(Opcode)) {
        // Only generate implicit WQM if implicit derivatives are required.
        // This avoids inserting unintended WQM if a shader type without
        // implicit derivatives uses an image sampling instruction.
        if (ST->hasExtendedImageInsts() && HasImplicitDerivatives) {
          // Sampling instructions don't need to produce results for all
          // pixels in a quad; they just require all inputs of a quad to have
          // been computed for derivatives.
          markInstructionUses(MI, StateWQM, Worklist);
          GlobalFlags |= StateWQM;
        }
      } else if (Opcode == AMDGPU::WQM) {
        // The WQM intrinsic requires its output to have all the helper lanes
        // correct, so we need it to be in WQM.
        Flags = StateWQM;
        LowerToCopyInstrs.push_back(&MI);
      } else if (Opcode == AMDGPU::SOFT_WQM) {
        LowerToCopyInstrs.push_back(&MI);
        SoftWQMInstrs.push_back(&MI);
      } else if (Opcode == AMDGPU::STRICT_WWM) {
        // The STRICT_WWM intrinsic doesn't make the same guarantee, and it
        // must be executed in WQM or Exact so that its copy doesn't clobber
        // inactive lanes.
        markInstructionUses(MI, StateStrictWWM, Worklist);
        GlobalFlags |= StateStrictWWM;
        LowerToMovInstrs.push_back(&MI);
      } else if (Opcode == AMDGPU::STRICT_WQM ||
                 TII->isDualSourceBlendEXP(MI)) {
        // STRICT_WQM enables all threads in quads that have at least one
        // active thread.
        markInstructionUses(MI, StateStrictWQM, Worklist);
        GlobalFlags |= StateStrictWQM;

        if (Opcode == AMDGPU::STRICT_WQM) {
          LowerToMovInstrs.push_back(&MI);
        } else {
          // Dual source blend export acts as implicit strict-wqm: its sources
          // need to be shuffled in strict wqm, but the export itself needs to
          // run in exact mode.
          BBI.Needs |= StateExact;
          if (!(BBI.InNeeds & StateExact)) {
            BBI.InNeeds |= StateExact;
            Worklist.emplace_back(MBB);
          }
          GlobalFlags |= StateExact;
          III.Disabled = StateWQM | StateStrict;
        }
      } else if (Opcode == AMDGPU::LDS_PARAM_LOAD ||
                 Opcode == AMDGPU::DS_PARAM_LOAD ||
                 Opcode == AMDGPU::LDS_DIRECT_LOAD ||
                 Opcode == AMDGPU::DS_DIRECT_LOAD) {
        // Mark these StrictWQM, but only for the instruction, not its
        // operands. This avoids unnecessarily marking M0 as requiring WQM.
        III.Needs |= StateStrictWQM;
        GlobalFlags |= StateStrictWQM;
      } else if (Opcode == AMDGPU::V_SET_INACTIVE_B32 ||
                 Opcode == AMDGPU::V_SET_INACTIVE_B64) {
        III.Disabled = StateStrict;
        MachineOperand &Inactive = MI.getOperand(2);
        if (Inactive.isReg()) {
          if (Inactive.isUndef()) {
            LowerToCopyInstrs.push_back(&MI);
          } else {
            markOperand(MI, Inactive, StateStrictWWM, Worklist);
          }
        }
        SetInactiveInstrs.push_back(&MI);
      } else if (TII->isDisableWQM(MI)) {
        BBI.Needs |= StateExact;
        if (!(BBI.InNeeds & StateExact)) {
          BBI.InNeeds |= StateExact;
          Worklist.emplace_back(MBB);
        }
        GlobalFlags |= StateExact;
        III.Disabled = StateWQM | StateStrict;
      } else if (Opcode == AMDGPU::SI_PS_LIVE ||
                 Opcode == AMDGPU::SI_LIVE_MASK) {
        LiveMaskQueries.push_back(&MI);
      } else if (Opcode == AMDGPU::SI_KILL_I1_TERMINATOR ||
                 Opcode == AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR ||
                 Opcode == AMDGPU::SI_DEMOTE_I1) {
        KillInstrs.push_back(&MI);
        BBI.NeedsLowering = true;
      } else if (Opcode == AMDGPU::SI_INIT_EXEC ||
                 Opcode == AMDGPU::SI_INIT_EXEC_FROM_INPUT) {
        InitExecInstrs.push_back(&MI);
      } else if (WQMOutputs) {
        // The function is in machine SSA form, which means that physical
        // VGPRs correspond to shader inputs and outputs. Inputs are only
        // used, outputs are only defined.
        for (const MachineOperand &MO : MI.defs()) {
          Register Reg = MO.getReg();
          if (Reg.isPhysical() &&
              TRI->hasVectorRegisters(TRI->getPhysRegBaseClass(Reg))) {
            Flags = StateWQM;
            break;
          }
        }
      }

      if (Flags) {
        markInstruction(MI, Flags, Worklist);
        GlobalFlags |= Flags;
      }
    }
  }

  // Make sure that any SET_INACTIVE instructions are computed in WQM if WQM
  // is ever used anywhere in the function, as per the semantics of
  // @llvm.amdgcn.set.inactive; similarly for @llvm.amdgcn.softwqm.
  if (GlobalFlags & StateWQM) {
    for (MachineInstr *MI : SetInactiveInstrs)
      markInstruction(*MI, StateWQM, Worklist);
    for (MachineInstr *MI : SoftWQMInstrs)
      markInstruction(*MI, StateWQM, Worklist);
  }

  return GlobalFlags;
}
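// The returned GlobalFlags drive the fast paths in runOnMachineFunction():
// no wave-mode bits means only kills need lowering, and a pure-WQM shader
// just gets a single s_wqm at entry instead of per-block processing.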
void SIWholeQuadMode::propagateInstruction(MachineInstr &MI,
                                           std::vector<WorkItem> &Worklist) {
  MachineBasicBlock *MBB = MI.getParent();
  InstrInfo II = Instructions[&MI]; // Copy: Instructions may grow below.
  BlockInfo &BI = Blocks[MBB];

  // Control flow-type instructions and stores to temporary memory that are
  // followed by WQM computations must themselves be in WQM.
  if ((II.OutNeeds & StateWQM) && !(II.Disabled & StateWQM) &&
      (MI.isTerminator() || (TII->usesVM_CNT(MI) && MI.mayStore()))) {
    Instructions[&MI].Needs = StateWQM;
    II.Needs = StateWQM;
  }

  // Propagate to block level.
  if (II.Needs & StateWQM) {
    BI.Needs |= StateWQM;
    if (!(BI.InNeeds & StateWQM)) {
      BI.InNeeds |= StateWQM;
      Worklist.emplace_back(MBB);
    }
  }

  // Propagate backwards within the block.
  if (MachineInstr *PrevMI = MI.getPrevNode()) {
    char InNeeds = (II.Needs & ~StateStrict) | II.OutNeeds;
    if (!PrevMI->isPHI()) {
      InstrInfo &PrevII = Instructions[PrevMI];
      if ((PrevII.OutNeeds | InNeeds) != PrevII.OutNeeds) {
        PrevII.OutNeeds |= InNeeds;
        Worklist.emplace_back(PrevMI);
      }
    }
  }

  // Propagate WQM flag to instruction inputs.
  assert(!(II.Needs & StateExact));

  if (II.Needs != 0)
    markInstructionUses(MI, II.Needs, Worklist);

  // Ensure we process a block containing StrictWWM/StrictWQM, even if it does
  // not require any WQM transitions.
  if (II.Needs & StateStrictWWM)
    BI.Needs |= StateStrictWWM;
  if (II.Needs & StateStrictWQM)
    BI.Needs |= StateStrictWQM;
}
void SIWholeQuadMode::propagateBlock(MachineBasicBlock &MBB,
                                     std::vector<WorkItem> &Worklist) {
  BlockInfo BI = Blocks[&MBB]; // Make a copy to prevent dangling references.

  // Propagate through instructions.
  if (!MBB.empty()) {
    MachineInstr *LastMI = &*MBB.rbegin();
    InstrInfo &LastII = Instructions[LastMI];
    if ((LastII.OutNeeds | BI.OutNeeds) != LastII.OutNeeds) {
      LastII.OutNeeds |= BI.OutNeeds;
      Worklist.emplace_back(LastMI);
    }
  }

  // Predecessor blocks must provide for our WQM/Exact needs.
  for (MachineBasicBlock *Pred : MBB.predecessors()) {
    BlockInfo &PredBI = Blocks[Pred];
    if ((PredBI.OutNeeds | BI.InNeeds) == PredBI.OutNeeds)
      continue;

    PredBI.OutNeeds |= BI.InNeeds;
    PredBI.InNeeds |= BI.InNeeds;
    Worklist.emplace_back(Pred);
  }

  // All successors must be prepared to accept the same set of WQM/Exact data.
  for (MachineBasicBlock *Succ : MBB.successors()) {
    BlockInfo &SuccBI = Blocks[Succ];
    if ((SuccBI.InNeeds | BI.OutNeeds) == SuccBI.InNeeds)
      continue;

    SuccBI.InNeeds |= BI.OutNeeds;
    Worklist.emplace_back(Succ);
  }
}
char SIWholeQuadMode::analyzeFunction(MachineFunction &MF) {
  std::vector<WorkItem> Worklist;
  char GlobalFlags = scanInstructions(MF, Worklist);

  while (!Worklist.empty()) {
    WorkItem WI = Worklist.back();
    Worklist.pop_back();

    if (WI.MI)
      propagateInstruction(*WI.MI, Worklist);
    else
      propagateBlock(*WI.MBB, Worklist);
  }

  return GlobalFlags;
}
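// Example of the expected analysis result (sketch): in a pixel shader where
// an image_sample feeds an export, the sample seeds StateWQM on its input
// chain, the export is Exact via TII->isDisableWQM(), and the entry block
// ends up starting in WQM with a transition to Exact before the export.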
MachineBasicBlock::iterator
SIWholeQuadMode::saveSCC(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator Before) {
  Register SaveReg = MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

  MachineInstr *Save =
      BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), SaveReg)
          .addReg(AMDGPU::SCC);
  MachineInstr *Restore =
      BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), AMDGPU::SCC)
          .addReg(SaveReg);

  LIS->InsertMachineInstrInMaps(*Save);
  LIS->InsertMachineInstrInMaps(*Restore);
  LIS->createAndComputeVirtRegInterval(SaveReg);

  return Restore;
}
MachineBasicBlock *SIWholeQuadMode::splitBlock(MachineBasicBlock *BB,
                                               MachineInstr *TermMI) {
  LLVM_DEBUG(dbgs() << "Split block " << printMBBReference(*BB) << " @ "
                    << *TermMI << "\n");

  MachineBasicBlock *SplitBB =
      BB->splitAt(*TermMI, /*UpdateLiveIns*/ true, LIS);

  // Convert the last instruction of the block to a terminator.
  // Note: this only covers the expected patterns.
  unsigned NewOpcode = 0;
  switch (TermMI->getOpcode()) {
  case AMDGPU::S_AND_B32:
    NewOpcode = AMDGPU::S_AND_B32_term;
    break;
  case AMDGPU::S_AND_B64:
    NewOpcode = AMDGPU::S_AND_B64_term;
    break;
  case AMDGPU::S_MOV_B32:
    NewOpcode = AMDGPU::S_MOV_B32_term;
    break;
  case AMDGPU::S_MOV_B64:
    NewOpcode = AMDGPU::S_MOV_B64_term;
    break;
  default:
    break;
  }
  if (NewOpcode)
    TermMI->setDesc(TII->get(NewOpcode));

  if (SplitBB != BB) {
    // Update dominator trees.
    using DomTreeT = DomTreeBase<MachineBasicBlock>;
    SmallVector<DomTreeT::UpdateType, 16> DTUpdates;
    for (MachineBasicBlock *Succ : SplitBB->successors()) {
      DTUpdates.push_back({DomTreeT::Insert, SplitBB, Succ});
      DTUpdates.push_back({DomTreeT::Delete, BB, Succ});
    }
    DTUpdates.push_back({DomTreeT::Insert, BB, SplitBB});
    if (MDT)
      MDT->getBase().applyUpdates(DTUpdates);
    if (PDT)
      PDT->applyUpdates(DTUpdates);

    // Link blocks.
    MachineInstr *MI =
        BuildMI(*BB, BB->end(), DebugLoc(), TII->get(AMDGPU::S_BRANCH))
            .addMBB(SplitBB);
    LIS->InsertMachineInstrInMaps(*MI);
  }

  return SplitBB;
}
MachineInstr *SIWholeQuadMode::lowerKillF32(MachineBasicBlock &MBB,
                                            MachineInstr &MI) {
  assert(LiveMaskReg.isVirtual());

  const DebugLoc &DL = MI.getDebugLoc();
  unsigned Opcode = 0;

  assert(MI.getOperand(0).isReg());

  // The pseudo's comparison tests for live lanes, but here we compute the
  // inverse (killed lanes), because V_CMP writes 0 for inactive lanes, so a
  // mask of live lanes would be wrong inside control flow. The inversion is
  // done by swapping the operands and inverting the condition code.
  switch (MI.getOperand(2).getImm()) {
  case ISD::SETUEQ:
    Opcode = AMDGPU::V_CMP_LG_F32_e64;
    break;
  case ISD::SETUGT:
    Opcode = AMDGPU::V_CMP_GE_F32_e64;
    break;
  case ISD::SETUGE:
    Opcode = AMDGPU::V_CMP_GT_F32_e64;
    break;
  case ISD::SETULT:
    Opcode = AMDGPU::V_CMP_LE_F32_e64;
    break;
  case ISD::SETULE:
    Opcode = AMDGPU::V_CMP_LT_F32_e64;
    break;
  case ISD::SETUNE:
    Opcode = AMDGPU::V_CMP_EQ_F32_e64;
    break;
  case ISD::SETUO:
    Opcode = AMDGPU::V_CMP_O_F32_e64;
    break;
  case ISD::SETO:
    Opcode = AMDGPU::V_CMP_U_F32_e64;
    break;
  case ISD::SETOEQ:
  case ISD::SETEQ:
    Opcode = AMDGPU::V_CMP_NEQ_F32_e64;
    break;
  case ISD::SETOGT:
  case ISD::SETGT:
    Opcode = AMDGPU::V_CMP_NLT_F32_e64;
    break;
  case ISD::SETOGE:
  case ISD::SETGE:
    Opcode = AMDGPU::V_CMP_NLE_F32_e64;
    break;
  case ISD::SETOLT:
  case ISD::SETLT:
    Opcode = AMDGPU::V_CMP_NGT_F32_e64;
    break;
  case ISD::SETOLE:
  case ISD::SETLE:
    Opcode = AMDGPU::V_CMP_NGE_F32_e64;
    break;
  case ISD::SETONE:
  case ISD::SETNE:
    Opcode = AMDGPU::V_CMP_NLG_F32_e64;
    break;
  default:
    llvm_unreachable("invalid ISD:SET cond code");
  }

  Register VCC = ST->isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;

  // ... (elided in this excerpt: build the inverted V_CMP into VCC as VcmpMI,
  // the live-mask update MaskUpdateMI (s_andn2 LiveMaskReg, VCC), an
  // SI_EARLY_TERMINATE_SCC0 as EarlyTermMI, the exec update ExecMaskMI, and a
  // trailing S_BRANCH as NewTerm.)

  // Update live intervals.
  LIS->ReplaceMachineInstrInMaps(MI, *VcmpMI);
  MBB.remove(&MI);

  LIS->InsertMachineInstrInMaps(*MaskUpdateMI);
  LIS->InsertMachineInstrInMaps(*ExecMaskMI);
  LIS->InsertMachineInstrInMaps(*EarlyTermMI);
  LIS->InsertMachineInstrInMaps(*NewTerm);

  return NewTerm;
}
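// Resulting code for a dynamic kill (sketch, wave64; exact operands depend on
// whether src0 is a VGPR):
//   v_cmp_<inv-cond>_f32 vcc, src1, src0   ; killed lanes
//   s_andn2_b64 %livemask, %livemask, vcc
//   SI_EARLY_TERMINATE_SCC0                ; quit if no lanes remain
//   s_andn2_b64 exec, exec, vcc
//   s_branch <succ>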
MachineInstr *SIWholeQuadMode::lowerKillI1(MachineBasicBlock &MBB,
                                           MachineInstr &MI, bool IsWQM) {
  assert(LiveMaskReg.isVirtual());

  const DebugLoc &DL = MI.getDebugLoc();
  MachineInstr *MaskUpdateMI = nullptr;

  const bool IsDemote = IsWQM && (MI.getOpcode() == AMDGPU::SI_DEMOTE_I1);
  const MachineOperand &Op = MI.getOperand(0);
  int64_t KillVal = MI.getOperand(1).getImm();
  MachineInstr *ComputeKilledMaskMI = nullptr;
  Register CndReg = !Op.isImm() ? Op.getReg() : Register();
  Register TmpReg;

  // Is this a static or dynamic kill?
  if (Op.isImm()) {
    if (Op.getImm() == KillVal) {
      // Static: all active lanes are killed.
      MaskUpdateMI = BuildMI(MBB, MI, DL, TII->get(AndN2Opc), LiveMaskReg)
                         .addReg(LiveMaskReg)
                         .addReg(Exec);
    } else {
      // Static: kill does nothing.
      MachineInstr *NewTerm = nullptr;
      if (MI.getOpcode() == AMDGPU::SI_DEMOTE_I1) {
        LIS->RemoveMachineInstrFromMaps(MI);
      } else {
        assert(MBB.succ_size() == 1);
        NewTerm = BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_BRANCH))
                      .addMBB(*MBB.succ_begin());
        LIS->ReplaceMachineInstrInMaps(MI, *NewTerm);
      }
      MBB.remove(&MI);
      return NewTerm;
    }
  } else {
    if (!KillVal) {
      // Op represents live lanes after the kill, so the killed mask is
      // (Op ^ Exec): only active lanes can be killed.
      TmpReg = MRI->createVirtualRegister(TRI->getBoolRC());
      ComputeKilledMaskMI =
          BuildMI(MBB, MI, DL, TII->get(XorOpc), TmpReg).add(Op).addReg(Exec);
      MaskUpdateMI = BuildMI(MBB, MI, DL, TII->get(AndN2Opc), LiveMaskReg)
                         .addReg(LiveMaskReg)
                         .addReg(TmpReg);
    } else {
      // Op represents lanes to kill.
      MaskUpdateMI = BuildMI(MBB, MI, DL, TII->get(AndN2Opc), LiveMaskReg)
                         .addReg(LiveMaskReg)
                         .add(Op);
    }
  }

  // The state of SCC represents whether any lanes are live in the mask;
  // if SCC is 0 then no lanes will be alive anymore.
  MachineInstr *EarlyTermMI =
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_EARLY_TERMINATE_SCC0));

  // If we got this far some lanes are still live; update EXEC to deactivate
  // lanes as appropriate.
  MachineInstr *NewTerm;
  MachineInstr *WQMMaskMI = nullptr;
  Register LiveMaskWQM;
  if (IsDemote) {
    // Demote: deactivate quads with only helper lanes.
    LiveMaskWQM = MRI->createVirtualRegister(TRI->getBoolRC());
    WQMMaskMI =
        BuildMI(MBB, MI, DL, TII->get(WQMOpc), LiveMaskWQM).addReg(LiveMaskReg);
    NewTerm = BuildMI(MBB, MI, DL, TII->get(AndOpc), Exec)
                  .addReg(Exec)
                  .addReg(LiveMaskWQM);
  } else {
    // Kill: deactivate lanes no longer in the live mask.
    if (Op.isImm()) {
      unsigned MovOpc = ST->isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
      NewTerm = BuildMI(MBB, &MI, DL, TII->get(MovOpc), Exec).addImm(0);
    } else if (!IsWQM) {
      NewTerm = BuildMI(MBB, &MI, DL, TII->get(AndOpc), Exec)
                    .addReg(Exec)
                    .addReg(LiveMaskReg);
    } else {
      unsigned Opcode = KillVal ? AndN2Opc : AndOpc;
      NewTerm =
          BuildMI(MBB, &MI, DL, TII->get(Opcode), Exec).addReg(Exec).add(Op);
    }
  }

  // Update live intervals.
  LIS->RemoveMachineInstrFromMaps(MI);
  MBB.remove(&MI);
  if (ComputeKilledMaskMI)
    LIS->InsertMachineInstrInMaps(*ComputeKilledMaskMI);
  LIS->InsertMachineInstrInMaps(*MaskUpdateMI);
  LIS->InsertMachineInstrInMaps(*EarlyTermMI);
  if (WQMMaskMI)
    LIS->InsertMachineInstrInMaps(*WQMMaskMI);
  LIS->InsertMachineInstrInMaps(*NewTerm);

  if (CndReg) {
    LIS->removeInterval(CndReg);
    LIS->createAndComputeVirtRegInterval(CndReg);
  }
  if (TmpReg)
    LIS->createAndComputeVirtRegInterval(TmpReg);
  if (LiveMaskWQM)
    LIS->createAndComputeVirtRegInterval(LiveMaskWQM);

  return NewTerm;
}
// Replace (or supplement) instructions accessing the live mask.
// This can only happen once all the live mask registers have been created
// and the execute state (WQM/StrictWWM/Exact) of instructions is known.
void SIWholeQuadMode::lowerBlock(MachineBasicBlock &MBB) {
  auto BII = Blocks.find(&MBB);
  if (BII == Blocks.end())
    return;

  const BlockInfo &BI = BII->second;
  if (!BI.NeedsLowering)
    return;

  LLVM_DEBUG(dbgs() << "\nLowering block " << printMBBReference(MBB) << ":\n");

  SmallVector<MachineInstr *, 4> SplitPoints;
  char State = BI.InitialState;

  for (MachineInstr &MI : llvm::make_early_inc_range(
           llvm::make_range(MBB.getFirstNonPHI(), MBB.end()))) {
    if (StateTransition.count(&MI))
      State = StateTransition[&MI];

    MachineInstr *SplitPoint = nullptr;
    switch (MI.getOpcode()) {
    case AMDGPU::SI_DEMOTE_I1:
    case AMDGPU::SI_KILL_I1_TERMINATOR:
      SplitPoint = lowerKillI1(MBB, MI, State == StateWQM);
      break;
    case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
      SplitPoint = lowerKillF32(MBB, MI);
      break;
    default:
      break;
    }
    if (SplitPoint)
      SplitPoints.push_back(SplitPoint);
  }

  // Perform splitting after the instruction scan to simplify iteration.
  if (!SplitPoints.empty()) {
    MachineBasicBlock *BB = &MBB;
    for (MachineInstr *MI : SplitPoints)
      BB = splitBlock(BB, MI);
  }
}
// Find an insertion point between First and Last where SCC is dead, saving
// and restoring SCC around the point if no such gap exists.
MachineBasicBlock::iterator SIWholeQuadMode::prepareInsertion(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator First,
    MachineBasicBlock::iterator Last, bool PreferLast, bool SaveSCC) {
  if (!SaveSCC)
    return PreferLast ? Last : First;

  LiveRange &LR =
      LIS->getRegUnit(*TRI->regunits(MCRegister::from(AMDGPU::SCC)).begin());
  auto MBBE = MBB.end();
  SlotIndex FirstIdx = First != MBBE ? LIS->getInstructionIndex(*First)
                                     : LIS->getMBBEndIdx(&MBB);
  SlotIndex LastIdx =
      Last != MBBE ? LIS->getInstructionIndex(*Last) : LIS->getMBBEndIdx(&MBB);
  SlotIndex Idx = PreferLast ? LastIdx : FirstIdx;
  const LiveRange::Segment *S;

  // Walk the SCC live segments towards a gap within [FirstIdx, LastIdx].
  for (;;) {
    S = LR.getSegmentContaining(Idx);
    if (!S)
      break;

    if (PreferLast) {
      SlotIndex Next = S->start.getBaseIndex();
      if (Next < FirstIdx)
        break;
      Idx = Next;
    } else {
      MachineInstr *EndMI = LIS->getInstructionFromIndex(S->end.getBaseIndex());
      assert(EndMI && "Segment does not end on valid instruction");
      auto NextI = std::next(EndMI->getIterator());
      if (NextI == MBB.end())
        break;
      SlotIndex Next = LIS->getInstructionIndex(*NextI);
      if (Next > LastIdx)
        break;
      Idx = Next;
    }
  }

  MachineBasicBlock::iterator MBBI;
  if (MachineInstr *MI = LIS->getInstructionFromIndex(Idx))
    MBBI = MI;
  else {
    assert(Idx == LIS->getMBBEndIdx(&MBB));
    MBBI = MBB.end();
  }

  // Move the insertion point past any operations modifying EXEC.
  // This assumes that the SCC defined by those operations does not need to be
  // preserved.
  while (MBBI != Last) {
    bool IsExecDef = false;
    for (const MachineOperand &MO : MBBI->all_defs()) {
      IsExecDef |=
          MO.getReg() == AMDGPU::EXEC_LO || MO.getReg() == AMDGPU::EXEC;
    }
    if (!IsExecDef)
      break;
    MBBI++;
    S = nullptr;
  }

  if (S)
    MBBI = saveSCC(MBB, MBBI);

  return MBBI;
}
void SIWholeQuadMode::toExact(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator Before,
                              Register SaveWQM) {
  assert(LiveMaskReg.isVirtual());

  bool IsTerminator = Before == MBB.end();
  if (!IsTerminator) {
    auto FirstTerm = MBB.getFirstTerminator();
    if (FirstTerm != MBB.end()) {
      SlotIndex FirstTermIdx = LIS->getInstructionIndex(*FirstTerm);
      SlotIndex BeforeIdx = LIS->getInstructionIndex(*Before);
      IsTerminator = BeforeIdx > FirstTermIdx;
    }
  }

  MachineInstr *MI;

  if (SaveWQM) {
    unsigned Opcode = IsTerminator ? AndSaveExecTermOpc : AndSaveExecOpc;
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(Opcode), SaveWQM)
             .addReg(LiveMaskReg);
  } else {
    unsigned Opcode = IsTerminator ? AndTermOpc : AndOpc;
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(Opcode), Exec)
             .addReg(Exec)
             .addReg(LiveMaskReg);
  }

  LIS->InsertMachineInstrInMaps(*MI);
  StateTransition[MI] = StateExact;
}
void SIWholeQuadMode::toWQM(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator Before,
                            Register SavedWQM) {
  MachineInstr *MI;

  if (SavedWQM) {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), Exec)
             .addReg(SavedWQM);
  } else {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(WQMOpc), Exec).addReg(Exec);
  }

  LIS->InsertMachineInstrInMaps(*MI);
  StateTransition[MI] = StateWQM;
}
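// Transition sketch (wave64 shown):
//   WQM -> Exact:  %save = s_and_saveexec_b64 %livemask   (save needed)
//                  exec  = s_and_b64 exec, %livemask      (otherwise)
//   Exact -> WQM:  exec  = s_wqm_b64 exec                 (WQM from exec)
//                  exec  = COPY %save                     (restore saved WQM)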
void SIWholeQuadMode::toStrictMode(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator Before,
                                   Register SaveOrig, char StrictStateNeeded) {
  MachineInstr *MI;
  assert(SaveOrig);
  assert(StrictStateNeeded == StateStrictWWM ||
         StrictStateNeeded == StateStrictWQM);

  if (StrictStateNeeded == StateStrictWWM) {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::ENTER_STRICT_WWM),
                 SaveOrig)
             .addImm(-1);
  } else {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::ENTER_STRICT_WQM),
                 SaveOrig)
             .addImm(-1);
  }
  LIS->InsertMachineInstrInMaps(*MI);
  StateTransition[MI] = StrictStateNeeded;
}
void SIWholeQuadMode::fromStrictMode(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator Before,
                                     Register SavedOrig, char NonStrictState,
                                     char CurrentStrictState) {
  MachineInstr *MI;

  assert(SavedOrig);
  assert(CurrentStrictState == StateStrictWWM ||
         CurrentStrictState == StateStrictWQM);

  if (CurrentStrictState == StateStrictWWM) {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::EXIT_STRICT_WWM),
                 Exec)
             .addReg(SavedOrig);
  } else {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::EXIT_STRICT_WQM),
                 Exec)
             .addReg(SavedOrig);
  }
  LIS->InsertMachineInstrInMaps(*MI);
  StateTransition[MI] = NonStrictState;
}
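// The ENTER_STRICT_* / EXIT_STRICT_* pseudos are expanded post-RA (e.g.
// ENTER_STRICT_WWM becomes s_or_saveexec); keeping them as pseudos here lets
// later passes still see the boundaries of the whole-wave region.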
void SIWholeQuadMode::processBlock(MachineBasicBlock &MBB, bool IsEntry) {
  auto BII = Blocks.find(&MBB);
  if (BII == Blocks.end())
    return;

  BlockInfo &BI = BII->second;

  // This is a non-entry block that is WQM throughout, so no need to do
  // anything.
  if (!IsEntry && BI.Needs == StateWQM && BI.OutNeeds != StateExact) {
    BI.InitialState = StateWQM;
    return;
  }

  LLVM_DEBUG(dbgs() << "\nProcessing block " << printMBBReference(MBB)
                    << ":\n");

  Register SavedWQMReg;
  Register SavedNonStrictReg;
  bool WQMFromExec = IsEntry;
  char State = (IsEntry || !(BI.InNeeds & StateWQM)) ? StateExact : StateWQM;
  char NonStrictState = 0;
  const TargetRegisterClass *BoolRC = TRI->getBoolRC();

  auto II = MBB.getFirstNonPHI(), IE = MBB.end();
  if (IsEntry) {
    // Skip the instruction that saves LiveMask.
    if (II != IE && II->getOpcode() == AMDGPU::COPY &&
        II->getOperand(1).getReg() == TRI->getExec())
      ++II;
  }

  // The first instruction where it's safe to switch from WQM to Exact or
  // vice versa.
  MachineBasicBlock::iterator FirstWQM = IE;

  // The first instruction where it's safe to switch to or from Strict mode;
  // always the same as, or after, FirstWQM.
  MachineBasicBlock::iterator FirstStrict = IE;

  // Record the initial state in the block information.
  BI.InitialState = State;

  for (;;) {
    MachineBasicBlock::iterator Next = II;
    char Needs = StateExact | StateWQM; // Strict mode is disabled by default.
    char OutNeeds = 0;

    if (FirstWQM == IE)
      FirstWQM = II;

    if (FirstStrict == IE)
      FirstStrict = II;

    // First, figure out the allowed states based on the propagated flags.
    if (II != IE) {
      MachineInstr &MI = *II;

      if (MI.isTerminator() || TII->mayReadEXEC(*MRI, MI)) {
        auto III = Instructions.find(&MI);
        if (III != Instructions.end()) {
          if (III->second.Needs & StateStrictWWM)
            Needs = StateStrictWWM;
          else if (III->second.Needs & StateStrictWQM)
            Needs = StateStrictWQM;
          else if (III->second.Needs & StateWQM)
            Needs = StateWQM;
          else
            Needs &= ~III->second.Disabled;
          OutNeeds = III->second.OutNeeds;
        }
      } else {
        // If the instruction doesn't actually need a correct EXEC, then we
        // can safely leave Strict mode enabled.
        Needs = StateExact | StateWQM | StateStrict;
      }

      // Exact mode exit can occur in terminators, but must be before branches.
      if (MI.isBranch() && OutNeeds == StateExact)
        Needs = StateExact;

      ++Next;
    } else {
      // End of basic block.
      if (BI.OutNeeds & StateWQM)
        Needs = StateWQM;
      else if (BI.OutNeeds == StateExact)
        Needs = StateExact;
      else
        Needs = StateWQM | StateExact;
    }

    // Now, transition if necessary.
    if (!(Needs & State)) {
      MachineBasicBlock::iterator First;
      if (State == StateStrictWWM || Needs == StateStrictWWM ||
          State == StateStrictWQM || Needs == StateStrictWQM) {
        // We must switch to or from Strict mode.
        First = FirstStrict;
      } else {
        // We only need to switch to/from WQM, so we can use FirstWQM.
        First = FirstWQM;
      }

      // Whether we need to save SCC depends on start and end states.
      bool SaveSCC = false;
      switch (State) {
      case StateExact:
      case StateStrictWWM:
      case StateStrictWQM:
        // Exact/Strict -> Strict: save SCC.
        // Exact/Strict -> WQM: save SCC if WQM comes from exec.
        // Exact/Strict -> Exact: no save.
        SaveSCC = (Needs & StateStrict) || ((Needs & StateWQM) && WQMFromExec);
        break;
      case StateWQM:
        // WQM -> Exact/Strict: save SCC.
        SaveSCC = !(Needs & StateWQM);
        break;
      default:
        llvm_unreachable("Unknown state");
        break;
      }
      char StartState = State & StateStrict ? NonStrictState : State;
      bool WQMToExact =
          StartState == StateWQM && (Needs & StateExact) && !(Needs & StateWQM);
      bool ExactToWQM = StartState == StateExact && (Needs & StateWQM) &&
                        !(Needs & StateExact);
      bool PreferLast = Needs == StateWQM;
      // Exact regions in divergent control flow may run at EXEC=0, so try to
      // exclude instructions with unwanted effects from them.
      if ((WQMToExact && (OutNeeds & StateWQM)) || ExactToWQM) {
        MachineBasicBlock::iterator I = First;
        while (I != II) {
          if (TII->hasUnwantedEffectsWhenEXECEmpty(*I)) {
            PreferLast = WQMToExact;
            break;
          }
          ++I;
        }
      }

      MachineBasicBlock::iterator Before =
          prepareInsertion(MBB, First, II, PreferLast, SaveSCC);

      if (State & StateStrict) {
        assert(State == StateStrictWWM || State == StateStrictWQM);
        assert(SavedNonStrictReg);
        fromStrictMode(MBB, Before, SavedNonStrictReg, NonStrictState, State);

        LIS->createAndComputeVirtRegInterval(SavedNonStrictReg);
        SavedNonStrictReg = 0;
        State = NonStrictState;
      }

      if (Needs & StateStrict) {
        NonStrictState = State;
        assert(Needs == StateStrictWWM || Needs == StateStrictWQM);
        assert(!SavedNonStrictReg);
        SavedNonStrictReg = MRI->createVirtualRegister(BoolRC);

        toStrictMode(MBB, Before, SavedNonStrictReg, Needs);
        State = Needs;
      } else {
        if (WQMToExact) {
          if (!WQMFromExec && (OutNeeds & StateWQM)) {
            assert(!SavedWQMReg);
            SavedWQMReg = MRI->createVirtualRegister(BoolRC);
          }

          toExact(MBB, Before, SavedWQMReg);
          State = StateExact;
        } else if (ExactToWQM) {
          assert(WQMFromExec == (SavedWQMReg == 0));

          toWQM(MBB, Before, SavedWQMReg);

          if (SavedWQMReg) {
            LIS->createAndComputeVirtRegInterval(SavedWQMReg);
            SavedWQMReg = 0;
          }
          State = StateWQM;
        } else {
          // We can get here if we transitioned from Strict to a non-strict
          // state that already matches our needs; nothing to do.
          assert(Needs & State);
        }
      }
    }

    if (Needs != (StateExact | StateWQM | StateStrict)) {
      if (Needs != (StateExact | StateWQM))
        FirstWQM = IE;
      FirstStrict = IE;
    }

    if (II == IE)
      break;

    II = Next;
  }
  assert(!SavedWQMReg);
  assert(!SavedNonStrictReg);
}
bool SIWholeQuadMode::lowerLiveMaskQueries() {
  for (MachineInstr *MI : LiveMaskQueries) {
    const DebugLoc &DL = MI->getDebugLoc();
    Register Dest = MI->getOperand(0).getReg();

    MachineInstr *Copy =
        BuildMI(*MI->getParent(), MI, DL, TII->get(AMDGPU::COPY), Dest)
            .addReg(LiveMaskReg);

    LIS->ReplaceMachineInstrInMaps(*MI, *Copy);
    MI->eraseFromParent();
  }
  return !LiveMaskQueries.empty();
}
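// Example (sketch): `%d = SI_PS_LIVE` becomes `%d = COPY %livemask`, so the
// query reads the lanes that were live at function entry minus any lanes
// demoted/killed since, independent of the current WQM/Exact state.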
bool SIWholeQuadMode::lowerCopyInstrs() {
  for (MachineInstr *MI : LowerToMovInstrs) {
    assert(MI->getNumExplicitOperands() == 2);

    const Register Reg = MI->getOperand(0).getReg();

    const TargetRegisterClass *regClass =
        TRI->getRegClassForOperandReg(*MRI, MI->getOperand(0));
    if (TRI->isVGPRClass(regClass)) {
      const unsigned MovOp = TII->getMovOpcode(regClass);
      MI->setDesc(TII->get(MovOp));

      // Check that it already implicitly depends on exec (like all VALU movs
      // should do).
      assert(any_of(MI->implicit_operands(), [](const MachineOperand &MO) {
        return MO.isUse() && MO.getReg() == AMDGPU::EXEC;
      }));
    } else {
      // Remove early-clobber and exec dependency from simple SGPR copies.
      // This allows some to be eliminated during/post RA.
      LLVM_DEBUG(dbgs() << "simplify SGPR copy: " << *MI);
      if (MI->getOperand(0).isEarlyClobber()) {
        LIS->removeInterval(Reg);
        MI->getOperand(0).setIsEarlyClobber(false);
        LIS->createAndComputeVirtRegInterval(Reg);
      }
      int Index = MI->findRegisterUseOperandIdx(AMDGPU::EXEC, nullptr);
      while (Index >= 0) {
        MI->removeOperand(Index);
        Index = MI->findRegisterUseOperandIdx(AMDGPU::EXEC, nullptr);
      }
      MI->setDesc(TII->get(AMDGPU::COPY));
      LLVM_DEBUG(dbgs() << "  -> " << *MI);
    }
  }
  for (MachineInstr *MI : LowerToCopyInstrs) {
    if (MI->getOpcode() == AMDGPU::V_SET_INACTIVE_B32 ||
        MI->getOpcode() == AMDGPU::V_SET_INACTIVE_B64) {
      assert(MI->getNumExplicitOperands() == 3);
      // The only reason we should be here is that V_SET_INACTIVE has an undef
      // inactive input, so it is being replaced by a simple copy; remove the
      // second (undef) source.
      assert(MI->getOperand(2).isUndef());
      MI->removeOperand(2);
      MI->untieRegOperand(1);
    } else {
      assert(MI->getNumExplicitOperands() == 2);
    }

    unsigned CopyOp = MI->getOperand(1).isReg()
                          ? (unsigned)AMDGPU::COPY
                          : TII->getMovOpcode(TRI->getRegClassForOperandReg(
                                *MRI, MI->getOperand(0)));
    MI->setDesc(TII->get(CopyOp));
  }
  return !LowerToCopyInstrs.empty() || !LowerToMovInstrs.empty();
}
bool SIWholeQuadMode::lowerKillInstrs(bool IsWQM) {
  for (MachineInstr *MI : KillInstrs) {
    MachineBasicBlock *MBB = MI->getParent();
    MachineInstr *SplitPoint = nullptr;
    switch (MI->getOpcode()) {
    case AMDGPU::SI_DEMOTE_I1:
    case AMDGPU::SI_KILL_I1_TERMINATOR:
      SplitPoint = lowerKillI1(*MBB, *MI, IsWQM);
      break;
    case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
      SplitPoint = lowerKillF32(*MBB, *MI);
      break;
    default:
      continue;
    }
    if (SplitPoint)
      splitBlock(MBB, SplitPoint);
  }
  return !KillInstrs.empty();
}
void SIWholeQuadMode::lowerInitExec(MachineInstr &MI) {
  MachineBasicBlock *MBB = MI.getParent();
  bool IsWave32 = ST->isWave32();

  if (MI.getOpcode() == AMDGPU::SI_INIT_EXEC) {
    // This should be before all vector instructions.
    MachineInstr *InitMI =
        BuildMI(*MBB, MBB->begin(), MI.getDebugLoc(),
                TII->get(IsWave32 ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64),
                Exec)
            .addImm(MI.getOperand(0).getImm());
    if (LIS) {
      LIS->RemoveMachineInstrFromMaps(MI);
      LIS->InsertMachineInstrInMaps(*InitMI);
    }
    MI.eraseFromParent();
    return;
  }

  // Extract the thread count from an SGPR input and set EXEC accordingly.
  // Since BFM can't shift by 64, handle that case with CMP + CMOV.
  Register InputReg = MI.getOperand(0).getReg();
  MachineInstr *FirstMI = &*MBB->begin();
  if (InputReg.isVirtual()) {
    MachineInstr *DefInstr = MRI->getVRegDef(InputReg);
    assert(DefInstr && DefInstr->isCopy());
    if (DefInstr->getParent() == MBB) {
      if (DefInstr != FirstMI) {
        // If InputReg is defined in the current block, move its definition to
        // the beginning of the block as well.
        DefInstr->removeFromParent();
        MBB->insert(FirstMI, DefInstr);
        if (LIS)
          LIS->handleMove(*DefInstr);
      } else {
        // If the first instruction is the definition, insert after it.
        FirstMI = &*std::next(FirstMI->getIterator());
      }
    }
  }

  // Insert the instruction sequence at the block beginning (before vector
  // operations).
  const DebugLoc DL = MI.getDebugLoc();
  const unsigned WavefrontSize = ST->getWavefrontSize();
  const unsigned Mask = (WavefrontSize << 1) - 1;
  Register CountReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  auto BfeMI = BuildMI(*MBB, FirstMI, DL, TII->get(AMDGPU::S_BFE_U32), CountReg)
                   .addReg(InputReg)
                   .addImm((MI.getOperand(1).getImm() & Mask) | 0x70000);
  auto BfmMI =
      BuildMI(*MBB, FirstMI, DL,
              TII->get(IsWave32 ? AMDGPU::S_BFM_B32 : AMDGPU::S_BFM_B64), Exec)
          .addReg(CountReg)
          .addImm(0);
  auto CmpMI = BuildMI(*MBB, FirstMI, DL, TII->get(AMDGPU::S_CMP_EQ_U32))
                   .addReg(CountReg, RegState::Kill)
                   .addImm(WavefrontSize);
  auto CmovMI =
      BuildMI(*MBB, FirstMI, DL,
              TII->get(IsWave32 ? AMDGPU::S_CMOV_B32 : AMDGPU::S_CMOV_B64),
              Exec)
          .addImm(-1);

  if (!LIS) {
    MI.eraseFromParent();
    return;
  }

  LIS->RemoveMachineInstrFromMaps(MI);
  MI.eraseFromParent();

  LIS->InsertMachineInstrInMaps(*BfeMI);
  LIS->InsertMachineInstrInMaps(*BfmMI);
  LIS->InsertMachineInstrInMaps(*CmpMI);
  LIS->InsertMachineInstrInMaps(*CmovMI);

  LIS->removeInterval(InputReg);
  LIS->createAndComputeVirtRegInterval(InputReg);
  LIS->createAndComputeVirtRegInterval(CountReg);
}
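// Worked example (sketch, wave64, bit offset 8 in input SGPR s0):
//   s_bfe_u32    s1, s0, 0x70008   ; extract the 7-bit thread count
//   s_bfm_b64    exec, s1, 0       ; exec = (1 << count) - 1
//   s_cmp_eq_u32 s1, 64            ; BFM can't produce an all-ones mask...
//   s_cmov_b64   exec, -1          ; ...so the count==64 case uses CMOV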
/// Lower INIT_EXEC instructions. Return a suitable insert point in the entry
/// block for instructions that initialize the live mask.
MachineBasicBlock::iterator
SIWholeQuadMode::lowerInitExecInstrs(MachineBasicBlock &Entry, bool &Changed) {
  MachineBasicBlock::iterator InsertPt = Entry.getFirstNonPHI();
  for (MachineInstr *MI : InitExecInstrs) {
    // Try to handle undefined cases gracefully:
    // - multiple INIT_EXEC instructions
    // - INIT_EXEC instructions placed in a block other than the entry block
    if (MI->getParent() == &Entry)
      InsertPt = std::next(MI->getIterator());

    lowerInitExec(*MI);
    Changed = true;
  }
  return InsertPt;
}
bool SIWholeQuadMode::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "SI Whole Quad Mode on " << MF.getName()
                    << " ------------- \n");

  Instructions.clear();
  Blocks.clear();
  LiveMaskQueries.clear();
  LowerToCopyInstrs.clear();
  LowerToMovInstrs.clear();
  KillInstrs.clear();
  InitExecInstrs.clear();
  StateTransition.clear();

  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MRI = &MF.getRegInfo();
  LIS = &getAnalysis<LiveIntervalsWrapperPass>().getLIS();
  auto *MDTWrapper = getAnalysisIfAvailable<MachineDominatorTreeWrapperPass>();
  MDT = MDTWrapper ? &MDTWrapper->getDomTree() : nullptr;
  auto *PDTWrapper =
      getAnalysisIfAvailable<MachinePostDominatorTreeWrapperPass>();
  PDT = PDTWrapper ? &PDTWrapper->getPostDomTree() : nullptr;

  if (ST->isWave32()) {
    AndOpc = AMDGPU::S_AND_B32;
    AndTermOpc = AMDGPU::S_AND_B32_term;
    AndN2Opc = AMDGPU::S_ANDN2_B32;
    XorOpc = AMDGPU::S_XOR_B32;
    AndSaveExecOpc = AMDGPU::S_AND_SAVEEXEC_B32;
    AndSaveExecTermOpc = AMDGPU::S_AND_SAVEEXEC_B32_term;
    WQMOpc = AMDGPU::S_WQM_B32;
    Exec = AMDGPU::EXEC_LO;
  } else {
    AndOpc = AMDGPU::S_AND_B64;
    AndTermOpc = AMDGPU::S_AND_B64_term;
    AndN2Opc = AMDGPU::S_ANDN2_B64;
    XorOpc = AMDGPU::S_XOR_B64;
    AndSaveExecOpc = AMDGPU::S_AND_SAVEEXEC_B64;
    AndSaveExecTermOpc = AMDGPU::S_AND_SAVEEXEC_B64_term;
    WQMOpc = AMDGPU::S_WQM_B64;
    Exec = AMDGPU::EXEC;
  }

  const char GlobalFlags = analyzeFunction(MF);
  bool Changed = false;

  LiveMaskReg = Exec;

  MachineBasicBlock &Entry = MF.front();
  MachineBasicBlock::iterator EntryMI = lowerInitExecInstrs(Entry, Changed);

  // Store a copy of the original live mask when required.
  const bool HasLiveMaskQueries = !LiveMaskQueries.empty();
  const bool HasWaveModes = GlobalFlags & ~StateExact;
  const bool HasKills = !KillInstrs.empty();
  const bool UsesWQM = GlobalFlags & StateWQM;
  if (HasKills || UsesWQM || (HasWaveModes && HasLiveMaskQueries)) {
    LiveMaskReg = MRI->createVirtualRegister(TRI->getBoolRC());
    MachineInstr *MI =
        BuildMI(Entry, EntryMI, DebugLoc(), TII->get(AMDGPU::COPY), LiveMaskReg)
            .addReg(Exec);
    LIS->InsertMachineInstrInMaps(*MI);
    Changed = true;
  }

  LLVM_DEBUG(printInfo());

  Changed |= lowerLiveMaskQueries();
  Changed |= lowerCopyInstrs();

  if (!HasWaveModes) {
    // No wave mode execution.
    Changed |= lowerKillInstrs(false);
  } else if (GlobalFlags == StateWQM) {
    // Shader only needs WQM.
    auto MI = BuildMI(Entry, EntryMI, DebugLoc(), TII->get(WQMOpc), Exec)
                  .addReg(Exec);
    LIS->InsertMachineInstrInMaps(*MI);
    lowerKillInstrs(true);
    Changed = true;
  } else {
    // Wave mode switching requires full lowering.
    for (auto BII : Blocks)
      processBlock(*BII.first, BII.first == &Entry);
    // Lowering blocks causes block splitting, so perform as a second pass.
    for (auto BII : Blocks)
      lowerBlock(*BII.first);
    Changed = true;
  }

  // Compute the live range for the live mask.
  if (LiveMaskReg != Exec)
    LIS->createAndComputeVirtRegInterval(LiveMaskReg);

  // Physical registers like SCC aren't tracked by default anyway, so just
  // removing the ranges we computed is the simplest option for maintaining
  // the analysis results.
  LIS->removeAllRegUnitsForPhysReg(AMDGPU::SCC);

  // If we performed any kills then recompute EXEC.
  if (!KillInstrs.empty() || !InitExecInstrs.empty())
    LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC);

  return Changed;
}