#define DEBUG_TYPE "si-insert-waitcnts"

    "Force emit s_waitcnt expcnt(0) instrs");
    "Force emit s_waitcnt lgkmcnt(0) instrs");
    "Force emit s_waitcnt vmcnt(0) instrs");

    "amdgpu-waitcnt-forcezero",
    cl::desc("Force all waitcnt instrs to be emitted as s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)"),

  SAMPLE_CNT = NUM_NORMAL_INST_CNTS,
  NUM_EXTENDED_INST_CNTS,
  NUM_INST_CNTS = NUM_EXTENDED_INST_CNTS
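// NOTE (added commentary): the counter enumeration is laid out so that the
// pre-GFX12 counters come first and the extended GFX12+ counters (starting
// with SAMPLE_CNT = NUM_NORMAL_INST_CNTS) follow immediately after, so a
// single MaxCounter bound selects which counter set a target uses.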
auto inst_counter_types(InstCounterType MaxCounter = NUM_INST_CNTS) {
  return enum_seq(LOAD_CNT, MaxCounter);

using RegInterval = std::pair<int, int>;

struct HardwareLimits {
  unsigned SamplecntMax;

struct RegisterEncoding {

  VMEM_SAMPLER_READ_ACCESS,
  VMEM_BVH_READ_ACCESS,
  SCRATCH_WRITE_ACCESS,

enum RegisterMapping {
  SQ_MAX_PGM_VGPRS = 512,
  SQ_MAX_PGM_SGPRS = 256,
  NUM_ALL_VGPRS = SQ_MAX_PGM_VGPRS + NUM_EXTRA_VGPRS,

static const unsigned instrsForExtendedCounterTypes[NUM_EXTENDED_INST_CNTS] = {
    AMDGPU::S_WAIT_LOADCNT, AMDGPU::S_WAIT_DSCNT, AMDGPU::S_WAIT_EXPCNT,
    AMDGPU::S_WAIT_STORECNT, AMDGPU::S_WAIT_SAMPLECNT, AMDGPU::S_WAIT_BVHCNT,
    AMDGPU::S_WAIT_KMCNT};
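// NOTE (added, illustrative): assuming the counter enum is declared in the
// same order as this initializer (LOAD, DS, EXP, STORE, SAMPLE, BVH, KM),
// indexing the table by an extended counter type yields its dedicated GFX12+
// wait instruction, e.g. instrsForExtendedCounterTypes[SAMPLE_CNT] would be
// AMDGPU::S_WAIT_SAMPLECNT.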
static bool isNormalMode(InstCounterType MaxCounter) {
  return MaxCounter == NUM_NORMAL_INST_CNTS;

  assert(updateVMCntOnly(Inst));
    return VMEM_NOSAMPLER;
  return BaseInfo->BVH ? VMEM_BVH

    return Wait.StoreCnt;
    return Wait.SampleCnt;

  unsigned &WC = getCounterRef(Wait, T);
  WC = std::min(WC, Count);

  getCounterRef(Wait, T) = ~0u;

  return getCounterRef(Wait, T);

InstCounterType eventCounter(const unsigned *masks, WaitEventType E) {
  for (auto T : inst_counter_types()) {
    if (masks[T] & (1 << E))

class WaitcntBrackets {
  WaitcntBrackets(const GCNSubtarget *SubTarget, InstCounterType MaxCounter,
                  HardwareLimits Limits, RegisterEncoding Encoding,
                  const unsigned *WaitEventMaskForInst,
                  InstCounterType SmemAccessCounter)
      : ST(SubTarget), MaxCounter(MaxCounter), Limits(Limits),
        Encoding(Encoding), WaitEventMaskForInst(WaitEventMaskForInst),
        SmemAccessCounter(SmemAccessCounter) {}

  unsigned getWaitCountMax(InstCounterType T) const {
      return Limits.LoadcntMax;
      return Limits.DscntMax;
      return Limits.ExpcntMax;
      return Limits.StorecntMax;
      return Limits.SamplecntMax;
      return Limits.BvhcntMax;
      return Limits.KmcntMax;

  unsigned getScoreLB(InstCounterType T) const {
  unsigned getScoreUB(InstCounterType T) const {
  unsigned getScoreRange(InstCounterType T) const {
    return getScoreUB(T) - getScoreLB(T);

  unsigned getRegScore(int GprNo, InstCounterType T) const {
    if (GprNo < NUM_ALL_VGPRS) {
      return VgprScores[T][GprNo];
    assert(T == SmemAccessCounter);
    return SgprScores[GprNo - NUM_ALL_VGPRS];

  bool counterOutOfOrder(InstCounterType T) const;
  void simplifyWaitcnt(InstCounterType T, unsigned &Count) const;
  void applyWaitcnt(InstCounterType T, unsigned Count);

  unsigned hasPendingEvent() const { return PendingEvents; }
  unsigned hasPendingEvent(WaitEventType E) const {
    return PendingEvents & (1 << E);
  unsigned hasPendingEvent(InstCounterType T) const {
    unsigned HasPending = PendingEvents & WaitEventMaskForInst[T];
    assert((HasPending != 0) == (getScoreRange(T) != 0));

  bool hasMixedPendingEvents(InstCounterType T) const {
    unsigned Events = hasPendingEvent(T);
    return Events & (Events - 1);
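// NOTE (added commentary): `Events & (Events - 1)` clears the lowest set bit,
// so the result is non-zero exactly when more than one distinct event kind is
// pending on counter T, e.g. 0b0101 -> 0b0100 (mixed) but 0b0100 -> 0.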
  bool hasPendingFlat() const {
    return ((LastFlat[DS_CNT] > ScoreLBs[DS_CNT] &&
             LastFlat[DS_CNT] <= ScoreUBs[DS_CNT]) ||
            (LastFlat[LOAD_CNT] > ScoreLBs[LOAD_CNT] &&
             LastFlat[LOAD_CNT] <= ScoreUBs[LOAD_CNT]));

  void setPendingFlat() {
    LastFlat[LOAD_CNT] = ScoreUBs[LOAD_CNT];
    LastFlat[DS_CNT] = ScoreUBs[DS_CNT];

  bool hasOtherPendingVmemTypes(int GprNo, VmemType V) const {
    assert(GprNo < NUM_ALL_VGPRS);
    return VgprVmemTypes[GprNo] & ~(1 << V);

  void clearVgprVmemTypes(int GprNo) {
    assert(GprNo < NUM_ALL_VGPRS);
    VgprVmemTypes[GprNo] = 0;

  void setStateOnFunctionEntryOrReturn() {
    setScoreUB(STORE_CNT, getScoreUB(STORE_CNT) + getWaitCountMax(STORE_CNT));
    PendingEvents |= WaitEventMaskForInst[STORE_CNT];
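// NOTE (added commentary): at a function boundary the number of outstanding
// stores is unknown, so the bracket conservatively bumps the STORE_CNT upper
// bound by the hardware maximum and marks store events as pending; later
// waits then assume the worst case.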
  static bool mergeScore(const MergeInfo &M, unsigned &Score,
                         unsigned OtherScore);

  void setScoreLB(InstCounterType T, unsigned Val) {
  void setScoreUB(InstCounterType T, unsigned Val) {
    if (getScoreRange(EXP_CNT) > getWaitCountMax(EXP_CNT))

  void setRegScore(int GprNo, InstCounterType T, unsigned Val) {
    if (GprNo < NUM_ALL_VGPRS) {
      VgprUB = std::max(VgprUB, GprNo);
      VgprScores[T][GprNo] = Val;
    assert(T == SmemAccessCounter);
    SgprUB = std::max(SgprUB, GprNo - NUM_ALL_VGPRS);
    SgprScores[GprNo - NUM_ALL_VGPRS] = Val;

              unsigned OpNo, unsigned Val);

  InstCounterType MaxCounter = NUM_EXTENDED_INST_CNTS;
  HardwareLimits Limits = {};
  RegisterEncoding Encoding = {};
  const unsigned *WaitEventMaskForInst;
  InstCounterType SmemAccessCounter;
  unsigned ScoreLBs[NUM_INST_CNTS] = {0};
  unsigned ScoreUBs[NUM_INST_CNTS] = {0};
  unsigned PendingEvents = 0;
  unsigned LastFlat[NUM_INST_CNTS] = {0};
  unsigned VgprScores[NUM_INST_CNTS][NUM_ALL_VGPRS] = {{0}};
  unsigned SgprScores[SQ_MAX_PGM_SGPRS] = {0};
  unsigned char VgprVmemTypes[NUM_ALL_VGPRS] = {0};

class WaitcntGenerator {
  InstCounterType MaxCounter;

  WaitcntGenerator() = default;
  WaitcntGenerator(const MachineFunction &MF, InstCounterType MaxCounter)

  bool isOptNone() const { return OptNone; }

  applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,

  virtual const unsigned *getWaitEventMask() const = 0;
  virtual AMDGPU::Waitcnt getAllZeroWaitcnt(bool IncludeVSCnt) const = 0;
  virtual ~WaitcntGenerator() = default;

  static constexpr unsigned
  eventMask(std::initializer_list<WaitEventType> Events) {
    for (auto &E : Events)

class WaitcntGeneratorPreGFX12 : public WaitcntGenerator {
  WaitcntGeneratorPreGFX12() = default;
      : WaitcntGenerator(MF, NUM_NORMAL_INST_CNTS) {}

  applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,

  const unsigned *getWaitEventMask() const override {
    static const unsigned WaitEventMaskForInstPreGFX12[NUM_INST_CNTS] = {
        eventMask({VMEM_ACCESS, VMEM_READ_ACCESS, VMEM_SAMPLER_READ_ACCESS,
                   VMEM_BVH_READ_ACCESS}),
        eventMask({SMEM_ACCESS, LDS_ACCESS, GDS_ACCESS, SQ_MESSAGE}),
        eventMask({EXP_GPR_LOCK, GDS_GPR_LOCK, VMW_GPR_LOCK, EXP_PARAM_ACCESS,
                   EXP_POS_ACCESS, EXP_LDS_ACCESS}),
        eventMask({VMEM_WRITE_ACCESS, SCRATCH_WRITE_ACCESS}),

    return WaitEventMaskForInstPreGFX12;

class WaitcntGeneratorGFX12Plus : public WaitcntGenerator {
  WaitcntGeneratorGFX12Plus() = default;
                            InstCounterType MaxCounter)
      : WaitcntGenerator(MF, MaxCounter) {}

  applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,

  const unsigned *getWaitEventMask() const override {
    static const unsigned WaitEventMaskForInstGFX12Plus[NUM_INST_CNTS] = {
        eventMask({VMEM_ACCESS, VMEM_READ_ACCESS}),
        eventMask({LDS_ACCESS, GDS_ACCESS}),
        eventMask({EXP_GPR_LOCK, GDS_GPR_LOCK, VMW_GPR_LOCK, EXP_PARAM_ACCESS,
                   EXP_POS_ACCESS, EXP_LDS_ACCESS}),
        eventMask({VMEM_WRITE_ACCESS, SCRATCH_WRITE_ACCESS}),
        eventMask({VMEM_SAMPLER_READ_ACCESS}),
        eventMask({VMEM_BVH_READ_ACCESS}),
        eventMask({SMEM_ACCESS, SQ_MESSAGE})};

    return WaitEventMaskForInstGFX12Plus;
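// NOTE (added commentary): compared with the pre-GFX12 table above, the
// GFX12+ mapping splits the old vmcnt events across LOAD_CNT, SAMPLE_CNT and
// BVH_CNT (sampler and BVH reads get their own counters) and splits lgkmcnt
// into DS_CNT and KM_CNT, with SMEM_ACCESS and SQ_MESSAGE moving to KM_CNT.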
  std::unique_ptr<WaitcntBrackets> Incoming;

  InstCounterType SmemAccessCounter;
  bool ForceEmitZeroWaitcnts;
  bool ForceEmitWaitcnt[NUM_INST_CNTS];
  WaitcntGeneratorPreGFX12 WCGPreGFX12;
  WaitcntGeneratorGFX12Plus WCGGFX12Plus;
  WaitcntGenerator *WCG = nullptr;
  InstCounterType MaxCounter = NUM_NORMAL_INST_CNTS;

    (void)ForceExpCounter;
    (void)ForceLgkmCounter;
    (void)ForceVMCounter;

  bool shouldFlushVmCnt(MachineLoop *ML, WaitcntBrackets &Brackets);
                          WaitcntBrackets &ScoreBrackets);

    return "SI insert wait instructions";

  bool isForceEmitWaitcnt() const {
    for (auto T : inst_counter_types())
      if (ForceEmitWaitcnt[T])

  void setForceEmitWaitcnt() {
      ForceEmitWaitcnt[EXP_CNT] = true;
      ForceEmitWaitcnt[EXP_CNT] = false;
      ForceEmitWaitcnt[DS_CNT] = true;
      ForceEmitWaitcnt[KM_CNT] = true;
      ForceEmitWaitcnt[DS_CNT] = false;
      ForceEmitWaitcnt[KM_CNT] = false;
      ForceEmitWaitcnt[LOAD_CNT] = true;
      ForceEmitWaitcnt[SAMPLE_CNT] = true;
      ForceEmitWaitcnt[BVH_CNT] = true;
      ForceEmitWaitcnt[LOAD_CNT] = false;
      ForceEmitWaitcnt[SAMPLE_CNT] = false;
      ForceEmitWaitcnt[BVH_CNT] = false;

  WaitEventType getVmemWaitEventType(const MachineInstr &Inst) const {
    static const WaitEventType VmemReadMapping[NUM_VMEM_TYPES] = {
        VMEM_READ_ACCESS, VMEM_SAMPLER_READ_ACCESS, VMEM_BVH_READ_ACCESS};

      return SCRATCH_WRITE_ACCESS;
      return VMEM_WRITE_ACCESS;
      return VMEM_READ_ACCESS;
    return VmemReadMapping[getVmemType(Inst)];
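// NOTE (added commentary): getVmemWaitEventType classifies a VMEM instruction
// into the event that feeds the right counter: scratch writes, other VMEM
// writes, plain VMEM reads, or (via VmemReadMapping) sampler/BVH reads on
// targets where those have dedicated counters.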
                               WaitcntBrackets &ScoreBrackets,
                     WaitcntBrackets *ScoreBrackets);
                            WaitcntBrackets &ScoreBrackets);

RegInterval WaitcntBrackets::getRegInterval(const MachineInstr *MI,
                                            unsigned OpNo) const {
  if (!TRI->isInAllocatableClass(Op.getReg()))

  if (TRI->isVectorRegister(*MRI, Op.getReg())) {
    assert(Reg >= Encoding.VGPR0 && Reg <= Encoding.VGPRL);
      Result.first += AGPR_OFFSET;
  } else if (TRI->isSGPRReg(*MRI, Op.getReg())) {
    assert(Reg >= Encoding.SGPR0 && Reg < SQ_MAX_PGM_SGPRS);
    Result.first = Reg - Encoding.SGPR0 + NUM_ALL_VGPRS;
           Result.first < SQ_MAX_PGM_SGPRS + NUM_ALL_VGPRS);

  unsigned Size = TRI->getRegSizeInBits(*RC);

  assert(TRI->isVectorRegister(*MRI, MI->getOperand(OpNo).getReg()));
    setRegScore(RegNo, EXP_CNT, Val);

  InstCounterType T = eventCounter(WaitEventMaskForInst, E);

  unsigned UB = getScoreUB(T);
  unsigned CurrScore = UB + 1;
  PendingEvents |= 1 << E;
  setScoreUB(T, CurrScore);

      if (AddrOpIdx != -1) {
        setExpScore(&Inst, TII, TRI, MRI, AddrOpIdx, CurrScore);
                               AMDGPU::OpName::data1),
          Inst.getOpcode() != AMDGPU::DS_CONSUME &&
          Inst.getOpcode() != AMDGPU::DS_ORDERED_COUNT) {
          if (Op.isReg() && !Op.isDef() &&
              TRI->isVectorRegister(*MRI, Op.getReg())) {
            setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
    } else if (TII->isFLAT(Inst)) {
    } else if (TII->isMIMG(Inst)) {
        setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
    } else if (TII->isMTBUF(Inst)) {
        setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
    } else if (TII->isMUBUF(Inst)) {
        setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
    } else if (TII->isLDSDIR(Inst)) {
      if (TII->isEXP(Inst)) {
          setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);

      if (!Op.isReg() || !Op.isDef())
      if (T == LOAD_CNT || T == SAMPLE_CNT || T == BVH_CNT) {
        if (Interval.first >= NUM_ALL_VGPRS)
        if (updateVMCntOnly(Inst)) {
          VmemType V = getVmemType(Inst);
            VgprVmemTypes[RegNo] |= 1 << V;
        setRegScore(RegNo, T, CurrScore);

             (TII->isDS(Inst) || TII->mayWriteLDSThroughDMA(Inst))) {
        if (!MemOp->isStore() ||
        auto AAI = MemOp->getAAInfo();
        if (!AAI || !AAI.Scope)
        for (unsigned I = 0, E = LDSDMAStores.size(); I != E && !Slot; ++I) {
          for (const auto *MemOp : LDSDMAStores[I]->memoperands()) {
            if (MemOp->isStore() && AAI == MemOp->getAAInfo()) {
        if (Slot || LDSDMAStores.size() == NUM_EXTRA_VGPRS - 1)
        LDSDMAStores.push_back(&Inst);
        Slot = LDSDMAStores.size();
        setRegScore(SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS + Slot, T, CurrScore);
      setRegScore(SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS, T, CurrScore);

  for (auto T : inst_counter_types(MaxCounter)) {
    unsigned SR = getScoreRange(T);

      OS << " " << (ST->hasExtendedWaitCounts() ? "LOAD" : "VM") << "_CNT("
      OS << " " << (ST->hasExtendedWaitCounts() ? "DS" : "LGKM") << "_CNT("
      OS << " EXP_CNT(" << SR << "): ";
      OS << " " << (ST->hasExtendedWaitCounts() ? "STORE" : "VS") << "_CNT("
      OS << " SAMPLE_CNT(" << SR << "): ";
      OS << " BVH_CNT(" << SR << "): ";
      OS << " KM_CNT(" << SR << "): ";
      OS << " UNKNOWN(" << SR << "): ";

      unsigned LB = getScoreLB(T);
      for (int J = 0; J <= VgprUB; J++) {
        unsigned RegScore = getRegScore(J, T);
        unsigned RelScore = RegScore - LB - 1;
        if (J < SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS) {
          OS << RelScore << ":v" << J << " ";
          OS << RelScore << ":ds ";
      if (T == SmemAccessCounter) {
        for (int J = 0; J <= SgprUB; J++) {
          unsigned RegScore = getRegScore(J + NUM_ALL_VGPRS, T);
          unsigned RelScore = RegScore - LB - 1;
          OS << RelScore << ":s" << J << " ";

  simplifyWaitcnt(LOAD_CNT, Wait.LoadCnt);
  simplifyWaitcnt(EXP_CNT, Wait.ExpCnt);
  simplifyWaitcnt(DS_CNT, Wait.DsCnt);
  simplifyWaitcnt(STORE_CNT, Wait.StoreCnt);
  simplifyWaitcnt(SAMPLE_CNT, Wait.SampleCnt);
  simplifyWaitcnt(BVH_CNT, Wait.BvhCnt);
  simplifyWaitcnt(KM_CNT, Wait.KmCnt);

void WaitcntBrackets::simplifyWaitcnt(InstCounterType T,
                                      unsigned &Count) const {
  if (Count >= getScoreRange(T))

void WaitcntBrackets::determineWait(InstCounterType T, int RegNo,
  unsigned ScoreToWait = getRegScore(RegNo, T);
  const unsigned LB = getScoreLB(T);
  const unsigned UB = getScoreUB(T);
  if ((UB >= ScoreToWait) && (ScoreToWait > LB)) {
    if ((T == LOAD_CNT || T == DS_CNT) && hasPendingFlat() &&
        !ST->hasFlatLgkmVMemCountInOrder()) {
      addWait(Wait, T, 0);
    } else if (counterOutOfOrder(T)) {
      addWait(Wait, T, 0);
      unsigned NeededWait = std::min(UB - ScoreToWait, getWaitCountMax(T) - 1);
      addWait(Wait, T, NeededWait);
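// NOTE (added, worked example with made-up values): with LB = 2, UB = 7 and a
// register score of 5, there are UB - Score = 2 operations issued after the
// one that wrote the register, so waiting for min(2, getWaitCountMax(T) - 1)
// outstanding operations guarantees it has completed on an in-order counter
// without forcing a full flush.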
  applyWaitcnt(LOAD_CNT, Wait.LoadCnt);
  applyWaitcnt(EXP_CNT, Wait.ExpCnt);
  applyWaitcnt(DS_CNT, Wait.DsCnt);
  applyWaitcnt(STORE_CNT, Wait.StoreCnt);
  applyWaitcnt(SAMPLE_CNT, Wait.SampleCnt);
  applyWaitcnt(BVH_CNT, Wait.BvhCnt);
  applyWaitcnt(KM_CNT, Wait.KmCnt);

void WaitcntBrackets::applyWaitcnt(InstCounterType T, unsigned Count) {
  const unsigned UB = getScoreUB(T);
    if (counterOutOfOrder(T))
    setScoreLB(T, std::max(getScoreLB(T), UB - Count));
    PendingEvents &= ~WaitEventMaskForInst[T];
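// NOTE (added commentary): applying a wait of Count raises the lower bound to
// UB - Count, since everything older must have completed; a wait to zero also
// clears the pending-event mask for the counter, while out-of-order counters
// are only trimmed by such a full wait.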
bool WaitcntBrackets::counterOutOfOrder(InstCounterType T) const {
  if (T == SmemAccessCounter && hasPendingEvent(SMEM_ACCESS))
  return hasMixedPendingEvents(T);
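// NOTE (added commentary): a counter is treated as out-of-order when events
// of different kinds are mixed on it (and SMEM accesses in particular can
// complete out of order), in which case a partial wait proves nothing and
// only a wait to zero is meaningful.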
char SIInsertWaitcnts::ID = 0;

  return new SIInsertWaitcnts();

  if (NewEnc == MO.getImm())

  case AMDGPU::S_WAIT_LOADCNT:
  case AMDGPU::S_WAIT_EXPCNT:
  case AMDGPU::S_WAIT_STORECNT:
  case AMDGPU::S_WAIT_SAMPLECNT:
  case AMDGPU::S_WAIT_BVHCNT:
  case AMDGPU::S_WAIT_DSCNT:
  case AMDGPU::S_WAIT_KMCNT:

bool WaitcntGenerator::promoteSoftWaitCnt(MachineInstr *Waitcnt) const {

bool WaitcntGeneratorPreGFX12::applyPreexistingWaitcnt(
    WaitcntBrackets &ScoreBrackets, MachineInstr &OldWaitcntInstr,
  assert(isNormalMode(MaxCounter));

    if (II.isMetaInstruction())

    bool TrySimplify = Opcode != II.getOpcode() && !OptNone;

    if (Opcode == AMDGPU::S_WAITCNT) {
      unsigned IEnc = II.getOperand(0).getImm();
        ScoreBrackets.simplifyWaitcnt(OldWait);

      if (WaitcntInstr || (!Wait.hasWaitExceptStoreCnt() && TrySimplify)) {
        II.eraseFromParent();

      assert(Opcode == AMDGPU::S_WAITCNT_VSCNT);
      assert(II.getOperand(0).getReg() == AMDGPU::SGPR_NULL);
          TII->getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
        ScoreBrackets.simplifyWaitcnt(InstCounterType::STORE_CNT, OldVSCnt);
      Wait.StoreCnt = std::min(Wait.StoreCnt, OldVSCnt);

      if (WaitcntVsCntInstr || (!Wait.hasWaitStoreCnt() && TrySimplify)) {
        II.eraseFromParent();
        WaitcntVsCntInstr = &II;

    Modified |= promoteSoftWaitCnt(WaitcntInstr);

    ScoreBrackets.applyWaitcnt(LOAD_CNT, Wait.LoadCnt);
    ScoreBrackets.applyWaitcnt(EXP_CNT, Wait.ExpCnt);
    ScoreBrackets.applyWaitcnt(DS_CNT, Wait.DsCnt);

                   << "applyPreexistingWaitcnt\n"
                   << "New Instr at block end: " << *WaitcntInstr << '\n'
             : dbgs() << "applyPreexistingWaitcnt\n"
                      << "Old Instr: " << *It
                      << "New Instr: " << *WaitcntInstr << '\n');

  if (WaitcntVsCntInstr) {
                                         AMDGPU::OpName::simm16, Wait.StoreCnt);
    Modified |= promoteSoftWaitCnt(WaitcntVsCntInstr);
    ScoreBrackets.applyWaitcnt(STORE_CNT, Wait.StoreCnt);
    Wait.StoreCnt = ~0u;

             ? dbgs() << "applyPreexistingWaitcnt\n"
                      << "New Instr at block end: " << *WaitcntVsCntInstr
             : dbgs() << "applyPreexistingWaitcnt\n"
                      << "Old Instr: " << *It
                      << "New Instr: " << *WaitcntVsCntInstr << '\n');

bool WaitcntGeneratorPreGFX12::createNewWaitcnt(
  assert(isNormalMode(MaxCounter));

  if (Wait.hasWaitExceptStoreCnt()) {
    [[maybe_unused]] auto SWaitInst =
               if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
               dbgs() << "New Instr: " << *SWaitInst << '\n');

  if (Wait.hasWaitStoreCnt()) {
    [[maybe_unused]] auto SWaitInst =
               if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
               dbgs() << "New Instr: " << *SWaitInst << '\n');

WaitcntGeneratorPreGFX12::getAllZeroWaitcnt(bool IncludeVSCnt) const {

WaitcntGeneratorGFX12Plus::getAllZeroWaitcnt(bool IncludeVSCnt) const {

bool WaitcntGeneratorGFX12Plus::applyPreexistingWaitcnt(
    WaitcntBrackets &ScoreBrackets, MachineInstr &OldWaitcntInstr,
  assert(!isNormalMode(MaxCounter));

    if (II.isMetaInstruction())

    bool TrySimplify = Opcode != II.getOpcode() && !OptNone;

    if (Opcode == AMDGPU::S_WAITCNT)

    if (Opcode == AMDGPU::S_WAIT_LOADCNT_DSCNT) {
          TII->getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
        ScoreBrackets.simplifyWaitcnt(OldWait);
      UpdatableInstr = &CombinedLoadDsCntInstr;
    } else if (Opcode == AMDGPU::S_WAIT_STORECNT_DSCNT) {
          TII->getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
        ScoreBrackets.simplifyWaitcnt(OldWait);
      UpdatableInstr = &CombinedStoreDsCntInstr;
          TII->getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
        ScoreBrackets.simplifyWaitcnt(CT.value(), OldCnt);
      addWait(Wait, CT.value(), OldCnt);
      UpdatableInstr = &WaitInstrs[CT.value()];

    if (!*UpdatableInstr) {
      *UpdatableInstr = &II;
      II.eraseFromParent();

  if (CombinedLoadDsCntInstr) {
    if (Wait.LoadCnt != ~0u && Wait.DsCnt != ~0u) {
                                         AMDGPU::OpName::simm16, NewEnc);
      Modified |= promoteSoftWaitCnt(CombinedLoadDsCntInstr);
      ScoreBrackets.applyWaitcnt(LOAD_CNT, Wait.LoadCnt);
      ScoreBrackets.applyWaitcnt(DS_CNT, Wait.DsCnt);

               ? dbgs() << "applyPreexistingWaitcnt\n"
                        << "New Instr at block end: "
                        << *CombinedLoadDsCntInstr << '\n'
               : dbgs() << "applyPreexistingWaitcnt\n"
                        << "Old Instr: " << *It << "New Instr: "
                        << *CombinedLoadDsCntInstr << '\n');

  if (CombinedStoreDsCntInstr) {
    if (Wait.StoreCnt != ~0u && Wait.DsCnt != ~0u) {
                                         AMDGPU::OpName::simm16, NewEnc);
      Modified |= promoteSoftWaitCnt(CombinedStoreDsCntInstr);
      ScoreBrackets.applyWaitcnt(STORE_CNT, Wait.StoreCnt);
      ScoreBrackets.applyWaitcnt(DS_CNT, Wait.DsCnt);
      Wait.StoreCnt = ~0u;

               ? dbgs() << "applyPreexistingWaitcnt\n"
                        << "New Instr at block end: "
                        << *CombinedStoreDsCntInstr << '\n'
               : dbgs() << "applyPreexistingWaitcnt\n"
                        << "Old Instr: " << *It << "New Instr: "
                        << *CombinedStoreDsCntInstr << '\n');

  if (Wait.DsCnt != ~0u) {
    if (Wait.LoadCnt != ~0u) {
      WaitsToErase.push_back(&WaitInstrs[LOAD_CNT]);
      WaitsToErase.push_back(&WaitInstrs[DS_CNT]);
    } else if (Wait.StoreCnt != ~0u) {
      WaitsToErase.push_back(&WaitInstrs[STORE_CNT]);
      WaitsToErase.push_back(&WaitInstrs[DS_CNT]);

      (*WI)->eraseFromParent();

  for (auto CT : inst_counter_types(NUM_EXTENDED_INST_CNTS)) {
    if (!WaitInstrs[CT])
    unsigned NewCnt = getWait(Wait, CT);
    if (NewCnt != ~0u) {
                                           AMDGPU::OpName::simm16, NewCnt);
      Modified |= promoteSoftWaitCnt(WaitInstrs[CT]);
      ScoreBrackets.applyWaitcnt(CT, NewCnt);
      setNoWait(Wait, CT);

               ? dbgs() << "applyPreexistingWaitcnt\n"
                        << "New Instr at block end: " << *WaitInstrs[CT]
               : dbgs() << "applyPreexistingWaitcnt\n"
                        << "Old Instr: " << *It
                        << "New Instr: " << *WaitInstrs[CT] << '\n');

bool WaitcntGeneratorGFX12Plus::createNewWaitcnt(
  assert(!isNormalMode(MaxCounter));

  if (Wait.DsCnt != ~0u) {
    if (Wait.LoadCnt != ~0u) {
    } else if (Wait.StoreCnt != ~0u) {
      Wait.StoreCnt = ~0u;

               if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
               dbgs() << "New Instr: " << *SWaitInst << '\n');

  for (auto CT : inst_counter_types(NUM_EXTENDED_INST_CNTS)) {
    unsigned Count = getWait(Wait, CT);
    [[maybe_unused]] auto SWaitInst =
               if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
               dbgs() << "New Instr: " << *SWaitInst << '\n');

  unsigned Opc = MI.getOpcode();
  return (Opc == AMDGPU::S_CBRANCH_VCCNZ || Opc == AMDGPU::S_CBRANCH_VCCZ) &&
         !MI.getOperand(1).isUndef();

bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
                                                 WaitcntBrackets &ScoreBrackets,
  setForceEmitWaitcnt();

  if (MI.isMetaInstruction())

  if (MI.getOpcode() == AMDGPU::BUFFER_WBINVL1 ||
      MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_SC ||
      MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_VOL ||
      MI.getOpcode() == AMDGPU::BUFFER_GL0_INV ||
      MI.getOpcode() == AMDGPU::BUFFER_GL1_INV) {

  if (MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG ||
      MI.getOpcode() == AMDGPU::SI_RETURN ||
      MI.getOpcode() == AMDGPU::S_SETPC_B64_return ||
    Wait = Wait.combined(WCG->getAllZeroWaitcnt(false));

  else if (MI.getOpcode() == AMDGPU::S_ENDPGM ||
           MI.getOpcode() == AMDGPU::S_ENDPGM_SAVED) {
        ScoreBrackets.getScoreRange(STORE_CNT) != 0 &&
        !ScoreBrackets.hasPendingEvent(SCRATCH_WRITE_ACCESS))

  else if ((MI.getOpcode() == AMDGPU::S_SENDMSG ||
            MI.getOpcode() == AMDGPU::S_SENDMSGHALT) &&
           ST->hasLegacyGeometry() &&

    if (MI.modifiesRegister(AMDGPU::EXEC, TRI)) {
      if (ScoreBrackets.hasPendingEvent(EXP_GPR_LOCK) ||
          ScoreBrackets.hasPendingEvent(EXP_PARAM_ACCESS) ||
          ScoreBrackets.hasPendingEvent(EXP_POS_ACCESS) ||
          ScoreBrackets.hasPendingEvent(GDS_GPR_LOCK)) {

      if (MI.getOperand(CallAddrOpIdx).isReg()) {
        RegInterval CallAddrOpInterval =
            ScoreBrackets.getRegInterval(&MI, MRI, TRI, CallAddrOpIdx);

        for (int RegNo = CallAddrOpInterval.first;
             RegNo < CallAddrOpInterval.second; ++RegNo)
          ScoreBrackets.determineWait(SmemAccessCounter, RegNo, Wait);

        if (RtnAddrOpIdx != -1) {
          RegInterval RtnAddrOpInterval =
              ScoreBrackets.getRegInterval(&MI, MRI, TRI, RtnAddrOpIdx);

          for (int RegNo = RtnAddrOpInterval.first;
               RegNo < RtnAddrOpInterval.second; ++RegNo)
            ScoreBrackets.determineWait(SmemAccessCounter, RegNo, Wait);

        const Value *Ptr = Memop->getValue();
        if (Memop->isStore() && SLoadAddresses.count(Ptr)) {
          addWait(Wait, SmemAccessCounter, 0);

        unsigned AS = Memop->getAddrSpace();
        if (TII->mayWriteLDSThroughDMA(MI))

        unsigned RegNo = SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS;
        bool FoundAliasingStore = false;
        if (Ptr && Memop->getAAInfo() && Memop->getAAInfo().Scope) {
          const auto &LDSDMAStores = ScoreBrackets.getLDSDMAStores();
          for (unsigned I = 0, E = LDSDMAStores.size(); I != E; ++I) {
            if (MI.mayAlias(AA, *LDSDMAStores[I], true)) {
              FoundAliasingStore = true;
              ScoreBrackets.determineWait(LOAD_CNT, RegNo + I + 1, Wait);
        if (!FoundAliasingStore)
          ScoreBrackets.determineWait(LOAD_CNT, RegNo, Wait);
        if (Memop->isStore()) {
          ScoreBrackets.determineWait(EXP_CNT, RegNo, Wait);

      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        if (Op.isTied() && Op.isUse() && TII->doesNotReadTiedSource(MI))

        const bool IsVGPR = TRI->isVectorRegister(*MRI, Op.getReg());
          if (Op.isUse() || !updateVMCntOnly(MI) ||
              ScoreBrackets.hasOtherPendingVmemTypes(RegNo,
            ScoreBrackets.determineWait(LOAD_CNT, RegNo, Wait);
            ScoreBrackets.determineWait(SAMPLE_CNT, RegNo, Wait);
            ScoreBrackets.determineWait(BVH_CNT, RegNo, Wait);
            ScoreBrackets.clearVgprVmemTypes(RegNo);
          if (Op.isDef() || ScoreBrackets.hasPendingEvent(EXP_LDS_ACCESS)) {
            ScoreBrackets.determineWait(EXP_CNT, RegNo, Wait);
          ScoreBrackets.determineWait(DS_CNT, RegNo, Wait);
          ScoreBrackets.determineWait(SmemAccessCounter, RegNo, Wait);

  if (TII->isBarrierStart(MI.getOpcode()) &&
      !ST->hasAutoWaitcntBeforeBarrier() && !ST->supportsBackOffBarrier()) {
    Wait = Wait.combined(WCG->getAllZeroWaitcnt(true));

  if (ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {

  ScoreBrackets.simplifyWaitcnt(Wait);

  if (ForceEmitZeroWaitcnts)
    Wait = WCG->getAllZeroWaitcnt(false);

  if (ForceEmitWaitcnt[LOAD_CNT])
  if (ForceEmitWaitcnt[EXP_CNT])
  if (ForceEmitWaitcnt[DS_CNT])
  if (ForceEmitWaitcnt[SAMPLE_CNT])
  if (ForceEmitWaitcnt[BVH_CNT])
  if (ForceEmitWaitcnt[KM_CNT])

    if (ScoreBrackets.hasPendingEvent(LOAD_CNT))
    if (ScoreBrackets.hasPendingEvent(SAMPLE_CNT))
    if (ScoreBrackets.hasPendingEvent(BVH_CNT))

  return generateWaitcnt(Wait, MI.getIterator(), *MI.getParent(), ScoreBrackets,

                                       WaitcntBrackets &ScoreBrackets,
  if (OldWaitcntInstr)
        WCG->applyPreexistingWaitcnt(ScoreBrackets, *OldWaitcntInstr, Wait, It);

  ScoreBrackets.applyWaitcnt(Wait);

  if (Wait.ExpCnt != ~0u && It != Block.instr_end() &&
        TII->getNamedOperand(*It, AMDGPU::OpName::waitexp);

               << "Update Instr: " << *It);

  if (WCG->createNewWaitcnt(Block, It, Wait))

bool SIInsertWaitcnts::mayAccessVMEMThroughFlat(const MachineInstr &MI) const {
  if (MI.memoperands_empty())
    unsigned AS = Memop->getAddrSpace();

bool SIInsertWaitcnts::mayAccessLDSThroughFlat(const MachineInstr &MI) const {
  if (!TII->usesLGKM_CNT(MI))
  if (ST->isTgSplitEnabled())
  if (MI.memoperands_empty())
    unsigned AS = Memop->getAddrSpace();

bool SIInsertWaitcnts::mayAccessScratchThroughFlat(
  if (TII->isFLATScratch(MI))
  if (TII->isFLATGlobal(MI))
  if (MI.memoperands_empty())
    unsigned AS = Memop->getAddrSpace();
    return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS;

  return Opc == AMDGPU::GLOBAL_INV || Opc == AMDGPU::GLOBAL_WB ||
         Opc == AMDGPU::GLOBAL_WBINV;

void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
                                               WaitcntBrackets *ScoreBrackets) {
  if (TII->isDS(Inst) && TII->usesLGKM_CNT(Inst)) {
        TII->hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_ACCESS, Inst);
      ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_GPR_LOCK, Inst);
      ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
  } else if (TII->isFLAT(Inst)) {
    int FlatASCount = 0;

    if (mayAccessVMEMThroughFlat(Inst)) {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, getVmemWaitEventType(Inst),

    if (mayAccessLDSThroughFlat(Inst)) {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);

    if (FlatASCount > 1)
      ScoreBrackets->setPendingFlat();

    ScoreBrackets->updateByEvent(TII, TRI, MRI, getVmemWaitEventType(Inst),
    if (ST->vmemWriteNeedsExpWaitcnt() &&
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMW_GPR_LOCK, Inst);
  } else if (TII->isSMRD(Inst)) {
    ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
  } else if (Inst.isCall()) {
      ScoreBrackets->applyWaitcnt(WCG->getAllZeroWaitcnt(false));
      ScoreBrackets->setStateOnFunctionEntryOrReturn();
    ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_LDS_ACCESS, Inst);
  } else if (TII->isVINTERP(Inst)) {
    int64_t Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::waitexp)->getImm();
    ScoreBrackets->applyWaitcnt(EXP_CNT, Imm);
    unsigned Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm();
      ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_PARAM_ACCESS, Inst);
      ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_POS_ACCESS, Inst);
      ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_GPR_LOCK, Inst);

    case AMDGPU::S_SENDMSG:
    case AMDGPU::S_SENDMSG_RTN_B32:
    case AMDGPU::S_SENDMSG_RTN_B64:
    case AMDGPU::S_SENDMSGHALT:
      ScoreBrackets->updateByEvent(TII, TRI, MRI, SQ_MESSAGE, Inst);
    case AMDGPU::S_MEMTIME:
    case AMDGPU::S_MEMREALTIME:
    case AMDGPU::S_BARRIER_SIGNAL_ISFIRST_M0:
    case AMDGPU::S_BARRIER_SIGNAL_ISFIRST_IMM:
    case AMDGPU::S_BARRIER_LEAVE:
    case AMDGPU::S_GET_BARRIER_STATE_M0:
    case AMDGPU::S_GET_BARRIER_STATE_IMM:
      ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);

bool WaitcntBrackets::mergeScore(const MergeInfo &M, unsigned &Score,
                                 unsigned OtherScore) {
  unsigned MyShifted = Score <= M.OldLB ? 0 : Score + M.MyShift;
  unsigned OtherShifted =
      OtherScore <= M.OtherLB ? 0 : OtherScore + M.OtherShift;
  Score = std::max(MyShifted, OtherShifted);
  return OtherShifted > MyShifted;
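// NOTE (added commentary): scores at or below their old lower bound are
// already satisfied and collapse to 0; live scores are rebased by their
// per-side shift onto the merged scale, and the merged score is the maximum
// of the two. The return value reports whether the incoming (Other) state
// strictly raised this score, which feeds the Dirty/re-visit logic in merge().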
bool WaitcntBrackets::merge(const WaitcntBrackets &Other) {
  bool StrictDom = false;

  VgprUB = std::max(VgprUB, Other.VgprUB);
  SgprUB = std::max(SgprUB, Other.SgprUB);

  for (auto T : inst_counter_types(MaxCounter)) {
    const unsigned OldEvents = PendingEvents & WaitEventMaskForInst[T];
    const unsigned OtherEvents = Other.PendingEvents & WaitEventMaskForInst[T];
    if (OtherEvents & ~OldEvents)
    PendingEvents |= OtherEvents;

    const unsigned MyPending = ScoreUBs[T] - ScoreLBs[T];
    const unsigned OtherPending = Other.ScoreUBs[T] - Other.ScoreLBs[T];
    const unsigned NewUB = ScoreLBs[T] + std::max(MyPending, OtherPending);
    if (NewUB < ScoreLBs[T])

    M.OldLB = ScoreLBs[T];
    M.OtherLB = Other.ScoreLBs[T];
    M.MyShift = NewUB - ScoreUBs[T];
    M.OtherShift = NewUB - Other.ScoreUBs[T];

    ScoreUBs[T] = NewUB;

    StrictDom |= mergeScore(M, LastFlat[T], Other.LastFlat[T]);

    for (int J = 0; J <= VgprUB; J++)
      StrictDom |= mergeScore(M, VgprScores[T][J], Other.VgprScores[T][J]);

    if (T == SmemAccessCounter) {
      for (int J = 0; J <= SgprUB; J++)
        StrictDom |= mergeScore(M, SgprScores[J], Other.SgprScores[J]);

  for (int J = 0; J <= VgprUB; J++) {
    unsigned char NewVmemTypes = VgprVmemTypes[J] | Other.VgprVmemTypes[J];
    StrictDom |= NewVmemTypes != VgprVmemTypes[J];
    VgprVmemTypes[J] = NewVmemTypes;

  return Opcode == AMDGPU::S_WAITCNT ||
         Opcode == AMDGPU::S_WAIT_LOADCNT_DSCNT ||
         Opcode == AMDGPU::S_WAIT_STORECNT_DSCNT ||

                                            WaitcntBrackets &ScoreBrackets) {
    dbgs() << "*** Block" << Block.getNumber() << " ***";
    ScoreBrackets.dump();

  bool VCCZCorrect = true;
  if (ST->hasReadVCCZBug()) {
    VCCZCorrect = false;
  } else if (!ST->partialVCCWritesUpdateVCCZ()) {
    VCCZCorrect = false;

       E = Block.instr_end();

      if (!OldWaitcntInstr)
        OldWaitcntInstr = &Inst;

    bool FlushVmCnt = Block.getFirstTerminator() == Inst &&
                      isPreheaderToFlush(Block, ScoreBrackets);

    Modified |= generateWaitcntInstBefore(Inst, ScoreBrackets, OldWaitcntInstr,
    OldWaitcntInstr = nullptr;

    bool RestoreVCCZ = !VCCZCorrect && readsVCCZ(Inst);

      if (ST->hasReadVCCZBug() || !ST->partialVCCWritesUpdateVCCZ()) {
        if (!ST->partialVCCWritesUpdateVCCZ())
          VCCZCorrect = false;
        if (ST->hasReadVCCZBug() &&
            ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
          VCCZCorrect = false;

    if (TII->isSMRD(Inst)) {
        if (!Memop->isInvariant()) {
          const Value *Ptr = Memop->getValue();
      if (ST->hasReadVCCZBug()) {
        VCCZCorrect = false;

    updateEventWaitcntAfter(Inst, &ScoreBrackets);

      ScoreBrackets.simplifyWaitcnt(Wait);
                                  ScoreBrackets, nullptr);

      ScoreBrackets.dump();

              TII->get(ST->isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64),

  if (Block.getFirstTerminator() == Block.end() &&
      isPreheaderToFlush(Block, ScoreBrackets)) {
    if (ScoreBrackets.hasPendingEvent(LOAD_CNT))
    if (ScoreBrackets.hasPendingEvent(SAMPLE_CNT))
    if (ScoreBrackets.hasPendingEvent(BVH_CNT))

                                         WaitcntBrackets &ScoreBrackets) {
  auto [Iterator, IsInserted] = PreheadersToFlush.try_emplace(&MBB, false);
    return Iterator->second;

      shouldFlushVmCnt(Loop, ScoreBrackets)) {
    Iterator->second = true;

bool SIInsertWaitcnts::isVMEMOrFlatVMEM(const MachineInstr &MI) const {

                                        WaitcntBrackets &Brackets) {
  bool HasVMemLoad = false;
  bool HasVMemStore = false;
  bool UsesVgprLoadedOutside = false;

      if (isVMEMOrFlatVMEM(MI)) {
          HasVMemStore = true;
      for (unsigned I = 0; I < MI.getNumOperands(); I++) {
        if (!Op.isReg() || !TRI->isVectorRegister(*MRI, Op.getReg()))

          if (Brackets.getRegScore(RegNo, LOAD_CNT) >
                  Brackets.getScoreLB(LOAD_CNT) ||
              Brackets.getRegScore(RegNo, SAMPLE_CNT) >
                  Brackets.getScoreLB(SAMPLE_CNT) ||
              Brackets.getRegScore(RegNo, BVH_CNT) >
                  Brackets.getScoreLB(BVH_CNT)) {
            UsesVgprLoadedOutside = true;

          else if (isVMEMOrFlatVMEM(MI) && MI.mayLoad() && Op.isDef())

  if (!ST->hasVscnt() && HasVMemStore && !HasVMemLoad && UsesVgprLoadedOutside)
  return HasVMemLoad && UsesVgprLoadedOutside;
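// NOTE (added commentary): the heuristic asks for a vmcnt flush in a loop
// preheader when the loop issues VMEM loads while also reading VGPRs whose
// defining loads happened outside the loop, keeping the waits inside the loop
// body cheap; the preceding check covers targets without a separate vscnt,
// where VMEM stores count against vmcnt as well.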
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MLI = &getAnalysis<MachineLoopInfoWrapperPass>().getLI();
  PDT = &getAnalysis<MachinePostDominatorTreeWrapperPass>().getPostDomTree();
  if (auto AAR = getAnalysisIfAvailable<AAResultsWrapperPass>())
    AA = &AAR->getAAResults();

  if (ST->hasExtendedWaitCounts()) {
    MaxCounter = NUM_EXTENDED_INST_CNTS;
    WCGGFX12Plus = WaitcntGeneratorGFX12Plus(MF, MaxCounter);
    WCG = &WCGGFX12Plus;
    MaxCounter = NUM_NORMAL_INST_CNTS;
    WCGPreGFX12 = WaitcntGeneratorPreGFX12(MF);

  for (auto T : inst_counter_types())
    ForceEmitWaitcnt[T] = false;

  const unsigned *WaitEventMaskForInst = WCG->getWaitEventMask();

  SmemAccessCounter = eventCounter(WaitEventMaskForInst, SMEM_ACCESS);

  HardwareLimits Limits = {};
  if (ST->hasExtendedWaitCounts()) {

  unsigned NumVGPRsMax = ST->getAddressableNumVGPRs();
  unsigned NumSGPRsMax = ST->getAddressableNumSGPRs();
  assert(NumVGPRsMax <= SQ_MAX_PGM_VGPRS);
  assert(NumSGPRsMax <= SQ_MAX_PGM_SGPRS);

  RegisterEncoding Encoding = {};
  Encoding.VGPRL = Encoding.VGPR0 + NumVGPRsMax - 1;
  Encoding.SGPRL = Encoding.SGPR0 + NumSGPRsMax - 1;

       I != E && (I->isPHI() || I->isMetaInstruction()); ++I)

  if (ST->hasExtendedWaitCounts()) {
    for (auto CT : inst_counter_types(NUM_EXTENDED_INST_CNTS)) {
      if (CT == LOAD_CNT || CT == DS_CNT || CT == STORE_CNT)
              TII->get(instrsForExtendedCounterTypes[CT]))

    auto NonKernelInitialState = std::make_unique<WaitcntBrackets>(
        ST, MaxCounter, Limits, Encoding, WaitEventMaskForInst,
    NonKernelInitialState->setStateOnFunctionEntryOrReturn();
    BlockInfos[&EntryBB].Incoming = std::move(NonKernelInitialState);

  std::unique_ptr<WaitcntBrackets> Brackets;

    for (auto BII = BlockInfos.begin(), BIE = BlockInfos.end(); BII != BIE;
      BlockInfo &BI = BII->second;

          Brackets = std::make_unique<WaitcntBrackets>(*BI.Incoming);
          *Brackets = *BI.Incoming;
          Brackets = std::make_unique<WaitcntBrackets>(
              ST, MaxCounter, Limits, Encoding, WaitEventMaskForInst,
          *Brackets = WaitcntBrackets(ST, MaxCounter, Limits, Encoding,
                                      WaitEventMaskForInst, SmemAccessCounter);

      Modified |= insertWaitcntInBlock(MF, *MBB, *Brackets);

      if (Brackets->hasPendingEvent()) {
        BlockInfo *MoveBracketsToSucc = nullptr;
          auto SuccBII = BlockInfos.find(Succ);
          BlockInfo &SuccBI = SuccBII->second;
          if (!SuccBI.Incoming) {
            SuccBI.Dirty = true;
            if (!MoveBracketsToSucc) {
              MoveBracketsToSucc = &SuccBI;
              SuccBI.Incoming = std::make_unique<WaitcntBrackets>(*Brackets);
          } else if (SuccBI.Incoming->merge(*Brackets)) {
            SuccBI.Dirty = true;
        if (MoveBracketsToSucc)
          MoveBracketsToSucc->Incoming = std::move(Brackets);

  if (ST->hasScalarStores()) {
    bool HaveScalarStores = false;
        if (!HaveScalarStores && TII->isScalarStore(MI))
          HaveScalarStores = true;
        if (MI.getOpcode() == AMDGPU::S_ENDPGM ||
            MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG)

    if (HaveScalarStores) {
        bool SeenDCacheWB = false;
          if (I->getOpcode() == AMDGPU::S_DCACHE_WB)
            SeenDCacheWB = true;
          else if (TII->isScalarStore(*I))
            SeenDCacheWB = false;
          if ((I->getOpcode() == AMDGPU::S_ENDPGM ||
               I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) &&

  if (ST->requiresNopBeforeDeallocVGPRs()) {
              TII->get(AMDGPU::S_SENDMSG))
    ReleaseVGPRInsts.clear();

  SLoadAddresses.clear();