#define DEBUG_TYPE "si-insert-waitcnts"

DEBUG_COUNTER(ForceExpCounter, DEBUG_TYPE "-forceexp",
              "Force emit s_waitcnt expcnt(0) instrs");
DEBUG_COUNTER(ForceLgkmCounter, DEBUG_TYPE "-forcelgkm",
              "Force emit s_waitcnt lgkmcnt(0) instrs");
DEBUG_COUNTER(ForceVMCounter, DEBUG_TYPE "-forcevm",
              "Force emit s_waitcnt vmcnt(0) instrs");

static cl::opt<bool> ForceEmitZeroFlag(
    "amdgpu-waitcnt-forcezero",
    cl::desc("Force all waitcnt instrs to be emitted as "
             "s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)"),
    cl::init(false), cl::Hidden);

static cl::opt<bool> ForceEmitZeroLoadFlag(
    "amdgpu-waitcnt-load-forcezero",
    cl::desc("Force all waitcnt load counters to wait until 0"),
    cl::init(false), cl::Hidden);

static cl::opt<bool> ExpertSchedulingModeFlag(
    "amdgpu-expert-scheduling-mode",
    cl::desc("Enable expert scheduling mode 2 for all functions (GFX12+ only)"),
    cl::init(false), cl::Hidden);
  TRACKINGID_RANGE_LEN = (1 << 16),
  REGUNITS_END = REGUNITS_BEGIN + TRACKINGID_RANGE_LEN,
  NUM_LDSDMA = TRACKINGID_RANGE_LEN,
  LDSDMA_BEGIN = REGUNITS_END,
  LDSDMA_END = LDSDMA_BEGIN + NUM_LDSDMA,
static constexpr VMEMID toVMEMID(MCRegUnit RU) {
  return static_cast<unsigned>(RU);
}
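// The VMEMID space is split into contiguous ranges: [REGUNITS_BEGIN,
// REGUNITS_END) maps register units 1:1 via toVMEMID, and [LDSDMA_BEGIN,
// LDSDMA_END) tracks individual LDS-DMA stores, so a single
// DenseMap<VMEMID, ...> can score both kinds of targets.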
#define AMDGPU_DECLARE_WAIT_EVENTS(DECL)                                       \
  DECL(VMEM_SAMPLER_READ_ACCESS)                                               \
  DECL(VMEM_BVH_READ_ACCESS)                                                   \
  DECL(GLOBAL_INV_ACCESS)                                                      \
  DECL(VMEM_WRITE_ACCESS)                                                      \
  DECL(SCRATCH_WRITE_ACCESS)                                                   \
  DECL(EXP_POS_ACCESS)                                                         \
  DECL(EXP_PARAM_ACCESS)                                                       \
  DECL(EXP_LDS_ACCESS)                                                         \
  DECL(VGPR_CSMACC_WRITE)                                                      \
  DECL(VGPR_DPMACC_WRITE)                                                      \
  DECL(VGPR_TRANS_WRITE)                                                       \
  DECL(VGPR_XDL_WRITE)                                                         \
  DECL(VGPR_LDS_READ)                                                          \
  DECL(VGPR_FLAT_READ)                                                         \
  DECL(VGPR_VMEM_READ)
#define AMDGPU_EVENT_ENUM(Name) Name,
#undef AMDGPU_EVENT_ENUM
auto wait_events(WaitEventType MaxEvent = NUM_WAIT_EVENTS) {
  return enum_seq(VMEM_ACCESS, MaxEvent);
}
#define AMDGPU_EVENT_NAME(Name) #Name,
#undef AMDGPU_EVENT_NAME

static constexpr StringLiteral getWaitEventTypeName(WaitEventType Event) {
  return WaitEventTypeName[Event];
}
    AMDGPU::S_WAIT_LOADCNT,   AMDGPU::S_WAIT_DSCNT,
    AMDGPU::S_WAIT_EXPCNT,    AMDGPU::S_WAIT_STORECNT,
    AMDGPU::S_WAIT_SAMPLECNT, AMDGPU::S_WAIT_BVHCNT,
    AMDGPU::S_WAIT_KMCNT,     AMDGPU::S_WAIT_XCNT,
    AMDGPU::S_WAIT_ASYNCCNT};
  assert(updateVMCntOnly(Inst));
    return VMEM_NOSAMPLER;
  return VMEM_NOSAMPLER;
  WaitEventSet() = default;
  explicit constexpr WaitEventSet(WaitEventType Event) {
    static_assert(NUM_WAIT_EVENTS <= sizeof(Mask) * 8,
                  "Not enough bits in Mask for all the events");
  constexpr WaitEventSet(std::initializer_list<WaitEventType> Events) {
    for (auto &E : Events) {
  void insert(const WaitEventType &Event) { Mask |= 1 << Event; }
  void remove(const WaitEventType &Event) { Mask &= ~(1 << Event); }
  void remove(const WaitEventSet &Other) { Mask &= ~Other.Mask; }
  bool contains(const WaitEventType &Event) const {
    return Mask & (1 << Event);
    return (~Mask & Other.Mask) == 0;
    return Mask == Other.Mask;
  bool empty() const { return Mask == 0; }
  bool twoOrMore() const { return Mask & (Mask - 1); }
  operator bool() const { return !empty(); }
  void print(raw_ostream &OS) const {
    ListSeparator LS(", ");
    for (WaitEventType Event : wait_events()) {
      OS << LS << getWaitEventTypeName(Event);

void WaitEventSet::dump() const {
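// WaitEventSet is a value-type bitset over WaitEventType: each event owns one
// bit of Mask, so insert/remove/contains are single logical ops and the set
// algebra used elsewhere (e.g. PendingEvents & getWaitEvents(T)) is a plain
// AND. A minimal usage sketch (hypothetical values, for illustration only):
//   WaitEventSet S{LDS_ACCESS, SMEM_ACCESS};
//   S.insert(SQ_MESSAGE);
//   if (S.contains(LDS_ACCESS) && S.twoOrMore())
//     S.remove(SMEM_ACCESS);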
class WaitcntBrackets;
class WaitcntGenerator {
  const GCNSubtarget &ST;
  const SIInstrInfo &TII;
  AMDGPU::IsaVersion IV;
  bool ExpandWaitcntProfiling = false;
  const AMDGPU::HardwareLimits &Limits;

  WaitcntGenerator() = delete;
  WaitcntGenerator(const WaitcntGenerator &) = delete;
  WaitcntGenerator(const MachineFunction &MF,
                   const AMDGPU::HardwareLimits &Limits)
      : ST(MF.getSubtarget<GCNSubtarget>()), TII(*ST.getInstrInfo()),
        ExpandWaitcntProfiling(
            MF.getFunction().hasFnAttribute("amdgpu-expand-waitcnt-profiling")),

  bool isOptNone() const { return OptNone; }

  const AMDGPU::HardwareLimits &getLimits() const { return Limits; }
  applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
                          MachineInstr &OldWaitcntInstr, AMDGPU::Waitcnt &Wait,

  bool promoteSoftWaitCnt(MachineInstr *Waitcnt) const;

  virtual bool createNewWaitcnt(MachineBasicBlock &Block,
                                AMDGPU::Waitcnt Wait,
                                const WaitcntBrackets &ScoreBrackets) = 0;

  virtual const WaitEventSet &

  virtual AMDGPU::Waitcnt getAllZeroWaitcnt(bool IncludeVSCnt) const = 0;

  virtual ~WaitcntGenerator() = default;
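// WaitcntGenerator abstracts over the two wait-count encodings: callers first
// let a subclass fold the needed wait into any pre-existing wait instruction
// (applyPreexistingWaitcnt) and only then emit a fresh one (createNewWaitcnt),
// keeping the pass itself encoding-agnostic.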
class WaitcntGeneratorPreGFX12 final : public WaitcntGenerator {
  static constexpr const WaitEventSet
          {VMEM_ACCESS, VMEM_SAMPLER_READ_ACCESS, VMEM_BVH_READ_ACCESS}),
      WaitEventSet({SMEM_ACCESS, LDS_ACCESS, GDS_ACCESS, SQ_MESSAGE}),
      WaitEventSet({EXP_GPR_LOCK, GDS_GPR_LOCK, VMW_GPR_LOCK,
                    EXP_PARAM_ACCESS, EXP_POS_ACCESS, EXP_LDS_ACCESS}),
      WaitEventSet({VMEM_WRITE_ACCESS, SCRATCH_WRITE_ACCESS}),

  using WaitcntGenerator::WaitcntGenerator;

  applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
                          MachineInstr &OldWaitcntInstr, AMDGPU::Waitcnt &Wait,

  bool createNewWaitcnt(MachineBasicBlock &Block,
                        AMDGPU::Waitcnt Wait,
                        const WaitcntBrackets &ScoreBrackets) override;

    return WaitEventMaskForInstPreGFX12[T];

  AMDGPU::Waitcnt getAllZeroWaitcnt(bool IncludeVSCnt) const override;
class WaitcntGeneratorGFX12Plus final : public WaitcntGenerator {
  static constexpr const WaitEventSet
      WaitEventSet({VMEM_ACCESS, GLOBAL_INV_ACCESS}),
      WaitEventSet({LDS_ACCESS, GDS_ACCESS}),
      WaitEventSet({EXP_GPR_LOCK, GDS_GPR_LOCK, VMW_GPR_LOCK,
                    EXP_PARAM_ACCESS, EXP_POS_ACCESS, EXP_LDS_ACCESS}),
      WaitEventSet({VMEM_WRITE_ACCESS, SCRATCH_WRITE_ACCESS}),
      WaitEventSet({VMEM_SAMPLER_READ_ACCESS}),
      WaitEventSet({VMEM_BVH_READ_ACCESS}),
      WaitEventSet({SMEM_ACCESS, SQ_MESSAGE, SCC_WRITE}),
      WaitEventSet({VMEM_GROUP, SMEM_GROUP}),
      WaitEventSet({ASYNC_ACCESS}),
      WaitEventSet({VGPR_CSMACC_WRITE, VGPR_DPMACC_WRITE, VGPR_TRANS_WRITE,
      WaitEventSet({VGPR_LDS_READ, VGPR_FLAT_READ, VGPR_VMEM_READ})};

  WaitcntGeneratorGFX12Plus() = delete;
  WaitcntGeneratorGFX12Plus(const MachineFunction &MF,
                            const AMDGPU::HardwareLimits &Limits,
      : WaitcntGenerator(MF, MaxCounter, Limits), IsExpertMode(IsExpertMode) {}

  applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
                          MachineInstr &OldWaitcntInstr, AMDGPU::Waitcnt &Wait,

  bool createNewWaitcnt(MachineBasicBlock &Block,
                        AMDGPU::Waitcnt Wait,
                        const WaitcntBrackets &ScoreBrackets) override;

    return WaitEventMaskForInstGFX12Plus[T];

  AMDGPU::Waitcnt getAllZeroWaitcnt(bool IncludeVSCnt) const override;
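// Each WaitEventSet in this table appears to correspond to one GFX12+
// hardware counter (LOAD, DS, EXP, STORE, SAMPLE, BVH, KM, XCNT, ASYNC, plus
// the expert-mode VA_VDST/VM_VSRC groups), so getWaitEvents(T) is a
// constant-time per-counter table lookup.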
struct PreheaderFlushFlags {
  bool FlushVmCnt = false;
  bool FlushDsCnt = false;
};
class SIInsertWaitcnts {
  DenseMap<const Value *, MachineBasicBlock *> SLoadAddresses;
  DenseMap<MachineBasicBlock *, PreheaderFlushFlags> PreheadersToFlush;
  MachineLoopInfo &MLI;
  MachinePostDominatorTree &PDT;

  std::unique_ptr<WaitcntBrackets> Incoming;

  MapVector<MachineBasicBlock *, BlockInfo> BlockInfos;

  std::unique_ptr<WaitcntGenerator> WCG;

  DenseSet<MachineInstr *> CallInsts;
  DenseSet<MachineInstr *> ReturnInsts;
  DenseMap<MachineInstr *, bool> EndPgmInsts;

  AMDGPU::HardwareLimits Limits;

  const GCNSubtarget &ST;
  const SIInstrInfo &TII;
  const SIRegisterInfo &TRI;
  const MachineRegisterInfo &MRI;

  bool IsExpertMode = false;

  SIInsertWaitcnts(MachineLoopInfo &MLI, MachinePostDominatorTree &PDT,
      : MLI(MLI), PDT(PDT), AA(AA), MF(MF),
        ST(MF.getSubtarget<GCNSubtarget>()), TII(*ST.getInstrInfo()),
        TRI(TII.getRegisterInfo()), MRI(MF.getRegInfo()) {
    (void)ForceExpCounter;
    (void)ForceLgkmCounter;
    (void)ForceVMCounter;

  const AMDGPU::HardwareLimits &getLimits() const { return Limits; }
  PreheaderFlushFlags getPreheaderFlushFlags(MachineLoop *ML,
                                             const WaitcntBrackets &Brackets);
  PreheaderFlushFlags isPreheaderToFlush(MachineBasicBlock &MBB,
                                         const WaitcntBrackets &ScoreBrackets);
  bool isVMEMOrFlatVMEM(const MachineInstr &MI) const;
  bool isDSRead(const MachineInstr &MI) const;
  bool mayStoreIncrementingDSCNT(const MachineInstr &MI) const;

  void setForceEmitWaitcnt() {
  WaitEventType getVmemWaitEventType(const MachineInstr &Inst) const {
    case AMDGPU::GLOBAL_INV:
      return GLOBAL_INV_ACCESS;
    case AMDGPU::GLOBAL_WB:
    case AMDGPU::GLOBAL_WBINV:
      return VMEM_WRITE_ACCESS;

    static const WaitEventType VmemReadMapping[NUM_VMEM_TYPES] = {
        VMEM_ACCESS, VMEM_SAMPLER_READ_ACCESS, VMEM_BVH_READ_ACCESS};

      if (TII.mayAccessScratch(Inst))
        return SCRATCH_WRITE_ACCESS;
      return VMEM_WRITE_ACCESS;

    return VmemReadMapping[getVmemType(Inst)];
  std::optional<WaitEventType>
  getExpertSchedulingEventType(const MachineInstr &Inst) const;

  bool isAsync(const MachineInstr &MI) const {
    const MachineOperand *Async =
        TII.getNamedOperand(MI, AMDGPU::OpName::IsAsync);

  bool isNonAsyncLdsDmaWrite(const MachineInstr &MI) const {

  bool isAsyncLdsDmaWrite(const MachineInstr &MI) const {

  bool shouldUpdateAsyncMark(const MachineInstr &MI,
    if (!isAsyncLdsDmaWrite(MI))

  bool isVmemAccess(const MachineInstr &MI) const;
  bool generateWaitcntInstBefore(MachineInstr &MI,
                                 WaitcntBrackets &ScoreBrackets,
                                 MachineInstr *OldWaitcntInstr,
                                 PreheaderFlushFlags FlushFlags);
  bool generateWaitcnt(AMDGPU::Waitcnt Wait,
                       MachineBasicBlock &Block,
                       WaitcntBrackets &ScoreBrackets,
                       MachineInstr *OldWaitcntInstr);
  WaitEventSet getEventsFor(const MachineInstr &Inst) const;
  void updateEventWaitcntAfter(MachineInstr &Inst,
                               WaitcntBrackets *ScoreBrackets);
                     MachineBasicBlock *Block) const;
  bool insertForcedWaitAfter(MachineInstr &Inst, MachineBasicBlock &Block,
                             WaitcntBrackets &ScoreBrackets);
  bool insertWaitcntInBlock(MachineFunction &MF, MachineBasicBlock &Block,
                            WaitcntBrackets &ScoreBrackets);
  bool removeRedundantSoftXcnts(MachineBasicBlock &Block);
                         bool ExpertMode) const;
    return WCG->getWaitEvents(T);
    return WCG->getCounterFromEvent(E);
class WaitcntBrackets {
    unsigned NumUnusedVmem = 0, NumUnusedSGPRs = 0;
    for (auto &[ID, Val] : VMem) {
    for (auto &[ID, Val] : SGPRs) {
    if (NumUnusedVmem || NumUnusedSGPRs) {
      errs() << "WaitcntBracket had unused entries at destruction time: "
             << NumUnusedVmem << " VMem and " << NumUnusedSGPRs
             << " SGPR unused entries\n";
    return ScoreUBs[T] - ScoreLBs[T];

    return getVMemScore(ID, T) > getScoreLB(T);

    return getScoreUB(T) - getScoreLB(T);

    auto It = SGPRs.find(RU);
    return It != SGPRs.end() ? It->second.get(T) : 0;

    auto It = VMem.find(TID);
    return It != VMem.end() ? It->second.Scores[T] : 0;

  void simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const {
  void simplifyWaitcnt(const AMDGPU::Waitcnt &CheckWait,
                       AMDGPU::Waitcnt &UpdateWait) const;
  void simplifyXcnt(const AMDGPU::Waitcnt &CheckWait,
                    AMDGPU::Waitcnt &UpdateWait) const;
  void simplifyVmVsrc(const AMDGPU::Waitcnt &CheckWait,
                      AMDGPU::Waitcnt &UpdateWait) const;
                              AMDGPU::Waitcnt &Wait) const;
                              AMDGPU::Waitcnt &Wait) const;
  AMDGPU::Waitcnt determineAsyncWait(unsigned N);
  void tryClearSCCWriteEvent(MachineInstr *Inst);
  void applyWaitcnt(const AMDGPU::Waitcnt &Wait);
  void updateByEvent(WaitEventType E, MachineInstr &MI);
  void recordAsyncMark(MachineInstr &MI);
  bool hasPendingEvent() const { return !PendingEvents.empty(); }
  bool hasPendingEvent(WaitEventType E) const {
    return PendingEvents.contains(E);

    bool HasPending = PendingEvents & Context->getWaitEvents(T);
           "Expected pending events iff scoreboard is not empty");

    WaitEventSet Events = PendingEvents & Context->getWaitEvents(T);
    return Events.twoOrMore();

  bool hasPendingFlat() const {

  void setPendingFlat() {

  bool hasPendingGDS() const {

  unsigned getPendingGDSWait() const {

  bool hasOtherPendingVmemTypes(MCPhysReg Reg, VmemType V) const {
    for (MCRegUnit RU : regunits(Reg)) {
      auto It = VMem.find(toVMEMID(RU));
      if (It != VMem.end() && (It->second.VMEMTypes & ~(1 << V)))

    for (MCRegUnit RU : regunits(Reg)) {
      if (auto It = VMem.find(toVMEMID(RU)); It != VMem.end()) {
        It->second.VMEMTypes = 0;
        if (It->second.empty())

  void setStateOnFunctionEntryOrReturn() {

  ArrayRef<const MachineInstr *> getLDSDMAStores() const {

  bool hasPointSampleAccel(const MachineInstr &MI) const;
  bool hasPointSamplePendingVmemTypes(const MachineInstr &MI,

  void print(raw_ostream &) const;

  void purgeEmptyTrackingData();
  using CounterValueArray = std::array<unsigned, AMDGPU::NUM_INST_CNTS>;
                             AMDGPU::Waitcnt &Wait) const;
  static bool mergeScore(const MergeInfo &M, unsigned &Score,
                         unsigned OtherScore);

    assert(Reg != AMDGPU::SCC && "Shouldn't be used on SCC");
    const TargetRegisterClass *RC = Context->TRI.getPhysRegBaseClass(Reg);
    unsigned Size = Context->TRI.getRegSizeInBits(*RC);
    if (Size == 16 && Context->ST.hasD16Writes32BitVgpr())

    if (Reg == AMDGPU::SCC) {
      for (MCRegUnit RU : regunits(Reg))
        VMem[toVMEMID(RU)].Scores[T] = Val;
      for (MCRegUnit RU : regunits(Reg))
        SGPRs[RU].get(T) = Val;

    VMem[TID].Scores[T] = Val;

  void setScoreByOperand(const MachineOperand &Op,

  const SIInsertWaitcnts *Context;

  WaitEventSet PendingEvents;
  unsigned LastFlatDsCnt = 0;
  unsigned LastFlatLoadCnt = 0;
  unsigned LastGDS = 0;

    CounterValueArray Scores{};
    unsigned VMEMTypes = 0;

    unsigned ScoreDsKmCnt = 0;
    unsigned ScoreXCnt = 0;

    bool empty() const { return !ScoreDsKmCnt && !ScoreXCnt; }

  DenseMap<VMEMID, VMEMInfo> VMem;
  DenseMap<MCRegUnit, SGPRInfo> SGPRs;

  unsigned SCCScore = 0;
  const MachineInstr *PendingSCCWrite = nullptr;

  SmallVector<const MachineInstr *> LDSDMAStores;

  static constexpr unsigned MaxAsyncMarks = 16;
  CounterValueArray AsyncScore{};
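// The bracket state is deliberately sparse: per-register-unit scores live in
// the VMem/SGPRs DenseMaps, purgeEmptyTrackingData() drops entries once they
// carry no pending information, and the destructor check near the top of the
// class flags any entries that were left behind.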
class SIInsertWaitcntsLegacy : public MachineFunctionPass {
  SIInsertWaitcntsLegacy() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI insert wait instructions";

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachinePostDominatorTreeWrapperPass>();
void WaitcntBrackets::setScoreByOperand(const MachineOperand &Op,
  setRegScore(Op.getReg().asMCReg(), CntTy, Score);

bool WaitcntBrackets::hasPointSampleAccel(const MachineInstr &MI) const {
  const AMDGPU::MIMGBaseOpcodeInfo *BaseInfo =

bool WaitcntBrackets::hasPointSamplePendingVmemTypes(const MachineInstr &MI,
  if (!hasPointSampleAccel(MI))
  return hasOtherPendingVmemTypes(Reg, VMEM_NOSAMPLER);

void WaitcntBrackets::updateByEvent(WaitEventType E, MachineInstr &Inst) {
  unsigned UB = getScoreUB(T);
  unsigned CurrScore = UB + 1;
  PendingEvents.insert(E);
  setScoreUB(T, CurrScore);
  const MachineRegisterInfo &MRI = Context->MRI;

    if (const auto *AddrOp = TII.getNamedOperand(Inst, AMDGPU::OpName::addr))
    if (const auto *Data0 =
            TII.getNamedOperand(Inst, AMDGPU::OpName::data0))
    if (const auto *Data1 =
            TII.getNamedOperand(Inst, AMDGPU::OpName::data1))
        Inst.getOpcode() != AMDGPU::DS_APPEND &&
        Inst.getOpcode() != AMDGPU::DS_CONSUME &&
        Inst.getOpcode() != AMDGPU::DS_ORDERED_COUNT) {
      for (const MachineOperand &Op : Inst.all_uses()) {
        if (TRI.isVectorRegister(MRI, Op.getReg()))
  } else if (TII.isFLAT(Inst)) {
      setScoreByOperand(*TII.getNamedOperand(Inst, AMDGPU::OpName::data),
      setScoreByOperand(*TII.getNamedOperand(Inst, AMDGPU::OpName::data),
  } else if (TII.isMIMG(Inst)) {
      setScoreByOperand(*TII.getNamedOperand(Inst, AMDGPU::OpName::data),
  } else if (TII.isMTBUF(Inst)) {
  } else if (TII.isMUBUF(Inst)) {
      setScoreByOperand(*TII.getNamedOperand(Inst, AMDGPU::OpName::data),
  } else if (TII.isLDSDIR(Inst)) {
    setScoreByOperand(*TII.getNamedOperand(Inst, AMDGPU::OpName::vdst),
    if (TII.isEXP(Inst)) {
      for (MachineOperand &DefMO : Inst.all_defs()) {
        if (TRI.isVGPR(MRI, DefMO.getReg())) {
      for (const MachineOperand &Op : Inst.all_uses()) {
        if (TRI.isVectorRegister(MRI, Op.getReg()))
    WaitEventType OtherEvent = E == SMEM_GROUP ? VMEM_GROUP : SMEM_GROUP;
    if (PendingEvents.contains(OtherEvent)) {
      setScoreLB(T, getScoreUB(T) - 1);
      PendingEvents.remove(OtherEvent);
    for (const MachineOperand &Op : Inst.all_uses())
      setScoreByOperand(Op, T, CurrScore);
    for (const MachineOperand &Op : Inst.operands()) {
      setScoreByOperand(Op, T, CurrScore);
    for (const MachineOperand &Op : Inst.defs()) {
      if (!TRI.isVectorRegister(MRI, Op.getReg()))
      if (updateVMCntOnly(Inst)) {
        VmemType V = getVmemType(Inst);
        unsigned char TypesMask = 1 << V;
        if (hasPointSampleAccel(Inst))
          TypesMask |= 1 << VMEM_NOSAMPLER;
        for (MCRegUnit RU : regunits(Op.getReg().asMCReg()))
          VMem[toVMEMID(RU)].VMEMTypes |= TypesMask;
      setScoreByOperand(Op, T, CurrScore);
      (TII.isDS(Inst) || Context->isNonAsyncLdsDmaWrite(Inst))) {
      if (!MemOp->isStore() ||
      auto AAI = MemOp->getAAInfo();
      if (!AAI || !AAI.Scope)
      for (unsigned I = 0, E = LDSDMAStores.size(); I != E && !Slot; ++I) {
        for (const auto *MemOp : LDSDMAStores[I]->memoperands()) {
          if (MemOp->isStore() && AAI == MemOp->getAAInfo()) {
    setVMemScore(LDSDMA_BEGIN, T, CurrScore);
    if (Slot && Slot < NUM_LDSDMA)
      setVMemScore(LDSDMA_BEGIN + Slot, T, CurrScore);
  if (Context->shouldUpdateAsyncMark(Inst, T)) {
    AsyncScore[T] = CurrScore;
    setRegScore(AMDGPU::SCC, T, CurrScore);
    PendingSCCWrite = &Inst;
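// Scoring model: each event bumps the counter's upper bound and stamps the
// new score onto every register unit the instruction touches; a later reader
// of those units must then let the counter drain back down to that score,
// which is exactly the N = UB - score computation in determineWaitForScore.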
void WaitcntBrackets::recordAsyncMark(MachineInstr &Inst) {
  AsyncMarks.push_back(AsyncScore);
    dbgs() << "recordAsyncMark:\n" << Inst;
    for (const auto &Mark : AsyncMarks) {
void WaitcntBrackets::print(raw_ostream &OS) const {
    unsigned SR = getScoreRange(T);
      OS << " " << (ST.hasExtendedWaitCounts() ? "LOAD" : "VM") << "_CNT("
      OS << " " << (ST.hasExtendedWaitCounts() ? "DS" : "LGKM") << "_CNT("
      OS << " EXP_CNT(" << SR << "):";
      OS << " " << (ST.hasExtendedWaitCounts() ? "STORE" : "VS") << "_CNT("
      OS << " SAMPLE_CNT(" << SR << "):";
      OS << " BVH_CNT(" << SR << "):";
      OS << " KM_CNT(" << SR << "):";
      OS << " X_CNT(" << SR << "):";
      OS << " ASYNC_CNT(" << SR << "):";
      OS << " VA_VDST(" << SR << "): ";
      OS << " VM_VSRC(" << SR << "): ";
      OS << " UNKNOWN(" << SR << "):";

    unsigned LB = getScoreLB(T);

    sort(SortedVMEMIDs);
    for (auto ID : SortedVMEMIDs) {
      unsigned RegScore = VMem.at(ID).Scores[T];
      unsigned RelScore = RegScore - LB - 1;
      if (ID < REGUNITS_END) {
        OS << ' ' << RelScore << ":vRU" << ID;
        assert(ID >= LDSDMA_BEGIN && ID < LDSDMA_END &&
               "Unhandled/unexpected ID value!");
        OS << ' ' << RelScore << ":LDSDMA" << ID;

    if (isSmemCounter(T)) {
      sort(SortedSMEMIDs);
      for (auto ID : SortedSMEMIDs) {
        unsigned RegScore = SGPRs.at(ID).get(T);
        unsigned RelScore = RegScore - LB - 1;
        OS << ' ' << RelScore << ":sRU" << static_cast<unsigned>(ID);

      OS << ' ' << SCCScore << ":scc";

  OS << "Pending Events: ";
  if (hasPendingEvent()) {
    for (unsigned I = 0; I != NUM_WAIT_EVENTS; ++I) {
      if (hasPendingEvent((WaitEventType)I)) {
        OS << LS << WaitEventTypeName[I];

  OS << "Async score: ";
  if (AsyncScore.empty())

  OS << "Async marks: " << AsyncMarks.size() << '\n';
  for (const auto &Mark : AsyncMarks) {
      unsigned MarkedScore = Mark[T];
        OS << " " << (ST.hasExtendedWaitCounts() ? "LOAD" : "VM")
           << "_CNT: " << MarkedScore;
        OS << " " << (ST.hasExtendedWaitCounts() ? "DS" : "LGKM")
           << "_CNT: " << MarkedScore;
        OS << " EXP_CNT: " << MarkedScore;
        OS << " " << (ST.hasExtendedWaitCounts() ? "STORE" : "VS")
           << "_CNT: " << MarkedScore;
        OS << " SAMPLE_CNT: " << MarkedScore;
        OS << " BVH_CNT: " << MarkedScore;
        OS << " KM_CNT: " << MarkedScore;
        OS << " X_CNT: " << MarkedScore;
        OS << " ASYNC_CNT: " << MarkedScore;
        OS << " UNKNOWN: " << MarkedScore;
void WaitcntBrackets::simplifyWaitcnt(const AMDGPU::Waitcnt &CheckWait,
                                      AMDGPU::Waitcnt &UpdateWait) const {
  simplifyXcnt(CheckWait, UpdateWait);
  simplifyVmVsrc(CheckWait, UpdateWait);

                                      unsigned &Count) const {
  if (Count >= getScoreRange(T))

void WaitcntBrackets::simplifyWaitcnt(AMDGPU::Waitcnt &Wait,
    unsigned Cnt = Wait.get(T);
    simplifyWaitcnt(T, Cnt);
void WaitcntBrackets::simplifyXcnt(const AMDGPU::Waitcnt &CheckWait,
                                   AMDGPU::Waitcnt &UpdateWait) const {

void WaitcntBrackets::simplifyVmVsrc(const AMDGPU::Waitcnt &CheckWait,
                                     AMDGPU::Waitcnt &UpdateWait) const {
      std::min({CheckWait.get(AMDGPU::LOAD_CNT),
                CheckWait.get(AMDGPU::STORE_CNT),
                CheckWait.get(AMDGPU::SAMPLE_CNT),
                CheckWait.get(AMDGPU::BVH_CNT), CheckWait.get(AMDGPU::DS_CNT)}))

void WaitcntBrackets::purgeEmptyTrackingData() {
                                           unsigned ScoreToWait,
                                           AMDGPU::Waitcnt &Wait) const {
  const unsigned LB = getScoreLB(T);
  const unsigned UB = getScoreUB(T);
  if ((UB >= ScoreToWait) && (ScoreToWait > LB)) {
        !Context->ST.hasFlatLgkmVMemCountInOrder()) {
      addWait(Wait, T, 0);
    } else if (counterOutOfOrder(T)) {
      addWait(Wait, T, 0);
      unsigned NeededWait = std::min(
          UB - ScoreToWait, getWaitCountMax(Context->getLimits(), T) - 1);
      addWait(Wait, T, NeededWait);
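// Worked example (made-up numbers): with LB=3, UB=7 and a use whose score is
// 5, two newer operations may still be outstanding, so NeededWait = 7 - 5 = 2;
// the std::min clamps that against the largest encodable count, and
// out-of-order counters degrade to a full wait of 0 above.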
AMDGPU::Waitcnt WaitcntBrackets::determineAsyncWait(unsigned N) {
    dbgs() << "Need " << N << " async marks. Found " << AsyncMarks.size()
    for (const auto &Mark : AsyncMarks) {
  if (AsyncMarks.size() == MaxAsyncMarks) {
    LLVM_DEBUG(dbgs() << "Possible truncation. Ensuring a non-trivial wait.\n");
    N = std::min(N, (unsigned)MaxAsyncMarks - 1);

  AMDGPU::Waitcnt Wait;
  if (AsyncMarks.size() <= N) {

  size_t MarkIndex = AsyncMarks.size() - N - 1;
  const auto &RequiredMark = AsyncMarks[MarkIndex];
    determineWaitForScore(T, RequiredMark[T], Wait);

    dbgs() << "Removing " << (MarkIndex + 1)
           << " async marks after determining wait\n";
  AsyncMarks.erase(AsyncMarks.begin(), AsyncMarks.begin() + MarkIndex + 1);
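// AsyncMarks behaves as a bounded FIFO of counter snapshots: a
// WAIT_ASYNCMARK(N) asks that at most N marks remain outstanding, so the wait
// is derived from the (size - N - 1)-th snapshot and every mark up to it is
// retired. Once the queue has been truncated at MaxAsyncMarks, N is clamped
// so the resulting wait stays conservative rather than silently too weak.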
                                             AMDGPU::Waitcnt &Wait) const {
  if (Reg == AMDGPU::SCC) {
    determineWaitForScore(T, SCCScore, Wait);
  for (MCRegUnit RU : regunits(Reg))
    determineWaitForScore(
        T, IsVGPR ? getVMemScore(toVMEMID(RU), T) : getSGPRScore(RU, T),

                                     AMDGPU::Waitcnt &Wait) const {
  assert(TID >= LDSDMA_BEGIN && TID < LDSDMA_END);
  determineWaitForScore(T, getVMemScore(TID, T), Wait);
void WaitcntBrackets::tryClearSCCWriteEvent(MachineInstr *Inst) {
  if (PendingSCCWrite &&
      PendingSCCWrite->getOpcode() == AMDGPU::S_BARRIER_SIGNAL_ISFIRST_IMM &&
    WaitEventSet SCC_WRITE_PendingEvent(SCC_WRITE);
        SCC_WRITE_PendingEvent) {
      PendingEvents.remove(SCC_WRITE_PendingEvent);
      PendingSCCWrite = nullptr;
void WaitcntBrackets::applyWaitcnt(const AMDGPU::Waitcnt &Wait) {
    applyWaitcnt(Wait, T);

  const unsigned UB = getScoreUB(T);
    if (counterOutOfOrder(T))
    setScoreLB(T, std::max(getScoreLB(T), UB - Count));
    PendingEvents.remove(Context->getWaitEvents(T));
      PendingEvents.remove(SMEM_GROUP);
  } else if (Count == 0)
    PendingEvents.remove(VMEM_GROUP);

void WaitcntBrackets::applyWaitcnt(const AMDGPU::Waitcnt &Wait,
    unsigned Cnt = Wait.get(T);
    applyWaitcnt(T, Cnt);

  if ((T == Context->SmemAccessCounter && hasPendingEvent(SMEM_ACCESS)) ||

  WaitEventSet Events = PendingEvents & Context->getWaitEvents(T);
    Events.remove(GLOBAL_INV_ACCESS);
  return Events.twoOrMore();

  return hasMixedPendingEvents(T);
char SIInsertWaitcntsLegacy::ID = 0;

  return new SIInsertWaitcntsLegacy();

  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpName);
  if (NewEnc == MO.getImm())
static std::optional<AMDGPU::InstCounterType>
  case AMDGPU::S_WAIT_LOADCNT:
  case AMDGPU::S_WAIT_EXPCNT:
  case AMDGPU::S_WAIT_STORECNT:
  case AMDGPU::S_WAIT_SAMPLECNT:
  case AMDGPU::S_WAIT_BVHCNT:
  case AMDGPU::S_WAIT_DSCNT:
  case AMDGPU::S_WAIT_KMCNT:
  case AMDGPU::S_WAIT_XCNT:
  case AMDGPU::S_WAIT_ASYNCCNT:

bool WaitcntGenerator::promoteSoftWaitCnt(MachineInstr *Waitcnt) const {
bool WaitcntGeneratorPreGFX12::applyPreexistingWaitcnt(
    WaitcntBrackets &ScoreBrackets, MachineInstr &OldWaitcntInstr,
  assert(isNormalMode(MaxCounter));

  MachineInstr *WaitcntInstr = nullptr;
  MachineInstr *WaitcntVsCntInstr = nullptr;

    dbgs() << "PreGFX12::applyPreexistingWaitcnt at: ";
      dbgs() << "end of block\n";

    if (II.isMetaInstruction()) {

    bool TrySimplify = Opcode != II.getOpcode() && !OptNone;

    if (Opcode == AMDGPU::S_WAITCNT) {
      unsigned IEnc = II.getOperand(0).getImm();
        ScoreBrackets.simplifyWaitcnt(OldWait);
      if (WaitcntInstr || (!Wait.hasWaitExceptStoreCnt() && TrySimplify)) {
        II.eraseFromParent();
    } else if (Opcode == AMDGPU::S_WAITCNT_lds_direct) {
                 << "Before: " << Wait << '\n';);
      II.eraseFromParent();
    } else if (Opcode == AMDGPU::WAIT_ASYNCMARK) {
      unsigned N = II.getOperand(0).getImm();
      AMDGPU::Waitcnt OldWait = ScoreBrackets.determineAsyncWait(N);
      assert(Opcode == AMDGPU::S_WAITCNT_VSCNT);
      assert(II.getOperand(0).getReg() == AMDGPU::SGPR_NULL);
          TII.getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
      if (WaitcntVsCntInstr || (!Wait.hasWaitStoreCnt() && TrySimplify)) {
        II.eraseFromParent();
        WaitcntVsCntInstr = &II;

    Modified |= promoteSoftWaitCnt(WaitcntInstr);

    LLVM_DEBUG(It.isEnd() ? dbgs() << "applied pre-existing waitcnt\n"
                                   << "New Instr at block end: "
                                   << *WaitcntInstr << '\n'
                          : dbgs() << "applied pre-existing waitcnt\n"
                                   << "Old Instr: " << *It
                                   << "New Instr: " << *WaitcntInstr << '\n');

  if (WaitcntVsCntInstr) {
    Modified |= promoteSoftWaitCnt(WaitcntVsCntInstr);
                   ? dbgs() << "applied pre-existing waitcnt\n"
                            << "New Instr at block end: " << *WaitcntVsCntInstr
                   : dbgs() << "applied pre-existing waitcnt\n"
                            << "Old Instr: " << *It
                            << "New Instr: " << *WaitcntVsCntInstr << '\n');
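// "Soft" waitcnt opcodes are conservative placeholders left by earlier
// passes (the memory legalizer, in upstream LLVM); promoteSoftWaitCnt turns
// the survivors into real hardware waits, while fully-satisfied ones are
// erased above.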
bool WaitcntGeneratorPreGFX12::createNewWaitcnt(
    AMDGPU::Waitcnt Wait, const WaitcntBrackets &ScoreBrackets) {
  assert(isNormalMode(MaxCounter));

  auto EmitExpandedWaitcnt = [&](unsigned Outstanding, unsigned Target,
      EmitWaitcnt(--Outstanding);
    } while (Outstanding > Target);

  if (Wait.hasWaitExceptStoreCnt()) {
    if (ExpandWaitcntProfiling) {
      bool AnyOutOfOrder = false;
        unsigned WaitCnt = Wait.get(CT);
        if (WaitCnt != ~0u && ScoreBrackets.counterOutOfOrder(CT)) {
          AnyOutOfOrder = true;
      if (AnyOutOfOrder) {
          unsigned WaitCnt = Wait.get(CT);
          unsigned Outstanding = std::min(ScoreBrackets.getOutstanding(CT),
                                          getWaitCountMax(getLimits(), CT) - 1);
          EmitExpandedWaitcnt(Outstanding, WaitCnt, [&](unsigned Count) {

      [[maybe_unused]] auto SWaitInst =
        if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
        dbgs() << "New Instr: " << *SWaitInst << '\n');

  if (Wait.hasWaitStoreCnt()) {
      unsigned Outstanding =
      EmitExpandedWaitcnt(
            BuildMI(Block, It, DL, TII.get(AMDGPU::S_WAITCNT_VSCNT))
                .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
      [[maybe_unused]] auto SWaitInst =
              .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
        if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
        dbgs() << "New Instr: " << *SWaitInst << '\n');
WaitcntGeneratorPreGFX12::getAllZeroWaitcnt(bool IncludeVSCnt) const {
  return AMDGPU::Waitcnt(0, 0, 0, IncludeVSCnt && ST.hasVscnt() ? 0 : ~0u);
}

WaitcntGeneratorGFX12Plus::getAllZeroWaitcnt(bool IncludeVSCnt) const {
  unsigned ExpertVal = IsExpertMode ? 0 : ~0u;
  return AMDGPU::Waitcnt(0, 0, 0, IncludeVSCnt ? 0 : ~0u, 0, 0, 0,
                         ~0u, ~0u, ExpertVal,
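// Throughout this file ~0u is the "no wait" sentinel for a counter, so an
// "all zero" waitcnt can still leave counters untouched: VSCNT only waits
// when requested (and supported, pre-GFX12), and the expert-mode slots only
// when IsExpertMode is set.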
bool WaitcntGeneratorGFX12Plus::applyPreexistingWaitcnt(
    WaitcntBrackets &ScoreBrackets, MachineInstr &OldWaitcntInstr,
  assert(!isNormalMode(MaxCounter));

  MachineInstr *CombinedLoadDsCntInstr = nullptr;
  MachineInstr *CombinedStoreDsCntInstr = nullptr;
  MachineInstr *WaitcntDepctrInstr = nullptr;

    dbgs() << "GFX12Plus::applyPreexistingWaitcnt at: ";
      dbgs() << "end of block\n";

  AMDGPU::Waitcnt RequiredWait;

    if (II.isMetaInstruction()) {

    bool TrySimplify = Opcode != II.getOpcode() && !OptNone;

    if (Opcode == AMDGPU::S_WAITCNT)

    if (Opcode == AMDGPU::S_WAIT_LOADCNT_DSCNT) {
          TII.getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
        RequiredWait = RequiredWait.combined(OldWait);
      if (CombinedLoadDsCntInstr == nullptr) {
        CombinedLoadDsCntInstr = &II;
        II.eraseFromParent();
    } else if (Opcode == AMDGPU::S_WAIT_STORECNT_DSCNT) {
          TII.getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
        RequiredWait = RequiredWait.combined(OldWait);
      if (CombinedStoreDsCntInstr == nullptr) {
        CombinedStoreDsCntInstr = &II;
        II.eraseFromParent();
    } else if (Opcode == AMDGPU::S_WAITCNT_DEPCTR) {
          TII.getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
      AMDGPU::Waitcnt OldWait;
        ScoreBrackets.simplifyWaitcnt(OldWait);
      if (WaitcntDepctrInstr == nullptr) {
        WaitcntDepctrInstr = &II;
            TII.getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
        II.eraseFromParent();
    } else if (Opcode == AMDGPU::S_WAITCNT_lds_direct) {
      II.eraseFromParent();
    } else if (Opcode == AMDGPU::WAIT_ASYNCMARK) {
      unsigned N = II.getOperand(0).getImm();
      AMDGPU::Waitcnt OldWait = ScoreBrackets.determineAsyncWait(N);
          TII.getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
        addWait(Wait, CT.value(), OldCnt);
        addWait(RequiredWait, CT.value(), OldCnt);
      if (WaitInstrs[CT.value()] == nullptr) {
        WaitInstrs[CT.value()] = &II;
        II.eraseFromParent();

  ScoreBrackets.simplifyWaitcnt(Wait.combined(RequiredWait), Wait);
  Wait = Wait.combined(RequiredWait);

  if (CombinedLoadDsCntInstr) {
                                      AMDGPU::OpName::simm16, NewEnc);
      Modified |= promoteSoftWaitCnt(CombinedLoadDsCntInstr);
      LLVM_DEBUG(It.isEnd() ? dbgs() << "applied pre-existing waitcnt\n"
                                     << "New Instr at block end: "
                                     << *CombinedLoadDsCntInstr << '\n'
                            : dbgs() << "applied pre-existing waitcnt\n"
                                     << "Old Instr: " << *It << "New Instr: "
                                     << *CombinedLoadDsCntInstr << '\n');

  if (CombinedStoreDsCntInstr) {
                                      AMDGPU::OpName::simm16, NewEnc);
      Modified |= promoteSoftWaitCnt(CombinedStoreDsCntInstr);
      LLVM_DEBUG(It.isEnd() ? dbgs() << "applied pre-existing waitcnt\n"
                                     << "New Instr at block end: "
                                     << *CombinedStoreDsCntInstr << '\n'
                            : dbgs() << "applied pre-existing waitcnt\n"
                                     << "Old Instr: " << *It << "New Instr: "
                                     << *CombinedStoreDsCntInstr << '\n');

  for (MachineInstr **WI : WaitsToErase) {
    (*WI)->eraseFromParent();

    if (!WaitInstrs[CT])
    unsigned NewCnt = Wait.get(CT);
    if (NewCnt != ~0u) {
                                      AMDGPU::OpName::simm16, NewCnt);
      Modified |= promoteSoftWaitCnt(WaitInstrs[CT]);
      ScoreBrackets.applyWaitcnt(CT, NewCnt);
      setNoWait(Wait, CT);
                 ? dbgs() << "applied pre-existing waitcnt\n"
                          << "New Instr at block end: " << *WaitInstrs[CT]
                 : dbgs() << "applied pre-existing waitcnt\n"
                          << "Old Instr: " << *It
                          << "New Instr: " << *WaitInstrs[CT] << '\n');

  if (WaitcntDepctrInstr) {
        TII.getNamedOperand(*WaitcntDepctrInstr, AMDGPU::OpName::simm16)
                                      AMDGPU::OpName::simm16, Enc);
                          << "New Instr at block end: "
                          << *WaitcntDepctrInstr << '\n'
               : dbgs() << "applyPreexistingWaitcnt\n"
                        << "Old Instr: " << *It << "New Instr: "
                        << *WaitcntDepctrInstr << '\n');
bool WaitcntGeneratorGFX12Plus::createNewWaitcnt(
    AMDGPU::Waitcnt Wait, const WaitcntBrackets &ScoreBrackets) {
  assert(!isNormalMode(MaxCounter));

  auto EmitExpandedWaitcnt = [&](unsigned Outstanding, unsigned Target,
    for (unsigned I = Outstanding - 1; I > Target && I != ~0u; --I)
    EmitWaitcnt(Target);

  if (ExpandWaitcntProfiling) {
      if (ScoreBrackets.counterOutOfOrder(CT)) {
      unsigned Outstanding = std::min(ScoreBrackets.getOutstanding(CT),
                                      getWaitCountMax(getLimits(), CT) - 1);
      EmitExpandedWaitcnt(Outstanding, Count, [&](unsigned Val) {

    MachineInstr *SWaitInst = nullptr;
      if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
      dbgs() << "New Instr: " << *SWaitInst << '\n');

    [[maybe_unused]] auto SWaitInst =
      if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
      dbgs() << "New Instr: " << *SWaitInst << '\n');

  if (Wait.hasWaitDepctr()) {
    [[maybe_unused]] auto SWaitInst =
      if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
      dbgs() << "New Instr: " << *SWaitInst << '\n');
bool SIInsertWaitcnts::generateWaitcntInstBefore(
    MachineInstr &MI, WaitcntBrackets &ScoreBrackets,
    MachineInstr *OldWaitcntInstr, PreheaderFlushFlags FlushFlags) {
  setForceEmitWaitcnt();

  AMDGPU::Waitcnt Wait;
  const unsigned Opc = MI.getOpcode();
  case AMDGPU::BUFFER_WBINVL1:
  case AMDGPU::BUFFER_WBINVL1_SC:
  case AMDGPU::BUFFER_WBINVL1_VOL:
  case AMDGPU::BUFFER_GL0_INV:
  case AMDGPU::BUFFER_GL1_INV: {
  case AMDGPU::SI_RETURN_TO_EPILOG:
  case AMDGPU::SI_RETURN:
  case AMDGPU::SI_WHOLE_WAVE_FUNC_RETURN:
  case AMDGPU::S_SETPC_B64_return: {
    AMDGPU::Waitcnt AllZeroWait =
        WCG->getAllZeroWaitcnt(false);
    if (ST.hasExtendedWaitCounts() &&
        !ScoreBrackets.hasPendingEvent(VMEM_ACCESS))
  case AMDGPU::S_ENDPGM:
  case AMDGPU::S_ENDPGM_SAVED: {
        !ScoreBrackets.hasPendingEvent(SCRATCH_WRITE_ACCESS);
  case AMDGPU::S_SENDMSG:
  case AMDGPU::S_SENDMSGHALT: {
    if (ST.hasLegacyGeometry() &&

    if (MI.modifiesRegister(AMDGPU::EXEC, &TRI)) {
      if (ScoreBrackets.hasPendingEvent(EXP_GPR_LOCK) ||
          ScoreBrackets.hasPendingEvent(EXP_PARAM_ACCESS) ||
          ScoreBrackets.hasPendingEvent(EXP_POS_ACCESS) ||
          ScoreBrackets.hasPendingEvent(GDS_GPR_LOCK)) {

    if (TII.isAlwaysGDS(Opc) && ScoreBrackets.hasPendingGDS())

      Wait = AMDGPU::Waitcnt();
      const MachineOperand &CallAddrOp = TII.getCalleeOperand(MI);
      if (CallAddrOp.isReg()) {
        ScoreBrackets.determineWaitForPhysReg(
        if (const auto *RtnAddrOp =
                TII.getNamedOperand(MI, AMDGPU::OpName::dst)) {
          ScoreBrackets.determineWaitForPhysReg(
              SmemAccessCounter, RtnAddrOp->getReg().asMCReg(), Wait);
    } else if (Opc == AMDGPU::S_BARRIER_WAIT) {
      ScoreBrackets.tryClearSCCWriteEvent(&MI);

      for (const MachineMemOperand *Memop : MI.memoperands()) {
        const Value *Ptr = Memop->getValue();
        if (Memop->isStore()) {
          if (auto It = SLoadAddresses.find(Ptr); It != SLoadAddresses.end()) {
            addWait(Wait, SmemAccessCounter, 0);
              SLoadAddresses.erase(It);

        unsigned AS = Memop->getAddrSpace();
          if (TII.mayWriteLDSThroughDMA(MI))

        unsigned TID = LDSDMA_BEGIN;
        if (Ptr && Memop->getAAInfo()) {
          const auto &LDSDMAStores = ScoreBrackets.getLDSDMAStores();
          for (unsigned I = 0, E = LDSDMAStores.size(); I != E; ++I) {
            if (MI.mayAlias(AA, *LDSDMAStores[I], true)) {
              if ((I + 1) >= NUM_LDSDMA) {

        if (Memop->isStore()) {

      for (const MachineOperand &Op : MI.operands()) {
        if (Op.isTied() && Op.isUse() && TII.doesNotReadTiedSource(MI))

        const bool IsVGPR = TRI.isVectorRegister(MRI, Op.getReg());
          if (Op.isImplicit() && MI.mayLoadOrStore())
          if (Op.isUse() || !updateVMCntOnly(MI) ||
              ScoreBrackets.hasOtherPendingVmemTypes(Reg, getVmemType(MI)) ||
              ScoreBrackets.hasPointSamplePendingVmemTypes(MI, Reg) ||
              !ST.hasVmemWriteVgprInOrder()) {
            ScoreBrackets.clearVgprVmemTypes(Reg);
          if (Op.isDef() || ScoreBrackets.hasPendingEvent(EXP_LDS_ACCESS)) {
        } else if (Op.getReg() == AMDGPU::SCC) {
          ScoreBrackets.determineWaitForPhysReg(SmemAccessCounter, Reg, Wait);

        if (ST.hasWaitXcnt() && Op.isDef())

  if (Opc == AMDGPU::S_BARRIER && !ST.hasAutoWaitcntBeforeBarrier() &&
      !ST.hasBackOffBarrier()) {
    Wait = Wait.combined(WCG->getAllZeroWaitcnt(true));

      ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {

  ScoreBrackets.simplifyWaitcnt(Wait);

    Wait = WCG->getAllZeroWaitcnt(false);

    if (!ForceEmitWaitcnt[T])

  if (FlushFlags.FlushVmCnt) {

  if (FlushFlags.FlushDsCnt && ScoreBrackets.hasPendingEvent(AMDGPU::DS_CNT))

  return generateWaitcnt(Wait, MI.getIterator(), *MI.getParent(), ScoreBrackets,
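// generateWaitcntInstBefore is the per-instruction driver: special-case
// opcodes come first (cache invalidates, returns, endpgm, sendmsg, barriers),
// then a generic pass over the operands asks the brackets which counters
// guard each register, and finally the forced/flush overrides apply before
// the combined Waitcnt is handed to generateWaitcnt.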
bool SIInsertWaitcnts::generateWaitcnt(AMDGPU::Waitcnt Wait,
                                       MachineBasicBlock &Block,
                                       WaitcntBrackets &ScoreBrackets,
                                       MachineInstr *OldWaitcntInstr) {
  if (OldWaitcntInstr)
        WCG->applyPreexistingWaitcnt(ScoreBrackets, *OldWaitcntInstr, Wait, It);

    MachineOperand *WaitExp = TII.getNamedOperand(*It, AMDGPU::OpName::waitexp);
               << "Update Instr: " << *It);

  if (WCG->createNewWaitcnt(Block, It, Wait, ScoreBrackets))

  ScoreBrackets.applyWaitcnt(Wait);
std::optional<WaitEventType>
SIInsertWaitcnts::getExpertSchedulingEventType(const MachineInstr &Inst) const {
  if (TII.isVALU(Inst)) {
    if (TII.isXDL(Inst))
      return VGPR_XDL_WRITE;
    if (TII.isTRANS(Inst))
      return VGPR_TRANS_WRITE;
      return VGPR_DPMACC_WRITE;
    return VGPR_CSMACC_WRITE;

  if (TII.isFLAT(Inst))
    return VGPR_FLAT_READ;
    return VGPR_LDS_READ;
  if (TII.isVMEM(Inst) || TII.isVIMAGE(Inst) || TII.isVSAMPLE(Inst))
    return VGPR_VMEM_READ;
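// Expert scheduling mode (GFX12+ only, per the flag above) tracks VGPR
// hazards in software: VALU writes and VMEM/LDS/FLAT reads are classified
// into the VGPR_* events and scored like ordinary counters, and the
// resulting waits are emitted as S_WAITCNT_DEPCTR (the VA_VDST / VM_VSRC
// ranges the brackets print).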
bool SIInsertWaitcnts::isVmemAccess(const MachineInstr &MI) const {
  return (TII.isFLAT(MI) && TII.mayAccessVMEMThroughFlat(MI)) ||

                                   MachineBasicBlock *Block) const {
  auto BlockEnd = Block->getParent()->end();
  auto BlockIter = Block->getIterator();

    if (++BlockIter != BlockEnd) {
      It = BlockIter->instr_begin();

    if (!It->isMetaInstruction())

  return It->getOpcode() == AMDGPU::S_ENDPGM;
bool SIInsertWaitcnts::insertForcedWaitAfter(MachineInstr &Inst,
                                             MachineBasicBlock &Block,
                                             WaitcntBrackets &ScoreBrackets) {
  AMDGPU::Waitcnt Wait;
  bool NeedsEndPGMCheck = false;

    NeedsEndPGMCheck = true;

  ScoreBrackets.simplifyWaitcnt(Wait);

  bool Result = generateWaitcnt(Wait, SuccessorIt, Block, ScoreBrackets,

  if (Result && NeedsEndPGMCheck && isNextENDPGM(SuccessorIt, &Block)) {
WaitEventSet SIInsertWaitcnts::getEventsFor(const MachineInstr &Inst) const {
  WaitEventSet Events;

  if (const auto ET = getExpertSchedulingEventType(Inst))

  if (TII.isDS(Inst) && TII.usesLGKM_CNT(Inst)) {
        TII.hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
      Events.insert(GDS_ACCESS);
      Events.insert(GDS_GPR_LOCK);
      Events.insert(LDS_ACCESS);
  } else if (TII.isFLAT(Inst)) {
      Events.insert(getVmemWaitEventType(Inst));
    if (TII.mayAccessVMEMThroughFlat(Inst)) {
      if (ST.hasWaitXcnt())
        Events.insert(VMEM_GROUP);
      Events.insert(getVmemWaitEventType(Inst));
    if (TII.mayAccessLDSThroughFlat(Inst))
      Events.insert(LDS_ACCESS);
              Inst.getOpcode() == AMDGPU::BUFFER_WBL2)) {
    if (ST.hasWaitXcnt())
      Events.insert(VMEM_GROUP);
    Events.insert(getVmemWaitEventType(Inst));
    if (ST.vmemWriteNeedsExpWaitcnt() &&
      Events.insert(VMW_GPR_LOCK);
  } else if (TII.isSMRD(Inst)) {
    if (ST.hasWaitXcnt())
      Events.insert(SMEM_GROUP);
    Events.insert(SMEM_ACCESS);
      Events.insert(EXP_LDS_ACCESS);
    unsigned Imm = TII.getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm();
      Events.insert(EXP_PARAM_ACCESS);
      Events.insert(EXP_POS_ACCESS);
      Events.insert(EXP_GPR_LOCK);
      Events.insert(SCC_WRITE);
    case AMDGPU::S_SENDMSG:
    case AMDGPU::S_SENDMSG_RTN_B32:
    case AMDGPU::S_SENDMSG_RTN_B64:
    case AMDGPU::S_SENDMSGHALT:
      Events.insert(SQ_MESSAGE);
    case AMDGPU::S_MEMTIME:
    case AMDGPU::S_MEMREALTIME:
    case AMDGPU::S_GET_BARRIER_STATE_M0:
    case AMDGPU::S_GET_BARRIER_STATE_IMM:
      Events.insert(SMEM_ACCESS);
void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
                                               WaitcntBrackets *ScoreBrackets) {
  WaitEventSet InstEvents = getEventsFor(Inst);
  for (WaitEventType E : wait_events()) {
    if (InstEvents.contains(E))
      ScoreBrackets->updateByEvent(E, Inst);

  if (TII.isDS(Inst) && TII.usesLGKM_CNT(Inst)) {
        TII.hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
      ScoreBrackets->setPendingGDS();
  } else if (TII.isFLAT(Inst)) {
      ScoreBrackets->setPendingFlat();
      ScoreBrackets->updateByEvent(ASYNC_ACCESS, Inst);
  } else if (Inst.isCall()) {
      ScoreBrackets->applyWaitcnt(WCG->getAllZeroWaitcnt(false));
      ScoreBrackets->setStateOnFunctionEntryOrReturn();
  } else if (TII.isVINTERP(Inst)) {
    int64_t Imm = TII.getNamedOperand(Inst, AMDGPU::OpName::waitexp)->getImm();
bool WaitcntBrackets::mergeScore(const MergeInfo &M, unsigned &Score,
                                 unsigned OtherScore) {
  unsigned MyShifted = Score <= M.OldLB ? 0 : Score + M.MyShift;
  unsigned OtherShifted =
      OtherScore <= M.OtherLB ? 0 : OtherScore + M.OtherShift;
  Score = std::max(MyShifted, OtherShifted);
  return OtherShifted > MyShifted;
}
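// Worked example (made-up numbers): if this bracket has OldLB=2 with
// MyShift=+1 and the other has OtherLB=1 with OtherShift=+3, a live score of
// 4 on our side becomes 5 while the other side's 3 becomes 6; the merged
// score keeps the max (6), and returning true (the other side won) tells
// merge() that the successor state strictly grew and must be revisited.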
  bool StrictDom = false;

  if (AsyncMarks.empty() && OtherMarks.empty()) {

  auto MaxSize = (unsigned)std::max(AsyncMarks.size(), OtherMarks.size());
  MaxSize = std::min(MaxSize, MaxAsyncMarks);

  if (AsyncMarks.size() > MaxSize)
    AsyncMarks.erase(AsyncMarks.begin(),
                     AsyncMarks.begin() + (AsyncMarks.size() - MaxSize));

  constexpr CounterValueArray ZeroMark{};
  AsyncMarks.insert(AsyncMarks.begin(), MaxSize - AsyncMarks.size(), ZeroMark);

    dbgs() << "Before merge:\n";
    for (const auto &Mark : AsyncMarks) {
    dbgs() << "Other marks:\n";
    for (const auto &Mark : OtherMarks) {

  unsigned OtherSize = OtherMarks.size();
  unsigned OurSize = AsyncMarks.size();
  unsigned MergeCount = std::min(OtherSize, OurSize);
      StrictDom |= mergeScore(MergeInfos[T], AsyncMarks[OurSize - Idx][T],
                              OtherMarks[OtherSize - Idx][T]);

    dbgs() << "After merge:\n";
    for (const auto &Mark : AsyncMarks) {
bool WaitcntBrackets::merge(const WaitcntBrackets &Other) {
  bool StrictDom = false;

  for (auto K : Other.VMem.keys())
    VMem.try_emplace(K);
  for (auto K : Other.SGPRs.keys())
    SGPRs.try_emplace(K);

    const WaitEventSet &EventsForT = Context->getWaitEvents(T);
    const WaitEventSet OldEvents = PendingEvents & EventsForT;
    const WaitEventSet OtherEvents = Other.PendingEvents & EventsForT;
    if (!OldEvents.contains(OtherEvents))
    PendingEvents |= OtherEvents;

    const unsigned MyPending = ScoreUBs[T] - ScoreLBs[T];
    const unsigned OtherPending = Other.ScoreUBs[T] - Other.ScoreLBs[T];
    const unsigned NewUB = ScoreLBs[T] + std::max(MyPending, OtherPending);
    if (NewUB < ScoreLBs[T])

    MergeInfo &M = MergeInfos[T];
    M.OldLB = ScoreLBs[T];
    M.OtherLB = Other.ScoreLBs[T];
    M.MyShift = NewUB - ScoreUBs[T];
    M.OtherShift = NewUB - Other.ScoreUBs[T];

    ScoreUBs[T] = NewUB;

      StrictDom |= mergeScore(M, LastFlatLoadCnt, Other.LastFlatLoadCnt);
      StrictDom |= mergeScore(M, LastFlatDsCnt, Other.LastFlatDsCnt);
      StrictDom |= mergeScore(M, LastGDS, Other.LastGDS);

      StrictDom |= mergeScore(M, SCCScore, Other.SCCScore);
      if (Other.hasPendingEvent(SCC_WRITE)) {
        if (!OldEvents.contains(SCC_WRITE)) {
          PendingSCCWrite = Other.PendingSCCWrite;
        } else if (PendingSCCWrite != Other.PendingSCCWrite) {
          PendingSCCWrite = nullptr;

    for (auto &[RegID, Info] : VMem)
      StrictDom |= mergeScore(M, Info.Scores[T], Other.getVMemScore(RegID, T));

    if (isSmemCounter(T)) {
      for (auto &[RegID, Info] : SGPRs) {
        auto It = Other.SGPRs.find(RegID);
        unsigned OtherScore = (It != Other.SGPRs.end()) ? It->second.get(T) : 0;
        StrictDom |= mergeScore(M, Info.get(T), OtherScore);

  for (auto &[TID, Info] : VMem) {
    if (auto It = Other.VMem.find(TID); It != Other.VMem.end()) {
      unsigned char NewVmemTypes = Info.VMEMTypes | It->second.VMEMTypes;
      StrictDom |= NewVmemTypes != Info.VMEMTypes;
      Info.VMEMTypes = NewVmemTypes;

  StrictDom |= mergeAsyncMarks(MergeInfos, Other.AsyncMarks);
    StrictDom |= mergeScore(MergeInfos[T], AsyncScore[T], Other.AsyncScore[T]);

  purgeEmptyTrackingData();
  return Opcode == AMDGPU::S_WAITCNT ||
         Opcode == AMDGPU::S_WAIT_LOADCNT_DSCNT ||
         Opcode == AMDGPU::S_WAIT_STORECNT_DSCNT ||
         Opcode == AMDGPU::S_WAITCNT_lds_direct ||
         Opcode == AMDGPU::WAIT_ASYNCMARK ||
void SIInsertWaitcnts::setSchedulingMode(MachineBasicBlock &MBB,
                                         bool ExpertMode) const {
      .addImm(ExpertMode ? 2 : 0)
class VCCZWorkaround {
  const WaitcntBrackets &ScoreBrackets;
  const GCNSubtarget &ST;
  const SIInstrInfo &TII;
  const SIRegisterInfo &TRI;
  bool VCCZCorruptionBug = false;
  bool VCCZNotUpdatedByPartialWrites = false;
  bool MustRecomputeVCCZ = true;

  VCCZWorkaround(const WaitcntBrackets &ScoreBrackets, const GCNSubtarget &ST,
                 const SIInstrInfo &TII, const SIRegisterInfo &TRI)
    VCCZCorruptionBug = ST.hasReadVCCZBug();
    VCCZNotUpdatedByPartialWrites = !ST.partialVCCWritesUpdateVCCZ();

  bool tryRecomputeVCCZ(MachineInstr &MI) {
    if (!VCCZCorruptionBug && !VCCZNotUpdatedByPartialWrites)

    MustRecomputeVCCZ |= VCCZCorruptionBug && TII.isSMRD(MI);

    std::optional<bool> PartiallyWritesToVCCOpt;
    auto PartiallyWritesToVCC = [](MachineInstr &MI) {
      return MI.definesRegister(AMDGPU::VCC_LO, nullptr) ||
             MI.definesRegister(AMDGPU::VCC_HI, nullptr);
    if (VCCZNotUpdatedByPartialWrites) {
      PartiallyWritesToVCCOpt = PartiallyWritesToVCC(MI);
      MustRecomputeVCCZ |= *PartiallyWritesToVCCOpt;

    if (!ScoreBrackets.hasPendingEvent(SMEM_ACCESS) || !VCCZCorruptionBug) {
      if (!PartiallyWritesToVCCOpt)
        PartiallyWritesToVCCOpt = PartiallyWritesToVCC(MI);
      bool FullyWritesToVCC = !*PartiallyWritesToVCCOpt &&
                              MI.definesRegister(AMDGPU::VCC, nullptr);
      bool UpdatesVCCZ = FullyWritesToVCC || (!VCCZNotUpdatedByPartialWrites &&
                                              *PartiallyWritesToVCCOpt);
        MustRecomputeVCCZ = false;

            TII.get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64),
      MustRecomputeVCCZ = false;
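// Two distinct hardware quirks share this state machine: on subtargets with
// the read-VCCZ bug, VCCZ can be stale while SMEM results are outstanding;
// on others, partial VCC_LO/VCC_HI writes never refresh VCCZ. The recovery
// is the same either way: re-materialize VCC with an S_MOV so the hardware
// recomputes VCCZ before it is read.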
bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
                                            MachineBasicBlock &Block,
                                            WaitcntBrackets &ScoreBrackets) {
    dbgs() << "*** Begin Block: ";
    ScoreBrackets.dump();

  VCCZWorkaround VCCZW(ScoreBrackets, ST, TII, TRI);

  MachineInstr *OldWaitcntInstr = nullptr;

       Iter != E; ++Iter) {
    MachineInstr &Inst = *Iter;

        (IsExpertMode && Inst.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR)) {
      if (!OldWaitcntInstr)
        OldWaitcntInstr = &Inst;

    PreheaderFlushFlags FlushFlags;
    if (Block.getFirstTerminator() == Inst)
      FlushFlags = isPreheaderToFlush(Block, ScoreBrackets);

    Modified |= generateWaitcntInstBefore(Inst, ScoreBrackets, OldWaitcntInstr,
    OldWaitcntInstr = nullptr;

    if (Inst.getOpcode() == AMDGPU::ASYNCMARK) {
      ScoreBrackets.recordAsyncMark(Inst);

    if (TII.isSMRD(Inst)) {
      for (const MachineMemOperand *Memop : Inst.memoperands()) {
        if (!Memop->isInvariant()) {
          const Value *Ptr = Memop->getValue();

    updateEventWaitcntAfter(Inst, &ScoreBrackets);

    Modified |= insertForcedWaitAfter(Inst, Block, ScoreBrackets);

      ScoreBrackets.dump();

    Modified |= VCCZW.tryRecomputeVCCZ(Inst);

  AMDGPU::Waitcnt Wait;
  if (Block.getFirstTerminator() == Block.end()) {
    PreheaderFlushFlags FlushFlags = isPreheaderToFlush(Block, ScoreBrackets);
    if (FlushFlags.FlushVmCnt) {
    if (FlushFlags.FlushDsCnt && ScoreBrackets.hasPendingEvent(AMDGPU::DS_CNT))

    dbgs() << "*** End Block: ";
    ScoreBrackets.dump();
bool SIInsertWaitcnts::removeRedundantSoftXcnts(MachineBasicBlock &Block) {
  if (Block.size() <= 1)

  MachineInstr *LastAtomicWithSoftXcnt = nullptr;
    if (!IsLDS && (MI.mayLoad() ^ MI.mayStore()))
      LastAtomicWithSoftXcnt = nullptr;
        MI.mayLoad() && MI.mayStore();
    MachineInstr &PrevMI = *MI.getPrevNode();
    if (PrevMI.getOpcode() == AMDGPU::S_WAIT_XCNT_soft && IsAtomicRMW) {
      if (LastAtomicWithSoftXcnt) {
      LastAtomicWithSoftXcnt = &MI;
SIInsertWaitcnts::isPreheaderToFlush(MachineBasicBlock &MBB,
                                     const WaitcntBrackets &ScoreBrackets) {
  auto [Iterator, IsInserted] =
    return Iterator->second;

    return PreheaderFlushFlags();

    return PreheaderFlushFlags();

  Iterator->second = getPreheaderFlushFlags(Loop, ScoreBrackets);
  return Iterator->second;

  return PreheaderFlushFlags();
bool SIInsertWaitcnts::isVMEMOrFlatVMEM(const MachineInstr &MI) const {
    return TII.mayAccessVMEMThroughFlat(MI);

bool SIInsertWaitcnts::isDSRead(const MachineInstr &MI) const {

bool SIInsertWaitcnts::mayStoreIncrementingDSCNT(const MachineInstr &MI) const {
SIInsertWaitcnts::getPreheaderFlushFlags(MachineLoop *ML,
                                         const WaitcntBrackets &Brackets) {
  PreheaderFlushFlags Flags;
  bool HasVMemLoad = false;
  bool HasVMemStore = false;
  bool UsesVgprVMEMLoadedOutside = false;
  bool UsesVgprDSReadOutside = false;
  bool VMemInvalidated = false;

  bool TrackSimpleDSOpt = ST.hasExtendedWaitCounts();
  DenseSet<MCRegUnit> VgprUse;
  DenseSet<MCRegUnit> VgprDefVMEM;
  DenseSet<MCRegUnit> VgprDefDS;

  DenseMap<MCRegUnit, unsigned> LastDSReadPositionMap;
  unsigned DSReadPosition = 0;
  bool IsSingleBlock = ML->getNumBlocks() == 1;
  bool TrackDSFlushPoint = ST.hasExtendedWaitCounts() && IsSingleBlock;
  unsigned LastDSFlushPosition = 0;

  for (MachineBasicBlock *MBB : ML->blocks()) {
    for (MachineInstr &MI : *MBB) {
      if (isVMEMOrFlatVMEM(MI)) {
        HasVMemLoad |= MI.mayLoad();
        HasVMemStore |= MI.mayStore();

      if (mayStoreIncrementingDSCNT(MI)) {
        if (VMemInvalidated)
        TrackSimpleDSOpt = false;
        TrackDSFlushPoint = false;

      bool IsDSRead = isDSRead(MI);

      auto updateDSReadFlushTracking = [&](MCRegUnit RU) {
        if (!TrackDSFlushPoint)
        if (auto It = LastDSReadPositionMap.find(RU);
            It != LastDSReadPositionMap.end()) {
          LastDSFlushPosition = std::max(LastDSFlushPosition, It->second);

      for (const MachineOperand &Op : MI.all_uses()) {
        if (Op.isDebug() || !TRI.isVectorRegister(MRI, Op.getReg()))
        for (MCRegUnit RU : TRI.regunits(Op.getReg().asMCReg())) {
            VMemInvalidated = true;
            TrackSimpleDSOpt = false;
          if (VMemInvalidated && !TrackSimpleDSOpt && !TrackDSFlushPoint)
          updateDSReadFlushTracking(RU);

          VMEMID ID = toVMEMID(RU);
            UsesVgprVMEMLoadedOutside = true;
            UsesVgprDSReadOutside = true;

      if (isVMEMOrFlatVMEM(MI) && MI.mayLoad()) {
        for (const MachineOperand &Op : MI.all_defs()) {
          for (MCRegUnit RU : TRI.regunits(Op.getReg().asMCReg())) {
              VMemInvalidated = true;
            if (VMemInvalidated && !TrackSimpleDSOpt && !TrackDSFlushPoint)

      if (IsDSRead || TrackDSFlushPoint) {
        for (const MachineOperand &Op : MI.all_defs()) {
          if (!TRI.isVectorRegister(MRI, Op.getReg()))
          for (MCRegUnit RU : TRI.regunits(Op.getReg().asMCReg())) {
              updateDSReadFlushTracking(RU);
            if (TrackDSFlushPoint)
              LastDSReadPositionMap[RU] = DSReadPosition;

  if (!VMemInvalidated && UsesVgprVMEMLoadedOutside &&
      ((!ST.hasVscnt() && HasVMemStore && !HasVMemLoad) ||
       (HasVMemLoad && ST.hasVmemWriteVgprInOrder())))
    Flags.FlushVmCnt = true;

  bool SimpleDSOpt = TrackSimpleDSOpt && UsesVgprDSReadOutside;

  bool HasUnflushedDSReads = DSReadPosition > LastDSFlushPosition;
  bool DSFlushPointPrefetch =
      TrackDSFlushPoint && UsesVgprDSReadOutside && HasUnflushedDSReads;

  if (SimpleDSOpt || DSFlushPointPrefetch)
    Flags.FlushDsCnt = true;
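// The preheader-flush heuristic trades one wait outside the loop for fewer
// waits inside it: when the loop body reads VGPRs whose defining VMEM loads
// or DS reads happened before the loop, draining the counter to zero in the
// preheader means every in-loop use already sees a quiet counter.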
bool SIInsertWaitcntsLegacy::runOnMachineFunction(MachineFunction &MF) {
  auto &MLI = getAnalysis<MachineLoopInfoWrapperPass>().getLI();
      getAnalysis<MachinePostDominatorTreeWrapperPass>().getPostDomTree();
  if (auto *AAR = getAnalysisIfAvailable<AAResultsWrapperPass>())
    AA = &AAR->getAAResults();

  return SIInsertWaitcnts(MLI, PDT, AA, MF).run();

  if (!SIInsertWaitcnts(MLI, PDT, AA, MF).run())
      .preserve<AAManager>();
bool SIInsertWaitcnts::run() {
  if (ST.hasExtendedWaitCounts()) {
    IsExpertMode = ST.hasExpertSchedulingMode() &&
    WCG = std::make_unique<WaitcntGeneratorGFX12Plus>(MF, MaxCounter, Limits,
    WCG = std::make_unique<WaitcntGeneratorPreGFX12>(

  SmemAccessCounter = getCounterFromEvent(SMEM_ACCESS);

  MachineBasicBlock &EntryBB = MF.front();

  while (I != EntryBB.end() && I->isMetaInstruction())

  if (ST.hasExtendedWaitCounts()) {
      if (!ST.hasImageInsts() &&
                TII.get(instrsForExtendedCounterTypes[CT]))

    auto NonKernelInitialState = std::make_unique<WaitcntBrackets>(this);
    NonKernelInitialState->setStateOnFunctionEntryOrReturn();
    BlockInfos[&EntryBB].Incoming = std::move(NonKernelInitialState);

  for (auto *MBB : ReversePostOrderTraversal<MachineFunction *>(&MF))

  std::unique_ptr<WaitcntBrackets> Brackets;

    for (auto BII = BlockInfos.begin(), BIE = BlockInfos.end(); BII != BIE;
      MachineBasicBlock *MBB = BII->first;
      BlockInfo &BI = BII->second;

          Brackets = std::make_unique<WaitcntBrackets>(*BI.Incoming);
          *Brackets = *BI.Incoming;
          Brackets = std::make_unique<WaitcntBrackets>(this);
          Brackets->~WaitcntBrackets();
          new (Brackets.get()) WaitcntBrackets(this);

      if (ST.hasWaitXcnt())
      Modified |= insertWaitcntInBlock(MF, *MBB, *Brackets);

      if (Brackets->hasPendingEvent()) {
        BlockInfo *MoveBracketsToSucc = nullptr;
          auto *SuccBII = BlockInfos.find(Succ);
          BlockInfo &SuccBI = SuccBII->second;
          if (!SuccBI.Incoming) {
            SuccBI.Dirty = true;
            if (SuccBII <= BII) {
            if (!MoveBracketsToSucc) {
              MoveBracketsToSucc = &SuccBI;
              SuccBI.Incoming = std::make_unique<WaitcntBrackets>(*Brackets);
            dbgs() << "Try to merge ";
          if (SuccBI.Incoming->merge(*Brackets)) {
            SuccBI.Dirty = true;
            if (SuccBII <= BII) {
        if (MoveBracketsToSucc)
          MoveBracketsToSucc->Incoming = std::move(Brackets);

  if (ST.hasScalarStores()) {
    SmallVector<MachineBasicBlock *, 4> EndPgmBlocks;
    bool HaveScalarStores = false;

    for (MachineBasicBlock &MBB : MF) {
      for (MachineInstr &MI : MBB) {
        if (!HaveScalarStores && TII.isScalarStore(MI))
          HaveScalarStores = true;

        if (MI.getOpcode() == AMDGPU::S_ENDPGM ||
            MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG)

    if (HaveScalarStores) {
      for (MachineBasicBlock *MBB : EndPgmBlocks) {
        bool SeenDCacheWB = false;
          if (I->getOpcode() == AMDGPU::S_DCACHE_WB)
            SeenDCacheWB = true;
          else if (TII.isScalarStore(*I))
            SeenDCacheWB = false;

          if ((I->getOpcode() == AMDGPU::S_ENDPGM ||
               I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) &&

    while (I != EntryBB.end() && I->isMetaInstruction())
    setSchedulingMode(EntryBB, I, true);

    for (MachineInstr *MI : CallInsts) {
      MachineBasicBlock &MBB = *MI->getParent();
      setSchedulingMode(MBB, MI, false);
      setSchedulingMode(MBB, std::next(MI->getIterator()), true);

    for (MachineInstr *MI : ReturnInsts)
      setSchedulingMode(*MI->getParent(), MI, false);

    for (auto [MI, _] : EndPgmInsts) {
              TII.get(AMDGPU::S_ALLOC_VGPR))
  } else if (!WCG->isOptNone() &&
             ST.getGeneration() >= AMDGPUSubtarget::GFX11 &&
             (MF.getFrameInfo().hasCalls() ||
              ST.getOccupancyWithNumVGPRs(
                  TRI.getNumUsedPhysRegs(MRI, AMDGPU::VGPR_32RegClass),
    for (auto [MI, Flag] : EndPgmInsts) {
      if (ST.requiresNopBeforeDeallocVGPRs()) {
                TII.get(AMDGPU::S_NOP))
                TII.get(AMDGPU::S_SENDMSG))
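// The outer loop is a classic forward dataflow fixpoint over reverse
// post-order: each block's outgoing brackets are merged into every
// successor's Incoming state, a successor is re-queued (Dirty) when the
// merge strictly grows that state, and the placement-new reset above
// recycles the bracket allocation between blocks.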
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
LLVM_ABI const MachineBasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
LLVM_ABI DebugLoc findDebugLoc(instr_iterator MBBI)
Find the next valid DebugLoc starting at MBBI, skipping any debug instructions.
Instructions::iterator instr_iterator
iterator_range< succ_iterator > successors()
LLVM_ABI void printName(raw_ostream &os, unsigned printNameFlags=PrintNameIr, ModuleSlotTracker *moduleSlotTracker=nullptr) const
Print the basic block's name as:
MachineInstrBundleIterator< MachineInstr > iterator
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineBasicBlock & front() const
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Representation of each machine instruction.
mop_range defs()
Returns all explicit operands that are register definitions.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
filtered_mop_range all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
bool isCall(QueryType Type=AnyInBundle) const
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
LLVM_ABI void print(raw_ostream &OS, bool IsStandalone=true, bool SkipOpers=false, bool SkipDebugLoc=false, bool AddNewLine=true, const TargetInstrInfo *TII=nullptr) const
Print this MI to OS.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
filtered_mop_range all_uses()
Returns an iterator range over all operands that are (explicit or implicit) register uses.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI MachineInstrBundleIterator< MachineInstr > eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
bool isMetaInstruction(QueryType Type=IgnoreBundle) const
Return true if this instruction doesn't produce any output in the form of executable instructions.
Analysis pass that exposes the MachineLoopInfo for a machine function.
MachineOperand class - Representation of each machine instruction operand.
void setImm(int64_t immVal)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Register getReg() const
getReg - Returns the register number.
iterator find(const KeyT &Key)
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
static bool isCBranchVCCZRead(const MachineInstr &MI)
static bool isDS(const MachineInstr &MI)
static bool isVMEM(const MachineInstr &MI)
static bool isFLATScratch(const MachineInstr &MI)
static bool isEXP(const MachineInstr &MI)
static bool mayWriteLDSThroughDMA(const MachineInstr &MI)
static bool isLDSDIR(const MachineInstr &MI)
static bool isGWS(const MachineInstr &MI)
static bool isFLATGlobal(const MachineInstr &MI)
static bool isVSAMPLE(const MachineInstr &MI)
static bool isAtomicRet(const MachineInstr &MI)
static bool isImage(const MachineInstr &MI)
static unsigned getNonSoftWaitcntOpcode(unsigned Opcode)
static bool isVINTERP(const MachineInstr &MI)
static bool isGFX12CacheInvOrWBInst(unsigned Opc)
static bool isSBarrierSCCWrite(unsigned Opcode)
static bool isMIMG(const MachineInstr &MI)
static bool usesASYNC_CNT(const MachineInstr &MI)
static bool isFLAT(const MachineInstr &MI)
static bool isLDSDMA(const MachineInstr &MI)
static bool isAtomicNoRet(const MachineInstr &MI)
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
bool isDynamicVGPREnabled() const
void push_back(const T &Elt)
A wrapper around a string literal that serves as a proxy for constructing global tables of StringRefs...
std::pair< iterator, bool > insert(const ValueT &V)
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
@ LOCAL_ADDRESS
Address space for local memory.
@ FLAT_ADDRESS
Address space for flat memory.
unsigned encodeFieldVaVdst(unsigned Encoded, unsigned VaVdst)
unsigned encodeFieldVmVsrc(unsigned Encoded, unsigned VmVsrc)
unsigned decodeFieldVaVdst(unsigned Encoded)
int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI)
unsigned decodeFieldVmVsrc(unsigned Encoded)
unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI)
@ ID_DEALLOC_VGPRS_GFX11Plus
LLVM_READONLY const MIMGInfo * getMIMGInfo(unsigned Opc)
bool isDPMACCInstruction(unsigned Opc)
iota_range< InstCounterType > inst_counter_types(InstCounterType MaxCounter)
unsigned encodeLoadcntDscnt(const IsaVersion &Version, const Waitcnt &Decoded)
LLVM_ABI IsaVersion getIsaVersion(StringRef GPU)
Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded)
unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded)
Waitcnt decodeStorecntDscnt(const IsaVersion &Version, unsigned StorecntDscnt)
Waitcnt decodeLoadcntDscnt(const IsaVersion &Version, unsigned LoadcntDscnt)
unsigned encodeStorecntDscnt(const IsaVersion &Version, const Waitcnt &Decoded)
bool getMUBUFIsBufferInv(unsigned Opc)
LLVM_READONLY const MIMGBaseOpcodeInfo * getMIMGBaseOpcodeInfo(unsigned BaseOpcode)
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
initializer< Ty > init(const Ty &Val)
PointerTypeMap run(const Module &M)
Compute the PointerTypeMap for the module M.
LLVM_ABI std::error_code remove(const Twine &path, bool IgnoreNonExisting=true)
Remove path.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
FunctionAddr VTableAddr Value
auto seq_inclusive(T Begin, T End)
Iterate over an integral type from Begin to End inclusive.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
APInt operator&(APInt a, const APInt &b)
auto enum_seq(EnumT Begin, EnumT End)
Iterate over an enum type from Begin up to - but not including - End.
static StringRef getCPU(StringRef CPU)
Processes a CPU name.
bool operator!=(uint64_t V1, const APInt &V2)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void interleaveComma(const Container &c, StreamT &os, UnaryFunctor each_fn)
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
constexpr auto equal_to(T &&Arg)
Functor variant of std::equal_to that can be used as a UnaryPredicate in functional algorithms like a...
bool operator==(const AddressRangeValuePair &LHS, const AddressRangeValuePair &RHS)
LLVM_ABI PreservedAnalyses getMachineFunctionPassPreservedAnalyses()
Returns the minimum set of Analyses that all machine function passes must preserve.
char & SIInsertWaitcntsID
@ Async
"Asynchronous" unwind tables (instr precise)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
FunctionAddr VTableAddr Count
CodeGenOptLevel
Code generation optimization level.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
iterator_range(Container &&) -> iterator_range< llvm::detail::IterOfRange< Container > >
bool operator&=(SparseBitVector< ElementSize > *LHS, const SparseBitVector< ElementSize > &RHS)
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool operator|=(SparseBitVector< ElementSize > &LHS, const SparseBitVector< ElementSize > *RHS)
APInt operator|(APInt a, const APInt &b)
FunctionPass * createSIInsertWaitcntsPass()
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
static constexpr ValueType Default
static constexpr uint64_t encode(Fields... Values)
Represents the hardware counter limits for different wait count types.
Instruction set architecture version.
static constexpr bool is_iterable