#define DEBUG_TYPE "si-insert-waitcnts"

DEBUG_COUNTER(ForceExpCounter, DEBUG_TYPE "-forceexp",
              "Force emit s_waitcnt expcnt(0) instrs");
DEBUG_COUNTER(ForceLgkmCounter, DEBUG_TYPE "-forcelgkm",
              "Force emit s_waitcnt lgkmcnt(0) instrs");
DEBUG_COUNTER(ForceVMCounter, DEBUG_TYPE "-forcevm",
              "Force emit s_waitcnt vmcnt(0) instrs");

static cl::opt<bool>
    ForceEmitZeroFlag("amdgpu-waitcnt-forcezero",
                      cl::desc("Force all waitcnt instrs to be emitted as "
                               "s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)"),
                      cl::init(false), cl::Hidden);

static cl::opt<bool> ForceEmitZeroLoadFlag(
    "amdgpu-waitcnt-load-forcezero",
    cl::desc("Force all waitcnt load counters to wait until 0"),
    cl::init(false), cl::Hidden);

static cl::opt<bool> ExpertSchedulingModeFlag(
    "amdgpu-expert-scheduling-mode",
    cl::desc("Enable expert scheduling mode 2 for all functions (GFX12+ only)"),
    cl::init(false), cl::Hidden);
enum InstCounterType {
  LOAD_CNT = 0, // VMcnt prior to gfx12.
  DS_CNT,       // LGKMcnt prior to gfx12.
  EXP_CNT,
  STORE_CNT, // VScnt in gfx10/gfx11.
  NUM_NORMAL_INST_CNTS,
  SAMPLE_CNT = NUM_NORMAL_INST_CNTS, // gfx12+ only.
  BVH_CNT,                           // gfx12+ only.
  KM_CNT,                            // gfx12+ only.
  X_CNT,                             // gfx12+ only.
  NUM_EXTENDED_INST_CNTS,
  VA_VDST = NUM_EXTENDED_INST_CNTS, // gfx12+ expert scheduling mode only.
  VM_VSRC,                          // gfx12+ expert scheduling mode only.
  NUM_EXPERT_INST_CNTS,
  NUM_INST_CNTS = NUM_EXPERT_INST_CNTS
};
auto inst_counter_types(InstCounterType MaxCounter = NUM_INST_CNTS) {
  return enum_seq(LOAD_CNT, MaxCounter);
}
using VMEMID = unsigned;

enum : unsigned {
  TRACKINGID_RANGE_LEN = (1 << 16),
  REGUNITS_BEGIN = 0,
  REGUNITS_END = REGUNITS_BEGIN + TRACKINGID_RANGE_LEN,
  NUM_LDSDMA = TRACKINGID_RANGE_LEN,
  LDSDMA_BEGIN = REGUNITS_END,
  LDSDMA_END = LDSDMA_BEGIN + NUM_LDSDMA,
};

static constexpr VMEMID toVMEMID(MCRegUnit RU) {
  return static_cast<unsigned>(RU);
}
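// The VMEMID space is partitioned: IDs in [REGUNITS_BEGIN, REGUNITS_END) map
// 1:1 onto register units, and IDs in [LDSDMA_BEGIN, LDSDMA_END) track LDS DMA
// slots. For example (illustrative): register unit 5 is tracked as VMEMID 5,
// while LDS DMA slot 3 is tracked as VMEMID LDSDMA_BEGIN + 3.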
#define AMDGPU_DECLARE_WAIT_EVENTS(DECL)                                       \
  DECL(VMEM_ACCESS)              /* vmem read & atomic */                      \
  DECL(VMEM_SAMPLER_READ_ACCESS) /* vmem SAMPLER read (gfx12+ only) */         \
  DECL(VMEM_BVH_READ_ACCESS)     /* vmem BVH read (gfx12+ only) */             \
  DECL(GLOBAL_INV_ACCESS)        /* global_inv (gfx12+ only) */                \
  DECL(VMEM_WRITE_ACCESS)        /* vmem write that is not scratch */          \
  DECL(SCRATCH_WRITE_ACCESS)     /* vmem write that may be scratch */          \
  DECL(VMEM_GROUP)               /* vmem group */                              \
  DECL(LDS_ACCESS)               /* lds read & write */                        \
  DECL(GDS_ACCESS)               /* gds read & write */                        \
  DECL(SQ_MESSAGE)               /* send message */                            \
  DECL(SMEM_ACCESS)              /* scalar-memory read & write */              \
  DECL(SMEM_GROUP)               /* scalar-memory group */                     \
  DECL(EXP_GPR_LOCK)             /* export holding on its data src */          \
  DECL(GDS_GPR_LOCK)             /* GDS holding on its data and addr src */    \
  DECL(VMW_GPR_LOCK)             /* vmem write holding on its data src */      \
  DECL(EXP_POS_ACCESS)           /* write to export position */                \
  DECL(EXP_PARAM_ACCESS)         /* write to export parameter */               \
  DECL(EXP_LDS_ACCESS)           /* read by ldsdir counting as export */       \
  DECL(SCC_WRITE)                /* write to SCC (gfx12+ barriers) */          \
  DECL(VGPR_CSMACC_WRITE)        /* expert mode: core/side MACC VGPR write */  \
  DECL(VGPR_DPMACC_WRITE)        /* expert mode: DP MACC VGPR write */         \
  DECL(VGPR_TRANS_WRITE)         /* expert mode: transcendental VGPR write */  \
  DECL(VGPR_XDL_WRITE)           /* expert mode: XDL VGPR write */             \
  DECL(VGPR_LDS_READ)            /* expert mode: VGPR read by LDS */           \
  DECL(VGPR_FLAT_READ)           /* expert mode: VGPR read by FLAT */          \
  DECL(VGPR_VMEM_READ)           /* expert mode: VGPR read by VMEM */

enum WaitEventType {
#define AMDGPU_EVENT_ENUM(Name) Name,
  AMDGPU_DECLARE_WAIT_EVENTS(AMDGPU_EVENT_ENUM)
#undef AMDGPU_EVENT_ENUM
  NUM_WAIT_EVENTS
};

static constexpr StringLiteral WaitEventTypeName[] = {
#define AMDGPU_EVENT_NAME(Name) #Name,
    AMDGPU_DECLARE_WAIT_EVENTS(AMDGPU_EVENT_NAME)
#undef AMDGPU_EVENT_NAME
};
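// Illustrative expansion of the X-macro above: AMDGPU_EVENT_ENUM turns each
// DECL(Name) into an enumerator, so the enum reads {VMEM_ACCESS,
// VMEM_SAMPLER_READ_ACCESS, ..., NUM_WAIT_EVENTS}, while AMDGPU_EVENT_NAME
// stringizes the same list so WaitEventTypeName[E] yields an event's name for
// debug printing without maintaining two parallel lists by hand.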
static const unsigned instrsForExtendedCounterTypes[NUM_EXTENDED_INST_CNTS] = {
    AMDGPU::S_WAIT_LOADCNT,  AMDGPU::S_WAIT_DSCNT,     AMDGPU::S_WAIT_EXPCNT,
    AMDGPU::S_WAIT_STORECNT, AMDGPU::S_WAIT_SAMPLECNT, AMDGPU::S_WAIT_BVHCNT,
    AMDGPU::S_WAIT_KMCNT,    AMDGPU::S_WAIT_XCNT};
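// Note: this array is indexed by InstCounterType, so the entries must stay in
// the same order as the enum (LOAD_CNT, DS_CNT, EXP_CNT, STORE_CNT,
// SAMPLE_CNT, BVH_CNT, KM_CNT, X_CNT).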
static bool isNormalMode(InstCounterType MaxCounter) {
  return MaxCounter == NUM_NORMAL_INST_CNTS;
}

// The values of this enum are assumed to match the VMEMTypes bit positions
// used in the score brackets.
enum VmemType {
  VMEM_NOSAMPLER,
  VMEM_SAMPLER,
  VMEM_BVH,
  NUM_VMEM_TYPES
};

// VMEM (incl. FLAT global/scratch) instructions only update the VM counters.
static bool updateVMCntOnly(const MachineInstr &Inst) {
  return SIInstrInfo::isVMEM(Inst) || SIInstrInfo::isFLATGlobal(Inst) ||
         SIInstrInfo::isFLATScratch(Inst);
}

static VmemType getVmemType(const MachineInstr &Inst) {
  assert(updateVMCntOnly(Inst));
  if (!SIInstrInfo::isImage(Inst))
    return VMEM_NOSAMPLER;
  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Inst.getOpcode());
  const AMDGPU::MIMGBaseOpcodeInfo *BaseInfo =
      AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
  return BaseInfo->BVH ? VMEM_BVH
         : BaseInfo->Sampler || BaseInfo->MSAA ? VMEM_SAMPLER
                                               : VMEM_NOSAMPLER;
}
static unsigned &getCounterRef(AMDGPU::Waitcnt &Wait, InstCounterType T) {
  switch (T) {
  case LOAD_CNT:   return Wait.LoadCnt;
  case DS_CNT:     return Wait.DsCnt;
  case EXP_CNT:    return Wait.ExpCnt;
  case STORE_CNT:  return Wait.StoreCnt;
  case SAMPLE_CNT: return Wait.SampleCnt;
  case BVH_CNT:    return Wait.BvhCnt;
  case KM_CNT:     return Wait.KmCnt;
  case X_CNT:      return Wait.XCnt;
  case VA_VDST:    return Wait.VaVdst;
  case VM_VSRC:    return Wait.VmVsrc;
  default:
    llvm_unreachable("bad InstCounterType");
  }
}

static void addWait(AMDGPU::Waitcnt &Wait, InstCounterType T, unsigned Count) {
  unsigned &WC = getCounterRef(Wait, T);
  WC = std::min(WC, Count);
}

static void setNoWait(AMDGPU::Waitcnt &Wait, InstCounterType T) {
  getCounterRef(Wait, T) = ~0u;
}

static unsigned getWait(AMDGPU::Waitcnt &Wait, InstCounterType T) {
  return getCounterRef(Wait, T);
}
static InstCounterType eventCounter(const unsigned *masks, WaitEventType E) {
  for (auto T : inst_counter_types()) {
    if (masks[T] & (1 << E))
      return T;
  }
  llvm_unreachable("event type has no associated counter");
}

class WaitcntBrackets;
class WaitcntGenerator {
protected:
  const GCNSubtarget *ST = nullptr;
  const SIInstrInfo *TII = nullptr;
  AMDGPU::IsaVersion IV;
  InstCounterType MaxCounter;
  bool OptNone;
  bool ExpandWaitcntProfiling = false;
  const AMDGPU::HardwareLimits *Limits = nullptr;

public:
  WaitcntGenerator() = default;
  WaitcntGenerator(const MachineFunction &MF, InstCounterType MaxCounter,
                   const AMDGPU::HardwareLimits *Limits)
      : ST(&MF.getSubtarget<GCNSubtarget>()), TII(ST->getInstrInfo()),
        IV(AMDGPU::getIsaVersion(ST->getCPU())), MaxCounter(MaxCounter),
        OptNone(MF.getFunction().hasOptNone() ||
                MF.getTarget().getOptLevel() == CodeGenOptLevel::None),
        ExpandWaitcntProfiling(
            MF.getFunction().hasFnAttribute("amdgpu-expand-waitcnt-profiling")),
        Limits(Limits) {}

  // Return true if the current function should be compiled with no
  // optimization.
  bool isOptNone() const { return OptNone; }

  const AMDGPU::HardwareLimits &getLimits() const { return *Limits; }

  // Edits an existing sequence of wait count instructions according to an
  // incoming Waitcnt value, which is itself updated to reflect any new wait
  // count instructions which may need to be generated by
  // WaitcntGenerator::createNewWaitcnt(). Returns true if any edits were made.
  virtual bool
  applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
                          MachineInstr &OldWaitcntInstr, AMDGPU::Waitcnt &Wait,
                          MachineBasicBlock::instr_iterator It) const = 0;

  // Transform a soft waitcnt into a normal one.
  bool promoteSoftWaitCnt(MachineInstr *Waitcnt) const;

  // Generates new wait count instructions according to the value of Wait,
  // returning true if any new instructions were created.
  virtual bool createNewWaitcnt(MachineBasicBlock &Block,
                                MachineBasicBlock::instr_iterator It,
                                AMDGPU::Waitcnt Wait,
                                const WaitcntBrackets &ScoreBrackets) = 0;

  // Returns an array of bit masks which can be used to map values in
  // WaitEventType to corresponding counter values in InstCounterType.
  virtual const unsigned *getWaitEventMask() const = 0;

  // Returns a new waitcnt with all counters except VScnt set to 0. If
  // IncludeVSCnt is true, VScnt is set to 0, otherwise it is set to ~0u.
  virtual AMDGPU::Waitcnt getAllZeroWaitcnt(bool IncludeVSCnt) const = 0;

  virtual ~WaitcntGenerator() = default;

  // Create a mask value from the initializer list of wait event types.
  static constexpr unsigned
  eventMask(std::initializer_list<WaitEventType> Events) {
    unsigned Mask = 0;
    for (auto &E : Events)
      Mask |= 1 << E;
    return Mask;
  }
};
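// Illustrative: eventMask is a constexpr fold, so
// eventMask({SMEM_ACCESS, LDS_ACCESS}) evaluates at compile time to
// (1 << SMEM_ACCESS) | (1 << LDS_ACCESS), which lets the per-counter event
// mask tables below be constant-initialized.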
class WaitcntGeneratorPreGFX12 : public WaitcntGenerator {
public:
  using WaitcntGenerator::WaitcntGenerator;

  bool
  applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
                          MachineInstr &OldWaitcntInstr, AMDGPU::Waitcnt &Wait,
                          MachineBasicBlock::instr_iterator It) const override;

  bool createNewWaitcnt(MachineBasicBlock &Block,
                        MachineBasicBlock::instr_iterator It,
                        AMDGPU::Waitcnt Wait,
                        const WaitcntBrackets &ScoreBrackets) override;

  const unsigned *getWaitEventMask() const override {
    assert(ST);

    static const unsigned WaitEventMaskForInstPreGFX12[NUM_INST_CNTS] = {
        eventMask(
            {VMEM_ACCESS, VMEM_SAMPLER_READ_ACCESS, VMEM_BVH_READ_ACCESS}),
        eventMask({SMEM_ACCESS, LDS_ACCESS, GDS_ACCESS, SQ_MESSAGE}),
        eventMask({EXP_GPR_LOCK, GDS_GPR_LOCK, VMW_GPR_LOCK, EXP_PARAM_ACCESS,
                   EXP_POS_ACCESS, EXP_LDS_ACCESS}),
        eventMask({VMEM_WRITE_ACCESS, SCRATCH_WRITE_ACCESS}),
        0,
        0,
        0,
        0,
        0,
        0};

    return WaitEventMaskForInstPreGFX12;
  }

  AMDGPU::Waitcnt getAllZeroWaitcnt(bool IncludeVSCnt) const override;
};
class WaitcntGeneratorGFX12Plus : public WaitcntGenerator {
protected:
  bool IsExpertMode = false;

public:
  WaitcntGeneratorGFX12Plus() = default;
  WaitcntGeneratorGFX12Plus(const MachineFunction &MF,
                            InstCounterType MaxCounter,
                            const AMDGPU::HardwareLimits *Limits,
                            bool IsExpertMode)
      : WaitcntGenerator(MF, MaxCounter, Limits), IsExpertMode(IsExpertMode) {}

  bool
  applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
                          MachineInstr &OldWaitcntInstr, AMDGPU::Waitcnt &Wait,
                          MachineBasicBlock::instr_iterator It) const override;

  bool createNewWaitcnt(MachineBasicBlock &Block,
                        MachineBasicBlock::instr_iterator It,
                        AMDGPU::Waitcnt Wait,
                        const WaitcntBrackets &ScoreBrackets) override;

  const unsigned *getWaitEventMask() const override {
    assert(ST);

    static const unsigned WaitEventMaskForInstGFX12Plus[NUM_INST_CNTS] = {
        eventMask({VMEM_ACCESS, GLOBAL_INV_ACCESS}),
        eventMask({LDS_ACCESS, GDS_ACCESS}),
        eventMask({EXP_GPR_LOCK, GDS_GPR_LOCK, VMW_GPR_LOCK, EXP_PARAM_ACCESS,
                   EXP_POS_ACCESS, EXP_LDS_ACCESS}),
        eventMask({VMEM_WRITE_ACCESS, SCRATCH_WRITE_ACCESS}),
        eventMask({VMEM_SAMPLER_READ_ACCESS}),
        eventMask({VMEM_BVH_READ_ACCESS}),
        eventMask({SMEM_ACCESS, SQ_MESSAGE, SCC_WRITE}),
        eventMask({VMEM_GROUP, SMEM_GROUP}),
        eventMask({VGPR_CSMACC_WRITE, VGPR_DPMACC_WRITE, VGPR_TRANS_WRITE,
                   VGPR_XDL_WRITE}),
        eventMask({VGPR_LDS_READ, VGPR_FLAT_READ, VGPR_VMEM_READ})};

    return WaitEventMaskForInstGFX12Plus;
  }

  AMDGPU::Waitcnt getAllZeroWaitcnt(bool IncludeVSCnt) const override;
};
struct PreheaderFlushFlags {
  bool FlushVmCnt = false;
  bool FlushDsCnt = false;
};
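// Flushing a counter in a loop preheader forces outstanding operations to
// complete before the loop is entered; the per-iteration waits inside the
// loop can then often be elided entirely, which is usually a net win when the
// loop reuses VGPRs written by loads issued outside of it.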
class SIInsertWaitcnts {
public:
  const GCNSubtarget *ST;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI = nullptr;

  InstCounterType SmemAccessCounter;
  InstCounterType MaxCounter;
  bool IsExpertMode = false;
  const unsigned *WaitEventMaskForInst;

  DenseMap<const Value *, MachineBasicBlock *> SLoadAddresses;
  DenseMap<MachineBasicBlock *, PreheaderFlushFlags> PreheadersToFlush;
  MachineLoopInfo *MLI;
  MachinePostDominatorTree *PDT;
  AliasAnalysis *AA = nullptr;

  struct BlockInfo {
    std::unique_ptr<WaitcntBrackets> Incoming;
    bool Dirty = true;
  };

  MapVector<MachineBasicBlock *, BlockInfo> BlockInfos;

  bool ForceEmitWaitcnt[NUM_INST_CNTS];

  // In any given run of this pass, WCG will point to one of these two
  // generator objects, which must have been re-initialised before use
  // from a value made using a subtarget constructor.
  WaitcntGeneratorPreGFX12 WCGPreGFX12;
  WaitcntGeneratorGFX12Plus WCGGFX12Plus;

  WaitcntGenerator *WCG = nullptr;

  // Remember call and return instructions for expert-mode bookkeeping.
  DenseSet<MachineInstr *> CallInsts;
  DenseSet<MachineInstr *> ReturnInsts;

  // S_ENDPGM instructions before which we should release VGPRs.
  DenseSet<MachineInstr *> ReleaseVGPRInsts;

  AMDGPU::HardwareLimits Limits;

  SIInsertWaitcnts(MachineLoopInfo *MLI, MachinePostDominatorTree *PDT,
                   AliasAnalysis *AA)
      : MLI(MLI), PDT(PDT), AA(AA) {
    // Keep the debug counters referenced even in release builds.
    (void)ForceExpCounter;
    (void)ForceLgkmCounter;
    (void)ForceVMCounter;
  }

  const AMDGPU::HardwareLimits &getLimits() const { return Limits; }
  PreheaderFlushFlags getPreheaderFlushFlags(MachineLoop *ML,
                                             const WaitcntBrackets &Brackets);
  PreheaderFlushFlags isPreheaderToFlush(MachineBasicBlock &MBB,
                                         const WaitcntBrackets &ScoreBrackets);
  bool isVMEMOrFlatVMEM(const MachineInstr &MI) const;
  bool isDSRead(const MachineInstr &MI) const;
  bool mayStoreIncrementingDSCNT(const MachineInstr &MI) const;
  bool run(MachineFunction &MF);
  void setForceEmitWaitcnt() {
    // For non-debug builds, ForceEmitWaitcnt has been initialized to false;
    // for debug builds, get the debug counter info and adjust if need be.
#ifndef NDEBUG
    if (DebugCounter::isCounterSet(ForceExpCounter) &&
        DebugCounter::shouldExecute(ForceExpCounter)) {
      ForceEmitWaitcnt[EXP_CNT] = true;
    } else {
      ForceEmitWaitcnt[EXP_CNT] = false;
    }

    if (DebugCounter::isCounterSet(ForceLgkmCounter) &&
        DebugCounter::shouldExecute(ForceLgkmCounter)) {
      ForceEmitWaitcnt[DS_CNT] = true;
      ForceEmitWaitcnt[KM_CNT] = true;
    } else {
      ForceEmitWaitcnt[DS_CNT] = false;
      ForceEmitWaitcnt[KM_CNT] = false;
    }

    if (DebugCounter::isCounterSet(ForceVMCounter) &&
        DebugCounter::shouldExecute(ForceVMCounter)) {
      ForceEmitWaitcnt[LOAD_CNT] = true;
      ForceEmitWaitcnt[SAMPLE_CNT] = true;
      ForceEmitWaitcnt[BVH_CNT] = true;
    } else {
      ForceEmitWaitcnt[LOAD_CNT] = false;
      ForceEmitWaitcnt[SAMPLE_CNT] = false;
      ForceEmitWaitcnt[BVH_CNT] = false;
    }

    ForceEmitWaitcnt[VA_VDST] = false;
    ForceEmitWaitcnt[VM_VSRC] = false;
#endif // NDEBUG
  }
  // Return the appropriate VMEM_*_ACCESS type for Inst, which must be a VMEM
  // or FLAT instruction.
  WaitEventType getVmemWaitEventType(const MachineInstr &Inst) const {
    switch (Inst.getOpcode()) {
    case AMDGPU::GLOBAL_INV:
      return GLOBAL_INV_ACCESS;
    // GLOBAL_WB and GLOBAL_WBINV are tracked like VMEM writes.
    case AMDGPU::GLOBAL_WB:
    case AMDGPU::GLOBAL_WBINV:
      return VMEM_WRITE_ACCESS;
    default:
      break;
    }

    // Maps VMEM access types to their corresponding WaitEventType.
    static const WaitEventType VmemReadMapping[NUM_VMEM_TYPES] = {
        VMEM_ACCESS, VMEM_SAMPLER_READ_ACCESS, VMEM_BVH_READ_ACCESS};

    // Writes (including non-returning atomics) are tracked on the store
    // counter; FLAT and SCRATCH instructions may access scratch.
    if (Inst.mayStore() &&
        (!Inst.mayLoad() || SIInstrInfo::isAtomicNoRet(Inst))) {
      if (TII->mayAccessScratch(Inst))
        return SCRATCH_WRITE_ACCESS;
      return VMEM_WRITE_ACCESS;
    }
    return VmemReadMapping[getVmemType(Inst)];
  }
  std::optional<WaitEventType>
  getExpertSchedulingEventType(const MachineInstr &Inst) const;

  bool isVmemAccess(const MachineInstr &MI) const;
  bool generateWaitcntInstBefore(MachineInstr &MI,
                                 WaitcntBrackets &ScoreBrackets,
                                 MachineInstr *OldWaitcntInstr,
                                 PreheaderFlushFlags FlushFlags);
  bool generateWaitcnt(AMDGPU::Waitcnt Wait,
                       MachineBasicBlock::instr_iterator It,
                       MachineBasicBlock &Block, WaitcntBrackets &ScoreBrackets,
                       MachineInstr *OldWaitcntInstr);
  void updateEventWaitcntAfter(MachineInstr &Inst,
                               WaitcntBrackets *ScoreBrackets);
  bool isNextENDPGM(MachineBasicBlock::instr_iterator It,
                    MachineBasicBlock *Block) const;
  bool insertForcedWaitAfter(MachineInstr &Inst, MachineBasicBlock &Block,
                             WaitcntBrackets &ScoreBrackets);
  bool insertWaitcntInBlock(MachineFunction &MF, MachineBasicBlock &Block,
                            WaitcntBrackets &ScoreBrackets);
  void setSchedulingMode(MachineBasicBlock &MBB,
                         MachineBasicBlock::instr_iterator It,
                         bool ExpertMode) const;
};
class WaitcntBrackets {
public:
  WaitcntBrackets(const SIInsertWaitcnts *Context) : Context(Context) {
    assert(Context->TRI->getNumRegUnits() < REGUNITS_END);
  }

  ~WaitcntBrackets() {
    // Diagnose tracking-map entries that were left behind with all-zero
    // scores instead of being purged.
    unsigned NumUnusedVmem = 0, NumUnusedSGPRs = 0;
    for (auto &[ID, Val] : VMem) {
      if (Val.empty())
        ++NumUnusedVmem;
    }
    for (auto &[ID, Val] : SGPRs) {
      if (Val.empty())
        ++NumUnusedSGPRs;
    }
    if (NumUnusedVmem || NumUnusedSGPRs) {
      errs() << "WaitcntBracket had unused entries at destruction time: "
             << NumUnusedVmem << " VMem and " << NumUnusedSGPRs
             << " SGPR unused entries\n";
    }
  }
  bool isSmemCounter(InstCounterType T) const {
    return T == Context->SmemAccessCounter || T == X_CNT;
  }

  unsigned getSgprScoresIdx(InstCounterType T) const {
    assert(isSmemCounter(T) && "Invalid SMEM counter");
    return T == X_CNT ? 1 : 0;
  }

  unsigned getScoreLB(InstCounterType T) const {
    assert(T < NUM_INST_CNTS);
    return ScoreLBs[T];
  }

  unsigned getScoreUB(InstCounterType T) const {
    assert(T < NUM_INST_CNTS);
    return ScoreUBs[T];
  }

  unsigned getScoreRange(InstCounterType T) const {
    return getScoreUB(T) - getScoreLB(T);
  }

  unsigned getSGPRScore(MCRegUnit RU, InstCounterType T) const {
    auto It = SGPRs.find(RU);
    return It != SGPRs.end() ? It->second.Scores[getSgprScoresIdx(T)] : 0;
  }

  unsigned getVMemScore(VMEMID TID, InstCounterType T) const {
    auto It = VMem.find(TID);
    return It != VMem.end() ? It->second.Scores[T] : 0;
  }
  bool counterOutOfOrder(InstCounterType T) const;
  void simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const {
    simplifyWaitcnt(Wait, Wait);
  }
  void simplifyWaitcnt(const AMDGPU::Waitcnt &CheckWait,
                       AMDGPU::Waitcnt &UpdateWait) const;
  void simplifyWaitcnt(InstCounterType T, unsigned &Count) const;
  void simplifyXcnt(const AMDGPU::Waitcnt &CheckWait,
                    AMDGPU::Waitcnt &UpdateWait) const;
  void simplifyVmVsrc(const AMDGPU::Waitcnt &CheckWait,
                      AMDGPU::Waitcnt &UpdateWait) const;

  void determineWaitForPhysReg(InstCounterType T, MCPhysReg Reg,
                               AMDGPU::Waitcnt &Wait) const;
  void determineWaitForLDSDMA(InstCounterType T, VMEMID TID,
                              AMDGPU::Waitcnt &Wait) const;
  void tryClearSCCWriteEvent(MachineInstr *Inst);

  void applyWaitcnt(const AMDGPU::Waitcnt &Wait);
  void applyWaitcnt(InstCounterType T, unsigned Count);
  void updateByEvent(WaitEventType E, MachineInstr &MI);

  unsigned hasPendingEvent() const { return PendingEvents; }
  unsigned hasPendingEvent(WaitEventType E) const {
    return PendingEvents & (1 << E);
  }

  unsigned hasPendingEvent(InstCounterType T) const {
    unsigned HasPending = PendingEvents & Context->WaitEventMaskForInst[T];
    assert((HasPending != 0) == (getScoreRange(T) != 0));
    return HasPending;
  }

  bool hasMixedPendingEvents(InstCounterType T) const {
    unsigned Events = hasPendingEvent(T);
    // Return true if more than one bit is set in Events.
    return Events & (Events - 1);
  }

  bool hasPendingFlat() const {
    return ((LastFlat[DS_CNT] > ScoreLBs[DS_CNT] &&
             LastFlat[DS_CNT] <= ScoreUBs[DS_CNT]) ||
            (LastFlat[LOAD_CNT] > ScoreLBs[LOAD_CNT] &&
             LastFlat[LOAD_CNT] <= ScoreUBs[LOAD_CNT]));
  }
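  // FLAT instructions can increment both the LGKM/DS and VM/LOAD counters,
  // and the two may retire in either order, so a pending FLAT is tracked
  // against both brackets. Illustrative consequence: after a flat_load, the
  // result must be assumed not ready until *both* counters have drained past
  // the recorded score.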
  void setPendingFlat() {
    LastFlat[LOAD_CNT] = ScoreUBs[LOAD_CNT];
    LastFlat[DS_CNT] = ScoreUBs[DS_CNT];
  }

  bool hasPendingGDS() const {
    return LastGDS > ScoreLBs[DS_CNT] && LastGDS <= ScoreUBs[DS_CNT];
  }

  unsigned getPendingGDSWait() const {
    return std::min(getScoreUB(DS_CNT) - LastGDS,
                    getWaitCountMax(Context->getLimits(), DS_CNT) - 1);
  }

  void setPendingGDS() { LastGDS = ScoreUBs[DS_CNT]; }

  // Return true if there might be pending writes to the register by VMEM
  // instructions with types different from V.
  bool hasOtherPendingVmemTypes(MCPhysReg Reg, VmemType V) const {
    for (MCRegUnit RU : regunits(Reg)) {
      auto It = VMem.find(toVMEMID(RU));
      if (It != VMem.end() && (It->second.VMEMTypes & ~(1 << V)))
        return true;
    }
    return false;
  }

  void clearVgprVmemTypes(MCPhysReg Reg) {
    for (MCRegUnit RU : regunits(Reg)) {
      if (auto It = VMem.find(toVMEMID(RU)); It != VMem.end()) {
        It->second.VMEMTypes = 0;
        if (It->second.empty())
          VMem.erase(It);
      }
    }
  }

  void setStateOnFunctionEntryOrReturn() {
    setScoreUB(STORE_CNT, getScoreUB(STORE_CNT) +
                              getWaitCountMax(Context->getLimits(), STORE_CNT));
    PendingEvents |= Context->WaitEventMaskForInst[STORE_CNT];
  }
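  // The widening above reflects that on function entry and after calls the
  // number of outstanding stores is unknown: the STORE_CNT bracket is grown
  // to its hardware maximum so any later wait on the counter conservatively
  // waits for 0.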
  ArrayRef<const MachineInstr *> getLDSDMAStores() const {
    return LDSDMAStores;
  }

  bool hasPointSampleAccel(const MachineInstr &MI) const;
  bool hasPointSamplePendingVmemTypes(const MachineInstr &MI,
                                      MCPhysReg Reg) const;

  void print(raw_ostream &) const;
  void dump() const { print(dbgs()); }

  bool merge(const WaitcntBrackets &Other);

  void purgeEmptyTrackingData();

private:
  struct MergeInfo {
    unsigned OldLB;
    unsigned OtherLB;
    unsigned MyShift;
    unsigned OtherShift;
  };

  void determineWaitForScore(InstCounterType T, unsigned Score,
                             AMDGPU::Waitcnt &Wait) const;
  static bool mergeScore(const MergeInfo &M, unsigned &Score,
                         unsigned OtherScore);

  // Return the register units of Reg to track, mapping 16-bit registers to
  // their 32-bit parent where the hardware writes full 32-bit VGPRs.
  iterator_range<MCRegUnitIterator> regunits(MCPhysReg Reg) const {
    assert(Reg != AMDGPU::SCC && "Shouldn't be used on SCC");
    if (!Context->TRI->isInAllocatableClass(Reg))
      return make_range(MCRegUnitIterator(), MCRegUnitIterator());
    const TargetRegisterClass *RC = Context->TRI->getPhysRegBaseClass(Reg);
    unsigned Size = Context->TRI->getRegSizeInBits(*RC);
    if (Size == 16 && Context->ST->hasD16Writes32BitVgpr())
      Reg = Context->TRI->get32BitRegister(Reg);
    return Context->TRI->regunits(Reg);
  }
  void setScoreLB(InstCounterType T, unsigned Val) {
    assert(T < NUM_INST_CNTS);
    ScoreLBs[T] = Val;
  }

  void setScoreUB(InstCounterType T, unsigned Val) {
    assert(T < NUM_INST_CNTS);
    ScoreUBs[T] = Val;

    if (T != EXP_CNT)
      return;

    if (getScoreRange(EXP_CNT) > getWaitCountMax(Context->getLimits(), EXP_CNT))
      ScoreLBs[EXP_CNT] =
          ScoreUBs[EXP_CNT] - getWaitCountMax(Context->getLimits(), EXP_CNT);
  }
  void setRegScore(MCPhysReg Reg, InstCounterType T, unsigned Val) {
    const SIRegisterInfo *TRI = Context->TRI;
    if (Reg == AMDGPU::SCC) {
      SCCScore = Val;
    } else if (TRI->isVectorRegister(*Context->MRI, Reg)) {
      for (MCRegUnit RU : regunits(Reg))
        VMem[toVMEMID(RU)].Scores[T] = Val;
    } else if (TRI->isSGPRReg(*Context->MRI, Reg)) {
      auto STy = getSgprScoresIdx(T);
      for (MCRegUnit RU : regunits(Reg))
        SGPRs[RU].Scores[STy] = Val;
    }
  }

  void setVMemScore(VMEMID TID, InstCounterType T, unsigned Val) {
    VMem[TID].Scores[T] = Val;
  }

  void setScoreByOperand(const MachineOperand &Op, InstCounterType CntTy,
                         unsigned Val);
  const SIInsertWaitcnts *Context;

  unsigned ScoreLBs[NUM_INST_CNTS] = {0};
  unsigned ScoreUBs[NUM_INST_CNTS] = {0};
  unsigned PendingEvents = 0;
  // Remember the last flat memory operation.
  unsigned LastFlat[NUM_INST_CNTS] = {0};
  // Remember the last GDS operation.
  unsigned LastGDS = 0;

  struct VMEMInfo {
    std::array<unsigned, NUM_INST_CNTS> Scores = {0};
    // Bitmask of the VmemTypes of VMEM instructions that might have a pending
    // write to this entry.
    unsigned VMEMTypes = 0;

    bool empty() const {
      return VMEMTypes == 0 &&
             llvm::all_of(Scores, [](unsigned S) { return S == 0; });
    }
  };

  struct SGPRInfo {
    std::array<unsigned, 2> Scores = {0};

    bool empty() const { return !Scores[0] && !Scores[1]; }
  };

  DenseMap<VMEMID, VMEMInfo> VMem;
  DenseMap<MCRegUnit, SGPRInfo> SGPRs;

  // Score for SCC.
  unsigned SCCScore = 0;
  // The unique instruction that has an SCC write pending, if there is one.
  const MachineInstr *PendingSCCWrite = nullptr;

  // Store representative LDS DMA operations for an AA check.
  SmallVector<const MachineInstr *> LDSDMAStores;
};
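// Score brackets (above) are a compressed event history: each counter keeps a
// lower bound (everything at or below has retired) and an upper bound (the
// score of the most recent event). A register's score records which event
// last touched it, so the wait needed for a register R on counter T is, in
// essence, UB(T) - score(R), clamped to the hardware maximum. Illustrative
// example: with UB(LOAD_CNT) = 7 and score(v0) = 5, two younger loads may
// still be in flight and "s_wait_loadcnt 2" suffices before reading v0.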
class SIInsertWaitcntsLegacy : public MachineFunctionPass {
public:
  static char ID;
  SIInsertWaitcntsLegacy() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI insert wait instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<MachineLoopInfoWrapperPass>();
    AU.addRequired<MachinePostDominatorTreeWrapperPass>();
    AU.addUsedIfAvailable<AAResultsWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

void WaitcntBrackets::setScoreByOperand(const MachineOperand &Op,
                                        InstCounterType CntTy,
                                        unsigned Score) {
  setRegScore(Op.getReg().asMCReg(), CntTy, Score);
}
bool WaitcntBrackets::hasPointSampleAccel(const MachineInstr &MI) const {
  if (!Context->ST->hasPointSampleAccel() || !SIInstrInfo::isMIMG(MI))
    return false;

  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
  const AMDGPU::MIMGBaseOpcodeInfo *BaseInfo =
      AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
  return BaseInfo->PointSampleAccel;
}

bool WaitcntBrackets::hasPointSamplePendingVmemTypes(const MachineInstr &MI,
                                                     MCPhysReg Reg) const {
  if (!hasPointSampleAccel(MI))
    return false;

  return hasOtherPendingVmemTypes(Reg, VMEM_NOSAMPLER);
}
void WaitcntBrackets::updateByEvent(WaitEventType E, MachineInstr &Inst) {
  InstCounterType T = eventCounter(Context->WaitEventMaskForInst, E);

  unsigned UB = getScoreUB(T);
  unsigned CurrScore = UB + 1;
  // Scores start at 1; a score of 0 means "no outstanding event".
  assert(CurrScore > 0);
  PendingEvents |= 1 << E;
  setScoreUB(T, CurrScore);

  const SIInstrInfo *TII = Context->TII;
  const SIRegisterInfo *TRI = Context->TRI;
  const MachineRegisterInfo *MRI = Context->MRI;
  if (T == EXP_CNT) {
    // Put score on the source vgprs. If this is a store, just use those
    // specific register(s).
    if (TII->isDS(Inst) && Inst.mayLoadOrStore()) {
      // All GDS operations must protect their address register (same as
      // export).
      if (const auto *AddrOp = TII->getNamedOperand(Inst, AMDGPU::OpName::addr))
        setScoreByOperand(*AddrOp, EXP_CNT, CurrScore);

      if (Inst.mayStore()) {
        if (const auto *Data0 =
                TII->getNamedOperand(Inst, AMDGPU::OpName::data0))
          setScoreByOperand(*Data0, EXP_CNT, CurrScore);
        if (const auto *Data1 =
                TII->getNamedOperand(Inst, AMDGPU::OpName::data1))
          setScoreByOperand(*Data1, EXP_CNT, CurrScore);
      } else if (SIInstrInfo::isAtomicRet(Inst) &&
                 Inst.getOpcode() != AMDGPU::DS_APPEND &&
                 Inst.getOpcode() != AMDGPU::DS_CONSUME &&
                 Inst.getOpcode() != AMDGPU::DS_ORDERED_COUNT) {
        for (const MachineOperand &Op : Inst.all_uses()) {
          if (TRI->isVectorRegister(*MRI, Op.getReg()))
            setScoreByOperand(Op, EXP_CNT, CurrScore);
        }
      }
    } else if (TII->isFLAT(Inst)) {
      if (Inst.mayStore()) {
        setScoreByOperand(*TII->getNamedOperand(Inst, AMDGPU::OpName::data),
                          EXP_CNT, CurrScore);
      } else if (SIInstrInfo::isAtomicRet(Inst)) {
        setScoreByOperand(*TII->getNamedOperand(Inst, AMDGPU::OpName::data),
                          EXP_CNT, CurrScore);
      }
    } else if (TII->isMIMG(Inst)) {
      if (Inst.mayStore()) {
        setScoreByOperand(Inst.getOperand(0), EXP_CNT, CurrScore);
      } else if (SIInstrInfo::isAtomicRet(Inst)) {
        setScoreByOperand(*TII->getNamedOperand(Inst, AMDGPU::OpName::data),
                          EXP_CNT, CurrScore);
      }
    } else if (TII->isMTBUF(Inst)) {
      if (Inst.mayStore())
        setScoreByOperand(Inst.getOperand(0), EXP_CNT, CurrScore);
    } else if (TII->isMUBUF(Inst)) {
      if (Inst.mayStore()) {
        setScoreByOperand(Inst.getOperand(0), EXP_CNT, CurrScore);
      } else if (SIInstrInfo::isAtomicRet(Inst)) {
        setScoreByOperand(*TII->getNamedOperand(Inst, AMDGPU::OpName::data),
                          EXP_CNT, CurrScore);
      }
    } else if (TII->isLDSDIR(Inst)) {
      // LDSDIR instructions attach the score to the destination.
      setScoreByOperand(*TII->getNamedOperand(Inst, AMDGPU::OpName::vdst),
                        EXP_CNT, CurrScore);
    } else {
      if (TII->isEXP(Inst)) {
        // For export the destination registers are really temps that
        // can be used as the actual source after export patching, so
        // we need to treat them like sources and set the EXP_CNT score.
        for (MachineOperand &DefMO : Inst.all_defs()) {
          if (TRI->isVGPR(*MRI, DefMO.getReg())) {
            setScoreByOperand(DefMO, EXP_CNT, CurrScore);
          }
        }
      }
      for (const MachineOperand &Op : Inst.all_uses()) {
        if (TRI->isVectorRegister(*MRI, Op.getReg()))
          setScoreByOperand(Op, EXP_CNT, CurrScore);
      }
    }
  } else if (T == X_CNT) {
    WaitEventType OtherEvent = E == SMEM_GROUP ? VMEM_GROUP : SMEM_GROUP;
    if (PendingEvents & (1 << OtherEvent)) {
      // Hardware inserts an implicit xcnt between interleaved
      // SMEM and VMEM operations. So there will never be
      // outstanding address translations for both SMEM and
      // VMEM at the same time.
      setScoreLB(T, getScoreUB(T) - 1);
      PendingEvents &= ~(1 << OtherEvent);
    }
    for (const MachineOperand &Op : Inst.all_uses())
      setScoreByOperand(Op, T, CurrScore);
  } else if (T == VA_VDST || T == VM_VSRC) {
    // Expert-mode counters: VA_VDST tracks VALU VGPR writes, VM_VSRC tracks
    // VGPR sources that VMEM may still read.
    for (const MachineOperand &Op : Inst.operands()) {
      if (!Op.isReg() || (T == VA_VDST && Op.isUse()) ||
          (T == VM_VSRC && Op.isDef()))
        continue;
      if (TRI->isVectorRegister(*MRI, Op.getReg()))
        setScoreByOperand(Op, T, CurrScore);
    }
  } else /* LOAD_CNT || DS_CNT || STORE_CNT || ... */ {
    // Match the score to the destination registers.
    for (const MachineOperand &Op : Inst.defs()) {
      if (T == LOAD_CNT || T == SAMPLE_CNT || T == BVH_CNT) {
        if (!TRI->isVectorRegister(*MRI, Op.getReg()))
          continue;
        if (updateVMCntOnly(Inst)) {
          // updateVMCntOnly should only leave us with VGPRs.
          VmemType V = getVmemType(Inst);
          unsigned char TypesMask = 1 << V;
          // If the instruction can have Point Sample Accel applied, we have
          // to flag this with another potential dependency.
          if (hasPointSampleAccel(Inst))
            TypesMask |= 1 << VMEM_NOSAMPLER;
          for (MCRegUnit RU : regunits(Op.getReg().asMCReg()))
            VMem[toVMEMID(RU)].VMEMTypes |= TypesMask;
        }
      }
      setScoreByOperand(Op, T, CurrScore);
    }
    if (Inst.mayStore() &&
        (TII->isDS(Inst) || TII->mayWriteLDSThroughDMA(Inst))) {
      // MUBUF and FLAT LDS DMA operations need a wait on vmcnt before LDS
      // written can be accessed. A load from LDS to VMEM does not need a wait.
      unsigned Slot = 0;
      for (const auto *MemOp : Inst.memoperands()) {
        if (!MemOp->isStore() ||
            MemOp->getAddrSpace() != AMDGPUAS::LOCAL_ADDRESS)
          continue;
        // Comparing just AA info does not guarantee memoperands are equal
        // in general, but this is so for LDS DMA in practice.
        auto AAI = MemOp->getAAInfo();
        // Alias-scope information is what lets us identify the original
        // memory object here; without it all LDS is one aliasing blob.
        if (!AAI || !AAI.Scope)
          break;
        for (unsigned I = 0, E = LDSDMAStores.size(); I != E && !Slot; ++I) {
          for (const auto *MemOp : LDSDMAStores[I]->memoperands()) {
            if (MemOp->isStore() && AAI == MemOp->getAAInfo()) {
              Slot = I + 1;
              break;
            }
          }
        }
        if (Slot)
          break;
        if (LDSDMAStores.size() < NUM_LDSDMA - 1) {
          LDSDMAStores.push_back(&Inst);
          Slot = LDSDMAStores.size();
        }
        break;
      }
      // Always score the general LDS DMA slot; score the specific slot too
      // when one was identified.
      setVMemScore(LDSDMA_BEGIN, T, CurrScore);
      if (Slot && Slot < NUM_LDSDMA)
        setVMemScore(LDSDMA_BEGIN + Slot, T, CurrScore);
    }
    if (E == SCC_WRITE) {
      setRegScore(AMDGPU::SCC, T, CurrScore);
      PendingSCCWrite = &Inst;
    }
  }
}
void WaitcntBrackets::print(raw_ostream &OS) const {
  const GCNSubtarget *ST = Context->ST;

  OS << '\n';
  for (auto T : inst_counter_types(Context->MaxCounter)) {
    unsigned SR = getScoreRange(T);

    switch (T) {
    case LOAD_CNT:
      OS << " " << (ST->hasExtendedWaitCounts() ? "LOAD" : "VM") << "_CNT("
         << SR << "):";
      break;
    case DS_CNT:
      OS << " " << (ST->hasExtendedWaitCounts() ? "DS" : "LGKM") << "_CNT("
         << SR << "):";
      break;
    case EXP_CNT:
      OS << " EXP_CNT(" << SR << "):";
      break;
    case STORE_CNT:
      OS << " " << (ST->hasExtendedWaitCounts() ? "STORE" : "VS") << "_CNT("
         << SR << "):";
      break;
    case SAMPLE_CNT:
      OS << " SAMPLE_CNT(" << SR << "):";
      break;
    case BVH_CNT:
      OS << " BVH_CNT(" << SR << "):";
      break;
    case KM_CNT:
      OS << " KM_CNT(" << SR << "):";
      break;
    case X_CNT:
      OS << " X_CNT(" << SR << "):";
      break;
    case VA_VDST:
      OS << " VA_VDST(" << SR << "): ";
      break;
    case VM_VSRC:
      OS << " VM_VSRC(" << SR << "): ";
      break;
    default:
      OS << " UNKNOWN(" << SR << "):";
      break;
    }

    if (SR != 0) {
      // Print vgpr scores.
      unsigned LB = getScoreLB(T);

      SmallVector<VMEMID, 8> SortedVMEMIDs(VMem.keys());
      sort(SortedVMEMIDs);

      for (auto ID : SortedVMEMIDs) {
        unsigned RegScore = VMem.at(ID).Scores[T];
        if (RegScore <= LB)
          continue;
        unsigned RelScore = RegScore - LB - 1;
        if (ID < REGUNITS_END) {
          OS << ' ' << RelScore << ":vRU" << ID;
        } else {
          assert(ID >= LDSDMA_BEGIN && ID < LDSDMA_END &&
                 "Unhandled/unexpected ID value!");
          OS << ' ' << RelScore << ":LDSDMA" << ID;
        }
      }

      // Also need to print sgpr scores for lgkm_cnt or xcnt.
      if (isSmemCounter(T)) {
        SmallVector<MCRegUnit, 8> SortedSMEMIDs(SGPRs.keys());
        sort(SortedSMEMIDs);

        for (auto ID : SortedSMEMIDs) {
          unsigned RegScore = SGPRs.at(ID).Scores[getSgprScoresIdx(T)];
          if (RegScore <= LB)
            continue;
          unsigned RelScore = RegScore - LB - 1;
          OS << ' ' << RelScore << ":sRU" << static_cast<unsigned>(ID);
        }
        if (T == KM_CNT && SCCScore > 0)
          OS << ' ' << SCCScore << ":scc";
      }
    }
    OS << '\n';
  }

  OS << "Pending Events: ";
  if (hasPendingEvent()) {
    ListSeparator LS;
    for (unsigned I = 0; I != NUM_WAIT_EVENTS; ++I) {
      if (hasPendingEvent((WaitEventType)I)) {
        OS << LS << WaitEventTypeName[I];
      }
    }
  } else {
    OS << "none";
  }
  OS << '\n';
}
void WaitcntBrackets::simplifyWaitcnt(const AMDGPU::Waitcnt &CheckWait,
                                      AMDGPU::Waitcnt &UpdateWait) const {
  simplifyWaitcnt(LOAD_CNT, UpdateWait.LoadCnt);
  simplifyWaitcnt(EXP_CNT, UpdateWait.ExpCnt);
  simplifyWaitcnt(DS_CNT, UpdateWait.DsCnt);
  simplifyWaitcnt(STORE_CNT, UpdateWait.StoreCnt);
  simplifyWaitcnt(SAMPLE_CNT, UpdateWait.SampleCnt);
  simplifyWaitcnt(BVH_CNT, UpdateWait.BvhCnt);
  simplifyWaitcnt(KM_CNT, UpdateWait.KmCnt);
  simplifyXcnt(CheckWait, UpdateWait);
  simplifyWaitcnt(VA_VDST, UpdateWait.VaVdst);
  simplifyVmVsrc(CheckWait, UpdateWait);
}

void WaitcntBrackets::simplifyWaitcnt(InstCounterType T,
                                      unsigned &Count) const {
  // The number of outstanding events of this counter type is the score
  // range; waiting for a count no smaller than that is a no-op.
  if (Count >= getScoreRange(T))
    Count = ~0u;
}

void WaitcntBrackets::simplifyVmVsrc(const AMDGPU::Waitcnt &CheckWait,
                                     AMDGPU::Waitcnt &UpdateWait) const {
  // A VM_VSRC wait is covered when every VMEM-related counter is already
  // being waited to a bound at least as tight, since draining those counters
  // also drains the VGPR sources that VMEM could still read.
  if (CheckWait.VmVsrc >=
      std::min({CheckWait.LoadCnt, CheckWait.StoreCnt, CheckWait.SampleCnt,
                CheckWait.BvhCnt, CheckWait.DsCnt}))
    UpdateWait.VmVsrc = ~0u;
  simplifyWaitcnt(VM_VSRC, UpdateWait.VmVsrc);
}
void WaitcntBrackets::purgeEmptyTrackingData() {
  // Drop tracking-map entries whose scores have all retired.
  for (auto It = VMem.begin(); It != VMem.end();) {
    auto Cur = It++;
    if (Cur->second.empty())
      VMem.erase(Cur);
  }
  for (auto It = SGPRs.begin(); It != SGPRs.end();) {
    auto Cur = It++;
    if (Cur->second.empty())
      SGPRs.erase(Cur);
  }
}
void WaitcntBrackets::determineWaitForScore(InstCounterType T,
                                            unsigned ScoreToWait,
                                            AMDGPU::Waitcnt &Wait) const {
  const unsigned LB = getScoreLB(T);
  const unsigned UB = getScoreUB(T);

  // If the score falls within the bracket, we need an s_waitcnt instruction.
  if ((UB >= ScoreToWait) && (ScoreToWait > LB)) {
    if ((T == LOAD_CNT || T == DS_CNT) && hasPendingFlat() &&
        !Context->ST->hasFlatLgkmVMemCountInOrder()) {
      // If there is a pending FLAT operation, and this is a VMem or LGKM
      // waitcnt and the target can report early completion, then we need
      // to force a waitcnt 0.
      addWait(Wait, T, 0);
    } else if (counterOutOfOrder(T)) {
      // Counter can get decremented out-of-order when there
      // are multiple types of events in the bracket. Also emit an s_wait
      // counter with a conservative value of 0 for the counter.
      addWait(Wait, T, 0);
    } else {
      // If a counter has been maxed out avoid overflow by waiting for
      // MAX(CounterType) - 1 instead.
      unsigned NeededWait = std::min(
          UB - ScoreToWait, getWaitCountMax(Context->getLimits(), T) - 1);
      addWait(Wait, T, NeededWait);
    }
  }
}
void WaitcntBrackets::determineWaitForPhysReg(InstCounterType T, MCPhysReg Reg,
                                              AMDGPU::Waitcnt &Wait) const {
  if (Reg == AMDGPU::SCC) {
    determineWaitForScore(T, SCCScore, Wait);
    return;
  }
  const bool IsVGPR = Context->TRI->isVectorRegister(*Context->MRI, Reg);
  for (MCRegUnit RU : regunits(Reg))
    determineWaitForScore(
        T, IsVGPR ? getVMemScore(toVMEMID(RU), T) : getSGPRScore(RU, T), Wait);
}

void WaitcntBrackets::determineWaitForLDSDMA(InstCounterType T, VMEMID TID,
                                             AMDGPU::Waitcnt &Wait) const {
  assert(TID >= LDSDMA_BEGIN && TID < LDSDMA_END);
  determineWaitForScore(T, getVMemScore(TID, T), Wait);
}
void WaitcntBrackets::tryClearSCCWriteEvent(MachineInstr *Inst) {
  // An S_BARRIER_WAIT on the same barrier guarantees that the SCC write of
  // the matching S_BARRIER_SIGNAL_ISFIRST has landed. (Assumption: barriers
  // are matched by comparing the immediate barrier-id operands.)
  if (PendingSCCWrite &&
      PendingSCCWrite->getOpcode() == AMDGPU::S_BARRIER_SIGNAL_ISFIRST_IMM &&
      PendingSCCWrite->getOperand(0).getImm() ==
          Inst->getOperand(0).getImm()) {
    unsigned SCC_WRITE_PendingEvent = 1 << SCC_WRITE;
    // If this SCC_WRITE is the only pending KM_CNT event, clear the bracket.
    if ((PendingEvents & Context->WaitEventMaskForInst[KM_CNT]) ==
        SCC_WRITE_PendingEvent) {
      setScoreLB(KM_CNT, getScoreUB(KM_CNT));
    }
    PendingEvents &= ~SCC_WRITE_PendingEvent;
    PendingSCCWrite = nullptr;
  }
}
void WaitcntBrackets::applyWaitcnt(const AMDGPU::Waitcnt &Wait) {
  applyWaitcnt(LOAD_CNT, Wait.LoadCnt);
  applyWaitcnt(EXP_CNT, Wait.ExpCnt);
  applyWaitcnt(DS_CNT, Wait.DsCnt);
  applyWaitcnt(STORE_CNT, Wait.StoreCnt);
  applyWaitcnt(SAMPLE_CNT, Wait.SampleCnt);
  applyWaitcnt(BVH_CNT, Wait.BvhCnt);
  applyWaitcnt(KM_CNT, Wait.KmCnt);
  applyWaitcnt(X_CNT, Wait.XCnt);
  applyWaitcnt(VA_VDST, Wait.VaVdst);
  applyWaitcnt(VM_VSRC, Wait.VmVsrc);
}

void WaitcntBrackets::applyWaitcnt(InstCounterType T, unsigned Count) {
  const unsigned UB = getScoreUB(T);
  if (Count >= UB)
    return;
  if (Count != 0) {
    if (counterOutOfOrder(T))
      return;
    setScoreLB(T, std::max(getScoreLB(T), UB - Count));
  } else {
    setScoreLB(T, UB);
    PendingEvents &= ~Context->WaitEventMaskForInst[T];
  }

  // A full kmcnt wait also retires any pending SMEM_GROUP, which can in turn
  // release XCNT.
  if (T == KM_CNT && Count == 0 && hasPendingEvent(SMEM_GROUP)) {
    if (!hasMixedPendingEvents(X_CNT))
      applyWaitcnt(X_CNT, 0);
    else
      PendingEvents &= ~(1 << SMEM_GROUP);
  }
  // Likewise, a loadcnt wait covers pending VMEM_GROUP events when no stores
  // are outstanding.
  if (T == LOAD_CNT && hasPendingEvent(VMEM_GROUP) &&
      !hasPendingEvent(STORE_CNT)) {
    if (!hasMixedPendingEvents(X_CNT))
      applyWaitcnt(X_CNT, Count);
    else if (Count == 0)
      PendingEvents &= ~(1 << VMEM_GROUP);
  }
}
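// Note: an out-of-order counter cannot be tightened by a partial wait,
// because a nonzero count does not identify *which* events have retired;
// only a wait to 0 yields usable information, which is why applyWaitcnt
// bails out above for nonzero counts on out-of-order counters.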
void WaitcntBrackets::simplifyXcnt(const AMDGPU::Waitcnt &CheckWait,
                                   AMDGPU::Waitcnt &UpdateWait) const {
  // Waiting for KMcnt to reach 0 also retires all pending SMEM address
  // translations, so an explicit xcnt wait is redundant.
  if (CheckWait.KmCnt == 0 && hasPendingEvent(SMEM_GROUP))
    UpdateWait.XCnt = ~0u;
  // Similarly, a loadcnt wait covers the VMEM side as long as there are no
  // outstanding stores and the xcnt bound is no tighter than the loadcnt one.
  if (CheckWait.LoadCnt != ~0u && hasPendingEvent(VMEM_GROUP) &&
      !hasPendingEvent(STORE_CNT) && CheckWait.XCnt >= CheckWait.LoadCnt)
    UpdateWait.XCnt = ~0u;
  simplifyWaitcnt(X_CNT, UpdateWait.XCnt);
}
// Where there are multiple types of event in the bracket of a counter,
// the decrement may go out of order.
bool WaitcntBrackets::counterOutOfOrder(InstCounterType T) const {
  // Scalar memory reads can always complete out of order.
  if ((T == Context->SmemAccessCounter && hasPendingEvent(SMEM_ACCESS)) ||
      (T == X_CNT && hasPendingEvent(SMEM_GROUP)))
    return true;

  // GLOBAL_INV completes in order with other LOAD_CNT events, so its
  // presence alone does not make the counter out of order.
  if (T == LOAD_CNT) {
    unsigned Events = hasPendingEvent(T);
    Events &= ~(1 << GLOBAL_INV_ACCESS);
    return Events & (Events - 1);
  }

  return hasMixedPendingEvents(T);
}
char SIInsertWaitcntsLegacy::ID = 0;

FunctionPass *llvm::createSIInsertWaitcntsPass() {
  return new SIInsertWaitcntsLegacy();
}

static bool updateOperandIfDifferent(MachineInstr &MI, AMDGPU::OpName OpName,
                                     unsigned NewEnc) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpName);
  assert(OpIdx >= 0);

  MachineOperand &MO = MI.getOperand(OpIdx);
  if (NewEnc == MO.getImm())
    return false;

  MO.setImm(NewEnc);
  return true;
}

/// Determine if \p Opcode is a gfx12+ single-counter S_WAIT_*CNT instruction,
/// and if so, which counter it waits on.
static std::optional<InstCounterType> counterTypeForInstr(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::S_WAIT_LOADCNT:
    return LOAD_CNT;
  case AMDGPU::S_WAIT_EXPCNT:
    return EXP_CNT;
  case AMDGPU::S_WAIT_STORECNT:
    return STORE_CNT;
  case AMDGPU::S_WAIT_SAMPLECNT:
    return SAMPLE_CNT;
  case AMDGPU::S_WAIT_BVHCNT:
    return BVH_CNT;
  case AMDGPU::S_WAIT_DSCNT:
    return DS_CNT;
  case AMDGPU::S_WAIT_KMCNT:
    return KM_CNT;
  case AMDGPU::S_WAIT_XCNT:
    return X_CNT;
  default:
    return {};
  }
}

bool WaitcntGenerator::promoteSoftWaitCnt(MachineInstr *Waitcnt) const {
  unsigned Opcode = SIInstrInfo::getNonSoftWaitcntOpcode(Waitcnt->getOpcode());
  if (Opcode == Waitcnt->getOpcode())
    return false;

  Waitcnt->setDesc(TII->get(Opcode));
  return true;
}
/// Combine consecutive S_WAITCNT and S_WAITCNT_VSCNT instructions that
/// precede \p It and follow \p OldWaitcntInstr and apply any extra waits
/// from \p Wait that were added by previous passes. Currently this pass
/// conservatively assumes that these preexisting waits are required for
/// correctness.
bool WaitcntGeneratorPreGFX12::applyPreexistingWaitcnt(
    WaitcntBrackets &ScoreBrackets, MachineInstr &OldWaitcntInstr,
    AMDGPU::Waitcnt &Wait, MachineBasicBlock::instr_iterator It) const {
  assert(ST);
  assert(isNormalMode(MaxCounter));

  bool Modified = false;
  MachineInstr *WaitcntInstr = nullptr;
  MachineInstr *WaitcntVsCntInstr = nullptr;

  LLVM_DEBUG({
    dbgs() << "PreGFX12::applyPreexistingWaitcnt at: ";
    if (It == OldWaitcntInstr.getParent()->instr_end())
      dbgs() << "end of block\n";
    else
      dbgs() << *It;
  });

  for (auto &II :
       make_early_inc_range(make_range(OldWaitcntInstr.getIterator(), It))) {
    if (II.isMetaInstruction()) {
      continue;
    }

    unsigned Opcode = SIInstrInfo::getNonSoftWaitcntOpcode(II.getOpcode());
    bool TrySimplify = Opcode != II.getOpcode() && !OptNone;

    // Update required wait count. If this is a soft waitcnt (= it was added
    // by an earlier pass), it may be entirely removed.
    if (Opcode == AMDGPU::S_WAITCNT) {
      unsigned IEnc = II.getOperand(0).getImm();
      AMDGPU::Waitcnt OldWait = AMDGPU::decodeWaitcnt(IV, IEnc);
      if (TrySimplify)
        ScoreBrackets.simplifyWaitcnt(OldWait);
      Wait = Wait.combined(OldWait);

      // Merge consecutive waitcnt of the same type by erasing multiples.
      if (WaitcntInstr || (!Wait.hasWaitExceptStoreCnt() && TrySimplify)) {
        II.eraseFromParent();
        Modified = true;
      } else
        WaitcntInstr = &II;
    } else if (Opcode == AMDGPU::S_WAITCNT_lds_direct) {
      LLVM_DEBUG(dbgs() << "Processing S_WAITCNT_lds_direct: " << II
                        << "Before: " << Wait << '\n';);
      ScoreBrackets.determineWaitForLDSDMA(LOAD_CNT, LDSDMA_BEGIN, Wait);
      LLVM_DEBUG(dbgs() << "After: " << Wait << '\n';);
      // The wait it required has been folded into Wait; the placeholder can
      // be removed.
      II.eraseFromParent();
      Modified = true;
    } else {
      assert(Opcode == AMDGPU::S_WAITCNT_VSCNT);
      assert(II.getOperand(0).getReg() == AMDGPU::SGPR_NULL);

      unsigned OldVSCnt =
          TII->getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
      if (TrySimplify)
        ScoreBrackets.simplifyWaitcnt(InstCounterType::STORE_CNT, OldVSCnt);
      Wait.StoreCnt = std::min(Wait.StoreCnt, OldVSCnt);

      if (WaitcntVsCntInstr || (!Wait.hasWaitStoreCnt() && TrySimplify)) {
        II.eraseFromParent();
        Modified = true;
      } else
        WaitcntVsCntInstr = &II;
    }
  }

  if (WaitcntInstr) {
    Modified |= updateOperandIfDifferent(*WaitcntInstr, AMDGPU::OpName::simm16,
                                         AMDGPU::encodeWaitcnt(IV, Wait));
    Modified |= promoteSoftWaitCnt(WaitcntInstr);

    ScoreBrackets.applyWaitcnt(LOAD_CNT, Wait.LoadCnt);
    ScoreBrackets.applyWaitcnt(EXP_CNT, Wait.ExpCnt);
    ScoreBrackets.applyWaitcnt(DS_CNT, Wait.DsCnt);
    Wait.LoadCnt = ~0u;
    Wait.ExpCnt = ~0u;
    Wait.DsCnt = ~0u;

    LLVM_DEBUG(It.isEnd() ? dbgs() << "applied pre-existing waitcnt\n"
                                   << "New Instr at block end: "
                                   << *WaitcntInstr << '\n'
                          : dbgs() << "applied pre-existing waitcnt\n"
                                   << "Old Instr: " << *It
                                   << "New Instr: " << *WaitcntInstr << '\n');
  }

  if (WaitcntVsCntInstr) {
    Modified |= updateOperandIfDifferent(*WaitcntVsCntInstr,
                                         AMDGPU::OpName::simm16, Wait.StoreCnt);
    Modified |= promoteSoftWaitCnt(WaitcntVsCntInstr);

    ScoreBrackets.applyWaitcnt(STORE_CNT, Wait.StoreCnt);
    Wait.StoreCnt = ~0u;

    LLVM_DEBUG(It.isEnd()
                   ? dbgs() << "applied pre-existing waitcnt\n"
                            << "New Instr at block end: " << *WaitcntVsCntInstr
                            << '\n'
                   : dbgs() << "applied pre-existing waitcnt\n"
                            << "Old Instr: " << *It
                            << "New Instr: " << *WaitcntVsCntInstr << '\n');
  }

  return Modified;
}
/// Generate S_WAITCNT and/or S_WAITCNT_VSCNT instructions for any
/// required counters in \p Wait.
bool WaitcntGeneratorPreGFX12::createNewWaitcnt(
    MachineBasicBlock &Block, MachineBasicBlock::instr_iterator It,
    AMDGPU::Waitcnt Wait, const WaitcntBrackets &ScoreBrackets) {
  assert(ST);
  assert(isNormalMode(MaxCounter));

  bool Modified = false;
  const DebugLoc &DL = Block.findDebugLoc(It);

  // Emit a decreasing staircase of waits from Outstanding-1 down to Target,
  // used for waitcnt profiling.
  auto EmitExpandedWaitcnt = [&](unsigned Outstanding, unsigned Target,
                                 auto EmitWaitcnt) {
    do {
      EmitWaitcnt(--Outstanding);
    } while (Outstanding > Target);
  };

  // Waits for VMcnt, LGKMcnt and/or EXPcnt are encoded together into a
  // single instruction while VScnt has its own instruction.
  if (Wait.hasWaitExceptStoreCnt()) {
    if (ExpandWaitcntProfiling) {
      // In profiling mode, emit a separate waitcnt for each intermediate
      // count, unless one of the requested counters may decrement out of
      // order, in which case the expansion would be meaningless.
      bool AnyOutOfOrder = false;
      for (auto CT : {LOAD_CNT, DS_CNT, EXP_CNT}) {
        unsigned &WaitCnt = getCounterRef(Wait, CT);
        if (WaitCnt != ~0u && ScoreBrackets.counterOutOfOrder(CT)) {
          AnyOutOfOrder = true;
          break;
        }
      }
      if (!AnyOutOfOrder) {
        for (auto CT : {LOAD_CNT, DS_CNT, EXP_CNT}) {
          unsigned &WaitCnt = getCounterRef(Wait, CT);
          if (WaitCnt == ~0u)
            continue;
          unsigned Outstanding = std::min(ScoreBrackets.getScoreUB(CT) -
                                              ScoreBrackets.getScoreLB(CT),
                                          getWaitCountMax(getLimits(), CT) - 1);
          EmitExpandedWaitcnt(Outstanding, WaitCnt, [&](unsigned Count) {
            AMDGPU::Waitcnt W;
            getCounterRef(W, CT) = Count;
            BuildMI(Block, It, DL, TII->get(AMDGPU::S_WAITCNT))
                .addImm(AMDGPU::encodeWaitcnt(IV, W));
          });
          Modified = true;
        }
      }
    }

    unsigned Enc = AMDGPU::encodeWaitcnt(IV, Wait);
    [[maybe_unused]] auto SWaitInst =
        BuildMI(Block, It, DL, TII->get(AMDGPU::S_WAITCNT)).addImm(Enc);
    Modified = true;

    LLVM_DEBUG(dbgs() << "generateWaitcnt\n";
               if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
               dbgs() << "New Instr: " << *SWaitInst << '\n');
  }

  if (Wait.hasWaitStoreCnt()) {
    assert(ST->hasVscnt());

    if (ExpandWaitcntProfiling && Wait.StoreCnt != ~0u &&
        !ScoreBrackets.counterOutOfOrder(STORE_CNT)) {
      unsigned Outstanding =
          std::min(ScoreBrackets.getScoreUB(STORE_CNT) -
                       ScoreBrackets.getScoreLB(STORE_CNT),
                   getWaitCountMax(getLimits(), STORE_CNT) - 1);
      EmitExpandedWaitcnt(Outstanding, Wait.StoreCnt, [&](unsigned Count) {
        BuildMI(Block, It, DL, TII->get(AMDGPU::S_WAITCNT_VSCNT))
            .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
            .addImm(Count);
      });
      Modified = true;
    }

    [[maybe_unused]] auto SWaitInst =
        BuildMI(Block, It, DL, TII->get(AMDGPU::S_WAITCNT_VSCNT))
            .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
            .addImm(Wait.StoreCnt);
    Modified = true;

    LLVM_DEBUG(dbgs() << "generateWaitcnt\n";
               if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
               dbgs() << "New Instr: " << *SWaitInst << '\n');
  }

  return Modified;
}
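// Illustrative example of the profiling expansion above: with 4 outstanding
// loads and a required wait of vmcnt(1), instead of a single
// "s_waitcnt vmcnt(1)" the generator emits
//   s_waitcnt vmcnt(3)
//   s_waitcnt vmcnt(2)
//   s_waitcnt vmcnt(1)
// so a profiler can attribute stall time to each retiring load.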
AMDGPU::Waitcnt
WaitcntGeneratorPreGFX12::getAllZeroWaitcnt(bool IncludeVSCnt) const {
  return AMDGPU::Waitcnt(0, 0, 0, IncludeVSCnt && ST->hasVscnt() ? 0 : ~0u);
}

AMDGPU::Waitcnt
WaitcntGeneratorGFX12Plus::getAllZeroWaitcnt(bool IncludeVSCnt) const {
  unsigned ExpertVal = IsExpertMode ? 0 : ~0u;
  return AMDGPU::Waitcnt(0, 0, 0, IncludeVSCnt ? 0 : ~0u, 0, 0, 0,
                         ~0u /*XCnt*/, ExpertVal, ExpertVal);
}
/// Combine consecutive S_WAIT_*CNT instructions that precede \p It and
/// follow \p OldWaitcntInstr and apply any extra waits from \p Wait that
/// were added by previous passes. Currently this pass conservatively
/// assumes that these preexisting waits are required for correctness.
bool WaitcntGeneratorGFX12Plus::applyPreexistingWaitcnt(
    WaitcntBrackets &ScoreBrackets, MachineInstr &OldWaitcntInstr,
    AMDGPU::Waitcnt &Wait, MachineBasicBlock::instr_iterator It) const {
  assert(ST);
  assert(!isNormalMode(MaxCounter));

  bool Modified = false;
  MachineInstr *CombinedLoadDsCntInstr = nullptr;
  MachineInstr *CombinedStoreDsCntInstr = nullptr;
  MachineInstr *WaitcntDepctrInstr = nullptr;
  MachineInstr *WaitInstrs[NUM_EXTENDED_INST_CNTS] = {};

  LLVM_DEBUG({
    dbgs() << "GFX12Plus::applyPreexistingWaitcnt at: ";
    if (It == OldWaitcntInstr.getParent()->instr_end())
      dbgs() << "end of block\n";
    else
      dbgs() << *It;
  });

  // Waits which are not simplifiable are considered required and must be
  // reflected in the score brackets even if the instructions are combined.
  AMDGPU::Waitcnt RequiredWait;

  for (auto &II :
       make_early_inc_range(make_range(OldWaitcntInstr.getIterator(), It))) {
    if (II.isMetaInstruction()) {
      continue;
    }

    MachineInstr **UpdatableInstr;

    // Update required wait count. If this is a soft waitcnt (= it was added
    // by an earlier pass), it may be entirely removed.
    unsigned Opcode = SIInstrInfo::getNonSoftWaitcntOpcode(II.getOpcode());
    bool TrySimplify = Opcode != II.getOpcode() && !OptNone;

    // Don't crash if the programmer used legacy waitcnt intrinsics, but don't
    // attempt to do more than that either.
    if (Opcode == AMDGPU::S_WAITCNT)
      continue;

    if (Opcode == AMDGPU::S_WAIT_LOADCNT_DSCNT) {
      unsigned OldEnc =
          TII->getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
      AMDGPU::Waitcnt OldWait = AMDGPU::decodeLoadcntDscnt(IV, OldEnc);
      if (TrySimplify)
        ScoreBrackets.simplifyWaitcnt(OldWait);
      Wait = Wait.combined(OldWait);
      if (!TrySimplify)
        RequiredWait = RequiredWait.combined(OldWait);
      UpdatableInstr = &CombinedLoadDsCntInstr;
    } else if (Opcode == AMDGPU::S_WAIT_STORECNT_DSCNT) {
      unsigned OldEnc =
          TII->getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
      AMDGPU::Waitcnt OldWait = AMDGPU::decodeStorecntDscnt(IV, OldEnc);
      if (TrySimplify)
        ScoreBrackets.simplifyWaitcnt(OldWait);
      Wait = Wait.combined(OldWait);
      if (!TrySimplify)
        RequiredWait = RequiredWait.combined(OldWait);
      UpdatableInstr = &CombinedStoreDsCntInstr;
    } else if (Opcode == AMDGPU::S_WAITCNT_DEPCTR) {
      unsigned OldEnc =
          TII->getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
      AMDGPU::Waitcnt OldWait;
      OldWait.VaVdst = AMDGPU::DepCtr::decodeFieldVaVdst(OldEnc);
      OldWait.VmVsrc = AMDGPU::DepCtr::decodeFieldVmVsrc(OldEnc);
      if (TrySimplify)
        ScoreBrackets.simplifyWaitcnt(OldWait);
      Wait = Wait.combined(OldWait);
      UpdatableInstr = &WaitcntDepctrInstr;
    } else if (Opcode == AMDGPU::S_WAITCNT_lds_direct) {
      // Architectures with extended wait counts do not use this placeholder.
      II.eraseFromParent();
      Modified = true;
      continue;
    } else {
      std::optional<InstCounterType> CT = counterTypeForInstr(Opcode);
      assert(CT.has_value());
      unsigned OldCnt =
          TII->getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
      if (TrySimplify)
        ScoreBrackets.simplifyWaitcnt(CT.value(), OldCnt);
      addWait(Wait, CT.value(), OldCnt);
      if (!TrySimplify)
        addWait(RequiredWait, CT.value(), OldCnt);
      UpdatableInstr = &WaitInstrs[CT.value()];
    }

    // Merge consecutive waitcnt of the same type by erasing multiples.
    if (!*UpdatableInstr) {
      *UpdatableInstr = &II;
    } else {
      // The wait encoded by this duplicate has already been folded into Wait
      // above; the surviving instruction is re-encoded from Wait below.
      II.eraseFromParent();
      Modified = true;
    }
  }

  ScoreBrackets.simplifyWaitcnt(Wait.combined(RequiredWait), Wait);
  Wait = Wait.combined(RequiredWait);

  if (CombinedLoadDsCntInstr) {
    // Only keep an S_WAIT_LOADCNT_DSCNT if both counters actually need
    // waiting on; otherwise let the instruction be deleted so the
    // appropriate single-counter wait can be generated instead.
    if (Wait.LoadCnt != ~0u && Wait.DsCnt != ~0u) {
      unsigned NewEnc = AMDGPU::encodeLoadcntDscnt(IV, Wait);
      Modified |= updateOperandIfDifferent(*CombinedLoadDsCntInstr,
                                           AMDGPU::OpName::simm16, NewEnc);
      Modified |= promoteSoftWaitCnt(CombinedLoadDsCntInstr);
      ScoreBrackets.applyWaitcnt(LOAD_CNT, Wait.LoadCnt);
      ScoreBrackets.applyWaitcnt(DS_CNT, Wait.DsCnt);
      Wait.LoadCnt = ~0u;
      Wait.DsCnt = ~0u;

      LLVM_DEBUG(It.isEnd() ? dbgs() << "applied pre-existing waitcnt\n"
                                     << "New Instr at block end: "
                                     << *CombinedLoadDsCntInstr << '\n'
                            : dbgs() << "applied pre-existing waitcnt\n"
                                     << "Old Instr: " << *It << "New Instr: "
                                     << *CombinedLoadDsCntInstr << '\n');
    } else {
      CombinedLoadDsCntInstr->eraseFromParent();
      Modified = true;
    }
  }

  if (CombinedStoreDsCntInstr) {
    // Similarly for S_WAIT_STORECNT_DSCNT.
    if (Wait.StoreCnt != ~0u && Wait.DsCnt != ~0u) {
      unsigned NewEnc = AMDGPU::encodeStorecntDscnt(IV, Wait);
      Modified |= updateOperandIfDifferent(*CombinedStoreDsCntInstr,
                                           AMDGPU::OpName::simm16, NewEnc);
      Modified |= promoteSoftWaitCnt(CombinedStoreDsCntInstr);
      ScoreBrackets.applyWaitcnt(STORE_CNT, Wait.StoreCnt);
      ScoreBrackets.applyWaitcnt(DS_CNT, Wait.DsCnt);
      Wait.StoreCnt = ~0u;
      Wait.DsCnt = ~0u;

      LLVM_DEBUG(It.isEnd() ? dbgs() << "applied pre-existing waitcnt\n"
                                     << "New Instr at block end: "
                                     << *CombinedStoreDsCntInstr << '\n'
                            : dbgs() << "applied pre-existing waitcnt\n"
                                     << "Old Instr: " << *It << "New Instr: "
                                     << *CombinedStoreDsCntInstr << '\n');
    } else {
      CombinedStoreDsCntInstr->eraseFromParent();
      Modified = true;
    }
  }

  // Look for an opportunity to convert existing S_WAIT_DSCNT into an
  // S_WAIT_LOADCNT_DSCNT or S_WAIT_STORECNT_DSCNT, erasing the singles in
  // favour of a combined wait emitted by createNewWaitcnt.
  SmallVector<MachineInstr **, 2> WaitsToErase;
  if (Wait.DsCnt != ~0u) {
    if (Wait.LoadCnt != ~0u) {
      WaitsToErase.push_back(&WaitInstrs[LOAD_CNT]);
      WaitsToErase.push_back(&WaitInstrs[DS_CNT]);
    } else if (Wait.StoreCnt != ~0u) {
      WaitsToErase.push_back(&WaitInstrs[STORE_CNT]);
      WaitsToErase.push_back(&WaitInstrs[DS_CNT]);
    }
  }

  for (MachineInstr **WI : WaitsToErase) {
    if (!*WI)
      continue;
    (*WI)->eraseFromParent();
    *WI = nullptr;
    Modified = true;
  }

  for (auto CT : inst_counter_types(NUM_EXTENDED_INST_CNTS)) {
    if (!WaitInstrs[CT])
      continue;

    unsigned NewCnt = getWait(Wait, CT);
    if (NewCnt != ~0u) {
      Modified |= updateOperandIfDifferent(*WaitInstrs[CT],
                                           AMDGPU::OpName::simm16, NewCnt);
      Modified |= promoteSoftWaitCnt(WaitInstrs[CT]);

      ScoreBrackets.applyWaitcnt(CT, NewCnt);
      setNoWait(Wait, CT);

      LLVM_DEBUG(It.isEnd()
                     ? dbgs() << "applied pre-existing waitcnt\n"
                              << "New Instr at block end: " << *WaitInstrs[CT]
                              << '\n'
                     : dbgs() << "applied pre-existing waitcnt\n"
                              << "Old Instr: " << *It
                              << "New Instr: " << *WaitInstrs[CT] << '\n');
    } else {
      WaitInstrs[CT]->eraseFromParent();
      Modified = true;
    }
  }

  if (WaitcntDepctrInstr) {
    unsigned Enc =
        TII->getNamedOperand(*WaitcntDepctrInstr, AMDGPU::OpName::simm16)
            ->getImm();
    if (Wait.VaVdst != ~0u)
      Enc = AMDGPU::DepCtr::encodeFieldVaVdst(Enc, Wait.VaVdst);
    if (Wait.VmVsrc != ~0u)
      Enc = AMDGPU::DepCtr::encodeFieldVmVsrc(Enc, Wait.VmVsrc);
    ScoreBrackets.applyWaitcnt(VA_VDST, Wait.VaVdst);
    ScoreBrackets.applyWaitcnt(VM_VSRC, Wait.VmVsrc);
    Wait.VaVdst = ~0u;
    Wait.VmVsrc = ~0u;
    Modified |= updateOperandIfDifferent(*WaitcntDepctrInstr,
                                         AMDGPU::OpName::simm16, Enc);
    LLVM_DEBUG(It.isEnd() ? dbgs() << "applyPreexistingWaitcnt\n"
                                   << "New Instr at block end: "
                                   << *WaitcntDepctrInstr << '\n'
                          : dbgs() << "applyPreexistingWaitcnt\n"
                                   << "Old Instr: " << *It << "New Instr: "
                                   << *WaitcntDepctrInstr << '\n');
  }

  return Modified;
}
/// Generate S_WAIT_*CNT instructions for any required counters in \p Wait.
bool WaitcntGeneratorGFX12Plus::createNewWaitcnt(
    MachineBasicBlock &Block, MachineBasicBlock::instr_iterator It,
    AMDGPU::Waitcnt Wait, const WaitcntBrackets &ScoreBrackets) {
  assert(ST);
  assert(!isNormalMode(MaxCounter));

  bool Modified = false;
  const DebugLoc &DL = Block.findDebugLoc(It);

  // Emit a decreasing staircase of waits down to Target.
  auto EmitExpandedWaitcnt = [&](unsigned Outstanding, unsigned Target,
                                 auto EmitWaitcnt) {
    for (unsigned I = Outstanding - 1; I > Target && I != ~0u; --I)
      EmitWaitcnt(I);
    EmitWaitcnt(Target);
  };

  // In profiling mode, emit a separate wait for each intermediate count of
  // every in-order counter, so stalls can be attributed per instruction.
  if (ExpandWaitcntProfiling) {
    for (auto CT : inst_counter_types(NUM_EXTENDED_INST_CNTS)) {
      unsigned Count = getWait(Wait, CT);
      if (Count == ~0u)
        continue;
      if (ScoreBrackets.counterOutOfOrder(CT)) {
        // Expanding an out-of-order counter is not meaningful.
        continue;
      }
      unsigned Outstanding =
          std::min(ScoreBrackets.getScoreUB(CT) - ScoreBrackets.getScoreLB(CT),
                   getWaitCountMax(getLimits(), CT) - 1);
      EmitExpandedWaitcnt(Outstanding, Count, [&](unsigned Val) {
        BuildMI(Block, It, DL, TII->get(instrsForExtendedCounterTypes[CT]))
            .addImm(Val);
      });
      setNoWait(Wait, CT);
      Modified = true;
    }
  }

  // Check for opportunities to use combined wait instructions.
  if (Wait.DsCnt != ~0u) {
    MachineInstr *SWaitInst = nullptr;

    if (Wait.LoadCnt != ~0u) {
      unsigned Enc = AMDGPU::encodeLoadcntDscnt(IV, Wait);

      SWaitInst = BuildMI(Block, It, DL, TII->get(AMDGPU::S_WAIT_LOADCNT_DSCNT))
                      .addImm(Enc);

      Wait.LoadCnt = ~0u;
      Wait.DsCnt = ~0u;
    } else if (Wait.StoreCnt != ~0u) {
      unsigned Enc = AMDGPU::encodeStorecntDscnt(IV, Wait);

      SWaitInst =
          BuildMI(Block, It, DL, TII->get(AMDGPU::S_WAIT_STORECNT_DSCNT))
              .addImm(Enc);

      Wait.StoreCnt = ~0u;
      Wait.DsCnt = ~0u;
    }

    if (SWaitInst) {
      Modified = true;

      LLVM_DEBUG(dbgs() << "generateWaitcnt\n";
                 if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
                 dbgs() << "New Instr: " << *SWaitInst << '\n');
    }
  }

  // Generate an instruction for any remaining counter that needs waiting on.
  for (auto CT : inst_counter_types(NUM_EXTENDED_INST_CNTS)) {
    unsigned Count = getWait(Wait, CT);
    if (Count == ~0u)
      continue;

    [[maybe_unused]] auto SWaitInst =
        BuildMI(Block, It, DL, TII->get(instrsForExtendedCounterTypes[CT]))
            .addImm(Count);

    Modified = true;

    LLVM_DEBUG(dbgs() << "generateWaitcnt\n";
               if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
               dbgs() << "New Instr: " << *SWaitInst << '\n');
  }

  if (Wait.hasWaitDepctr()) {
    unsigned Enc = AMDGPU::DepCtr::encodeFieldVaVdst(Wait.VaVdst);
    Enc = AMDGPU::DepCtr::encodeFieldVmVsrc(Enc, Wait.VmVsrc);

    [[maybe_unused]] auto SWaitInst =
        BuildMI(Block, It, DL, TII->get(AMDGPU::S_WAITCNT_DEPCTR)).addImm(Enc);

    Modified = true;

    LLVM_DEBUG(dbgs() << "generateWaitcnt\n";
               if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
               dbgs() << "New Instr: " << *SWaitInst << '\n');
  }

  return Modified;
}
///  Generate s_waitcnt instruction to be placed before cur_Inst.
///  Instructions of a given type are returned in order,
///  but instructions of different types can complete out of order.
///  We rely on this in-order completion
///  and simply assign a score to the memory access instructions.
///  We keep track of the active "score bracket" to determine
///  if an access of a memory read requires an s_waitcnt
///  and if so what the value of each counter is.
///  The "score bracket" is bound by the lower bound and upper bound
///  scores (*_score_LB and *_score_ub respectively).
bool SIInsertWaitcnts::generateWaitcntInstBefore(
    MachineInstr &MI, WaitcntBrackets &ScoreBrackets,
    MachineInstr *OldWaitcntInstr, PreheaderFlushFlags FlushFlags) {
  setForceEmitWaitcnt();

  assert(!MI.isMetaInstruction());

  AMDGPU::Waitcnt Wait;
  const unsigned Opc = MI.getOpcode();

  // FIXME: This should have already been handled by the memory legalizer.
  // Removing this currently doesn't affect any lit tests, but we need to
  // verify that nothing was relying on this. The number of buffer invalidates
  // being handled here should not be expanded.
  if (Opc == AMDGPU::BUFFER_WBINVL1 || Opc == AMDGPU::BUFFER_WBINVL1_SC ||
      Opc == AMDGPU::BUFFER_WBINVL1_VOL || Opc == AMDGPU::BUFFER_GL0_INV ||
      Opc == AMDGPU::BUFFER_GL1_INV) {
    Wait.LoadCnt = 0;
  }

  // All waits must be resolved at call return.
  // NOTE: this could be improved with knowledge of all call sites or
  //   with knowledge of the called routines.
  if (Opc == AMDGPU::SI_RETURN_TO_EPILOG || Opc == AMDGPU::SI_RETURN ||
      Opc == AMDGPU::SI_WHOLE_WAVE_FUNC_RETURN ||
      Opc == AMDGPU::S_SETPC_B64_return) {
    AMDGPU::Waitcnt AllZeroWait =
        WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/false);
    // On targets with extended wait counts, a zero loadcnt wait is only
    // needed if there are pending VMEM loads.
    if (ST->hasExtendedWaitCounts() &&
        !ScoreBrackets.hasPendingEvent(VMEM_ACCESS))
      AllZeroWait.LoadCnt = ~0u;
    Wait = Wait.combined(AllZeroWait);
  }
  // In dynamic VGPR mode, we want to release the VGPRs before the wave exits.
  // When not in dynamic VGPR mode, identify S_ENDPGM instructions which may
  // have to wait for outstanding VMEM stores; in that case it can be useful
  // to send a message releasing all VGPRs before the stores have completed,
  // but only if there are no outstanding scratch stores.
  else if (Opc == AMDGPU::S_ENDPGM || Opc == AMDGPU::S_ENDPGM_SAVED) {
    if (!WCG->isOptNone() &&
        (MI.getMF()->getInfo<SIMachineFunctionInfo>()->isDynamicVGPREnabled() ||
         (ST->getGeneration() >= AMDGPUSubtarget::GFX11 &&
          ScoreBrackets.getScoreRange(STORE_CNT) != 0 &&
          !ScoreBrackets.hasPendingEvent(SCRATCH_WRITE_ACCESS))))
      ReleaseVGPRInsts.insert(&MI);
  }
  // Resolve vm waits before gs-done.
  else if ((Opc == AMDGPU::S_SENDMSG || Opc == AMDGPU::S_SENDMSGHALT) &&
           ST->hasLegacyGeometry() &&
           ((MI.getOperand(0).getImm() & AMDGPU::SendMsg::ID_MASK_PreGFX11_) ==
            AMDGPU::SendMsg::ID_GS_DONE_PreGFX11)) {
    Wait.LoadCnt = 0;
  } else {
    // Export & GDS instructions do not read the EXEC mask until after the
    // export is granted (which can occur well after the instruction is
    // issued). The shader program must flush all EXP operations on the
    // export-count before overwriting the EXEC mask.
    if (MI.modifiesRegister(AMDGPU::EXEC, TRI)) {
      // Export and GDS are tracked individually, either may trigger a waitcnt
      // for EXEC.
      if (ScoreBrackets.hasPendingEvent(EXP_GPR_LOCK) ||
          ScoreBrackets.hasPendingEvent(EXP_PARAM_ACCESS) ||
          ScoreBrackets.hasPendingEvent(EXP_POS_ACCESS) ||
          ScoreBrackets.hasPendingEvent(GDS_GPR_LOCK)) {
        Wait.ExpCnt = 0;
      }
    }

    // Wait for any pending GDS instruction to complete before any
    // "Always GDS" instruction.
    if (TII->isAlwaysGDS(Opc) && ScoreBrackets.hasPendingGDS())
      addWait(Wait, DS_CNT, ScoreBrackets.getPendingGDSWait());

    if (MI.isCall() && callWaitsOnFunctionEntry(MI)) {
      // The function is going to insert a wait on everything in its prolog.
      // This still needs to be careful if the call target is a load (e.g. a
      // GOT load). We specifically don't want the swappc to wait on the SCC
      // write, so only wait on the actual operands.
      Wait = AMDGPU::Waitcnt();

      const MachineOperand &CallAddrOp = TII->getCalleeOperand(MI);
      if (CallAddrOp.isReg()) {
        ScoreBrackets.determineWaitForPhysReg(
            SmemAccessCounter, CallAddrOp.getReg().asMCReg(), Wait);

        if (const auto *RtnAddrOp =
                TII->getNamedOperand(MI, AMDGPU::OpName::dst)) {
          ScoreBrackets.determineWaitForPhysReg(
              SmemAccessCounter, RtnAddrOp->getReg().asMCReg(), Wait);
        }
      }
    } else if (Opc == AMDGPU::S_BARRIER_WAIT) {
      ScoreBrackets.tryClearSCCWriteEvent(&MI);
    } else {
      // FIXME: Should not be relying on memoperands.
      // Look at the source operands of every instruction to see if any of
      // them results from a previous memory operation that affects its
      // current usage; if so, an s_waitcnt instruction needs to be emitted.
      //
      // Two cases are handled for destination operands:
      // 1) If the destination operand was defined by a load, add the
      //    s_waitcnt instruction to guarantee the right WAW order.
      // 2) If a destination operand was used by a recent export/store
      //    instruction, add an s_waitcnt on exp_cnt to guarantee the WAR
      //    order.
      for (const MachineMemOperand *Memop : MI.memoperands()) {
        const Value *Ptr = Memop->getValue();
        if (Memop->isStore()) {
          if (auto It = SLoadAddresses.find(Ptr); It != SLoadAddresses.end()) {
            addWait(Wait, SmemAccessCounter, 0);
            if (PDT->dominates(MI.getParent(), It->second))
              SLoadAddresses.erase(It);
          }
        }
        unsigned AS = Memop->getAddrSpace();
        if (AS != AMDGPUAS::LOCAL_ADDRESS && AS != AMDGPUAS::FLAT_ADDRESS)
          continue;
        // No need to wait before a load from VMEM to LDS.
        if (TII->mayWriteLDSThroughDMA(MI))
          continue;

        unsigned TID = LDSDMA_BEGIN;
        // Only objects with alias-scope info were added to the LDS DMA store
        // slots. Without scope info we cannot disambiguate aliasing here, so
        // wait on the first (general) slot, which waits on all of them.
        if (Ptr && Memop->getAAInfo()) {
          const auto &LDSDMAStores = ScoreBrackets.getLDSDMAStores();
          for (unsigned I = 0, E = LDSDMAStores.size(); I != E; ++I) {
            if (MI.mayAlias(AA, *LDSDMAStores[I], true)) {
              if ((I + 1) >= NUM_LDSDMA) {
                ScoreBrackets.determineWaitForLDSDMA(LOAD_CNT, TID, Wait);
                break;
              }
              ScoreBrackets.determineWaitForLDSDMA(LOAD_CNT, TID + I + 1, Wait);
            }
          }
        } else {
          ScoreBrackets.determineWaitForLDSDMA(LOAD_CNT, TID, Wait);
        }
        if (Memop->isStore()) {
          ScoreBrackets.determineWaitForLDSDMA(EXP_CNT, TID, Wait);
        }
      }

      // Loop over use and def operands.
      for (const MachineOperand &Op : MI.operands()) {
        if (!Op.isReg())
          continue;

        // If the instruction does not read tied source, skip the operand.
        if (Op.isTied() && Op.isUse() && TII->doesNotReadTiedSource(MI))
          continue;

        MCPhysReg Reg = Op.getReg().asMCReg();
        const bool IsVGPR = TRI->isVectorRegister(*MRI, Op.getReg());

        if (IsVGPR) {
          // Implicit VGPR defs and uses are never a part of the memory
          // instruction's description and usually present to account for
          // super-register liveness.
          if (Op.isImplicit() && MI.mayLoadOrStore())
            continue;

          if (IsExpertMode) {
            if (Op.isDef())
              ScoreBrackets.determineWaitForPhysReg(VA_VDST, Reg, Wait);
            ScoreBrackets.determineWaitForPhysReg(VM_VSRC, Reg, Wait);
          }

          // RAW always needs an s_waitcnt. WAW needs an s_waitcnt unless the
          // previous write and this write are the same type of VMEM
          // instruction, in which case they are (on some architectures)
          // guaranteed to write their results in order. Additionally check
          // instructions where Point Sample Acceleration might be applied.
          if (Op.isUse() || !updateVMCntOnly(MI) ||
              ScoreBrackets.hasOtherPendingVmemTypes(Reg, getVmemType(MI)) ||
              ScoreBrackets.hasPointSamplePendingVmemTypes(MI, Reg) ||
              !ST->hasVmemWriteVgprInOrder()) {
            ScoreBrackets.determineWaitForPhysReg(LOAD_CNT, Reg, Wait);
            ScoreBrackets.determineWaitForPhysReg(SAMPLE_CNT, Reg, Wait);
            ScoreBrackets.determineWaitForPhysReg(BVH_CNT, Reg, Wait);
            ScoreBrackets.clearVgprVmemTypes(Reg);
          }

          if (Op.isDef() || ScoreBrackets.hasPendingEvent(EXP_LDS_ACCESS)) {
            ScoreBrackets.determineWaitForPhysReg(EXP_CNT, Reg, Wait);
          }
          ScoreBrackets.determineWaitForPhysReg(DS_CNT, Reg, Wait);
        } else if (Op.getReg() == AMDGPU::SCC) {
          ScoreBrackets.determineWaitForPhysReg(KM_CNT, Reg, Wait);
        } else {
          ScoreBrackets.determineWaitForPhysReg(SmemAccessCounter, Reg, Wait);
        }

        if (ST->hasWaitXCnt() && Op.isDef())
          ScoreBrackets.determineWaitForPhysReg(X_CNT, Reg, Wait);
      }
    }
  }

  // The subtarget may have an implicit S_WAITCNT 0 before barriers. If it
  // does not, we need to ensure the subtarget is capable of backing off
  // barrier instructions in case there are any outstanding memory operations
  // that may cause an exception. Otherwise, insert an explicit S_WAITCNT 0
  // here.
  if (Opc == AMDGPU::S_BARRIER && !ST->hasAutoWaitcntBeforeBarrier() &&
      !ST->supportsBackOffBarrier()) {
    Wait = Wait.combined(WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/true));
  }

  // TODO: Remove this work-around after fixing the scheduler; see the
  // corresponding vccz handling in insertWaitcntInBlock().
  if (readsVCCZ(MI) && ST->hasReadVCCZBug() &&
      ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
    Wait.DsCnt = 0;
  }

  // Verify that the wait is actually needed.
  ScoreBrackets.simplifyWaitcnt(Wait);

  // VALU-to-VALU dependencies are interlocked by hardware; no VA_VDST wait
  // is needed before another VALU instruction.
  if (TII->isVALU(MI))
    Wait.VaVdst = ~0u;

  // A VMEM access itself consumes the pending XCnt; record it in the
  // brackets instead of emitting an explicit wait.
  if (Wait.XCnt != ~0u && isVmemAccess(MI)) {
    ScoreBrackets.applyWaitcnt(X_CNT, Wait.XCnt);
    Wait.XCnt = ~0u;
  }

  // When forcing emit, we need to skip terminators because that would break
  // the terminators of the MBB if we emit a waitcnt between terminators.
  if (ForceEmitZeroFlag && !MI.isTerminator())
    Wait = WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/false);
  if (ForceEmitZeroLoadFlag && Wait.LoadCnt != ~0u)
    Wait.LoadCnt = 0;

  if (ForceEmitWaitcnt[LOAD_CNT])
    Wait.LoadCnt = 0;
  if (ForceEmitWaitcnt[EXP_CNT])
    Wait.ExpCnt = 0;
  if (ForceEmitWaitcnt[DS_CNT])
    Wait.DsCnt = 0;
  if (ForceEmitWaitcnt[SAMPLE_CNT])
    Wait.SampleCnt = 0;
  if (ForceEmitWaitcnt[BVH_CNT])
    Wait.BvhCnt = 0;
  if (ForceEmitWaitcnt[KM_CNT])
    Wait.KmCnt = 0;
  if (ForceEmitWaitcnt[X_CNT])
    Wait.XCnt = 0;
  if (ForceEmitWaitcnt[VA_VDST])
    Wait.VaVdst = 0;
  if (ForceEmitWaitcnt[VM_VSRC])
    Wait.VmVsrc = 0;

  if (FlushFlags.FlushVmCnt) {
    if (ScoreBrackets.hasPendingEvent(LOAD_CNT))
      Wait.LoadCnt = 0;
    if (ScoreBrackets.hasPendingEvent(SAMPLE_CNT))
      Wait.SampleCnt = 0;
    if (ScoreBrackets.hasPendingEvent(BVH_CNT))
      Wait.BvhCnt = 0;
  }
  if (FlushFlags.FlushDsCnt && ScoreBrackets.hasPendingEvent(DS_CNT))
    Wait.DsCnt = 0;

  return generateWaitcnt(Wait, MI.getIterator(), *MI.getParent(),
                         ScoreBrackets, OldWaitcntInstr);
}
bool SIInsertWaitcnts::generateWaitcnt(AMDGPU::Waitcnt Wait,
                                       MachineBasicBlock::instr_iterator It,
                                       MachineBasicBlock &Block,
                                       WaitcntBrackets &ScoreBrackets,
                                       MachineInstr *OldWaitcntInstr) {
  bool Modified = false;

  if (OldWaitcntInstr)
    // Try to merge the required wait with preexisting waitcnt instructions.
    // Also erase redundant waitcnt.
    Modified =
        WCG->applyPreexistingWaitcnt(ScoreBrackets, *OldWaitcntInstr, Wait, It);

  // Any counts that could have been applied to any existing waitcnt
  // instructions will have been done so, now deal with any remaining.
  // ExpCnt can be merged into VINTERP.
  if (Wait.ExpCnt != ~0u && It != Block.instr_end() &&
      SIInstrInfo::isVINTERP(*It)) {
    MachineOperand *WaitExp =
        TII->getNamedOperand(*It, AMDGPU::OpName::waitexp);
    if (Wait.ExpCnt < WaitExp->getImm()) {
      WaitExp->setImm(Wait.ExpCnt);
      Modified = true;
    }
    ScoreBrackets.applyWaitcnt(EXP_CNT, Wait.ExpCnt);
    Wait.ExpCnt = ~0u;

    LLVM_DEBUG(dbgs() << "generateWaitcnt\n"
                      << "Update Instr: " << *It);
  }

  if (WCG->createNewWaitcnt(Block, It, Wait, ScoreBrackets))
    Modified = true;

  // Track the brackets as if the remaining wait had been applied.
  ScoreBrackets.applyWaitcnt(Wait);

  return Modified;
}
std::optional<WaitEventType>
SIInsertWaitcnts::getExpertSchedulingEventType(
    const MachineInstr &Inst) const {
  if (TII->isVALU(Inst)) {
    // Classify the VALU write for VA_VDST tracking.
    if (!Inst.getDesc().getNumDefs())
      return std::nullopt;

    if (TII->isXDL(Inst))
      return VGPR_XDL_WRITE;

    if (TII->isTRANS(Inst))
      return VGPR_TRANS_WRITE;

    // Assumption: dot-product MACCs are classified separately from the
    // common-case core/side MACC writes.
    if (SIInstrInfo::isDOT(Inst))
      return VGPR_DPMACC_WRITE;

    return VGPR_CSMACC_WRITE;
  }

  // VGPR reads by memory instructions are tracked for VM_VSRC.
  if (Inst.mayLoadOrStore()) {
    if (TII->isFLAT(Inst))
      return VGPR_FLAT_READ;

    if (TII->isDS(Inst))
      return VGPR_LDS_READ;

    if (TII->isVMEM(Inst) || TII->isVIMAGE(Inst) || TII->isVSAMPLE(Inst))
      return VGPR_VMEM_READ;
  }

  return std::nullopt;
}
bool SIInsertWaitcnts::isVmemAccess(const MachineInstr &MI) const {
  return (TII->isFLAT(MI) && TII->mayAccessVMEMThroughFlat(MI)) ||
         SIInstrInfo::isVMEM(MI);
}

// Return true if the next instruction executed after It is S_ENDPGM,
// following fall-through blocks and skipping meta instructions.
bool SIInsertWaitcnts::isNextENDPGM(MachineBasicBlock::instr_iterator It,
                                    MachineBasicBlock *Block) const {
  auto BlockEnd = Block->getParent()->end();
  auto BlockIter = Block->getIterator();

  while (true) {
    if (It.isEnd()) {
      if (++BlockIter != BlockEnd) {
        It = BlockIter->instr_begin();
        continue;
      }

      return false;
    }

    if (!It->isMetaInstruction())
      break;

    ++It;
  }

  assert(!It.isEnd());

  return It->getOpcode() == AMDGPU::S_ENDPGM;
}
// Add a wait after an instruction if architecture requirements mandate one.
bool SIInsertWaitcnts::insertForcedWaitAfter(MachineInstr &Inst,
                                             MachineBasicBlock &Block,
                                             WaitcntBrackets &ScoreBrackets) {
  AMDGPU::Waitcnt Wait;
  bool NeedsEndPGMCheck = false;

  if (ST->isPreciseMemoryEnabled() && Inst.mayLoadOrStore())
    Wait = WCG->getAllZeroWaitcnt(Inst.mayStore() &&
                                  !SIInstrInfo::isAtomicRet(Inst));

  if (TII->isAlwaysGDS(Inst.getOpcode())) {
    Wait.DsCnt = 0;
    NeedsEndPGMCheck = true;
  }

  ScoreBrackets.simplifyWaitcnt(Wait);

  auto SuccessorIt = std::next(Inst.getIterator());
  bool Result = generateWaitcnt(Wait, SuccessorIt, Block, ScoreBrackets,
                                /*OldWaitcntInstr=*/nullptr);

  if (Result && NeedsEndPGMCheck && isNextENDPGM(SuccessorIt, &Block)) {
    BuildMI(Block, SuccessorIt, Inst.getDebugLoc(), TII->get(AMDGPU::S_NOP))
        .addImm(0);
  }

  return Result;
}
void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
                                               WaitcntBrackets *ScoreBrackets) {
  // Now look at the instruction opcode. If it is a memory access instruction,
  // update the upper-bound of the appropriate counter's bracket and the
  // destination operand scores.

  bool IsVMEMAccess = false;
  bool IsSMEMAccess = false;

  if (IsExpertMode) {
    if (const auto ET = getExpertSchedulingEventType(Inst))
      ScoreBrackets->updateByEvent(*ET, Inst);
  }

  if (TII->isDS(Inst) && TII->usesLGKM_CNT(Inst)) {
    if (TII->isAlwaysGDS(Inst.getOpcode()) ||
        TII->hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
      ScoreBrackets->updateByEvent(GDS_ACCESS, Inst);
      ScoreBrackets->updateByEvent(GDS_GPR_LOCK, Inst);
      ScoreBrackets->setPendingGDS();
    } else {
      ScoreBrackets->updateByEvent(LDS_ACCESS, Inst);
    }
  } else if (TII->isFLAT(Inst)) {
    if (isCacheInvOrWBInst(Inst)) {
      ScoreBrackets->updateByEvent(getVmemWaitEventType(Inst), Inst);
      return;
    }

    assert(Inst.mayLoadOrStore());

    int FlatASCount = 0;

    if (TII->mayAccessVMEMThroughFlat(Inst)) {
      ++FlatASCount;
      IsVMEMAccess = true;
      ScoreBrackets->updateByEvent(getVmemWaitEventType(Inst), Inst);
    }

    if (TII->mayAccessLDSThroughFlat(Inst)) {
      ++FlatASCount;
      ScoreBrackets->updateByEvent(LDS_ACCESS, Inst);
    }

    // This is a flat memory operation that accesses both LDS and memory.
    // The two counters may decrement out of order, so remember that fact to
    // keep waits on either counter conservative.
    if (FlatASCount > 1)
      ScoreBrackets->setPendingFlat();
  } else if (SIInstrInfo::isVMEM(Inst)) {
    IsVMEMAccess = true;
    ScoreBrackets->updateByEvent(getVmemWaitEventType(Inst), Inst);

    if (ST->vmemWriteNeedsExpWaitcnt() &&
        (Inst.mayStore() || SIInstrInfo::isAtomicRet(Inst))) {
      ScoreBrackets->updateByEvent(VMW_GPR_LOCK, Inst);
    }
  } else if (TII->isSMRD(Inst)) {
    IsSMEMAccess = true;
    ScoreBrackets->updateByEvent(SMEM_ACCESS, Inst);
  } else if (Inst.isCall()) {
    // Act as a wait on everything.
    ScoreBrackets->applyWaitcnt(WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/false));
    ScoreBrackets->setStateOnFunctionEntryOrReturn();
  } else if (SIInstrInfo::isLDSDIR(Inst)) {
    ScoreBrackets->updateByEvent(EXP_LDS_ACCESS, Inst);
  } else if (TII->isVINTERP(Inst)) {
    int64_t Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::waitexp)->getImm();
    ScoreBrackets->applyWaitcnt(EXP_CNT, Imm);
  } else if (SIInstrInfo::isEXP(Inst)) {
    unsigned Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm();
    if (Imm >= AMDGPU::Exp::ET_PARAM0 && Imm <= AMDGPU::Exp::ET_PARAM31)
      ScoreBrackets->updateByEvent(EXP_PARAM_ACCESS, Inst);
    else if (Imm >= AMDGPU::Exp::ET_POS0 && Imm <= AMDGPU::Exp::ET_POS_LAST)
      ScoreBrackets->updateByEvent(EXP_POS_ACCESS, Inst);
    else
      ScoreBrackets->updateByEvent(EXP_GPR_LOCK, Inst);
  } else if (Inst.getOpcode() == AMDGPU::S_BARRIER_SIGNAL_ISFIRST_IMM) {
    ScoreBrackets->updateByEvent(SCC_WRITE, Inst);
  } else {
    switch (Inst.getOpcode()) {
    case AMDGPU::S_SENDMSG:
    case AMDGPU::S_SENDMSG_RTN_B32:
    case AMDGPU::S_SENDMSG_RTN_B64:
    case AMDGPU::S_SENDMSGHALT:
      ScoreBrackets->updateByEvent(SQ_MESSAGE, Inst);
      break;
    case AMDGPU::S_MEMTIME:
    case AMDGPU::S_MEMREALTIME:
    case AMDGPU::S_GET_BARRIER_STATE_M0:
    case AMDGPU::S_GET_BARRIER_STATE_IMM:
      ScoreBrackets->updateByEvent(SMEM_ACCESS, Inst);
      break;
    }
  }

  if (!ST->hasWaitXCnt())
    return;

  if (IsVMEMAccess)
    ScoreBrackets->updateByEvent(VMEM_GROUP, Inst);

  if (IsSMEMAccess)
    ScoreBrackets->updateByEvent(SMEM_GROUP, Inst);
}
bool WaitcntBrackets::mergeScore(const MergeInfo &M, unsigned &Score,
                                 unsigned OtherScore) {
  unsigned MyShifted = Score <= M.OldLB ? 0 : Score + M.MyShift;
  unsigned OtherShifted =
      OtherScore <= M.OtherLB ? 0 : OtherScore + M.OtherShift;
  Score = std::max(MyShifted, OtherShifted);
  return OtherShifted > MyShifted;
}
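// Worked example (illustrative): suppose this bracket is [LB=2, UB=5] and the
// other is [LB=0, UB=4]. The merged pending range is max(3, 4) = 4, so the
// merged UB becomes 2 + 4 = 6, giving MyShift = 1 and OtherShift = 2. A
// register with score 4 here shifts to 5; the same register with score 3 in
// Other also shifts to 5, so both histories agree after shifting, while any
// score at or below its old LB collapses to 0 ("already retired").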
/// Merge the pending events and associated scores from \p Other into this
/// brackets status.
///
/// Returns whether the merge resulted in a change that requires tighter waits
/// (i.e. the merged brackets strictly dominate the original brackets).
bool WaitcntBrackets::merge(const WaitcntBrackets &Other) {
  bool StrictDom = false;

  // Make sure that both maps have entries for every key in either map, so
  // the per-key merge below sees every tracked register.
  for (auto K : Other.VMem.keys())
    VMem.try_emplace(K);
  for (auto K : Other.SGPRs.keys())
    SGPRs.try_emplace(K);

  for (auto T : inst_counter_types(Context->MaxCounter)) {
    // Merge event flags for this counter.
    const unsigned *WaitEventMaskForInst = Context->WaitEventMaskForInst;
    const unsigned OldEvents = PendingEvents & WaitEventMaskForInst[T];
    const unsigned OtherEvents = Other.PendingEvents & WaitEventMaskForInst[T];
    if (OtherEvents & ~OldEvents)
      StrictDom = true;
    PendingEvents |= OtherEvents;

    // Merge scores for this counter.
    const unsigned MyPending = ScoreUBs[T] - ScoreLBs[T];
    const unsigned OtherPending = Other.ScoreUBs[T] - Other.ScoreLBs[T];
    const unsigned NewUB = ScoreLBs[T] + std::max(MyPending, OtherPending);
    if (NewUB < ScoreLBs[T])
      report_fatal_error("waitcnt score overflow");

    MergeInfo M;
    M.OldLB = ScoreLBs[T];
    M.OtherLB = Other.ScoreLBs[T];
    M.MyShift = NewUB - ScoreUBs[T];
    M.OtherShift = NewUB - Other.ScoreUBs[T];

    ScoreUBs[T] = NewUB;

    StrictDom |= mergeScore(M, LastFlat[T], Other.LastFlat[T]);

    if (T == DS_CNT)
      StrictDom |= mergeScore(M, LastGDS, Other.LastGDS);

    if (T == KM_CNT) {
      StrictDom |= mergeScore(M, SCCScore, Other.SCCScore);
      if (Other.hasPendingEvent(SCC_WRITE)) {
        unsigned OldEventsHasSCCWrite = OldEvents & (1 << SCC_WRITE);
        if (!OldEventsHasSCCWrite) {
          PendingSCCWrite = Other.PendingSCCWrite;
        } else if (PendingSCCWrite != Other.PendingSCCWrite) {
          PendingSCCWrite = nullptr;
        }
      }
    }

    for (auto &[RegID, Info] : VMem)
      StrictDom |= mergeScore(M, Info.Scores[T], Other.getVMemScore(RegID, T));

    if (isSmemCounter(T)) {
      unsigned Idx = getSgprScoresIdx(T);
      for (auto &[RegID, Info] : SGPRs) {
        auto It = Other.SGPRs.find(RegID);
        unsigned OtherScore =
            (It != Other.SGPRs.end()) ? It->second.Scores[Idx] : 0;
        StrictDom |= mergeScore(M, Info.Scores[Idx], OtherScore);
      }
    }
  }

  for (auto &[TID, Info] : VMem) {
    if (auto It = Other.VMem.find(TID); It != Other.VMem.end()) {
      unsigned char NewVmemTypes = Info.VMEMTypes | It->second.VMEMTypes;
      StrictDom |= NewVmemTypes != Info.VMEMTypes;
      Info.VMEMTypes = NewVmemTypes;
    }
  }

  purgeEmptyTrackingData();

  return StrictDom;
}
static bool isWaitInstr(MachineInstr &Inst) {
  unsigned Opcode = SIInstrInfo::getNonSoftWaitcntOpcode(Inst.getOpcode());
  return Opcode == AMDGPU::S_WAITCNT ||
         (Opcode == AMDGPU::S_WAITCNT_VSCNT && Inst.getOperand(0).isReg() &&
          Inst.getOperand(0).getReg() == AMDGPU::SGPR_NULL) ||
         Opcode == AMDGPU::S_WAIT_LOADCNT_DSCNT ||
         Opcode == AMDGPU::S_WAIT_STORECNT_DSCNT ||
         Opcode == AMDGPU::S_WAITCNT_lds_direct ||
         counterTypeForInstr(Opcode).has_value();
}

void SIInsertWaitcnts::setSchedulingMode(MachineBasicBlock &MBB,
                                         MachineBasicBlock::instr_iterator It,
                                         bool ExpertMode) const {
  // Assumption: scheduling mode 2 is "expert" and 0 is the hardware default,
  // written to the scheduling-mode hardware register; the exact hwreg
  // encoding is subtarget-specific.
  BuildMI(MBB, It, DebugLoc(), TII->get(AMDGPU::S_SETREG_IMM32_B32))
      .addImm(ExpertMode ? 2 : 0)
      .addImm(AMDGPU::Hwreg::HwregEncoding::encode(AMDGPU::Hwreg::ID_SCHED_MODE,
                                                   0, 32));
}
// Generate s_waitcnt instructions where needed.
bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
                                            MachineBasicBlock &Block,
                                            WaitcntBrackets &ScoreBrackets) {
  bool Modified = false;

  LLVM_DEBUG({
    dbgs() << "*** Begin Block: ";
    Block.printName(dbgs());
    ScoreBrackets.dump();
  });

  // Track the correctness of vccz through this basic block. There are two
  // reasons why it might be incorrect; see ST->hasReadVCCZBug() and
  // ST->partialVCCWritesUpdateVCCZ().
  bool VCCZCorrect = true;
  if (ST->hasReadVCCZBug()) {
    // vccz could be incorrect at a basic block boundary if a predecessor
    // wrote to vcc and then issued an smem load.
    VCCZCorrect = false;
  } else if (!ST->partialVCCWritesUpdateVCCZ()) {
    // vccz could be incorrect at a basic block boundary if a predecessor
    // only wrote one of vcc_lo or vcc_hi.
    VCCZCorrect = false;
  }

  // Walk over the instructions.
  MachineInstr *OldWaitcntInstr = nullptr;

  for (MachineBasicBlock::instr_iterator Iter = Block.instr_begin(),
                                         E = Block.instr_end();
       Iter != E;) {
    MachineInstr &Inst = *Iter;

    // Track pre-existing waitcnts that were added by this pass on an earlier
    // iteration or by the memory legalizer.
    if (isWaitInstr(Inst) ||
        (IsExpertMode && Inst.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR)) {
      if (!OldWaitcntInstr)
        OldWaitcntInstr = &Inst;
      ++Iter;
      continue;
    }

    PreheaderFlushFlags FlushFlags;
    if (Block.getFirstTerminator() == Inst)
      FlushFlags = isPreheaderToFlush(Block, ScoreBrackets);

    // Generate an s_waitcnt instruction to be placed before Inst, if needed.
    Modified |= generateWaitcntInstBefore(Inst, ScoreBrackets, OldWaitcntInstr,
                                          FlushFlags);
    OldWaitcntInstr = nullptr;

    // Restore vccz if it's not known to be correct already.
    bool RestoreVCCZ = !VCCZCorrect && readsVCCZ(Inst);

    // Don't examine operands unless we need to track vccz correctness.
    if (ST->hasReadVCCZBug() || !ST->partialVCCWritesUpdateVCCZ()) {
      if (Inst.definesRegister(AMDGPU::VCC_LO, /*TRI=*/nullptr) ||
          Inst.definesRegister(AMDGPU::VCC_HI, /*TRI=*/nullptr)) {
        // Up to gfx9, writes to vcc_lo and vcc_hi don't update vccz.
        if (!ST->partialVCCWritesUpdateVCCZ())
          VCCZCorrect = false;
      } else if (Inst.definesRegister(AMDGPU::VCC, /*TRI=*/nullptr)) {
        // There is a hardware bug on CI/SI where SMRD instructions may
        // corrupt the vccz bit.
        if (ST->hasReadVCCZBug() &&
            ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
          // Writes to vcc while there's an outstanding smem read may get
          // clobbered as soon as any read completes.
          VCCZCorrect = false;
        } else {
          // Writes to vcc will fix any incorrect value in vccz.
          VCCZCorrect = true;
        }
      }
    }

    if (TII->isSMRD(Inst)) {
      for (const MachineMemOperand *Memop : Inst.memoperands()) {
        // No need to handle invariant loads when avoiding WAR conflicts, as
        // there cannot be a vector store to the same memory location.
        if (!Memop->isInvariant()) {
          const Value *Ptr = Memop->getValue();
          SLoadAddresses.insert(std::pair(Ptr, Inst.getParent()));
        }
      }
      if (ST->hasReadVCCZBug()) {
        // This smem read could complete and clobber vccz at any time.
        VCCZCorrect = false;
      }
    }

    updateEventWaitcntAfter(Inst, &ScoreBrackets);

    Modified |= insertForcedWaitAfter(Inst, Block, ScoreBrackets);

    LLVM_DEBUG({
      Inst.print(dbgs());
      ScoreBrackets.dump();
    });

    if (RestoreVCCZ) {
      // Restore the vccz bit. Any time a value is written to vcc, the vccz
      // bit is updated, so we can restore it by writing the value of vcc
      // back to itself.
      BuildMI(Block, Inst, Inst.getDebugLoc(),
              TII->get(ST->isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64),
              TRI->getVCC())
          .addReg(TRI->getVCC());
      VCCZCorrect = true;
      Modified = true;
    }

    ++Iter;
  }

  // Flush counters at the end of a preheader that falls through to the loop
  // header.
  AMDGPU::Waitcnt Wait;
  if (Block.getFirstTerminator() == Block.end()) {
    PreheaderFlushFlags FlushFlags = isPreheaderToFlush(Block, ScoreBrackets);
    if (FlushFlags.FlushVmCnt) {
      if (ScoreBrackets.hasPendingEvent(LOAD_CNT))
        Wait.LoadCnt = 0;
      if (ScoreBrackets.hasPendingEvent(SAMPLE_CNT))
        Wait.SampleCnt = 0;
      if (ScoreBrackets.hasPendingEvent(BVH_CNT))
        Wait.BvhCnt = 0;
    }
    if (FlushFlags.FlushDsCnt && ScoreBrackets.hasPendingEvent(DS_CNT))
      Wait.DsCnt = 0;
  }

  // Combine or remove any redundant waitcnts at the end of the block.
  Modified |= generateWaitcnt(Wait, Block.instr_end(), Block, ScoreBrackets,
                              OldWaitcntInstr);

  LLVM_DEBUG({
    dbgs() << "*** End Block: ";
    Block.printName(dbgs());
    ScoreBrackets.dump();
  });

  return Modified;
}
PreheaderFlushFlags
SIInsertWaitcnts::isPreheaderToFlush(MachineBasicBlock &MBB,
                                     const WaitcntBrackets &ScoreBrackets) {
  auto [Iterator, IsInserted] =
      PreheadersToFlush.try_emplace(&MBB, PreheaderFlushFlags());
  if (!IsInserted)
    return Iterator->second;

  MachineBasicBlock *Succ = MBB.getSingleSuccessor();
  if (!Succ)
    return PreheaderFlushFlags();

  MachineLoop *Loop = MLI->getLoopFor(Succ);
  if (!Loop)
    return PreheaderFlushFlags();

  if (Loop->getLoopPreheader() == &MBB) {
    Iterator->second = getPreheaderFlushFlags(Loop, ScoreBrackets);
    return Iterator->second;
  }

  return PreheaderFlushFlags();
}

bool SIInsertWaitcnts::isVMEMOrFlatVMEM(const MachineInstr &MI) const {
  if (SIInstrInfo::isFLAT(MI))
    return TII->mayAccessVMEMThroughFlat(MI);
  return SIInstrInfo::isVMEM(MI);
}

bool SIInsertWaitcnts::isDSRead(const MachineInstr &MI) const {
  // Conservative check: any DS instruction that loads counts as a DS read.
  return SIInstrInfo::isDS(MI) && MI.mayLoad();
}

// Return true if MI is a store that increments DSCNT when it executes.
bool SIInsertWaitcnts::mayStoreIncrementingDSCNT(const MachineInstr &MI) const {
  // Conservative check: any DS store increments DSCNT.
  return SIInstrInfo::isDS(MI) && MI.mayStore();
}
// Return flags indicating which counters should be flushed in the preheader
// of loop ML, given the score brackets at the end of the preheader.
PreheaderFlushFlags
SIInsertWaitcnts::getPreheaderFlushFlags(MachineLoop *ML,
                                         const WaitcntBrackets &Brackets) {
  PreheaderFlushFlags Flags;
  bool HasVMemLoad = false;
  bool HasVMemStore = false;
  bool SeenDSStoreInLoop = false;
  bool UsesVgprLoadedOutsideVMEM = false;
  bool UsesVgprLoadedOutsideDS = false;
  bool VMemInvalidated = false;
  // Without extended wait counts, DS shares its counter with other LGKM
  // events, so a DS-only flush is never profitable.
  bool DSInvalidated = !ST->hasExtendedWaitCounts();
  DenseSet<MCRegUnit> VgprUse;
  DenseSet<MCRegUnit> VgprDefVMEM;
  DenseSet<MCRegUnit> VgprDefDS;

  for (MachineBasicBlock *MBB : ML->blocks()) {
    bool SeenDSStoreInCurrMBB = false;
    for (MachineInstr &MI : *MBB) {
      if (isVMEMOrFlatVMEM(MI)) {
        HasVMemLoad |= MI.mayLoad();
        HasVMemStore |= MI.mayStore();
      }
      if (mayStoreIncrementingDSCNT(MI))
        SeenDSStoreInCurrMBB = true;
      // A barrier drains DS stores, so stores seen before one do not count
      // against flushing.
      if (MI.getOpcode() == AMDGPU::S_BARRIER)
        SeenDSStoreInCurrMBB = false;
      for (const MachineOperand &Op : MI.all_uses()) {
        if (Op.isDebug() || !TRI->isVectorRegister(*MRI, Op.getReg()))
          continue;
        for (MCRegUnit RU : TRI->regunits(Op.getReg().asMCReg())) {
          // A use of a register redefined inside the loop invalidates the
          // corresponding flush opportunity.
          if (VgprDefVMEM.contains(RU))
            VMemInvalidated = true;
          if (VgprDefDS.contains(RU))
            DSInvalidated = true;
          if (VMemInvalidated && DSInvalidated)
            return Flags;

          VgprUse.insert(RU);
          // Grab the pending score for this register, if any.
          VMEMID ID = toVMEMID(RU);
          bool HasPendingVMEM =
              Brackets.getVMemScore(ID, LOAD_CNT) >
                  Brackets.getScoreLB(LOAD_CNT) ||
              Brackets.getVMemScore(ID, SAMPLE_CNT) >
                  Brackets.getScoreLB(SAMPLE_CNT) ||
              Brackets.getVMemScore(ID, BVH_CNT) > Brackets.getScoreLB(BVH_CNT);
          if (HasPendingVMEM)
            UsesVgprLoadedOutsideVMEM = true;
          // A pending DS score only matters when not already explained by a
          // pending VMEM score on the same register.
          if (!HasPendingVMEM &&
              Brackets.getVMemScore(ID, DS_CNT) > Brackets.getScoreLB(DS_CNT))
            UsesVgprLoadedOutsideDS = true;
        }
      }

      // VMEM loads inside the loop redefine registers.
      if (isVMEMOrFlatVMEM(MI) && MI.mayLoad()) {
        for (const MachineOperand &Op : MI.all_defs()) {
          for (MCRegUnit RU : TRI->regunits(Op.getReg().asMCReg())) {
            if (VgprUse.contains(RU)) {
              VMemInvalidated = true;
              if (VMemInvalidated && DSInvalidated)
                return Flags;
            }
            VgprDefVMEM.insert(RU);
          }
        }
      }

      // DS reads inside the loop likewise redefine registers.
      if (isDSRead(MI)) {
        for (const MachineOperand &Op : MI.all_defs()) {
          for (MCRegUnit RU : TRI->regunits(Op.getReg().asMCReg())) {
            if (VgprUse.contains(RU)) {
              DSInvalidated = true;
              if (VMemInvalidated && DSInvalidated)
                return Flags;
            }
            VgprDefDS.insert(RU);
          }
        }
      }
    }
    SeenDSStoreInLoop |= SeenDSStoreInCurrMBB;
  }

  if (!VMemInvalidated && UsesVgprLoadedOutsideVMEM &&
      ((!ST->hasVscnt() && HasVMemStore && !HasVMemLoad) ||
       (HasVMemLoad && ST->hasVmemWriteVgprInOrder())))
    Flags.FlushVmCnt = true;

  // Flush DScnt only when the loop contains no DS stores that would
  // immediately repopulate the counter.
  if (!DSInvalidated && !SeenDSStoreInLoop && UsesVgprLoadedOutsideDS)
    Flags.FlushDsCnt = true;

  return Flags;
}
bool SIInsertWaitcntsLegacy::runOnMachineFunction(MachineFunction &MF) {
  auto *MLI = &getAnalysis<MachineLoopInfoWrapperPass>().getLI();
  auto *PDT =
      &getAnalysis<MachinePostDominatorTreeWrapperPass>().getPostDomTree();
  AliasAnalysis *AA = nullptr;
  if (auto *AAR = getAnalysisIfAvailable<AAResultsWrapperPass>())
    AA = &AAR->getAAResults();

  return SIInsertWaitcnts(MLI, PDT, AA).run(MF);
}

PreservedAnalyses
SIInsertWaitcntsPass::run(MachineFunction &MF,
                          MachineFunctionAnalysisManager &MFAM) {
  auto *MLI = &MFAM.getResult<MachineLoopAnalysis>(MF);
  auto *PDT = &MFAM.getResult<MachinePostDominatorTreeAnalysis>(MF);
  auto *AA = MFAM.getResult<FunctionAnalysisManagerMachineFunctionProxy>(MF)
                 .getCachedResult<AAManager>(MF.getFunction());

  if (!SIInsertWaitcnts(MLI, PDT, AA).run(MF))
    return PreservedAnalyses::all();

  return getMachineFunctionPassPreservedAnalyses()
      .preserveSet<CFGAnalyses>()
      .preserve<AAManager>();
}
bool SIInsertWaitcnts::run(MachineFunction &MF) {
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MRI = &MF.getRegInfo();

  bool Modified = false;

  if (ST->hasExtendedWaitCounts()) {
    // GFX12+ uses separate wait instructions per counter; expert scheduling
    // additionally tracks the VA_VDST/VM_VSRC dependency counters.
    IsExpertMode = ST->hasExpertSchedulingMode() &&
                   ExpertSchedulingModeFlag; // (per-function enable check elided)
    MaxCounter = IsExpertMode ? NUM_EXPERT_INST_CNTS : NUM_EXTENDED_INST_CNTS;
    WCGGFX12Plus =
        WaitcntGeneratorGFX12Plus(MF, MaxCounter, &Limits, IsExpertMode);
    WCG = &WCGGFX12Plus;
  } else {
    MaxCounter = NUM_NORMAL_INST_CNTS;
    WCGPreGFX12 = WaitcntGeneratorPreGFX12(MF, NUM_NORMAL_INST_CNTS, &Limits);
    WCG = &WCGPreGFX12;
  }
  for (auto T : inst_counter_types())
    ForceEmitWaitcnt[T] = false;

  WaitEventMaskForInst = WCG->getWaitEventMask();

  SmemAccessCounter = eventCounter(WaitEventMaskForInst, SMEM_ACCESS);
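// The counter state at function entry depends on how the function is
// reached: kernels start with hardware-initialized counters but possibly
// outstanding dispatch loads, while callable functions inherit whatever the
// caller left pending.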
  MachineBasicBlock &EntryBB = MF.front();
  if (MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
    // Wait for any outstanding memory operations that the input registers
    // may depend on.
    MachineBasicBlock::iterator I = EntryBB.begin();
    while (I != EntryBB.end() && I->isMetaInstruction())
      ++I;

    if (ST->hasExtendedWaitCounts()) {
      BuildMI(EntryBB, I, DebugLoc(), TII->get(AMDGPU::S_WAIT_LOADCNT_DSCNT))
          .addImm(0);
      for (auto CT : inst_counter_types(NUM_EXTENDED_INST_CNTS)) {
        if (CT == LOAD_CNT || CT == DS_CNT || CT == STORE_CNT || CT == X_CNT)
          continue;

        if (!ST->hasImageInsts() &&
            (CT == EXP_CNT || CT == SAMPLE_CNT || CT == BVH_CNT))
          continue;

        BuildMI(EntryBB, I, DebugLoc(),
                TII->get(instrsForExtendedCounterTypes[CT]))
            .addImm(0);
      }
    } else {
      BuildMI(EntryBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT)).addImm(0);
    }
    Modified = true;
  } else {
    // Non-kernel functions begin with a conservative state: the counts left
    // pending by the caller are unknown.
    auto NonKernelInitialState = std::make_unique<WaitcntBrackets>(this);
    NonKernelInitialState->setStateOnFunctionEntryOrReturn();
    BlockInfos[&EntryBB].Incoming = std::move(NonKernelInitialState);
    Modified = true;
  }
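// WaitcntBrackets::merge() joins the pending-event state of two paths. If
// merging a predecessor's outgoing state changes a successor's incoming
// state, the successor is marked Dirty; if that successor precedes the
// current block in the traversal order, another pass over the blocks is
// required, so the iteration below only stops at a fixed point.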
  // Keep iterating over the blocks in reverse post order, inserting and
  // updating s_waitcnt where needed, until a fix point is reached.
  for (auto *MBB : ReversePostOrderTraversal<MachineFunction *>(&MF))
    BlockInfos.insert({MBB, BlockInfo()});

  std::unique_ptr<WaitcntBrackets> Brackets;
  bool Repeat;
  do {
    Repeat = false;

    for (auto BII = BlockInfos.begin(), BIE = BlockInfos.end(); BII != BIE;
         ++BII) {
      MachineBasicBlock *MBB = BII->first;
      BlockInfo &BI = BII->second;
      if (!BI.Dirty)
        continue;

      if (BI.Incoming) {
        if (!Brackets)
          Brackets = std::make_unique<WaitcntBrackets>(*BI.Incoming);
        else
          *Brackets = *BI.Incoming;
      } else {
        if (!Brackets)
          Brackets = std::make_unique<WaitcntBrackets>(this);
        else
          *Brackets = WaitcntBrackets(this);
      }

      Modified |= insertWaitcntInBlock(MF, *MBB, *Brackets);
      BI.Dirty = false;

      if (Brackets->hasPendingEvent()) {
        BlockInfo *MoveBracketsToSucc = nullptr;
        for (MachineBasicBlock *Succ : MBB->successors()) {
          auto *SuccBII = BlockInfos.find(Succ);
          BlockInfo &SuccBI = SuccBII->second;
          if (!SuccBI.Incoming) {
            SuccBI.Dirty = true;
            if (SuccBII <= BII) {
              // The successor was already visited on this pass: repeat.
              Repeat = true;
            }
            if (!MoveBracketsToSucc) {
              MoveBracketsToSucc = &SuccBI;
            } else {
              SuccBI.Incoming = std::make_unique<WaitcntBrackets>(*Brackets);
            }
          } else if (SuccBI.Incoming->merge(*Brackets)) {
            SuccBI.Dirty = true;
            if (SuccBII <= BII) {
              Repeat = true;
            }
          }
        }
        // Hand ownership of the brackets to the first successor that needs
        // them instead of copying.
        if (MoveBracketsToSucc)
          MoveBracketsToSucc->Incoming = std::move(Brackets);
      }
    }
  } while (Repeat);
  if (ST->hasScalarStores()) {
    SmallVector<MachineBasicBlock *, 4> EndPgmBlocks;
    bool HaveScalarStores = false;

    for (MachineBasicBlock &MBB : MF) {
      for (MachineInstr &MI : MBB) {
        if (!HaveScalarStores && TII->isScalarStore(MI))
          HaveScalarStores = true;

        if (MI.getOpcode() == AMDGPU::S_ENDPGM ||
            MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG)
          EndPgmBlocks.push_back(&MBB);
      }
    }

    if (HaveScalarStores) {
      // If scalar writes are used, the cache must be flushed or else the next
      // wave to reuse the same scratch memory can be clobbered.
      //
      // Insert s_dcache_wb at wave termination points if there were any
      // scalar stores, and only if the cache hasn't already been flushed.
      for (MachineBasicBlock *MBB : EndPgmBlocks) {
        bool SeenDCacheWB = false;

        for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
             I != E; ++I) {
          if (I->getOpcode() == AMDGPU::S_DCACHE_WB)
            SeenDCacheWB = true;
          else if (TII->isScalarStore(*I))
            SeenDCacheWB = false;

          // FIXME: It would be better to insert this before a waitcnt if any.
          if ((I->getOpcode() == AMDGPU::S_ENDPGM ||
               I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) &&
              !SeenDCacheWB) {
            Modified = true;
            BuildMI(*MBB, I, I->getDebugLoc(), TII->get(AMDGPU::S_DCACHE_WB));
          }
        }
      }
    }
  }
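// Expert scheduling mode (GFX12+) must be scoped to this function: it is
// enabled after the entry block's meta instructions, disabled before each
// call and each return, and re-enabled after each call returns, since other
// functions cannot be assumed to be compiled in this mode.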
  if (IsExpertMode) {
    MachineBasicBlock::iterator I = EntryBB.begin();
    while (I != EntryBB.end() && I->isMetaInstruction())
      ++I;
    setSchedulingMode(EntryBB, I, true);

    for (MachineInstr *MI : CallInsts) {
      MachineBasicBlock &MBB = *MI->getParent();
      setSchedulingMode(MBB, MI, false);
      setSchedulingMode(MBB, std::next(MI->getIterator()), true);
    }
    for (MachineInstr *MI : ReturnInsts)
      setSchedulingMode(*MI->getParent(), MI, false);
  }
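// Finally, release the wave's VGPRs at program end. In dynamic VGPR mode
// this is an explicit s_alloc_vgpr 0; otherwise a DEALLOC_VGPRS message is
// sent, preceded by an s_nop on subtargets that require one.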
  if (MF.getInfo<SIMachineFunctionInfo>()->isDynamicVGPREnabled()) {
    for (MachineInstr *MI : ReleaseVGPRInsts) {
      BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
              TII->get(AMDGPU::S_ALLOC_VGPR))
          .addImm(0);
      Modified = true;
    }
  } else if (!ReleaseVGPRInsts.empty() &&
             (MF.getFrameInfo().hasCalls() ||
              ST->getOccupancyWithNumVGPRs(
                  TRI->getNumUsedPhysRegs(*MRI, AMDGPU::VGPR_32RegClass)) <
                  AMDGPU::IsaInfo::getMaxWavesPerEU(ST))) {
    // Only send DEALLOC_VGPRS if doing so may actually free VGPRs for other
    // waves, or if calls make the VGPR usage hard to bound.
    for (MachineInstr *MI : ReleaseVGPRInsts) {
      if (ST->requiresNopBeforeDeallocVGPRs()) {
        BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
                TII->get(AMDGPU::S_NOP))
            .addImm(0);
      }
      BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
              TII->get(AMDGPU::S_SENDMSG))
          .addImm(AMDGPU::SendMsg::ID_DEALLOC_VGPRS_GFX11Plus);
      Modified = true;
    }
  }

  ReturnInsts.clear();
  ReleaseVGPRInsts.clear();
  PreheadersToFlush.clear();
  SLoadAddresses.clear();

  return Modified;
}