1//===- SIInsertWaitcnts.cpp - Insert Wait Instructions --------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// Insert wait instructions for memory reads and writes.
11///
12/// Memory reads and writes are issued asynchronously, so we need to insert
13/// S_WAITCNT instructions when we want to access any of their results or
14/// overwrite any register that's used asynchronously.
15///
16/// TODO: This pass currently keeps one timeline per hardware counter. A more
17/// finely-grained approach that keeps one timeline per event type could
18/// sometimes get away with generating weaker s_waitcnt instructions. For
19/// example, when both SMEM and LDS are in flight and we need to wait for
20/// the i-th-last LDS instruction, then an lgkmcnt(i) is actually sufficient,
21/// but the pass will currently generate a conservative lgkmcnt(0) because
22/// multiple event types are in flight.
23//
24//===----------------------------------------------------------------------===//
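//
// A concrete illustration of the TODO above (a hypothetical sequence with
// placeholder operands, not taken from any particular function):
//
//   s_load_dword  s0, ...    ; SMEM, lgkmcnt event
//   ds_read_b32   v0, ...    ; LDS,  lgkmcnt event -- result needed below
//   ds_read_b32   v1, ...    ; LDS,  lgkmcnt event
//   ...                      ; first use of v0
//
// One LDS instruction was issued after the ds_read that produces v0, so
// lgkmcnt(1) would be sufficient: LDS events complete in order, so if the v0
// read were still outstanding the v1 read would be too, and the count could
// not have reached 1. The pass instead emits lgkmcnt(0) because both SMEM and
// LDS events are pending and the combined counter can decrement out of order.
//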
25
26#include "AMDGPU.h"
27#include "GCNSubtarget.h"
31#include "llvm/ADT/MapVector.h"
33#include "llvm/ADT/Sequence.h"
39#include "llvm/IR/Dominators.h"
43
44using namespace llvm;
45
46#define DEBUG_TYPE "si-insert-waitcnts"
47
48DEBUG_COUNTER(ForceExpCounter, DEBUG_TYPE "-forceexp",
49 "Force emit s_waitcnt expcnt(0) instrs");
50DEBUG_COUNTER(ForceLgkmCounter, DEBUG_TYPE "-forcelgkm",
51 "Force emit s_waitcnt lgkmcnt(0) instrs");
52DEBUG_COUNTER(ForceVMCounter, DEBUG_TYPE "-forcevm",
53 "Force emit s_waitcnt vmcnt(0) instrs");
54
55static cl::opt<bool>
56 ForceEmitZeroFlag("amdgpu-waitcnt-forcezero",
57 cl::desc("Force all waitcnt instrs to be emitted as "
58 "s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)"),
59 cl::init(false), cl::Hidden);
60
62 "amdgpu-waitcnt-load-forcezero",
63 cl::desc("Force all waitcnt load counters to wait until 0"),
64 cl::init(false), cl::Hidden);
65
67 "amdgpu-expert-scheduling-mode",
68 cl::desc("Enable expert scheduling mode 2 for all functions (GFX12+ only)"),
69 cl::init(false), cl::Hidden);
70
71namespace {
72// Class of object that encapsulates the latest instruction counter score
73// associated with the operand. Used for determining whether an
74// s_waitcnt instruction needs to be emitted.
75
76enum InstCounterType {
77 LOAD_CNT = 0, // VMcnt prior to gfx12.
78 DS_CNT, // LGKMcnt prior to gfx12.
79 EXP_CNT, //
80 STORE_CNT, // VScnt in gfx10/gfx11.
81 NUM_NORMAL_INST_CNTS,
82 SAMPLE_CNT = NUM_NORMAL_INST_CNTS, // gfx12+ only.
83 BVH_CNT, // gfx12+ only.
84 KM_CNT, // gfx12+ only.
85 X_CNT, // gfx1250.
86 NUM_EXTENDED_INST_CNTS,
87 VA_VDST = NUM_EXTENDED_INST_CNTS, // gfx12+ expert mode only.
88 VM_VSRC, // gfx12+ expert mode only.
89 NUM_EXPERT_INST_CNTS,
90 NUM_INST_CNTS = NUM_EXPERT_INST_CNTS
91};
92} // namespace
93
94namespace llvm {
95template <> struct enum_iteration_traits<InstCounterType> {
96 static constexpr bool is_iterable = true;
97};
98} // namespace llvm
99
100namespace {
101// Return an iterator over all counters between LOAD_CNT (the first counter)
102// and \c MaxCounter (exclusive, default value yields an enumeration over
103// all counters).
104auto inst_counter_types(InstCounterType MaxCounter = NUM_INST_CNTS) {
105 return enum_seq(LOAD_CNT, MaxCounter);
106}
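// Usage sketch: the pass iterates over the active counters like this (with
// MaxCounter chosen per subtarget later in this file):
//
//   for (auto T : inst_counter_types(NUM_EXTENDED_INST_CNTS)) {
//     // Visits LOAD_CNT, DS_CNT, EXP_CNT, STORE_CNT, SAMPLE_CNT, BVH_CNT,
//     // KM_CNT and X_CNT, but not the expert-mode counters.
//   }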
107
108// Get the maximum wait count value for a given counter type.
109static unsigned getWaitCountMax(const AMDGPU::HardwareLimits &Limits,
110 InstCounterType T) {
111 switch (T) {
112 case LOAD_CNT:
113 return Limits.LoadcntMax;
114 case DS_CNT:
115 return Limits.DscntMax;
116 case EXP_CNT:
117 return Limits.ExpcntMax;
118 case STORE_CNT:
119 return Limits.StorecntMax;
120 case SAMPLE_CNT:
121 return Limits.SamplecntMax;
122 case BVH_CNT:
123 return Limits.BvhcntMax;
124 case KM_CNT:
125 return Limits.KmcntMax;
126 case X_CNT:
127 return Limits.XcntMax;
128 case VA_VDST:
129 return Limits.VaVdstMax;
130 case VM_VSRC:
131 return Limits.VmVsrcMax;
132 default:
133 return 0;
134 }
135}
136
137/// Integer IDs used to track vector memory locations we may have to wait on.
138/// Encoded as u16 chunks:
139///
140/// [0, REGUNITS_END ): MCRegUnit
141/// [LDSDMA_BEGIN, LDSDMA_END ) : LDS DMA IDs
142///
143/// NOTE: The choice of encoding these as "u16 chunks" is arbitrary.
144/// It gives (1 << 16) entries per category, which is more than enough
145/// for all register units. MCPhysReg is u16 so we don't even support >u16
146/// physical register numbers at this time, let alone >u16 register units.
147/// In any case, an assertion in "WaitcntBrackets" ensures REGUNITS_END
148/// is enough for all register units.
149using VMEMID = uint32_t;
150
151enum : VMEMID {
152 TRACKINGID_RANGE_LEN = (1 << 16),
153
154 // Important: MCRegUnits must always be tracked starting from 0, as we
155 // need to be able to convert between a MCRegUnit and a VMEMID freely.
156 REGUNITS_BEGIN = 0,
157 REGUNITS_END = REGUNITS_BEGIN + TRACKINGID_RANGE_LEN,
158
159 // Note for LDSDMA: LDSDMA_BEGIN corresponds to the "common"
160 // entry, which is updated for all LDS DMA operations encountered.
161 // Specific LDS DMA IDs start at LDSDMA_BEGIN + 1.
162 NUM_LDSDMA = TRACKINGID_RANGE_LEN,
163 LDSDMA_BEGIN = REGUNITS_END,
164 LDSDMA_END = LDSDMA_BEGIN + NUM_LDSDMA,
165};
166
167/// Convert a MCRegUnit to a VMEMID.
168static constexpr VMEMID toVMEMID(MCRegUnit RU) {
169 return static_cast<unsigned>(RU);
170}
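// Worked examples of the VMEMID encoding (the values follow directly from the
// enum above):
//
//   toVMEMID(MCRegUnit(5))  == 5        // register units map to themselves
//   LDSDMA_BEGIN            == 0x10000  // the "common" LDS DMA entry
//   LDSDMA_BEGIN + 3        == 0x10003  // LDS DMA slot 3, i.e. LDSDMAStores[2]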
171
172#define AMDGPU_DECLARE_WAIT_EVENTS(DECL) \
173 DECL(VMEM_ACCESS) /* vmem read & write (pre-gfx10), vmem read (gfx10+) */ \
174 DECL(VMEM_SAMPLER_READ_ACCESS) /* vmem SAMPLER read (gfx12+ only) */ \
175 DECL(VMEM_BVH_READ_ACCESS) /* vmem BVH read (gfx12+ only) */ \
176 DECL(GLOBAL_INV_ACCESS) /* GLOBAL_INV (gfx12+ only) */ \
177 DECL(VMEM_WRITE_ACCESS) /* vmem write that is not scratch */ \
178 DECL(SCRATCH_WRITE_ACCESS) /* vmem write that may be scratch */ \
179 DECL(VMEM_GROUP) /* vmem group */ \
180 DECL(LDS_ACCESS) /* lds read & write */ \
181 DECL(GDS_ACCESS) /* gds read & write */ \
182 DECL(SQ_MESSAGE) /* send message */ \
183 DECL(SCC_WRITE) /* write to SCC from barrier */ \
184 DECL(SMEM_ACCESS) /* scalar-memory read & write */ \
185 DECL(SMEM_GROUP) /* scalar-memory group */ \
186 DECL(EXP_GPR_LOCK) /* export holding on its data src */ \
187 DECL(GDS_GPR_LOCK) /* GDS holding on its data and addr src */ \
188 DECL(EXP_POS_ACCESS) /* write to export position */ \
189 DECL(EXP_PARAM_ACCESS) /* write to export parameter */ \
190 DECL(VMW_GPR_LOCK) /* vmem write holding on its data src */ \
191 DECL(EXP_LDS_ACCESS) /* read by ldsdir counting as export */ \
192 DECL(VGPR_CSMACC_WRITE) /* write VGPR dest in Core/Side-MACC VALU */ \
193 DECL(VGPR_DPMACC_WRITE) /* write VGPR dest in DPMACC VALU */ \
194 DECL(VGPR_TRANS_WRITE) /* write VGPR dest in TRANS VALU */ \
195 DECL(VGPR_XDL_WRITE) /* write VGPR dest in XDL VALU */ \
196 DECL(VGPR_LDS_READ) /* read VGPR source in LDS */ \
197 DECL(VGPR_FLAT_READ) /* read VGPR source in FLAT */ \
198 DECL(VGPR_VMEM_READ) /* read VGPR source in other VMEM */
199
200// clang-format off
201#define AMDGPU_EVENT_ENUM(Name) Name,
202enum WaitEventType {
203 AMDGPU_DECLARE_WAIT_EVENTS(AMDGPU_EVENT_ENUM)
204 NUM_WAIT_EVENTS
205};
206#undef AMDGPU_EVENT_ENUM
207
208#define AMDGPU_EVENT_NAME(Name) #Name,
209static constexpr StringLiteral WaitEventTypeName[] = {
210 AMDGPU_DECLARE_WAIT_EVENTS(AMDGPU_EVENT_NAME)
211};
212#undef AMDGPU_EVENT_NAME
213// clang-format on
214
215// Enumerate different types of result-returning VMEM operations. Although
216// s_waitcnt orders them all with a single vmcnt counter, in the absence of
217// s_waitcnt only instructions of the same VmemType are guaranteed to write
218// their results in order -- so there is no need to insert an s_waitcnt between
219// two instructions of the same type that write the same vgpr.
220enum VmemType {
221 // BUF instructions and MIMG instructions without a sampler.
222 VMEM_NOSAMPLER,
223 // MIMG instructions with a sampler.
224 VMEM_SAMPLER,
225 // BVH instructions
226 VMEM_BVH,
227 NUM_VMEM_TYPES
228};
229
230// Maps values of InstCounterType to the instruction that waits on that
231// counter. Only used if GCNSubtarget::hasExtendedWaitCounts()
232// returns true, and does not cover VA_VDST or VM_VSRC.
233static const unsigned instrsForExtendedCounterTypes[NUM_EXTENDED_INST_CNTS] = {
234 AMDGPU::S_WAIT_LOADCNT, AMDGPU::S_WAIT_DSCNT, AMDGPU::S_WAIT_EXPCNT,
235 AMDGPU::S_WAIT_STORECNT, AMDGPU::S_WAIT_SAMPLECNT, AMDGPU::S_WAIT_BVHCNT,
236 AMDGPU::S_WAIT_KMCNT, AMDGPU::S_WAIT_XCNT};
237
238static bool updateVMCntOnly(const MachineInstr &Inst) {
239 return (SIInstrInfo::isVMEM(Inst) && !SIInstrInfo::isFLAT(Inst)) ||
240 SIInstrInfo::isFLATGlobal(Inst) || SIInstrInfo::isFLATScratch(Inst);
241}
242
243#ifndef NDEBUG
244static bool isNormalMode(InstCounterType MaxCounter) {
245 return MaxCounter == NUM_NORMAL_INST_CNTS;
246}
247#endif // NDEBUG
248
249VmemType getVmemType(const MachineInstr &Inst) {
250 assert(updateVMCntOnly(Inst));
251 if (!SIInstrInfo::isImage(Inst))
252 return VMEM_NOSAMPLER;
253 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Inst.getOpcode());
254 const AMDGPU::MIMGBaseOpcodeInfo *BaseInfo =
255 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
256
257 if (BaseInfo->BVH)
258 return VMEM_BVH;
259
260 // We have to make an additional check for isVSAMPLE here since some
261 // instructions don't have a sampler, but are still classified as sampler
262 // instructions for the purposes of e.g. waitcnt.
263 if (BaseInfo->Sampler || BaseInfo->MSAA || SIInstrInfo::isVSAMPLE(Inst))
264 return VMEM_SAMPLER;
265
266 return VMEM_NOSAMPLER;
267}
268
269unsigned &getCounterRef(AMDGPU::Waitcnt &Wait, InstCounterType T) {
270 switch (T) {
271 case LOAD_CNT:
272 return Wait.LoadCnt;
273 case EXP_CNT:
274 return Wait.ExpCnt;
275 case DS_CNT:
276 return Wait.DsCnt;
277 case STORE_CNT:
278 return Wait.StoreCnt;
279 case SAMPLE_CNT:
280 return Wait.SampleCnt;
281 case BVH_CNT:
282 return Wait.BvhCnt;
283 case KM_CNT:
284 return Wait.KmCnt;
285 case X_CNT:
286 return Wait.XCnt;
287 case VA_VDST:
288 return Wait.VaVdst;
289 case VM_VSRC:
290 return Wait.VmVsrc;
291 default:
292 llvm_unreachable("bad InstCounterType");
293 }
294}
295
296void addWait(AMDGPU::Waitcnt &Wait, InstCounterType T, unsigned Count) {
297 unsigned &WC = getCounterRef(Wait, T);
298 WC = std::min(WC, Count);
299}
300
301void setNoWait(AMDGPU::Waitcnt &Wait, InstCounterType T) {
302 getCounterRef(Wait, T) = ~0u;
303}
304
305unsigned getWait(AMDGPU::Waitcnt &Wait, InstCounterType T) {
306 return getCounterRef(Wait, T);
307}
308
309// Mapping from event to counter according to the table masks.
310InstCounterType eventCounter(const unsigned *masks, WaitEventType E) {
311 for (auto T : inst_counter_types()) {
312 if (masks[T] & (1 << E))
313 return T;
314 }
315 llvm_unreachable("event type has no associated counter");
316}
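// Example: with the pre-gfx12 mask table defined below, eventCounter(masks,
// SMEM_ACCESS) returns DS_CNT, because the DS_CNT mask is the only entry that
// contains the SMEM_ACCESS bit.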
317
318class WaitcntBrackets;
319
320// This abstracts the logic for generating and updating S_WAIT* instructions
321// away from the analysis that determines where they are needed. This was
322// done because the set of counters and instructions for waiting on them
323// underwent a major shift with gfx12, sufficiently so that having this
324// abstraction allows the main analysis logic to be simpler than it would
325// otherwise have had to become.
326class WaitcntGenerator {
327protected:
328 const GCNSubtarget *ST = nullptr;
329 const SIInstrInfo *TII = nullptr;
330 AMDGPU::IsaVersion IV;
331 InstCounterType MaxCounter;
332 bool OptNone;
333 bool ExpandWaitcntProfiling = false;
334 const AMDGPU::HardwareLimits *Limits = nullptr;
335
336public:
337 WaitcntGenerator() = default;
338 WaitcntGenerator(const MachineFunction &MF, InstCounterType MaxCounter,
339 const AMDGPU::HardwareLimits *Limits)
340 : ST(&MF.getSubtarget<GCNSubtarget>()), TII(ST->getInstrInfo()),
341 IV(AMDGPU::getIsaVersion(ST->getCPU())), MaxCounter(MaxCounter),
342 OptNone(MF.getFunction().hasOptNone() ||
343 MF.getTarget().getOptLevel() == CodeGenOptLevel::None),
344 ExpandWaitcntProfiling(
345 MF.getFunction().hasFnAttribute("amdgpu-expand-waitcnt-profiling")),
346 Limits(Limits) {}
347
348 // Return true if the current function should be compiled with no
349 // optimization.
350 bool isOptNone() const { return OptNone; }
351
352 const AMDGPU::HardwareLimits &getLimits() const { return *Limits; }
353
354 // Edits an existing sequence of wait count instructions according
355 // to an incoming Waitcnt value, which is itself updated to reflect
356 // any new wait count instructions which may need to be generated by
357 // WaitcntGenerator::createNewWaitcnt(). It will return true if any edits
358 // were made.
359 //
360 // This editing will usually merely update operands, but it may also
361 // delete instructions if the incoming Wait value indicates they are not
362 // needed. It may also remove existing instructions for which a wait
363 // is needed if it can be determined that it is better to generate new
364 // instructions later, as can happen on gfx12.
365 virtual bool
366 applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
367 MachineInstr &OldWaitcntInstr, AMDGPU::Waitcnt &Wait,
368 MachineBasicBlock::instr_iterator It) const = 0;
369
370 // Transform a soft waitcnt into a normal one.
371 bool promoteSoftWaitCnt(MachineInstr *Waitcnt) const;
372
373 // Generates new wait count instructions according to the value of
374 // Wait, returning true if any new instructions were created.
375 // ScoreBrackets is used for profiling expansion.
376 virtual bool createNewWaitcnt(MachineBasicBlock &Block,
377 MachineBasicBlock::instr_iterator It,
378 AMDGPU::Waitcnt Wait,
379 const WaitcntBrackets &ScoreBrackets) = 0;
380
381 // Returns an array of bit masks which can be used to map values in
382 // WaitEventType to corresponding counter values in InstCounterType.
383 virtual const unsigned *getWaitEventMask() const = 0;
384
385 // Returns a new waitcnt with all counters except VScnt set to 0. If
386 // IncludeVSCnt is true, VScnt is set to 0, otherwise it is set to ~0u.
387 virtual AMDGPU::Waitcnt getAllZeroWaitcnt(bool IncludeVSCnt) const = 0;
388
389 virtual ~WaitcntGenerator() = default;
390
391 // Create a mask value from the initializer list of wait event types.
392 static constexpr unsigned
393 eventMask(std::initializer_list<WaitEventType> Events) {
394 unsigned Mask = 0;
395 for (auto &E : Events)
396 Mask |= 1 << E;
397
398 return Mask;
399 }
400};
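// Example of eventMask(): eventMask({SMEM_ACCESS, LDS_ACCESS}) evaluates to
// (1 << SMEM_ACCESS) | (1 << LDS_ACCESS). The per-counter tables returned by
// getWaitEventMask() in the subclasses below are built from such masks.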
401
402class WaitcntGeneratorPreGFX12 : public WaitcntGenerator {
403public:
404 using WaitcntGenerator::WaitcntGenerator;
405
406 bool
407 applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
408 MachineInstr &OldWaitcntInstr, AMDGPU::Waitcnt &Wait,
409 MachineBasicBlock::instr_iterator It) const override;
410
411 bool createNewWaitcnt(MachineBasicBlock &Block,
412 MachineBasicBlock::instr_iterator It,
413 AMDGPU::Waitcnt Wait,
414 const WaitcntBrackets &ScoreBrackets) override;
415
416 const unsigned *getWaitEventMask() const override {
417 assert(ST);
418
419 static const unsigned WaitEventMaskForInstPreGFX12[NUM_INST_CNTS] = {
420 eventMask(
421 {VMEM_ACCESS, VMEM_SAMPLER_READ_ACCESS, VMEM_BVH_READ_ACCESS}),
422 eventMask({SMEM_ACCESS, LDS_ACCESS, GDS_ACCESS, SQ_MESSAGE}),
423 eventMask({EXP_GPR_LOCK, GDS_GPR_LOCK, VMW_GPR_LOCK, EXP_PARAM_ACCESS,
424 EXP_POS_ACCESS, EXP_LDS_ACCESS}),
425 eventMask({VMEM_WRITE_ACCESS, SCRATCH_WRITE_ACCESS}),
426 0,
427 0,
428 0,
429 0,
430 0,
431 0};
432
433 return WaitEventMaskForInstPreGFX12;
434 }
435
436 AMDGPU::Waitcnt getAllZeroWaitcnt(bool IncludeVSCnt) const override;
437};
438
439class WaitcntGeneratorGFX12Plus : public WaitcntGenerator {
440protected:
441 bool IsExpertMode;
442
443public:
444 WaitcntGeneratorGFX12Plus() = default;
445 WaitcntGeneratorGFX12Plus(const MachineFunction &MF,
446 InstCounterType MaxCounter,
447 const AMDGPU::HardwareLimits *Limits,
448 bool IsExpertMode)
449 : WaitcntGenerator(MF, MaxCounter, Limits), IsExpertMode(IsExpertMode) {}
450
451 bool
452 applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
453 MachineInstr &OldWaitcntInstr, AMDGPU::Waitcnt &Wait,
454 MachineBasicBlock::instr_iterator It) const override;
455
456 bool createNewWaitcnt(MachineBasicBlock &Block,
457 MachineBasicBlock::instr_iterator It,
458 AMDGPU::Waitcnt Wait,
459 const WaitcntBrackets &ScoreBrackets) override;
460
461 const unsigned *getWaitEventMask() const override {
462 assert(ST);
463
464 static const unsigned WaitEventMaskForInstGFX12Plus[NUM_INST_CNTS] = {
465 eventMask({VMEM_ACCESS, GLOBAL_INV_ACCESS}),
466 eventMask({LDS_ACCESS, GDS_ACCESS}),
467 eventMask({EXP_GPR_LOCK, GDS_GPR_LOCK, VMW_GPR_LOCK, EXP_PARAM_ACCESS,
468 EXP_POS_ACCESS, EXP_LDS_ACCESS}),
469 eventMask({VMEM_WRITE_ACCESS, SCRATCH_WRITE_ACCESS}),
470 eventMask({VMEM_SAMPLER_READ_ACCESS}),
471 eventMask({VMEM_BVH_READ_ACCESS}),
472 eventMask({SMEM_ACCESS, SQ_MESSAGE, SCC_WRITE}),
473 eventMask({VMEM_GROUP, SMEM_GROUP}),
474 eventMask({VGPR_CSMACC_WRITE, VGPR_DPMACC_WRITE, VGPR_TRANS_WRITE,
475 VGPR_XDL_WRITE}),
476 eventMask({VGPR_LDS_READ, VGPR_FLAT_READ, VGPR_VMEM_READ})};
477
478 return WaitEventMaskForInstGFX12Plus;
479 }
480
481 AMDGPU::Waitcnt getAllZeroWaitcnt(bool IncludeVSCnt) const override;
482};
483
484// Flags indicating which counters should be flushed in a loop preheader.
485struct PreheaderFlushFlags {
486 bool FlushVmCnt = false;
487 bool FlushDsCnt = false;
488};
489
490class SIInsertWaitcnts {
491public:
492 const GCNSubtarget *ST;
493 const SIInstrInfo *TII = nullptr;
494 const SIRegisterInfo *TRI = nullptr;
495 const MachineRegisterInfo *MRI = nullptr;
496 InstCounterType SmemAccessCounter;
497 InstCounterType MaxCounter;
498 bool IsExpertMode = false;
499 const unsigned *WaitEventMaskForInst;
500
501private:
502 DenseMap<const Value *, MachineBasicBlock *> SLoadAddresses;
503 DenseMap<MachineBasicBlock *, PreheaderFlushFlags> PreheadersToFlush;
504 MachineLoopInfo *MLI;
505 MachinePostDominatorTree *PDT;
506 AliasAnalysis *AA = nullptr;
507
508 struct BlockInfo {
509 std::unique_ptr<WaitcntBrackets> Incoming;
510 bool Dirty = true;
511 };
512
513 MapVector<MachineBasicBlock *, BlockInfo> BlockInfos;
514
515 bool ForceEmitWaitcnt[NUM_INST_CNTS];
516
517 // In any given run of this pass, WCG will point to one of these two
518 // generator objects, which must have been re-initialised before use
519 // with a value constructed via the subtarget-aware constructor.
520 WaitcntGeneratorPreGFX12 WCGPreGFX12;
521 WaitcntGeneratorGFX12Plus WCGGFX12Plus;
522
523 WaitcntGenerator *WCG = nullptr;
524
525 // Remember call and return instructions in the function.
526 DenseSet<MachineInstr *> CallInsts;
527 DenseSet<MachineInstr *> ReturnInsts;
528
529 // S_ENDPGM instructions before which we should insert a DEALLOC_VGPRS
530 // message.
531 DenseSet<MachineInstr *> ReleaseVGPRInsts;
532
533 AMDGPU::HardwareLimits Limits;
534
535public:
536 SIInsertWaitcnts(MachineLoopInfo *MLI, MachinePostDominatorTree *PDT,
537 AliasAnalysis *AA)
538 : MLI(MLI), PDT(PDT), AA(AA) {
539 (void)ForceExpCounter;
540 (void)ForceLgkmCounter;
541 (void)ForceVMCounter;
542 }
543
544 const AMDGPU::HardwareLimits &getLimits() const { return Limits; }
545
546 PreheaderFlushFlags getPreheaderFlushFlags(MachineLoop *ML,
547 const WaitcntBrackets &Brackets);
548 PreheaderFlushFlags isPreheaderToFlush(MachineBasicBlock &MBB,
549 const WaitcntBrackets &ScoreBrackets);
550 bool isVMEMOrFlatVMEM(const MachineInstr &MI) const;
551 bool isDSRead(const MachineInstr &MI) const;
552 bool mayStoreIncrementingDSCNT(const MachineInstr &MI) const;
553 bool run(MachineFunction &MF);
554
555 void setForceEmitWaitcnt() {
556// For non-debug builds, ForceEmitWaitcnt has been initialized to false;
557// For debug builds, get the debug counter info and adjust if need be
558#ifndef NDEBUG
559 if (DebugCounter::isCounterSet(ForceExpCounter) &&
560 DebugCounter::shouldExecute(ForceExpCounter)) {
561 ForceEmitWaitcnt[EXP_CNT] = true;
562 } else {
563 ForceEmitWaitcnt[EXP_CNT] = false;
564 }
565
566 if (DebugCounter::isCounterSet(ForceLgkmCounter) &&
567 DebugCounter::shouldExecute(ForceLgkmCounter)) {
568 ForceEmitWaitcnt[DS_CNT] = true;
569 ForceEmitWaitcnt[KM_CNT] = true;
570 } else {
571 ForceEmitWaitcnt[DS_CNT] = false;
572 ForceEmitWaitcnt[KM_CNT] = false;
573 }
574
575 if (DebugCounter::isCounterSet(ForceVMCounter) &&
576 DebugCounter::shouldExecute(ForceVMCounter)) {
577 ForceEmitWaitcnt[LOAD_CNT] = true;
578 ForceEmitWaitcnt[SAMPLE_CNT] = true;
579 ForceEmitWaitcnt[BVH_CNT] = true;
580 } else {
581 ForceEmitWaitcnt[LOAD_CNT] = false;
582 ForceEmitWaitcnt[SAMPLE_CNT] = false;
583 ForceEmitWaitcnt[BVH_CNT] = false;
584 }
585
586 ForceEmitWaitcnt[VA_VDST] = false;
587 ForceEmitWaitcnt[VM_VSRC] = false;
588#endif // NDEBUG
589 }
590
591 // Return the appropriate VMEM_*_ACCESS type for Inst, which must be a VMEM
592 // instruction.
593 WaitEventType getVmemWaitEventType(const MachineInstr &Inst) const {
594 switch (Inst.getOpcode()) {
595 // FIXME: GLOBAL_INV needs to be tracked with xcnt too.
596 case AMDGPU::GLOBAL_INV:
597 return GLOBAL_INV_ACCESS; // tracked using loadcnt, but doesn't write
598 // VGPRs
599 case AMDGPU::GLOBAL_WB:
600 case AMDGPU::GLOBAL_WBINV:
601 return VMEM_WRITE_ACCESS; // tracked using storecnt
602 default:
603 break;
604 }
605
606 // Maps VMEM access types to their corresponding WaitEventType.
607 static const WaitEventType VmemReadMapping[NUM_VMEM_TYPES] = {
608 VMEM_ACCESS, VMEM_SAMPLER_READ_ACCESS, VMEM_BVH_READ_ACCESS};
609
610 assert(SIInstrInfo::isVMEM(Inst));
611 // LDS DMA loads are also stores, but on the LDS side. On the VMEM side
612 // these should use VM_CNT.
613 if (!ST->hasVscnt() || SIInstrInfo::mayWriteLDSThroughDMA(Inst))
614 return VMEM_ACCESS;
615 if (Inst.mayStore() &&
616 (!Inst.mayLoad() || SIInstrInfo::isAtomicNoRet(Inst))) {
617 if (TII->mayAccessScratch(Inst))
618 return SCRATCH_WRITE_ACCESS;
619 return VMEM_WRITE_ACCESS;
620 }
621 if (!ST->hasExtendedWaitCounts() || SIInstrInfo::isFLAT(Inst))
622 return VMEM_ACCESS;
623 return VmemReadMapping[getVmemType(Inst)];
624 }
625
626 std::optional<WaitEventType>
627 getExpertSchedulingEventType(const MachineInstr &Inst) const;
628
629 bool isVmemAccess(const MachineInstr &MI) const;
630 bool generateWaitcntInstBefore(MachineInstr &MI,
631 WaitcntBrackets &ScoreBrackets,
632 MachineInstr *OldWaitcntInstr,
633 PreheaderFlushFlags FlushFlags);
634 bool generateWaitcnt(AMDGPU::Waitcnt Wait,
635 MachineBasicBlock::instr_iterator It,
636 MachineBasicBlock &Block, WaitcntBrackets &ScoreBrackets,
637 MachineInstr *OldWaitcntInstr);
638 void updateEventWaitcntAfter(MachineInstr &Inst,
639 WaitcntBrackets *ScoreBrackets);
640 bool isNextENDPGM(MachineBasicBlock::instr_iterator It,
641 MachineBasicBlock *Block) const;
642 bool insertForcedWaitAfter(MachineInstr &Inst, MachineBasicBlock &Block,
643 WaitcntBrackets &ScoreBrackets);
644 bool insertWaitcntInBlock(MachineFunction &MF, MachineBasicBlock &Block,
645 WaitcntBrackets &ScoreBrackets);
646 void setSchedulingMode(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
647 bool ExpertMode) const;
648};
649
650// This object maintains the current score brackets of each wait counter, and
651// a per-register scoreboard for each wait counter.
652//
653// We also maintain the latest score for every event type that can change the
654// waitcnt, in order to know if there are multiple types of events within
655// the brackets. When multiple event types are in flight within a bracket, the
656// wait count may be decremented out of order, so we need to insert an
657// "s_waitcnt 0" before use.
658class WaitcntBrackets {
659public:
660 WaitcntBrackets(const SIInsertWaitcnts *Context) : Context(Context) {
661 assert(Context->TRI->getNumRegUnits() < REGUNITS_END);
662 }
663
664#ifndef NDEBUG
665 ~WaitcntBrackets() {
666 unsigned NumUnusedVmem = 0, NumUnusedSGPRs = 0;
667 for (auto &[ID, Val] : VMem) {
668 if (Val.empty())
669 ++NumUnusedVmem;
670 }
671 for (auto &[ID, Val] : SGPRs) {
672 if (Val.empty())
673 ++NumUnusedSGPRs;
674 }
675
676 if (NumUnusedVmem || NumUnusedSGPRs) {
677 errs() << "WaitcntBracket had unused entries at destruction time: "
678 << NumUnusedVmem << " VMem and " << NumUnusedSGPRs
679 << " SGPR unused entries\n";
680 std::abort();
681 }
682 }
683#endif
684
685 bool isSmemCounter(InstCounterType T) const {
686 return T == Context->SmemAccessCounter || T == X_CNT;
687 }
688
689 unsigned getSgprScoresIdx(InstCounterType T) const {
690 assert(isSmemCounter(T) && "Invalid SMEM counter");
691 return T == X_CNT ? 1 : 0;
692 }
693
694 unsigned getScoreLB(InstCounterType T) const {
695 assert(T < NUM_INST_CNTS);
696 return ScoreLBs[T];
697 }
698
699 unsigned getScoreUB(InstCounterType T) const {
700 assert(T < NUM_INST_CNTS);
701 return ScoreUBs[T];
702 }
703
704 unsigned getScoreRange(InstCounterType T) const {
705 return getScoreUB(T) - getScoreLB(T);
706 }
707
708 unsigned getSGPRScore(MCRegUnit RU, InstCounterType T) const {
709 auto It = SGPRs.find(RU);
710 return It != SGPRs.end() ? It->second.Scores[getSgprScoresIdx(T)] : 0;
711 }
712
713 unsigned getVMemScore(VMEMID TID, InstCounterType T) const {
714 auto It = VMem.find(TID);
715 return It != VMem.end() ? It->second.Scores[T] : 0;
716 }
717
718 bool merge(const WaitcntBrackets &Other);
719
720 bool counterOutOfOrder(InstCounterType T) const;
721 void simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const {
722 simplifyWaitcnt(Wait, Wait);
723 }
724 void simplifyWaitcnt(const AMDGPU::Waitcnt &CheckWait,
725 AMDGPU::Waitcnt &UpdateWait) const;
726 void simplifyWaitcnt(InstCounterType T, unsigned &Count) const;
727 void simplifyXcnt(const AMDGPU::Waitcnt &CheckWait,
728 AMDGPU::Waitcnt &UpdateWait) const;
729 void simplifyVmVsrc(const AMDGPU::Waitcnt &CheckWait,
730 AMDGPU::Waitcnt &UpdateWait) const;
731
732 void determineWaitForPhysReg(InstCounterType T, MCPhysReg Reg,
733 AMDGPU::Waitcnt &Wait) const;
734 void determineWaitForLDSDMA(InstCounterType T, VMEMID TID,
735 AMDGPU::Waitcnt &Wait) const;
736 void tryClearSCCWriteEvent(MachineInstr *Inst);
737
738 void applyWaitcnt(const AMDGPU::Waitcnt &Wait);
739 void applyWaitcnt(InstCounterType T, unsigned Count);
740 void updateByEvent(WaitEventType E, MachineInstr &MI);
741
742 unsigned hasPendingEvent() const { return PendingEvents; }
743 unsigned hasPendingEvent(WaitEventType E) const {
744 return PendingEvents & (1 << E);
745 }
746 unsigned hasPendingEvent(InstCounterType T) const {
747 unsigned HasPending = PendingEvents & Context->WaitEventMaskForInst[T];
748 assert((HasPending != 0) == (getScoreRange(T) != 0));
749 return HasPending;
750 }
751
752 bool hasMixedPendingEvents(InstCounterType T) const {
753 unsigned Events = hasPendingEvent(T);
754 // Return true if more than one bit is set in Events.
755 return Events & (Events - 1);
756 }
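 // Worked example of the bit trick above: Events == 0b0110 (two event types)
 // gives Events & (Events - 1) == 0b0100 != 0, i.e. mixed, while
 // Events == 0b0100 (a single event type) gives 0b0100 & 0b0011 == 0.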
757
758 bool hasPendingFlat() const {
759 return ((LastFlat[DS_CNT] > ScoreLBs[DS_CNT] &&
760 LastFlat[DS_CNT] <= ScoreUBs[DS_CNT]) ||
761 (LastFlat[LOAD_CNT] > ScoreLBs[LOAD_CNT] &&
762 LastFlat[LOAD_CNT] <= ScoreUBs[LOAD_CNT]));
763 }
764
765 void setPendingFlat() {
766 LastFlat[LOAD_CNT] = ScoreUBs[LOAD_CNT];
767 LastFlat[DS_CNT] = ScoreUBs[DS_CNT];
768 }
769
770 bool hasPendingGDS() const {
771 return LastGDS > ScoreLBs[DS_CNT] && LastGDS <= ScoreUBs[DS_CNT];
772 }
773
774 unsigned getPendingGDSWait() const {
775 return std::min(getScoreUB(DS_CNT) - LastGDS,
776 getWaitCountMax(Context->getLimits(), DS_CNT) - 1);
777 }
778
779 void setPendingGDS() { LastGDS = ScoreUBs[DS_CNT]; }
780
781 // Return true if there might be pending writes to any register unit of
782 // \p Reg by VMEM instructions with types different from V.
783 bool hasOtherPendingVmemTypes(MCPhysReg Reg, VmemType V) const {
784 for (MCRegUnit RU : regunits(Reg)) {
785 auto It = VMem.find(toVMEMID(RU));
786 if (It != VMem.end() && (It->second.VMEMTypes & ~(1 << V)))
787 return true;
788 }
789 return false;
790 }
791
792 void clearVgprVmemTypes(MCPhysReg Reg) {
793 for (MCRegUnit RU : regunits(Reg)) {
794 if (auto It = VMem.find(toVMEMID(RU)); It != VMem.end()) {
795 It->second.VMEMTypes = 0;
796 if (It->second.empty())
797 VMem.erase(It);
798 }
799 }
800 }
801
802 void setStateOnFunctionEntryOrReturn() {
803 setScoreUB(STORE_CNT, getScoreUB(STORE_CNT) +
804 getWaitCountMax(Context->getLimits(), STORE_CNT));
805 PendingEvents |= Context->WaitEventMaskForInst[STORE_CNT];
806 }
807
808 ArrayRef<const MachineInstr *> getLDSDMAStores() const {
809 return LDSDMAStores;
810 }
811
812 bool hasPointSampleAccel(const MachineInstr &MI) const;
813 bool hasPointSamplePendingVmemTypes(const MachineInstr &MI,
814 MCPhysReg RU) const;
815
816 void print(raw_ostream &) const;
817 void dump() const { print(dbgs()); }
818
819 // Free up memory by removing empty entries from the DenseMaps that track
820 // event scores.
821 void purgeEmptyTrackingData();
822
823private:
824 struct MergeInfo {
825 unsigned OldLB;
826 unsigned OtherLB;
827 unsigned MyShift;
828 unsigned OtherShift;
829 };
830
831 void determineWaitForScore(InstCounterType T, unsigned Score,
832 AMDGPU::Waitcnt &Wait) const;
833
834 static bool mergeScore(const MergeInfo &M, unsigned &Score,
835 unsigned OtherScore);
836
838 assert(Reg != AMDGPU::SCC && "Shouldn't be used on SCC");
839 if (!Context->TRI->isInAllocatableClass(Reg))
840 return {{}, {}};
841 const TargetRegisterClass *RC = Context->TRI->getPhysRegBaseClass(Reg);
842 unsigned Size = Context->TRI->getRegSizeInBits(*RC);
843 if (Size == 16 && Context->ST->hasD16Writes32BitVgpr())
844 Reg = Context->TRI->get32BitRegister(Reg);
845 return Context->TRI->regunits(Reg);
846 }
847
848 void setScoreLB(InstCounterType T, unsigned Val) {
849 assert(T < NUM_INST_CNTS);
850 ScoreLBs[T] = Val;
851 }
852
853 void setScoreUB(InstCounterType T, unsigned Val) {
854 assert(T < NUM_INST_CNTS);
855 ScoreUBs[T] = Val;
856
857 if (T != EXP_CNT)
858 return;
859
860 if (getScoreRange(EXP_CNT) > getWaitCountMax(Context->getLimits(), EXP_CNT))
861 ScoreLBs[EXP_CNT] =
862 ScoreUBs[EXP_CNT] - getWaitCountMax(Context->getLimits(), EXP_CNT);
863 }
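 // Example of the EXP_CNT clamping above, assuming an illustrative ExpcntMax
 // of 7: if ScoreLBs[EXP_CNT] == 0 and the new ScoreUBs[EXP_CNT] is 9, the
 // range (9) exceeds the hardware maximum, so the lower bound is raised to
 // 9 - 7 == 2; scores <= 2 can no longer require a wait and are treated as
 // already complete.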
864
865 void setRegScore(MCPhysReg Reg, InstCounterType T, unsigned Val) {
866 const SIRegisterInfo *TRI = Context->TRI;
867 if (Reg == AMDGPU::SCC) {
868 SCCScore = Val;
869 } else if (TRI->isVectorRegister(*Context->MRI, Reg)) {
870 for (MCRegUnit RU : regunits(Reg))
871 VMem[toVMEMID(RU)].Scores[T] = Val;
872 } else if (TRI->isSGPRReg(*Context->MRI, Reg)) {
873 auto STy = getSgprScoresIdx(T);
874 for (MCRegUnit RU : regunits(Reg))
875 SGPRs[RU].Scores[STy] = Val;
876 } else {
877 llvm_unreachable("Register cannot be tracked/unknown register!");
878 }
879 }
880
881 void setVMemScore(VMEMID TID, InstCounterType T, unsigned Val) {
882 VMem[TID].Scores[T] = Val;
883 }
884
885 void setScoreByOperand(const MachineOperand &Op, InstCounterType CntTy,
886 unsigned Val);
887
888 const SIInsertWaitcnts *Context;
889
890 unsigned ScoreLBs[NUM_INST_CNTS] = {0};
891 unsigned ScoreUBs[NUM_INST_CNTS] = {0};
892 unsigned PendingEvents = 0;
893 // Remember the last flat memory operation.
894 unsigned LastFlat[NUM_INST_CNTS] = {0};
895 // Remember the last GDS operation.
896 unsigned LastGDS = 0;
897
898 // The score tracking logic is fragmented as follows:
899 // - VMem: VGPR RegUnits and LDS DMA IDs, see the VMEMID encoding.
900 // - SGPRs: SGPR RegUnits
901 // - SCC: Non-allocatable and not general purpose: not a SGPR.
902 //
903 // For the VMem case, if the key is within the range of LDS DMA IDs,
904 // then the corresponding index into the `LDSDMAStores` vector below is:
905 // Key - LDSDMA_BEGIN - 1
906 // This is because LDSDMA_BEGIN is a generic entry and does not have an
907 // associated MachineInstr.
908 //
909 // TODO: Could we track SCC alongside SGPRs so it's no longer a special case?
910
911 struct VMEMInfo {
912 // Scores for all instruction counters.
913 std::array<unsigned, NUM_INST_CNTS> Scores = {0};
914 // Bitmask of the VmemTypes of VMEM instructions for this VGPR.
915 unsigned VMEMTypes = 0;
916
917 bool empty() const { return all_of(Scores, equal_to(0)) && !VMEMTypes; }
918 };
919
920 struct SGPRInfo {
921 // Wait cnt scores for every SGPR; only DS_CNT (corresponding to LGKMcnt
922 // pre-gfx12) or KM_CNT (gfx12+ only) and X_CNT (gfx1250) are relevant.
923 // Row 0 represents the score for either DS_CNT or KM_CNT, and row 1 keeps
924 // the X_CNT score.
925 std::array<unsigned, 2> Scores = {0};
926
927 bool empty() const { return !Scores[0] && !Scores[1]; }
928 };
929
930 DenseMap<VMEMID, VMEMInfo> VMem; // VGPR + LDS DMA
931 DenseMap<MCRegUnit, SGPRInfo> SGPRs;
932
933 // Reg score for SCC.
934 unsigned SCCScore = 0;
935 // The unique instruction that has an SCC write pending, if there is one.
936 const MachineInstr *PendingSCCWrite = nullptr;
937
938 // Store representative LDS DMA operations. The only useful info here is
939 // alias info. One store is kept per unique AAInfo.
940 SmallVector<const MachineInstr *> LDSDMAStores;
941};
942
943class SIInsertWaitcntsLegacy : public MachineFunctionPass {
944public:
945 static char ID;
946 SIInsertWaitcntsLegacy() : MachineFunctionPass(ID) {}
947
948 bool runOnMachineFunction(MachineFunction &MF) override;
949
950 StringRef getPassName() const override {
951 return "SI insert wait instructions";
952 }
953
954 void getAnalysisUsage(AnalysisUsage &AU) const override {
955 AU.setPreservesCFG();
956 AU.addRequired<MachineLoopInfoWrapperPass>();
957 AU.addRequired<MachinePostDominatorTreeWrapperPass>();
958 AU.addUsedIfAvailable<AAResultsWrapperPass>();
959 AU.addPreserved<AAResultsWrapperPass>();
960 MachineFunctionPass::getAnalysisUsage(AU);
961 }
962};
963
964} // end anonymous namespace
965
966void WaitcntBrackets::setScoreByOperand(const MachineOperand &Op,
967 InstCounterType CntTy, unsigned Score) {
968 setRegScore(Op.getReg().asMCReg(), CntTy, Score);
969}
970
971// Return true if the subtarget is one that enables Point Sample Acceleration
972// and the MachineInstr passed in is one to which it might be applied (the
973// hardware makes this decision based on several factors, but we can't determine
974// this at compile time, so we have to assume it might be applied if the
975// instruction supports it).
976bool WaitcntBrackets::hasPointSampleAccel(const MachineInstr &MI) const {
977 if (!Context->ST->hasPointSampleAccel() || !SIInstrInfo::isMIMG(MI))
978 return false;
979
980 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
981 const AMDGPU::MIMGBaseOpcodeInfo *BaseInfo =
982 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
983 return BaseInfo->PointSampleAccel;
984}
985
986// Return true if the subtarget enables Point Sample Acceleration, the supplied
987// MachineInstr is one to which it might be applied, and the supplied register
988// has outstanding writes of vmem-types different from VMEM_NOSAMPLER
989// (this is the type that a point sample accelerated instruction effectively
990// becomes).
991bool WaitcntBrackets::hasPointSamplePendingVmemTypes(const MachineInstr &MI,
992 MCPhysReg Reg) const {
993 if (!hasPointSampleAccel(MI))
994 return false;
995
996 return hasOtherPendingVmemTypes(Reg, VMEM_NOSAMPLER);
997}
998
999void WaitcntBrackets::updateByEvent(WaitEventType E, MachineInstr &Inst) {
1000 InstCounterType T = eventCounter(Context->WaitEventMaskForInst, E);
1001 assert(T < Context->MaxCounter);
1002
1003 unsigned UB = getScoreUB(T);
1004 unsigned CurrScore = UB + 1;
1005 if (CurrScore == 0)
1006 report_fatal_error("InsertWaitcnt score wraparound");
1007 // PendingEvents and ScoreUB need to be updated regardless of whether this
1008 // event changes the score of a register or not. Examples include vm_cnt
1009 // for a buffer store or lgkm_cnt for a send-message.
1010 PendingEvents |= 1 << E;
1011 setScoreUB(T, CurrScore);
1012
1013 const SIRegisterInfo *TRI = Context->TRI;
1014 const MachineRegisterInfo *MRI = Context->MRI;
1015 const SIInstrInfo *TII = Context->TII;
1016
1017 if (T == EXP_CNT) {
1018 // Put score on the source vgprs. If this is a store, just use those
1019 // specific register(s).
1020 if (TII->isDS(Inst) && Inst.mayLoadOrStore()) {
1021 // All GDS operations must protect their address register (same as
1022 // export.)
1023 if (const auto *AddrOp = TII->getNamedOperand(Inst, AMDGPU::OpName::addr))
1024 setScoreByOperand(*AddrOp, EXP_CNT, CurrScore);
1025
1026 if (Inst.mayStore()) {
1027 if (const auto *Data0 =
1028 TII->getNamedOperand(Inst, AMDGPU::OpName::data0))
1029 setScoreByOperand(*Data0, EXP_CNT, CurrScore);
1030 if (const auto *Data1 =
1031 TII->getNamedOperand(Inst, AMDGPU::OpName::data1))
1032 setScoreByOperand(*Data1, EXP_CNT, CurrScore);
1033 } else if (SIInstrInfo::isAtomicRet(Inst) && !SIInstrInfo::isGWS(Inst) &&
1034 Inst.getOpcode() != AMDGPU::DS_APPEND &&
1035 Inst.getOpcode() != AMDGPU::DS_CONSUME &&
1036 Inst.getOpcode() != AMDGPU::DS_ORDERED_COUNT) {
1037 for (const MachineOperand &Op : Inst.all_uses()) {
1038 if (TRI->isVectorRegister(*MRI, Op.getReg()))
1039 setScoreByOperand(Op, EXP_CNT, CurrScore);
1040 }
1041 }
1042 } else if (TII->isFLAT(Inst)) {
1043 if (Inst.mayStore()) {
1044 setScoreByOperand(*TII->getNamedOperand(Inst, AMDGPU::OpName::data),
1045 EXP_CNT, CurrScore);
1046 } else if (SIInstrInfo::isAtomicRet(Inst)) {
1047 setScoreByOperand(*TII->getNamedOperand(Inst, AMDGPU::OpName::data),
1048 EXP_CNT, CurrScore);
1049 }
1050 } else if (TII->isMIMG(Inst)) {
1051 if (Inst.mayStore()) {
1052 setScoreByOperand(Inst.getOperand(0), EXP_CNT, CurrScore);
1053 } else if (SIInstrInfo::isAtomicRet(Inst)) {
1054 setScoreByOperand(*TII->getNamedOperand(Inst, AMDGPU::OpName::data),
1055 EXP_CNT, CurrScore);
1056 }
1057 } else if (TII->isMTBUF(Inst)) {
1058 if (Inst.mayStore())
1059 setScoreByOperand(Inst.getOperand(0), EXP_CNT, CurrScore);
1060 } else if (TII->isMUBUF(Inst)) {
1061 if (Inst.mayStore()) {
1062 setScoreByOperand(Inst.getOperand(0), EXP_CNT, CurrScore);
1063 } else if (SIInstrInfo::isAtomicRet(Inst)) {
1064 setScoreByOperand(*TII->getNamedOperand(Inst, AMDGPU::OpName::data),
1065 EXP_CNT, CurrScore);
1066 }
1067 } else if (TII->isLDSDIR(Inst)) {
1068 // LDSDIR instructions attach the score to the destination.
1069 setScoreByOperand(*TII->getNamedOperand(Inst, AMDGPU::OpName::vdst),
1070 EXP_CNT, CurrScore);
1071 } else {
1072 if (TII->isEXP(Inst)) {
1073 // For export the destination registers are really temps that
1074 // can be used as the actual source after export patching, so
1075 // we need to treat them like sources and set the EXP_CNT
1076 // score.
1077 for (MachineOperand &DefMO : Inst.all_defs()) {
1078 if (TRI->isVGPR(*MRI, DefMO.getReg())) {
1079 setScoreByOperand(DefMO, EXP_CNT, CurrScore);
1080 }
1081 }
1082 }
1083 for (const MachineOperand &Op : Inst.all_uses()) {
1084 if (TRI->isVectorRegister(*MRI, Op.getReg()))
1085 setScoreByOperand(Op, EXP_CNT, CurrScore);
1086 }
1087 }
1088 } else if (T == X_CNT) {
1089 WaitEventType OtherEvent = E == SMEM_GROUP ? VMEM_GROUP : SMEM_GROUP;
1090 if (PendingEvents & (1 << OtherEvent)) {
1091 // Hardware inserts an implicit xcnt between interleaved
1092 // SMEM and VMEM operations. So there will never be
1093 // outstanding address translations for both SMEM and
1094 // VMEM at the same time.
1095 setScoreLB(T, getScoreUB(T) - 1);
1096 PendingEvents &= ~(1 << OtherEvent);
1097 }
1098 for (const MachineOperand &Op : Inst.all_uses())
1099 setScoreByOperand(Op, T, CurrScore);
1100 } else if (T == VA_VDST || T == VM_VSRC) {
1101 // Match the score to the VGPR destination or source registers as
1102 // appropriate.
1103 for (const MachineOperand &Op : Inst.operands()) {
1104 if (!Op.isReg() || (T == VA_VDST && Op.isUse()) ||
1105 (T == VM_VSRC && Op.isDef()))
1106 continue;
1107 if (TRI->isVectorRegister(*Context->MRI, Op.getReg()))
1108 setScoreByOperand(Op, T, CurrScore);
1109 }
1110 } else /* LGKM_CNT || EXP_CNT || VS_CNT || NUM_INST_CNTS */ {
1111 // Match the score to the destination registers.
1112 //
1113 // Check only explicit operands. Stores, especially spill stores, include
1114 // implicit uses and defs of their super registers which would create an
1115 // artificial dependency, while these are there only for register liveness
1116 // accounting purposes.
1117 //
1118 // Special cases where implicit register defs exists, such as M0 or VCC,
1119 // but none with memory instructions.
1120 for (const MachineOperand &Op : Inst.defs()) {
1121 if (T == LOAD_CNT || T == SAMPLE_CNT || T == BVH_CNT) {
1122 if (!TRI->isVectorRegister(*MRI, Op.getReg())) // TODO: add wrapper
1123 continue;
1124 if (updateVMCntOnly(Inst)) {
1125 // updateVMCntOnly should only leave us with VGPRs:
1126 // MUBUF, MTBUF, MIMG, FlatGlobal, and FlatScratch only have VGPR/AGPR
1127 // defs. That's required for a sane update of `VMEMTypes` below.
1128 assert(TRI->isVectorRegister(*MRI, Op.getReg()));
1129 VmemType V = getVmemType(Inst);
1130 unsigned char TypesMask = 1 << V;
1131 // If instruction can have Point Sample Accel applied, we have to flag
1132 // this with another potential dependency
1133 if (hasPointSampleAccel(Inst))
1134 TypesMask |= 1 << VMEM_NOSAMPLER;
1135 for (MCRegUnit RU : regunits(Op.getReg().asMCReg()))
1136 VMem[toVMEMID(RU)].VMEMTypes |= TypesMask;
1137 }
1138 }
1139 setScoreByOperand(Op, T, CurrScore);
1140 }
1141 if (Inst.mayStore() &&
1142 (TII->isDS(Inst) || TII->mayWriteLDSThroughDMA(Inst))) {
1143 // MUBUF and FLAT LDS DMA operations need a wait on vmcnt before the
1144 // written LDS can be accessed. A load from LDS to VMEM does not need a wait.
1145 //
1146 // The "Slot" is the offset from LDSDMA_BEGIN. If it's non-zero, then
1147 // there is a MachineInstr in LDSDMAStores used to track this LDSDMA
1148 // store. The "Slot" is the index into LDSDMAStores + 1.
1149 unsigned Slot = 0;
1150 for (const auto *MemOp : Inst.memoperands()) {
1151 if (!MemOp->isStore() ||
1152 MemOp->getAddrSpace() != AMDGPUAS::LOCAL_ADDRESS)
1153 continue;
1154 // Comparing just AA info does not guarantee memoperands are equal
1155 // in general, but this is so for LDS DMA in practice.
1156 auto AAI = MemOp->getAAInfo();
1157 // Alias scope information gives a way to definitively identify an
1158 // original memory object, and in practice it is produced by the module
1159 // LDS lowering pass. If there is no scope available we will not be able
1160 // to disambiguate LDS aliasing as, after the module lowering, all LDS
1161 // is squashed into a single big object.
1162 if (!AAI || !AAI.Scope)
1163 break;
1164 for (unsigned I = 0, E = LDSDMAStores.size(); I != E && !Slot; ++I) {
1165 for (const auto *MemOp : LDSDMAStores[I]->memoperands()) {
1166 if (MemOp->isStore() && AAI == MemOp->getAAInfo()) {
1167 Slot = I + 1;
1168 break;
1169 }
1170 }
1171 }
1172 if (Slot)
1173 break;
1174 // The slot may not be valid because it can be >= NUM_LDSDMA which
1175 // means the scoreboard cannot track it. We still want to preserve the
1176 // MI in order to check alias information, though.
1177 LDSDMAStores.push_back(&Inst);
1178 Slot = LDSDMAStores.size();
1179 break;
1180 }
1181 setVMemScore(LDSDMA_BEGIN, T, CurrScore);
1182 if (Slot && Slot < NUM_LDSDMA)
1183 setVMemScore(LDSDMA_BEGIN + Slot, T, CurrScore);
1184 }
1185
1187 setRegScore(AMDGPU::SCC, T, CurrScore);
1188 PendingSCCWrite = &Inst;
1189 }
1190 }
1191}
1192
1193void WaitcntBrackets::print(raw_ostream &OS) const {
1194 const GCNSubtarget *ST = Context->ST;
1195
1196 OS << '\n';
1197 for (auto T : inst_counter_types(Context->MaxCounter)) {
1198 unsigned SR = getScoreRange(T);
1199
1200 switch (T) {
1201 case LOAD_CNT:
1202 OS << " " << (ST->hasExtendedWaitCounts() ? "LOAD" : "VM") << "_CNT("
1203 << SR << "):";
1204 break;
1205 case DS_CNT:
1206 OS << " " << (ST->hasExtendedWaitCounts() ? "DS" : "LGKM") << "_CNT("
1207 << SR << "):";
1208 break;
1209 case EXP_CNT:
1210 OS << " EXP_CNT(" << SR << "):";
1211 break;
1212 case STORE_CNT:
1213 OS << " " << (ST->hasExtendedWaitCounts() ? "STORE" : "VS") << "_CNT("
1214 << SR << "):";
1215 break;
1216 case SAMPLE_CNT:
1217 OS << " SAMPLE_CNT(" << SR << "):";
1218 break;
1219 case BVH_CNT:
1220 OS << " BVH_CNT(" << SR << "):";
1221 break;
1222 case KM_CNT:
1223 OS << " KM_CNT(" << SR << "):";
1224 break;
1225 case X_CNT:
1226 OS << " X_CNT(" << SR << "):";
1227 break;
1228 case VA_VDST:
1229 OS << " VA_VDST(" << SR << "): ";
1230 break;
1231 case VM_VSRC:
1232 OS << " VM_VSRC(" << SR << "): ";
1233 break;
1234 default:
1235 OS << " UNKNOWN(" << SR << "):";
1236 break;
1237 }
1238
1239 if (SR != 0) {
1240 // Print vgpr scores.
1241 unsigned LB = getScoreLB(T);
1242
1243 SmallVector<VMEMID> SortedVMEMIDs(VMem.keys());
1244 sort(SortedVMEMIDs);
1245
1246 for (auto ID : SortedVMEMIDs) {
1247 unsigned RegScore = VMem.at(ID).Scores[T];
1248 if (RegScore <= LB)
1249 continue;
1250 unsigned RelScore = RegScore - LB - 1;
1251 if (ID < REGUNITS_END) {
1252 OS << ' ' << RelScore << ":vRU" << ID;
1253 } else {
1254 assert(ID >= LDSDMA_BEGIN && ID < LDSDMA_END &&
1255 "Unhandled/unexpected ID value!");
1256 OS << ' ' << RelScore << ":LDSDMA" << ID;
1257 }
1258 }
1259
1260 // Also need to print sgpr scores for lgkm_cnt or xcnt.
1261 if (isSmemCounter(T)) {
1262 SmallVector<MCRegUnit> SortedSMEMIDs(SGPRs.keys());
1263 sort(SortedSMEMIDs);
1264 for (auto ID : SortedSMEMIDs) {
1265 unsigned RegScore = SGPRs.at(ID).Scores[getSgprScoresIdx(T)];
1266 if (RegScore <= LB)
1267 continue;
1268 unsigned RelScore = RegScore - LB - 1;
1269 OS << ' ' << RelScore << ":sRU" << static_cast<unsigned>(ID);
1270 }
1271 }
1272
1273 if (T == KM_CNT && SCCScore > 0)
1274 OS << ' ' << SCCScore << ":scc";
1275 }
1276 OS << '\n';
1277 }
1278
1279 OS << "Pending Events: ";
1280 if (hasPendingEvent()) {
1281 ListSeparator LS;
1282 for (unsigned I = 0; I != NUM_WAIT_EVENTS; ++I) {
1283 if (hasPendingEvent((WaitEventType)I)) {
1284 OS << LS << WaitEventTypeName[I];
1285 }
1286 }
1287 } else {
1288 OS << "none";
1289 }
1290 OS << '\n';
1291
1292 OS << '\n';
1293}
1294
1295/// Simplify \p UpdateWait by removing waits that are redundant based on the
1296/// current WaitcntBrackets and any other waits specified in \p CheckWait.
1297void WaitcntBrackets::simplifyWaitcnt(const AMDGPU::Waitcnt &CheckWait,
1298 AMDGPU::Waitcnt &UpdateWait) const {
1299 simplifyWaitcnt(LOAD_CNT, UpdateWait.LoadCnt);
1300 simplifyWaitcnt(EXP_CNT, UpdateWait.ExpCnt);
1301 simplifyWaitcnt(DS_CNT, UpdateWait.DsCnt);
1302 simplifyWaitcnt(STORE_CNT, UpdateWait.StoreCnt);
1303 simplifyWaitcnt(SAMPLE_CNT, UpdateWait.SampleCnt);
1304 simplifyWaitcnt(BVH_CNT, UpdateWait.BvhCnt);
1305 simplifyWaitcnt(KM_CNT, UpdateWait.KmCnt);
1306 simplifyXcnt(CheckWait, UpdateWait);
1307 simplifyWaitcnt(VA_VDST, UpdateWait.VaVdst);
1308 simplifyVmVsrc(CheckWait, UpdateWait);
1309}
1310
1311void WaitcntBrackets::simplifyWaitcnt(InstCounterType T,
1312 unsigned &Count) const {
1313 // The number of outstanding events for this type, T, can be calculated
1314 // as (UB - LB). If the current Count is greater than or equal to the number
1315 // of outstanding events, then the wait for this counter is redundant.
1316 if (Count >= getScoreRange(T))
1317 Count = ~0u;
1318}
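// Worked example for simplifyWaitcnt above: if getScoreRange(T) == 3 (three
// type-T events outstanding), a requested wait of Count == 5 can never be the
// binding constraint and is dropped by setting Count to ~0u, while a requested
// wait of Count == 2 is kept because it is stricter than what is already
// guaranteed.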
1319
1320void WaitcntBrackets::simplifyVmVsrc(const AMDGPU::Waitcnt &CheckWait,
1321 AMDGPU::Waitcnt &UpdateWait) const {
1322 // Waiting for some counters implies waiting for VM_VSRC, since an
1323 // instruction that decrements a counter on completion would have
1324 // decremented VM_VSRC once its VGPR operands had been read.
1325 if (CheckWait.VmVsrc >=
1326 std::min({CheckWait.LoadCnt, CheckWait.StoreCnt, CheckWait.SampleCnt,
1327 CheckWait.BvhCnt, CheckWait.DsCnt}))
1328 UpdateWait.VmVsrc = ~0u;
1329 simplifyWaitcnt(VM_VSRC, UpdateWait.VmVsrc);
1330}
1331
1332void WaitcntBrackets::purgeEmptyTrackingData() {
1333 for (auto &[K, V] : make_early_inc_range(VMem)) {
1334 if (V.empty())
1335 VMem.erase(K);
1336 }
1337 for (auto &[K, V] : make_early_inc_range(SGPRs)) {
1338 if (V.empty())
1339 SGPRs.erase(K);
1340 }
1341}
1342
1343void WaitcntBrackets::determineWaitForScore(InstCounterType T,
1344 unsigned ScoreToWait,
1345 AMDGPU::Waitcnt &Wait) const {
1346 const unsigned LB = getScoreLB(T);
1347 const unsigned UB = getScoreUB(T);
1348
1349 // If the score falls within the bracket, we need a waitcnt.
1350 if ((UB >= ScoreToWait) && (ScoreToWait > LB)) {
1351 if ((T == LOAD_CNT || T == DS_CNT) && hasPendingFlat() &&
1352 !Context->ST->hasFlatLgkmVMemCountInOrder()) {
1353 // If there is a pending FLAT operation, and this is a VMem or LGKM
1354 // waitcnt and the target can report early completion, then we need
1355 // to force a waitcnt 0.
1356 addWait(Wait, T, 0);
1357 } else if (counterOutOfOrder(T)) {
1358 // The counter can get decremented out-of-order when there
1359 // are multiple event types in the bracket, so emit an s_wait
1360 // with a conservative value of 0 for the counter.
1361 addWait(Wait, T, 0);
1362 } else {
1363 // If a counter has been maxed out, avoid overflow by waiting for
1364 // MAX(CounterType) - 1 instead.
1365 unsigned NeededWait = std::min(
1366 UB - ScoreToWait, getWaitCountMax(Context->getLimits(), T) - 1);
1367 addWait(Wait, T, NeededWait);
1368 }
1369 }
1370}
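// Worked example for determineWaitForScore: with LB == 4, UB == 10 and
// ScoreToWait == 7 (no pending FLAT, counter not out of order), three type-T
// events were issued after the one we must wait for, so NeededWait ==
// UB - ScoreToWait == 3: once at most three events remain outstanding, the
// event with score 7 must have completed. The min() with the hardware maximum
// only matters when more events are in flight than the counter can represent.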
1371
1372void WaitcntBrackets::determineWaitForPhysReg(InstCounterType T, MCPhysReg Reg,
1373 AMDGPU::Waitcnt &Wait) const {
1374 if (Reg == AMDGPU::SCC) {
1375 determineWaitForScore(T, SCCScore, Wait);
1376 } else {
1377 bool IsVGPR = Context->TRI->isVectorRegister(*Context->MRI, Reg);
1378 for (MCRegUnit RU : regunits(Reg))
1379 determineWaitForScore(
1380 T, IsVGPR ? getVMemScore(toVMEMID(RU), T) : getSGPRScore(RU, T),
1381 Wait);
1382 }
1383}
1384
1385void WaitcntBrackets::determineWaitForLDSDMA(InstCounterType T, VMEMID TID,
1386 AMDGPU::Waitcnt &Wait) const {
1387 assert(TID >= LDSDMA_BEGIN && TID < LDSDMA_END);
1388 determineWaitForScore(T, getVMemScore(TID, T), Wait);
1389}
1390
1391void WaitcntBrackets::tryClearSCCWriteEvent(MachineInstr *Inst) {
1392 // S_BARRIER_WAIT on the same barrier guarantees that the pending write to
1393 // SCC has landed
1394 if (PendingSCCWrite &&
1395 PendingSCCWrite->getOpcode() == AMDGPU::S_BARRIER_SIGNAL_ISFIRST_IMM &&
1396 PendingSCCWrite->getOperand(0).getImm() == Inst->getOperand(0).getImm()) {
1397 unsigned SCC_WRITE_PendingEvent = 1 << SCC_WRITE;
1398 // If this SCC_WRITE is the only pending KM_CNT event, clear counter.
1399 if ((PendingEvents & Context->WaitEventMaskForInst[KM_CNT]) ==
1400 SCC_WRITE_PendingEvent) {
1401 setScoreLB(KM_CNT, getScoreUB(KM_CNT));
1402 }
1403
1404 PendingEvents &= ~SCC_WRITE_PendingEvent;
1405 PendingSCCWrite = nullptr;
1406 }
1407}
1408
1409void WaitcntBrackets::applyWaitcnt(const AMDGPU::Waitcnt &Wait) {
1410 applyWaitcnt(LOAD_CNT, Wait.LoadCnt);
1411 applyWaitcnt(EXP_CNT, Wait.ExpCnt);
1412 applyWaitcnt(DS_CNT, Wait.DsCnt);
1413 applyWaitcnt(STORE_CNT, Wait.StoreCnt);
1414 applyWaitcnt(SAMPLE_CNT, Wait.SampleCnt);
1415 applyWaitcnt(BVH_CNT, Wait.BvhCnt);
1416 applyWaitcnt(KM_CNT, Wait.KmCnt);
1417 applyWaitcnt(X_CNT, Wait.XCnt);
1418 applyWaitcnt(VA_VDST, Wait.VaVdst);
1419 applyWaitcnt(VM_VSRC, Wait.VmVsrc);
1420}
1421
1422void WaitcntBrackets::applyWaitcnt(InstCounterType T, unsigned Count) {
1423 const unsigned UB = getScoreUB(T);
1424 if (Count >= UB)
1425 return;
1426 if (Count != 0) {
1427 if (counterOutOfOrder(T))
1428 return;
1429 setScoreLB(T, std::max(getScoreLB(T), UB - Count));
1430 } else {
1431 setScoreLB(T, UB);
1432 PendingEvents &= ~Context->WaitEventMaskForInst[T];
1433 }
1434
1435 if (T == KM_CNT && Count == 0 && hasPendingEvent(SMEM_GROUP)) {
1436 if (!hasMixedPendingEvents(X_CNT))
1437 applyWaitcnt(X_CNT, 0);
1438 else
1439 PendingEvents &= ~(1 << SMEM_GROUP);
1440 }
1441 if (T == LOAD_CNT && hasPendingEvent(VMEM_GROUP) &&
1442 !hasPendingEvent(STORE_CNT)) {
1443 if (!hasMixedPendingEvents(X_CNT))
1444 applyWaitcnt(X_CNT, Count);
1445 else if (Count == 0)
1446 PendingEvents &= ~(1 << VMEM_GROUP);
1447 }
1448}
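// Example for applyWaitcnt above: with LB == 2 and UB == 10, applying a wait
// of Count == 3 (for an in-order counter) raises the lower bound to
// UB - Count == 7, recording that at most three type-T events can still be
// outstanding; applying Count == 0 closes the bracket entirely (LB == UB) and
// clears the pending events for T.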
1449
1450void WaitcntBrackets::simplifyXcnt(const AMDGPU::Waitcnt &CheckWait,
1451 AMDGPU::Waitcnt &UpdateWait) const {
1452 // Try to simplify xcnt further by checking for joint kmcnt and loadcnt
1453 // optimizations. On entry to a block with multiple predecessors, there may
1454 // be pending SMEM and VMEM events active at the same time.
1455 // In such cases, only clear one active event at a time.
1456 // TODO: Revisit xcnt optimizations for gfx1250.
1457 // Wait on XCNT is redundant if we are already waiting for a load to complete.
1458 // SMEM can return out of order, so only omit XCNT wait if we are waiting till
1459 // zero.
1460 if (CheckWait.KmCnt == 0 && hasPendingEvent(SMEM_GROUP))
1461 UpdateWait.XCnt = ~0u;
1462 // If we have a pending store we cannot optimize XCnt because we do not wait
1463 // for stores. VMEM loads return in order, so if we only have loads, XCnt is
1464 // decremented to the same number as LOADCnt.
1465 if (CheckWait.LoadCnt != ~0u && hasPendingEvent(VMEM_GROUP) &&
1466 !hasPendingEvent(STORE_CNT) && CheckWait.XCnt >= CheckWait.LoadCnt)
1467 UpdateWait.XCnt = ~0u;
1468 simplifyWaitcnt(X_CNT, UpdateWait.XCnt);
1469}
1470
1471// Where there are multiple types of event in the bracket of a counter,
1472// the decrement may go out of order.
1473bool WaitcntBrackets::counterOutOfOrder(InstCounterType T) const {
1474 // Scalar memory reads can always go out of order.
1475 if ((T == Context->SmemAccessCounter && hasPendingEvent(SMEM_ACCESS)) ||
1476 (T == X_CNT && hasPendingEvent(SMEM_GROUP)))
1477 return true;
1478
1479 // GLOBAL_INV completes in-order with other LOAD_CNT events (VMEM_ACCESS),
1480 // so having GLOBAL_INV_ACCESS mixed with other LOAD_CNT events doesn't cause
1481 // out-of-order completion.
1482 if (T == LOAD_CNT) {
1483 unsigned Events = hasPendingEvent(T);
1484 // Remove GLOBAL_INV_ACCESS from the event mask before checking for mixed
1485 // events
1486 Events &= ~(1 << GLOBAL_INV_ACCESS);
1487 // Return true only if there are still multiple event types after removing
1488 // GLOBAL_INV
1489 return Events & (Events - 1);
1490 }
1491
1492 return hasMixedPendingEvents(T);
1493}
1494
1495INITIALIZE_PASS_BEGIN(SIInsertWaitcntsLegacy, DEBUG_TYPE, "SI Insert Waitcnts",
1496 false, false)
1497INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass)
1498INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTreeWrapperPass)
1499INITIALIZE_PASS_END(SIInsertWaitcntsLegacy, DEBUG_TYPE, "SI Insert Waitcnts",
1500 false, false)
1501
1502char SIInsertWaitcntsLegacy::ID = 0;
1503
1504char &llvm::SIInsertWaitcntsID = SIInsertWaitcntsLegacy::ID;
1505
1506FunctionPass *llvm::createSIInsertWaitcntsPass() {
1507 return new SIInsertWaitcntsLegacy();
1508}
1509
1510static bool updateOperandIfDifferent(MachineInstr &MI, AMDGPU::OpName OpName,
1511 unsigned NewEnc) {
1512 int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpName);
1513 assert(OpIdx >= 0);
1514
1515 MachineOperand &MO = MI.getOperand(OpIdx);
1516
1517 if (NewEnc == MO.getImm())
1518 return false;
1519
1520 MO.setImm(NewEnc);
1521 return true;
1522}
1523
1524/// Determine if \p MI is a gfx12+ single-counter S_WAIT_*CNT instruction,
1525/// and if so, which counter it is waiting on.
1526static std::optional<InstCounterType> counterTypeForInstr(unsigned Opcode) {
1527 switch (Opcode) {
1528 case AMDGPU::S_WAIT_LOADCNT:
1529 return LOAD_CNT;
1530 case AMDGPU::S_WAIT_EXPCNT:
1531 return EXP_CNT;
1532 case AMDGPU::S_WAIT_STORECNT:
1533 return STORE_CNT;
1534 case AMDGPU::S_WAIT_SAMPLECNT:
1535 return SAMPLE_CNT;
1536 case AMDGPU::S_WAIT_BVHCNT:
1537 return BVH_CNT;
1538 case AMDGPU::S_WAIT_DSCNT:
1539 return DS_CNT;
1540 case AMDGPU::S_WAIT_KMCNT:
1541 return KM_CNT;
1542 case AMDGPU::S_WAIT_XCNT:
1543 return X_CNT;
1544 default:
1545 return {};
1546 }
1547}
1548
1549bool WaitcntGenerator::promoteSoftWaitCnt(MachineInstr *Waitcnt) const {
1550 unsigned Opcode = SIInstrInfo::getNonSoftWaitcntOpcode(Waitcnt->getOpcode());
1551 if (Opcode == Waitcnt->getOpcode())
1552 return false;
1553
1554 Waitcnt->setDesc(TII->get(Opcode));
1555 return true;
1556}
1557
1558/// Combine consecutive S_WAITCNT and S_WAITCNT_VSCNT instructions that
1559/// precede \p It and follow \p OldWaitcntInstr and apply any extra waits
1560/// from \p Wait that were added by previous passes. Currently this pass
1561/// conservatively assumes that these preexisting waits are required for
1562/// correctness.
1563bool WaitcntGeneratorPreGFX12::applyPreexistingWaitcnt(
1564 WaitcntBrackets &ScoreBrackets, MachineInstr &OldWaitcntInstr,
1565 AMDGPU::Waitcnt &Wait, MachineBasicBlock::instr_iterator It) const {
1566 assert(ST);
1567 assert(isNormalMode(MaxCounter));
1568
1569 bool Modified = false;
1570 MachineInstr *WaitcntInstr = nullptr;
1571 MachineInstr *WaitcntVsCntInstr = nullptr;
1572
1573 LLVM_DEBUG({
1574 dbgs() << "PreGFX12::applyPreexistingWaitcnt at: ";
1575 if (It.isEnd())
1576 dbgs() << "end of block\n";
1577 else
1578 dbgs() << *It;
1579 });
1580
1581 for (auto &II :
1582 make_early_inc_range(make_range(OldWaitcntInstr.getIterator(), It))) {
1583 LLVM_DEBUG(dbgs() << "pre-existing iter: " << II);
1584 if (II.isMetaInstruction()) {
1585 LLVM_DEBUG(dbgs() << "skipped meta instruction\n");
1586 continue;
1587 }
1588
1589 unsigned Opcode = SIInstrInfo::getNonSoftWaitcntOpcode(II.getOpcode());
1590 bool TrySimplify = Opcode != II.getOpcode() && !OptNone;
1591
1592 // Update required wait count. If this is a soft waitcnt (= it was added
1593 // by an earlier pass), it may be entirely removed.
1594 if (Opcode == AMDGPU::S_WAITCNT) {
1595 unsigned IEnc = II.getOperand(0).getImm();
1596 AMDGPU::Waitcnt OldWait = AMDGPU::decodeWaitcnt(IV, IEnc);
1597 if (TrySimplify)
1598 ScoreBrackets.simplifyWaitcnt(OldWait);
1599 Wait = Wait.combined(OldWait);
1600
1601 // Merge consecutive waitcnt of the same type by erasing multiples.
1602 if (WaitcntInstr || (!Wait.hasWaitExceptStoreCnt() && TrySimplify)) {
1603 II.eraseFromParent();
1604 Modified = true;
1605 } else
1606 WaitcntInstr = &II;
1607 } else if (Opcode == AMDGPU::S_WAITCNT_lds_direct) {
1608 assert(ST->hasVMemToLDSLoad());
1609 LLVM_DEBUG(dbgs() << "Processing S_WAITCNT_lds_direct: " << II
1610 << "Before: " << Wait << '\n';);
1611 ScoreBrackets.determineWaitForLDSDMA(LOAD_CNT, LDSDMA_BEGIN, Wait);
1612 LLVM_DEBUG(dbgs() << "After: " << Wait << '\n';);
1613
1614 // It is possible (but unlikely) that this is the only wait instruction,
1615 // in which case, we exit this loop without a WaitcntInstr to consume
1616 // `Wait`. But that works because `Wait` was passed in by reference, and
1617 // the callee eventually calls createNewWaitcnt on it. We test this
1618 // possibility in an artificial MIR test since such a situation cannot be
1619 // recreated by running the memory legalizer.
1620 II.eraseFromParent();
1621 } else {
1622 assert(Opcode == AMDGPU::S_WAITCNT_VSCNT);
1623 assert(II.getOperand(0).getReg() == AMDGPU::SGPR_NULL);
1624
1625 unsigned OldVSCnt =
1626 TII->getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
1627 if (TrySimplify)
1628 ScoreBrackets.simplifyWaitcnt(InstCounterType::STORE_CNT, OldVSCnt);
1629 Wait.StoreCnt = std::min(Wait.StoreCnt, OldVSCnt);
1630
1631 if (WaitcntVsCntInstr || (!Wait.hasWaitStoreCnt() && TrySimplify)) {
1632 II.eraseFromParent();
1633 Modified = true;
1634 } else
1635 WaitcntVsCntInstr = &II;
1636 }
1637 }
1638
1639 if (WaitcntInstr) {
1640 Modified |= updateOperandIfDifferent(*WaitcntInstr, AMDGPU::OpName::simm16,
1641 AMDGPU::encodeWaitcnt(IV, Wait));
1642 Modified |= promoteSoftWaitCnt(WaitcntInstr);
1643
1644 ScoreBrackets.applyWaitcnt(LOAD_CNT, Wait.LoadCnt);
1645 ScoreBrackets.applyWaitcnt(EXP_CNT, Wait.ExpCnt);
1646 ScoreBrackets.applyWaitcnt(DS_CNT, Wait.DsCnt);
1647 Wait.LoadCnt = ~0u;
1648 Wait.ExpCnt = ~0u;
1649 Wait.DsCnt = ~0u;
1650
1651 LLVM_DEBUG(It.isEnd() ? dbgs() << "applied pre-existing waitcnt\n"
1652 << "New Instr at block end: "
1653 << *WaitcntInstr << '\n'
1654 : dbgs() << "applied pre-existing waitcnt\n"
1655 << "Old Instr: " << *It
1656 << "New Instr: " << *WaitcntInstr << '\n');
1657 }
1658
1659 if (WaitcntVsCntInstr) {
1660 Modified |= updateOperandIfDifferent(*WaitcntVsCntInstr,
1661 AMDGPU::OpName::simm16, Wait.StoreCnt);
1662 Modified |= promoteSoftWaitCnt(WaitcntVsCntInstr);
1663
1664 ScoreBrackets.applyWaitcnt(STORE_CNT, Wait.StoreCnt);
1665 Wait.StoreCnt = ~0u;
1666
1667 LLVM_DEBUG(It.isEnd()
1668 ? dbgs() << "applied pre-existing waitcnt\n"
1669 << "New Instr at block end: " << *WaitcntVsCntInstr
1670 << '\n'
1671 : dbgs() << "applied pre-existing waitcnt\n"
1672 << "Old Instr: " << *It
1673 << "New Instr: " << *WaitcntVsCntInstr << '\n');
1674 }
1675
1676 return Modified;
1677}
1678
1679/// Generate S_WAITCNT and/or S_WAITCNT_VSCNT instructions for any
1680/// required counters in \p Wait
1681bool WaitcntGeneratorPreGFX12::createNewWaitcnt(
1682 MachineBasicBlock &Block, MachineBasicBlock::instr_iterator It,
1683 AMDGPU::Waitcnt Wait, const WaitcntBrackets &ScoreBrackets) {
1684 assert(ST);
1685 assert(isNormalMode(MaxCounter));
1686
1687 bool Modified = false;
1688 const DebugLoc &DL = Block.findDebugLoc(It);
1689
1690 // Helper to emit expanded waitcnt sequence for profiling.
1691 // Emits waitcnts from (Outstanding-1) down to Target.
1692 // The EmitWaitcnt callback emits a single waitcnt.
1693 auto EmitExpandedWaitcnt = [&](unsigned Outstanding, unsigned Target,
1694 auto EmitWaitcnt) {
1695 do {
1696 EmitWaitcnt(--Outstanding);
1697 } while (Outstanding > Target);
1698 Modified = true;
1699 };
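// Illustration: with Outstanding = 3 and Target = 0 the helper above emits
// s_waitcnt for counts 2, 1 and 0 in turn, so a profiler can attribute stall
// time to each outstanding operation separately instead of to one combined
// wait.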
1700
1701 // Waits for VMcnt, LKGMcnt and/or EXPcnt are encoded together into a
1702 // single instruction while VScnt has its own instruction.
1703 if (Wait.hasWaitExceptStoreCnt()) {
1704 // If profiling expansion is enabled, emit an expanded sequence
1705 if (ExpandWaitcntProfiling) {
1706 // Check if any of the counters to be waited on are out-of-order.
1707 // If so, fall back to normal (non-expanded) behavior since expansion
1708 // would provide misleading profiling information.
1709 bool AnyOutOfOrder = false;
1710 for (auto CT : {LOAD_CNT, DS_CNT, EXP_CNT}) {
1711 unsigned &WaitCnt = getCounterRef(Wait, CT);
1712 if (WaitCnt != ~0u && ScoreBrackets.counterOutOfOrder(CT)) {
1713 AnyOutOfOrder = true;
1714 break;
1715 }
1716 }
1717
1718 if (AnyOutOfOrder) {
1719 // Fall back to non-expanded wait
1720 unsigned Enc = AMDGPU::encodeWaitcnt(IV, Wait);
1721 BuildMI(Block, It, DL, TII->get(AMDGPU::S_WAITCNT)).addImm(Enc);
1722 Modified = true;
1723 } else {
1724 // All counters are in-order, safe to expand
1725 for (auto CT : {LOAD_CNT, DS_CNT, EXP_CNT}) {
1726 unsigned &WaitCnt = getCounterRef(Wait, CT);
1727 if (WaitCnt == ~0u)
1728 continue;
1729
1730 unsigned Outstanding = std::min(ScoreBrackets.getScoreUB(CT) -
1731 ScoreBrackets.getScoreLB(CT),
1732 getWaitCountMax(getLimits(), CT) - 1);
1733 EmitExpandedWaitcnt(Outstanding, WaitCnt, [&](unsigned Count) {
1734 AMDGPU::Waitcnt W;
1735 getCounterRef(W, CT) = Count;
1736 BuildMI(Block, It, DL, TII->get(AMDGPU::S_WAITCNT))
1737 .addImm(AMDGPU::encodeWaitcnt(IV, W));
1738 });
1739 }
1740 }
1741 } else {
1742 // Normal behavior: emit single combined waitcnt
1743 unsigned Enc = AMDGPU::encodeWaitcnt(IV, Wait);
1744 [[maybe_unused]] auto SWaitInst =
1745 BuildMI(Block, It, DL, TII->get(AMDGPU::S_WAITCNT)).addImm(Enc);
1746 Modified = true;
1747
1748 LLVM_DEBUG(dbgs() << "PreGFX12::createNewWaitcnt\n";
1749 if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
1750 dbgs() << "New Instr: " << *SWaitInst << '\n');
1751 }
1752 }
1753
1754 if (Wait.hasWaitStoreCnt()) {
1755 assert(ST->hasVscnt());
1756
1757 if (ExpandWaitcntProfiling && Wait.StoreCnt != ~0u &&
1758 !ScoreBrackets.counterOutOfOrder(STORE_CNT)) {
1759 // Only expand if counter is not out-of-order
1760 unsigned Outstanding =
1761 std::min(ScoreBrackets.getScoreUB(STORE_CNT) -
1762 ScoreBrackets.getScoreLB(STORE_CNT),
1763 getWaitCountMax(getLimits(), STORE_CNT) - 1);
1764 EmitExpandedWaitcnt(Outstanding, Wait.StoreCnt, [&](unsigned Count) {
1765 BuildMI(Block, It, DL, TII->get(AMDGPU::S_WAITCNT_VSCNT))
1766 .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
1767 .addImm(Count);
1768 });
1769 } else {
1770 [[maybe_unused]] auto SWaitInst =
1771 BuildMI(Block, It, DL, TII->get(AMDGPU::S_WAITCNT_VSCNT))
1772 .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
1773 .addImm(Wait.StoreCnt);
1774 Modified = true;
1775
1776 LLVM_DEBUG(dbgs() << "PreGFX12::createNewWaitcnt\n";
1777 if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
1778 dbgs() << "New Instr: " << *SWaitInst << '\n');
1779 }
1780 }
1781
1782 return Modified;
1783}
1784
1785AMDGPU::Waitcnt
1786WaitcntGeneratorPreGFX12::getAllZeroWaitcnt(bool IncludeVSCnt) const {
1787 return AMDGPU::Waitcnt(0, 0, 0, IncludeVSCnt && ST->hasVscnt() ? 0 : ~0u);
1788}
1789
1790AMDGPU::Waitcnt
1791WaitcntGeneratorGFX12Plus::getAllZeroWaitcnt(bool IncludeVSCnt) const {
1792 unsigned ExpertVal = IsExpertMode ? 0 : ~0u;
1793 return AMDGPU::Waitcnt(0, 0, 0, IncludeVSCnt ? 0 : ~0u, 0, 0, 0,
1794 ~0u /* XCNT */, ExpertVal, ExpertVal);
1795}
1796
1797/// Combine consecutive S_WAIT_*CNT instructions that precede \p It and
1798/// follow \p OldWaitcntInstr and apply any extra waits from \p Wait that
1799/// were added by previous passes. Currently this pass conservatively
1800/// assumes that these preexisting waits are required for correctness.
1801bool WaitcntGeneratorGFX12Plus::applyPreexistingWaitcnt(
1802 WaitcntBrackets &ScoreBrackets, MachineInstr &OldWaitcntInstr,
1803 AMDGPU::Waitcnt &Wait, MachineBasicBlock::instr_iterator It) const {
1804 assert(ST);
1805 assert(!isNormalMode(MaxCounter));
1806
1807 bool Modified = false;
1808 MachineInstr *CombinedLoadDsCntInstr = nullptr;
1809 MachineInstr *CombinedStoreDsCntInstr = nullptr;
1810 MachineInstr *WaitcntDepctrInstr = nullptr;
1811 MachineInstr *WaitInstrs[NUM_EXTENDED_INST_CNTS] = {};
1812
1813 LLVM_DEBUG({
1814 dbgs() << "GFX12Plus::applyPreexistingWaitcnt at: ";
1815 if (It.isEnd())
1816 dbgs() << "end of block\n";
1817 else
1818 dbgs() << *It;
1819 });
1820
1821 // Accumulate waits that should not be simplified.
1822 AMDGPU::Waitcnt RequiredWait;
1823
1824 for (auto &II :
1825 make_early_inc_range(make_range(OldWaitcntInstr.getIterator(), It))) {
1826 LLVM_DEBUG(dbgs() << "pre-existing iter: " << II);
1827 if (II.isMetaInstruction()) {
1828 LLVM_DEBUG(dbgs() << "skipped meta instruction\n");
1829 continue;
1830 }
1831
1832 MachineInstr **UpdatableInstr;
1833
1834 // Update required wait count. If this is a soft waitcnt (= it was added
1835 // by an earlier pass), it may be entirely removed.
1836
1837 unsigned Opcode = SIInstrInfo::getNonSoftWaitcntOpcode(II.getOpcode());
1838 bool TrySimplify = Opcode != II.getOpcode() && !OptNone;
1839
1840 // Don't crash if the programmer used legacy waitcnt intrinsics, but don't
1841 // attempt to do more than that either.
1842 if (Opcode == AMDGPU::S_WAITCNT)
1843 continue;
1844
1845 if (Opcode == AMDGPU::S_WAIT_LOADCNT_DSCNT) {
1846 unsigned OldEnc =
1847 TII->getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
1848 AMDGPU::Waitcnt OldWait = AMDGPU::decodeLoadcntDscnt(IV, OldEnc);
1849 if (TrySimplify)
1850 Wait = Wait.combined(OldWait);
1851 else
1852 RequiredWait = RequiredWait.combined(OldWait);
1853 UpdatableInstr = &CombinedLoadDsCntInstr;
1854 } else if (Opcode == AMDGPU::S_WAIT_STORECNT_DSCNT) {
1855 unsigned OldEnc =
1856 TII->getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
1857 AMDGPU::Waitcnt OldWait = AMDGPU::decodeStorecntDscnt(IV, OldEnc);
1858 if (TrySimplify)
1859 Wait = Wait.combined(OldWait);
1860 else
1861 RequiredWait = RequiredWait.combined(OldWait);
1862 UpdatableInstr = &CombinedStoreDsCntInstr;
1863 } else if (Opcode == AMDGPU::S_WAITCNT_DEPCTR) {
1864 unsigned OldEnc =
1865 TII->getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
1866 AMDGPU::Waitcnt OldWait;
1867 OldWait.VaVdst = AMDGPU::DepCtr::decodeFieldVaVdst(OldEnc);
1868 OldWait.VmVsrc = AMDGPU::DepCtr::decodeFieldVmVsrc(OldEnc);
1869 if (TrySimplify)
1870 ScoreBrackets.simplifyWaitcnt(OldWait);
1871 Wait = Wait.combined(OldWait);
1872 UpdatableInstr = &WaitcntDepctrInstr;
1873 } else if (Opcode == AMDGPU::S_WAITCNT_lds_direct) {
1874 // Architectures higher than GFX10 do not have direct loads to
1875 // LDS, so no work required here yet.
1876 II.eraseFromParent();
1877 continue;
1878 } else {
1879 std::optional<InstCounterType> CT = counterTypeForInstr(Opcode);
1880 assert(CT.has_value());
1881 unsigned OldCnt =
1882 TII->getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
1883 if (TrySimplify)
1884 addWait(Wait, CT.value(), OldCnt);
1885 else
1886 addWait(RequiredWait, CT.value(), OldCnt);
1887 UpdatableInstr = &WaitInstrs[CT.value()];
1888 }
1889
1890 // Merge consecutive waitcnt of the same type by erasing multiples.
1891 if (!*UpdatableInstr) {
1892 *UpdatableInstr = &II;
1893 } else if (Opcode == AMDGPU::S_WAITCNT_DEPCTR) {
1894 // S_WAITCNT_DEPCTR requires special care. Don't remove a
1895 // duplicate if it is waiting on things other than VA_VDST or
1896 // VM_VSRC. If that is the case, just make sure the VA_VDST and
1897 // VM_VSRC subfields of the operand are set to the "no wait"
1898 // values.
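// (Writing ~0u into a subfield sets it to its maximum encodable value,
// which is the "no wait" default for DEPCTR fields.)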
1899
1900 unsigned Enc = TII->getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
1901 Enc = AMDGPU::DepCtr::encodeFieldVmVsrc(Enc, ~0u);
1902 Enc = AMDGPU::DepCtr::encodeFieldVaVdst(Enc, ~0u);
1903
1904 if (Enc != (unsigned)AMDGPU::DepCtr::getDefaultDepCtrEncoding(*ST)) {
1905 Modified |= updateOperandIfDifferent(II, AMDGPU::OpName::simm16, Enc);
1906 Modified |= promoteSoftWaitCnt(&II);
1907 } else {
1908 II.eraseFromParent();
1909 Modified = true;
1910 }
1911 } else {
1912 II.eraseFromParent();
1913 Modified = true;
1914 }
1915 }
1916
1917 ScoreBrackets.simplifyWaitcnt(Wait.combined(RequiredWait), Wait);
1918 Wait = Wait.combined(RequiredWait);
1919
1920 if (CombinedLoadDsCntInstr) {
1921 // Only keep an S_WAIT_LOADCNT_DSCNT if both counters actually need
1922 // to be waited for. Otherwise, let the instruction be deleted so
1923 // the appropriate single counter wait instruction can be inserted
1924 // instead, when new S_WAIT_*CNT instructions are inserted by
1925 // createNewWaitcnt(). As a side effect, resetting the wait counts will
1926 // cause any redundant S_WAIT_LOADCNT or S_WAIT_DSCNT to be removed by
1927 // the loop below that deals with single counter instructions.
1928 //
1929 // A wait for LOAD_CNT or DS_CNT implies a wait for VM_VSRC, since
1930 // instructions that have decremented LOAD_CNT or DS_CNT on completion
1931 // will have needed to wait for their register sources to be available
1932 // first.
1933 if (Wait.LoadCnt != ~0u && Wait.DsCnt != ~0u) {
1934 unsigned NewEnc = AMDGPU::encodeLoadcntDscnt(IV, Wait);
1935 Modified |= updateOperandIfDifferent(*CombinedLoadDsCntInstr,
1936 AMDGPU::OpName::simm16, NewEnc);
1937 Modified |= promoteSoftWaitCnt(CombinedLoadDsCntInstr);
1938 ScoreBrackets.applyWaitcnt(LOAD_CNT, Wait.LoadCnt);
1939 ScoreBrackets.applyWaitcnt(DS_CNT, Wait.DsCnt);
1940 Wait.LoadCnt = ~0u;
1941 Wait.DsCnt = ~0u;
1942
1943 LLVM_DEBUG(It.isEnd() ? dbgs() << "applied pre-existing waitcnt\n"
1944 << "New Instr at block end: "
1945 << *CombinedLoadDsCntInstr << '\n'
1946 : dbgs() << "applied pre-existing waitcnt\n"
1947 << "Old Instr: " << *It << "New Instr: "
1948 << *CombinedLoadDsCntInstr << '\n');
1949 } else {
1950 CombinedLoadDsCntInstr->eraseFromParent();
1951 Modified = true;
1952 }
1953 }
1954
1955 if (CombinedStoreDsCntInstr) {
1956 // Similarly for S_WAIT_STORECNT_DSCNT.
1957 if (Wait.StoreCnt != ~0u && Wait.DsCnt != ~0u) {
1958 unsigned NewEnc = AMDGPU::encodeStorecntDscnt(IV, Wait);
1959 Modified |= updateOperandIfDifferent(*CombinedStoreDsCntInstr,
1960 AMDGPU::OpName::simm16, NewEnc);
1961 Modified |= promoteSoftWaitCnt(CombinedStoreDsCntInstr);
1962 ScoreBrackets.applyWaitcnt(STORE_CNT, Wait.StoreCnt);
1963 ScoreBrackets.applyWaitcnt(DS_CNT, Wait.DsCnt);
1964 Wait.StoreCnt = ~0u;
1965 Wait.DsCnt = ~0u;
1966
1967 LLVM_DEBUG(It.isEnd() ? dbgs() << "applied pre-existing waitcnt\n"
1968 << "New Instr at block end: "
1969 << *CombinedStoreDsCntInstr << '\n'
1970 : dbgs() << "applied pre-existing waitcnt\n"
1971 << "Old Instr: " << *It << "New Instr: "
1972 << *CombinedStoreDsCntInstr << '\n');
1973 } else {
1974 CombinedStoreDsCntInstr->eraseFromParent();
1975 Modified = true;
1976 }
1977 }
1978
1979 // Look for an opportunity to convert existing S_WAIT_LOADCNT,
1980 // S_WAIT_STORECNT and S_WAIT_DSCNT into new S_WAIT_LOADCNT_DSCNT
1981 // or S_WAIT_STORECNT_DSCNT. This is achieved by selectively removing
1982 // instructions so that createNewWaitcnt() will create new combined
1983 // instructions to replace them.
1984
1985 if (Wait.DsCnt != ~0u) {
1986 // This is a vector of addresses in WaitInstrs pointing to instructions
1987 // that should be removed if they are present.
1988 SmallVector<MachineInstr **, 2> WaitsToErase;
1989
1990 // If it's known that both DScnt and either LOADcnt or STOREcnt (but not
1991 // both) need to be waited for, ensure that there are no existing
1992 // individual wait count instructions for these.
1993
1994 if (Wait.LoadCnt != ~0u) {
1995 WaitsToErase.push_back(&WaitInstrs[LOAD_CNT]);
1996 WaitsToErase.push_back(&WaitInstrs[DS_CNT]);
1997 } else if (Wait.StoreCnt != ~0u) {
1998 WaitsToErase.push_back(&WaitInstrs[STORE_CNT]);
1999 WaitsToErase.push_back(&WaitInstrs[DS_CNT]);
2000 }
2001
2002 for (MachineInstr **WI : WaitsToErase) {
2003 if (!*WI)
2004 continue;
2005
2006 (*WI)->eraseFromParent();
2007 *WI = nullptr;
2008 Modified = true;
2009 }
2010 }
2011
2012 for (auto CT : inst_counter_types(NUM_EXTENDED_INST_CNTS)) {
2013 if (!WaitInstrs[CT])
2014 continue;
2015
2016 unsigned NewCnt = getWait(Wait, CT);
2017 if (NewCnt != ~0u) {
2018 Modified |= updateOperandIfDifferent(*WaitInstrs[CT],
2019 AMDGPU::OpName::simm16, NewCnt);
2020 Modified |= promoteSoftWaitCnt(WaitInstrs[CT]);
2021
2022 ScoreBrackets.applyWaitcnt(CT, NewCnt);
2023 setNoWait(Wait, CT);
2024
2025 LLVM_DEBUG(It.isEnd()
2026 ? dbgs() << "applied pre-existing waitcnt\n"
2027 << "New Instr at block end: " << *WaitInstrs[CT]
2028 << '\n'
2029 : dbgs() << "applied pre-existing waitcnt\n"
2030 << "Old Instr: " << *It
2031 << "New Instr: " << *WaitInstrs[CT] << '\n');
2032 } else {
2033 WaitInstrs[CT]->eraseFromParent();
2034 Modified = true;
2035 }
2036 }
2037
2038 if (WaitcntDepctrInstr) {
2039 // Get the encoded Depctr immediate and override the VA_VDST and VM_VSRC
2040 // subfields with the new required values.
2041 unsigned Enc =
2042 TII->getNamedOperand(*WaitcntDepctrInstr, AMDGPU::OpName::simm16)
2043 ->getImm();
2044 Enc = AMDGPU::DepCtr::encodeFieldVmVsrc(Enc, Wait.VmVsrc);
2045 Enc = AMDGPU::DepCtr::encodeFieldVaVdst(Enc, Wait.VaVdst);
2046
2047 ScoreBrackets.applyWaitcnt(VA_VDST, Wait.VaVdst);
2048 ScoreBrackets.applyWaitcnt(VM_VSRC, Wait.VmVsrc);
2049 Wait.VaVdst = ~0u;
2050 Wait.VmVsrc = ~0u;
2051
2052 // If that new encoded Depctr immediate would actually still wait
2053 // for anything, update the instruction's operand. Otherwise it can
2054 // just be deleted.
2055 if (Enc != (unsigned)AMDGPU::DepCtr::getDefaultDepCtrEncoding(*ST)) {
2056 Modified |= updateOperandIfDifferent(*WaitcntDepctrInstr,
2057 AMDGPU::OpName::simm16, Enc);
2058 LLVM_DEBUG(It.isEnd() ? dbgs() << "applyPreexistingWaitcnt\n"
2059 << "New Instr at block end: "
2060 << *WaitcntDepctrInstr << '\n'
2061 : dbgs() << "applyPreexistingWaitcnt\n"
2062 << "Old Instr: " << *It << "New Instr: "
2063 << *WaitcntDepctrInstr << '\n');
2064 } else {
2065 WaitcntDepctrInstr->eraseFromParent();
2066 Modified = true;
2067 }
2068 }
2069
2070 return Modified;
2071}
2072
2073/// Generate S_WAIT_*CNT instructions for any required counters in \p Wait
2074bool WaitcntGeneratorGFX12Plus::createNewWaitcnt(
2075 MachineBasicBlock &Block, MachineBasicBlock::instr_iterator It,
2076 AMDGPU::Waitcnt Wait, const WaitcntBrackets &ScoreBrackets) {
2077 assert(ST);
2078 assert(!isNormalMode(MaxCounter));
2079
2080 bool Modified = false;
2081 const DebugLoc &DL = Block.findDebugLoc(It);
2082
2083 // Helper to emit expanded waitcnt sequence for profiling.
2084 auto EmitExpandedWaitcnt = [&](unsigned Outstanding, unsigned Target,
2085 auto EmitWaitcnt) {
2086 for (unsigned I = Outstanding - 1; I > Target && I != ~0u; --I)
2087 EmitWaitcnt(I);
2088 EmitWaitcnt(Target);
2089 Modified = true;
2090 };
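// Illustration: with Outstanding = 2 and Target = 0 the loop above emits a
// wait for count 1, and the trailing EmitWaitcnt(Target) call emits the wait
// for count 0 that the pass actually requires.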
2091
2092 // For GFX12+, we use separate wait instructions, which makes expansion
2093 // simpler
2094 if (ExpandWaitcntProfiling) {
2095 for (auto CT : inst_counter_types(NUM_EXTENDED_INST_CNTS)) {
2096 unsigned Count = getWait(Wait, CT);
2097 if (Count == ~0u)
2098 continue;
2099
2100 // Skip expansion for out-of-order counters - emit normal wait instead
2101 if (ScoreBrackets.counterOutOfOrder(CT)) {
2102 BuildMI(Block, It, DL, TII->get(instrsForExtendedCounterTypes[CT]))
2103 .addImm(Count);
2104 Modified = true;
2105 continue;
2106 }
2107
2108 unsigned Outstanding =
2109 std::min(ScoreBrackets.getScoreUB(CT) - ScoreBrackets.getScoreLB(CT),
2110 getWaitCountMax(getLimits(), CT) - 1);
2111 EmitExpandedWaitcnt(Outstanding, Count, [&](unsigned Val) {
2112 BuildMI(Block, It, DL, TII->get(instrsForExtendedCounterTypes[CT]))
2113 .addImm(Val);
2114 });
2115 }
2116 return Modified;
2117 }
2118
2119 // Normal behavior (no expansion)
2120 // Check for opportunities to use combined wait instructions.
2121 if (Wait.DsCnt != ~0u) {
2122 MachineInstr *SWaitInst = nullptr;
2123
2124 if (Wait.LoadCnt != ~0u) {
2125 unsigned Enc = AMDGPU::encodeLoadcntDscnt(IV, Wait);
2126
2127 SWaitInst = BuildMI(Block, It, DL, TII->get(AMDGPU::S_WAIT_LOADCNT_DSCNT))
2128 .addImm(Enc);
2129
2130 Wait.LoadCnt = ~0u;
2131 Wait.DsCnt = ~0u;
2132 } else if (Wait.StoreCnt != ~0u) {
2133 unsigned Enc = AMDGPU::encodeStorecntDscnt(IV, Wait);
2134
2135 SWaitInst =
2136 BuildMI(Block, It, DL, TII->get(AMDGPU::S_WAIT_STORECNT_DSCNT))
2137 .addImm(Enc);
2138
2139 Wait.StoreCnt = ~0u;
2140 Wait.DsCnt = ~0u;
2141 }
2142
2143 if (SWaitInst) {
2144 Modified = true;
2145
2146 LLVM_DEBUG(dbgs() << "GFX12Plus::createNewWaitcnt\n";
2147 if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
2148 dbgs() << "New Instr: " << *SWaitInst << '\n');
2149 }
2150 }
2151
2152 // Generate an instruction for any remaining counter that needs
2153 // waiting for.
2154
2155 for (auto CT : inst_counter_types(NUM_EXTENDED_INST_CNTS)) {
2156 unsigned Count = getWait(Wait, CT);
2157 if (Count == ~0u)
2158 continue;
2159
2160 [[maybe_unused]] auto SWaitInst =
2161 BuildMI(Block, It, DL, TII->get(instrsForExtendedCounterTypes[CT]))
2162 .addImm(Count);
2163
2164 Modified = true;
2165
2166 LLVM_DEBUG(dbgs() << "GFX12Plus::createNewWaitcnt\n";
2167 if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
2168 dbgs() << "New Instr: " << *SWaitInst << '\n');
2169 }
2170
2171 if (Wait.hasWaitDepctr()) {
2172 assert(IsExpertMode);
2173 unsigned Enc = AMDGPU::DepCtr::encodeFieldVmVsrc(Wait.VmVsrc, *ST);
2174 Enc = AMDGPU::DepCtr::encodeFieldVaVdst(Enc, Wait.VaVdst);
2175
2176 [[maybe_unused]] auto SWaitInst =
2177 BuildMI(Block, It, DL, TII->get(AMDGPU::S_WAITCNT_DEPCTR)).addImm(Enc);
2178
2179 Modified = true;
2180
2181 LLVM_DEBUG(dbgs() << "generateWaitcnt\n";
2182 if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
2183 dbgs() << "New Instr: " << *SWaitInst << '\n');
2184 }
2185
2186 return Modified;
2187}
2188
2189/// Generate s_waitcnt instruction to be placed before \p MI.
2190/// Instructions of a given type are returned in order,
2191/// but instructions of different types can complete out of order.
2192/// We rely on this in-order completion
2193/// and simply assign a score to the memory access instructions.
2194/// We keep track of the active "score bracket" to determine
2195/// if an access of a memory read requires an s_waitcnt
2196/// and if so what the value of each counter is.
2197/// The "score bracket" is bound by the lower bound and upper bound
2198/// scores (*_score_LB and *_score_ub respectively).
2199/// If FlushFlags.FlushVmCnt is true, we want to flush the vmcnt counter here.
2200/// If FlushFlags.FlushDsCnt is true, we want to flush the dscnt counter here
2201/// (GFX12+ only, where DS_CNT is a separate counter).
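///
/// Illustration (hypothetical values): if three VMEM loads have been issued
/// since the last wait, the LOAD_CNT bracket spans three scores above its
/// lower bound. An instruction reading the register written by the oldest of
/// the three only needs the counter to drop to 2 (e.g. s_waitcnt vmcnt(2) on
/// pre-GFX12 targets), while reading the newest requires waiting for 0.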
2202bool SIInsertWaitcnts::generateWaitcntInstBefore(
2203 MachineInstr &MI, WaitcntBrackets &ScoreBrackets,
2204 MachineInstr *OldWaitcntInstr, PreheaderFlushFlags FlushFlags) {
2205 setForceEmitWaitcnt();
2206
2207 assert(!MI.isMetaInstruction());
2208
2209 AMDGPU::Waitcnt Wait;
2210 const unsigned Opc = MI.getOpcode();
2211
2212 // FIXME: This should have already been handled by the memory legalizer.
2213 // Removing this currently doesn't affect any lit tests, but we need to
2214 // verify that nothing was relying on this. The number of buffer invalidates
2215 // being handled here should not be expanded.
2216 if (Opc == AMDGPU::BUFFER_WBINVL1 || Opc == AMDGPU::BUFFER_WBINVL1_SC ||
2217 Opc == AMDGPU::BUFFER_WBINVL1_VOL || Opc == AMDGPU::BUFFER_GL0_INV ||
2218 Opc == AMDGPU::BUFFER_GL1_INV) {
2219 Wait.LoadCnt = 0;
2220 }
2221
2222 // All waits must be resolved at call return.
2223 // NOTE: this could be improved with knowledge of all call sites or
2224 // with knowledge of the called routines.
2225 if (Opc == AMDGPU::SI_RETURN_TO_EPILOG || Opc == AMDGPU::SI_RETURN ||
2226 Opc == AMDGPU::SI_WHOLE_WAVE_FUNC_RETURN ||
2227 Opc == AMDGPU::S_SETPC_B64_return) {
2228 ReturnInsts.insert(&MI);
2229 AMDGPU::Waitcnt AllZeroWait =
2230 WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/false);
2231 // On GFX12+, if LOAD_CNT is pending but no VGPRs are waiting for loads
2232 // (e.g., only GLOBAL_INV is pending), we can skip waiting on loadcnt.
2233 // GLOBAL_INV increments loadcnt but doesn't write to VGPRs, so there's
2234 // no need to wait for it at function boundaries.
2235 if (ST->hasExtendedWaitCounts() &&
2236 !ScoreBrackets.hasPendingEvent(VMEM_ACCESS))
2237 AllZeroWait.LoadCnt = ~0u;
2238 Wait = Wait.combined(AllZeroWait);
2239 }
2240 // In dynamic VGPR mode, we want to release the VGPRs before the wave exits.
2241 // Technically the hardware will do this on its own if we don't, but that
2242 // might cost extra cycles compared to doing it explicitly.
2243 // When not in dynamic VGPR mode, identify S_ENDPGM instructions which may
2244 // have to wait for outstanding VMEM stores. In this case it can be useful to
2245 // send a message to explicitly release all VGPRs before the stores have
2246 // completed, but it is only safe to do this if there are no outstanding
2247 // scratch stores.
2248 else if (Opc == AMDGPU::S_ENDPGM || Opc == AMDGPU::S_ENDPGM_SAVED) {
2249 if (!WCG->isOptNone() &&
2250 (MI.getMF()->getInfo<SIMachineFunctionInfo>()->isDynamicVGPREnabled() ||
2251 (ST->getGeneration() >= AMDGPUSubtarget::GFX11 &&
2252 ScoreBrackets.getScoreRange(STORE_CNT) != 0 &&
2253 !ScoreBrackets.hasPendingEvent(SCRATCH_WRITE_ACCESS))))
2254 ReleaseVGPRInsts.insert(&MI);
2255 }
2256 // Resolve vm waits before gs-done.
2257 else if ((Opc == AMDGPU::S_SENDMSG || Opc == AMDGPU::S_SENDMSGHALT) &&
2258 ST->hasLegacyGeometry() &&
2259 ((MI.getOperand(0).getImm() & AMDGPU::SendMsg::ID_MASK_PreGFX11_) ==
2260 AMDGPU::SendMsg::ID_GS_DONE_PreGFX11)) {
2261 Wait.LoadCnt = 0;
2262 }
2263
2264 // Export & GDS instructions do not read the EXEC mask until after the export
2265 // is granted (which can occur well after the instruction is issued).
2266 // The shader program must flush all EXP operations on the export-count
2267 // before overwriting the EXEC mask.
2268 else {
2269 if (MI.modifiesRegister(AMDGPU::EXEC, TRI)) {
2270 // Export and GDS are tracked individually, either may trigger a waitcnt
2271 // for EXEC.
2272 if (ScoreBrackets.hasPendingEvent(EXP_GPR_LOCK) ||
2273 ScoreBrackets.hasPendingEvent(EXP_PARAM_ACCESS) ||
2274 ScoreBrackets.hasPendingEvent(EXP_POS_ACCESS) ||
2275 ScoreBrackets.hasPendingEvent(GDS_GPR_LOCK)) {
2276 Wait.ExpCnt = 0;
2277 }
2278 }
2279
2280 // Wait for any pending GDS instruction to complete before any
2281 // "Always GDS" instruction.
2282 if (TII->isAlwaysGDS(Opc) && ScoreBrackets.hasPendingGDS())
2283 addWait(Wait, DS_CNT, ScoreBrackets.getPendingGDSWait());
2284
2285 if (MI.isCall()) {
2286 // The function is going to insert a wait on everything in its prolog.
2287 // This still needs to be careful if the call target is a load (e.g. a GOT
2288 // load). We also need to check WAW dependency with saved PC.
2289 CallInsts.insert(&MI);
2290 Wait = AMDGPU::Waitcnt();
2291
2292 const MachineOperand &CallAddrOp = TII->getCalleeOperand(MI);
2293 if (CallAddrOp.isReg()) {
2294 ScoreBrackets.determineWaitForPhysReg(
2295 SmemAccessCounter, CallAddrOp.getReg().asMCReg(), Wait);
2296
2297 if (const auto *RtnAddrOp =
2298 TII->getNamedOperand(MI, AMDGPU::OpName::dst)) {
2299 ScoreBrackets.determineWaitForPhysReg(
2300 SmemAccessCounter, RtnAddrOp->getReg().asMCReg(), Wait);
2301 }
2302 }
2303 } else if (Opc == AMDGPU::S_BARRIER_WAIT) {
2304 ScoreBrackets.tryClearSCCWriteEvent(&MI);
2305 } else {
2306 // FIXME: Should not be relying on memoperands.
2307 // Look at the source operands of every instruction to see if
2308 // any of them results from a previous memory operation that affects
2309 // its current usage. If so, an s_waitcnt instruction needs to be
2310 // emitted.
2311 // If the source operand was defined by a load, add the s_waitcnt
2312 // instruction.
2313 //
2314 // Two cases are handled for destination operands:
2315 // 1) If the destination operand was defined by a load, add the s_waitcnt
2316 // instruction to guarantee the right WAW order.
2317 // 2) If the destination operand was used by a recent export/store
2318 // instruction, add s_waitcnt on exp_cnt to guarantee the WAR order.
2319
2320 for (const MachineMemOperand *Memop : MI.memoperands()) {
2321 const Value *Ptr = Memop->getValue();
2322 if (Memop->isStore()) {
2323 if (auto It = SLoadAddresses.find(Ptr); It != SLoadAddresses.end()) {
2324 addWait(Wait, SmemAccessCounter, 0);
2325 if (PDT->dominates(MI.getParent(), It->second))
2326 SLoadAddresses.erase(It);
2327 }
2328 }
2329 unsigned AS = Memop->getAddrSpace();
2330 if (AS != AMDGPUAS::LOCAL_ADDRESS && AS != AMDGPUAS::FLAT_ADDRESS)
2331 continue;
2332 // No need to wait before load from VMEM to LDS.
2333 if (TII->mayWriteLDSThroughDMA(MI))
2334 continue;
2335
2336 // LOAD_CNT is only relevant to vgpr or LDS.
2337 unsigned TID = LDSDMA_BEGIN;
2338 if (Ptr && Memop->getAAInfo()) {
2339 const auto &LDSDMAStores = ScoreBrackets.getLDSDMAStores();
2340 for (unsigned I = 0, E = LDSDMAStores.size(); I != E; ++I) {
2341 if (MI.mayAlias(AA, *LDSDMAStores[I], true)) {
2342 if ((I + 1) >= NUM_LDSDMA) {
2343 // We didn't have enough slots to track this LDS DMA store; it
2344 // has been tracked using the common RegNo (FIRST_LDS_VGPR).
2345 ScoreBrackets.determineWaitForLDSDMA(LOAD_CNT, TID, Wait);
2346 break;
2347 }
2348
2349 ScoreBrackets.determineWaitForLDSDMA(LOAD_CNT, TID + I + 1, Wait);
2350 }
2351 }
2352 } else {
2353 ScoreBrackets.determineWaitForLDSDMA(LOAD_CNT, TID, Wait);
2354 }
2355 if (Memop->isStore()) {
2356 ScoreBrackets.determineWaitForLDSDMA(EXP_CNT, TID, Wait);
2357 }
2358 }
2359
2360 // Loop over use and def operands.
2361 for (const MachineOperand &Op : MI.operands()) {
2362 if (!Op.isReg())
2363 continue;
2364
2365 // If the instruction does not read tied source, skip the operand.
2366 if (Op.isTied() && Op.isUse() && TII->doesNotReadTiedSource(MI))
2367 continue;
2368
2369 MCPhysReg Reg = Op.getReg().asMCReg();
2370
2371 const bool IsVGPR = TRI->isVectorRegister(*MRI, Op.getReg());
2372 if (IsVGPR) {
2373 // Implicit VGPR defs and uses are never a part of the memory
2374 // instructions description and usually present to account for
2375 // super-register liveness.
2376 // TODO: Most of the other instructions also have implicit uses
2377 // for the liveness accounting only.
2378 if (Op.isImplicit() && MI.mayLoadOrStore())
2379 continue;
2380
2381 ScoreBrackets.determineWaitForPhysReg(VA_VDST, Reg, Wait);
2382 if (Op.isDef())
2383 ScoreBrackets.determineWaitForPhysReg(VM_VSRC, Reg, Wait);
2384 // RAW always needs an s_waitcnt. WAW needs an s_waitcnt unless the
2385 // previous write and this write are the same type of VMEM
2386 // instruction, in which case they are (in some architectures)
2387 // guaranteed to write their results in order anyway.
2388 // Additionally check instructions where Point Sample Acceleration
2389 // might be applied.
2390 if (Op.isUse() || !updateVMCntOnly(MI) ||
2391 ScoreBrackets.hasOtherPendingVmemTypes(Reg, getVmemType(MI)) ||
2392 ScoreBrackets.hasPointSamplePendingVmemTypes(MI, Reg) ||
2393 !ST->hasVmemWriteVgprInOrder()) {
2394 ScoreBrackets.determineWaitForPhysReg(LOAD_CNT, Reg, Wait);
2395 ScoreBrackets.determineWaitForPhysReg(SAMPLE_CNT, Reg, Wait);
2396 ScoreBrackets.determineWaitForPhysReg(BVH_CNT, Reg, Wait);
2397 ScoreBrackets.clearVgprVmemTypes(Reg);
2398 }
2399
2400 if (Op.isDef() || ScoreBrackets.hasPendingEvent(EXP_LDS_ACCESS)) {
2401 ScoreBrackets.determineWaitForPhysReg(EXP_CNT, Reg, Wait);
2402 }
2403 ScoreBrackets.determineWaitForPhysReg(DS_CNT, Reg, Wait);
2404 } else if (Op.getReg() == AMDGPU::SCC) {
2405 ScoreBrackets.determineWaitForPhysReg(KM_CNT, Reg, Wait);
2406 } else {
2407 ScoreBrackets.determineWaitForPhysReg(SmemAccessCounter, Reg, Wait);
2408 }
2409
2410 if (ST->hasWaitXCnt() && Op.isDef())
2411 ScoreBrackets.determineWaitForPhysReg(X_CNT, Reg, Wait);
2412 }
2413 }
2414 }
2415
2416 // Ensure safety against exceptions from outstanding memory operations while
2417 // waiting for a barrier:
2418 //
2419 // * Some subtargets safely handle backing off the barrier in hardware
2420 // when an exception occurs.
2421 // * Some subtargets have an implicit S_WAITCNT 0 before barriers, so that
2422 // there can be no outstanding memory operations during the wait.
2423 // * Subtargets with split barriers don't need to back off the barrier; it
2424 // is up to the trap handler to preserve the user barrier state correctly.
2425 //
2426 // In all other cases, ensure safety by ensuring that there are no outstanding
2427 // memory operations.
2428 if (Opc == AMDGPU::S_BARRIER && !ST->hasAutoWaitcntBeforeBarrier() &&
2429 !ST->supportsBackOffBarrier()) {
2430 Wait = Wait.combined(WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/true));
2431 }
2432
2433 // TODO: Remove this work-around, enable the assert for Bug 457939
2434 // after fixing the scheduler. Also, the Shader Compiler code is
2435 // independent of target.
2436 if (SIInstrInfo::isCBranchVCCZRead(MI) && ST->hasReadVCCZBug() &&
2437 ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
2438 Wait.DsCnt = 0;
2439 }
2440
2441 // Verify that the wait is actually needed.
2442 ScoreBrackets.simplifyWaitcnt(Wait);
2443
2444 // It is only necessary to insert an S_WAITCNT_DEPCTR instruction that
2445 // waits on VA_VDST if the instruction it would precede is not a VALU
2446 // instruction, since hardware handles VALU->VGPR->VALU hazards in
2447 // expert scheduling mode.
2448 if (TII->isVALU(MI))
2449 Wait.VaVdst = ~0u;
2450
2451 // Since the translation for VMEM addresses occur in-order, we can apply the
2452 // XCnt if the current instruction is of VMEM type and has a memory
2453 // dependency with another VMEM instruction in flight.
2454 if (Wait.XCnt != ~0u && isVmemAccess(MI)) {
2455 ScoreBrackets.applyWaitcnt(X_CNT, Wait.XCnt);
2456 Wait.XCnt = ~0u;
2457 }
2458
2459 // When forcing emission, skip terminators: emitting a waitcnt between
2460 // terminators would break the MBB's terminator sequence.
2461 if (ForceEmitZeroFlag && !MI.isTerminator())
2462 Wait = WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/false);
2463
2464 if (ForceEmitWaitcnt[LOAD_CNT])
2465 Wait.LoadCnt = 0;
2466 if (ForceEmitWaitcnt[EXP_CNT])
2467 Wait.ExpCnt = 0;
2468 if (ForceEmitWaitcnt[DS_CNT])
2469 Wait.DsCnt = 0;
2470 if (ForceEmitWaitcnt[SAMPLE_CNT])
2471 Wait.SampleCnt = 0;
2472 if (ForceEmitWaitcnt[BVH_CNT])
2473 Wait.BvhCnt = 0;
2474 if (ForceEmitWaitcnt[KM_CNT])
2475 Wait.KmCnt = 0;
2476 if (ForceEmitWaitcnt[X_CNT])
2477 Wait.XCnt = 0;
2478 // Only force emit VA_VDST and VM_VSRC if expert mode is enabled.
2479 if (IsExpertMode) {
2480 if (ForceEmitWaitcnt[VA_VDST])
2481 Wait.VaVdst = 0;
2482 if (ForceEmitWaitcnt[VM_VSRC])
2483 Wait.VmVsrc = 0;
2484 }
2485
2486 if (FlushFlags.FlushVmCnt) {
2487 if (ScoreBrackets.hasPendingEvent(LOAD_CNT))
2488 Wait.LoadCnt = 0;
2489 if (ScoreBrackets.hasPendingEvent(SAMPLE_CNT))
2490 Wait.SampleCnt = 0;
2491 if (ScoreBrackets.hasPendingEvent(BVH_CNT))
2492 Wait.BvhCnt = 0;
2493 }
2494
2495 if (FlushFlags.FlushDsCnt && ScoreBrackets.hasPendingEvent(DS_CNT))
2496 Wait.DsCnt = 0;
2497
2498 if (ForceEmitZeroLoadFlag && Wait.LoadCnt != ~0u)
2499 Wait.LoadCnt = 0;
2500
2501 return generateWaitcnt(Wait, MI.getIterator(), *MI.getParent(), ScoreBrackets,
2502 OldWaitcntInstr);
2503}
2504
2505bool SIInsertWaitcnts::generateWaitcnt(AMDGPU::Waitcnt Wait,
2506 MachineBasicBlock::instr_iterator It,
2507 MachineBasicBlock &Block,
2508 WaitcntBrackets &ScoreBrackets,
2509 MachineInstr *OldWaitcntInstr) {
2510 bool Modified = false;
2511
2512 if (OldWaitcntInstr)
2513 // Try to merge the required wait with preexisting waitcnt instructions.
2514 // Also erase redundant waitcnt.
2515 Modified =
2516 WCG->applyPreexistingWaitcnt(ScoreBrackets, *OldWaitcntInstr, Wait, It);
2517
2518 // ExpCnt can be merged into VINTERP.
2519 if (Wait.ExpCnt != ~0u && It != Block.instr_end() &&
2520 SIInstrInfo::isVINTERP(*It)) {
2521 MachineOperand *WaitExp =
2522 TII->getNamedOperand(*It, AMDGPU::OpName::waitexp);
2523 if (Wait.ExpCnt < WaitExp->getImm()) {
2524 WaitExp->setImm(Wait.ExpCnt);
2525 Modified = true;
2526 }
2527 // Apply ExpCnt before resetting it, so applyWaitcnt below sees all counts.
2528 ScoreBrackets.applyWaitcnt(EXP_CNT, Wait.ExpCnt);
2529 Wait.ExpCnt = ~0u;
2530
2531 LLVM_DEBUG(dbgs() << "generateWaitcnt\n"
2532 << "Update Instr: " << *It);
2533 }
2534
2535 if (WCG->createNewWaitcnt(Block, It, Wait, ScoreBrackets))
2536 Modified = true;
2537
2538 // Any counts that could have been applied to any existing waitcnt
2539 // instructions will have been done so, now deal with any remaining.
2540 ScoreBrackets.applyWaitcnt(Wait);
2541
2542 return Modified;
2543}
2544
2545std::optional<WaitEventType>
2546SIInsertWaitcnts::getExpertSchedulingEventType(const MachineInstr &Inst) const {
2547 if (TII->isVALU(Inst)) {
2548 // Core/Side-, DP-, XDL- and TRANS-MACC VALU instructions complete
2549 // out-of-order with respect to each other, so each of these classes
2550 // has its own event.
2551
2552 if (TII->isXDL(Inst))
2553 return VGPR_XDL_WRITE;
2554
2555 if (TII->isTRANS(Inst))
2556 return VGPR_TRANS_WRITE;
2557
2559 return VGPR_DPMACC_WRITE;
2560
2561 return VGPR_CSMACC_WRITE;
2562 }
2563
2564 // FLAT and LDS instructions may read their VGPR sources out-of-order
2565 // with respect to each other and all other VMEM instructions, so
2566 // each of these also has a separate event.
2567
2568 if (TII->isFLAT(Inst))
2569 return VGPR_FLAT_READ;
2570
2571 if (TII->isDS(Inst))
2572 return VGPR_LDS_READ;
2573
2574 if (TII->isVMEM(Inst) || TII->isVIMAGE(Inst) || TII->isVSAMPLE(Inst))
2575 return VGPR_VMEM_READ;
2576
2577 // Otherwise, no hazard.
2578
2579 return {};
2580}
2581
2582bool SIInsertWaitcnts::isVmemAccess(const MachineInstr &MI) const {
2583 return (TII->isFLAT(MI) && TII->mayAccessVMEMThroughFlat(MI)) ||
2584 (TII->isVMEM(MI) && !AMDGPU::getMUBUFIsBufferInv(MI.getOpcode()));
2585}
2586
2587// Return true if the next instruction is S_ENDPGM, following fallthrough
2588// blocks if necessary.
2589bool SIInsertWaitcnts::isNextENDPGM(MachineBasicBlock::instr_iterator It,
2590 MachineBasicBlock *Block) const {
2591 auto BlockEnd = Block->getParent()->end();
2592 auto BlockIter = Block->getIterator();
2593
2594 while (true) {
2595 if (It.isEnd()) {
2596 if (++BlockIter != BlockEnd) {
2597 It = BlockIter->instr_begin();
2598 continue;
2599 }
2600
2601 return false;
2602 }
2603
2604 if (!It->isMetaInstruction())
2605 break;
2606
2607 It++;
2608 }
2609
2610 assert(!It.isEnd());
2611
2612 return It->getOpcode() == AMDGPU::S_ENDPGM;
2613}
2614
2615// Add a wait after an instruction if architecture requirements mandate one.
2616bool SIInsertWaitcnts::insertForcedWaitAfter(MachineInstr &Inst,
2617 MachineBasicBlock &Block,
2618 WaitcntBrackets &ScoreBrackets) {
2619 AMDGPU::Waitcnt Wait;
2620 bool NeedsEndPGMCheck = false;
2621
2622 if (ST->isPreciseMemoryEnabled() && Inst.mayLoadOrStore())
2623 Wait = WCG->getAllZeroWaitcnt(Inst.mayStore() &&
2624 !SIInstrInfo::isAtomicRet(Inst));
2625
2626 if (TII->isAlwaysGDS(Inst.getOpcode())) {
2627 Wait.DsCnt = 0;
2628 NeedsEndPGMCheck = true;
2629 }
2630
2631 ScoreBrackets.simplifyWaitcnt(Wait);
2632
2633 auto SuccessorIt = std::next(Inst.getIterator());
2634 bool Result = generateWaitcnt(Wait, SuccessorIt, Block, ScoreBrackets,
2635 /*OldWaitcntInstr=*/nullptr);
2636
2637 if (Result && NeedsEndPGMCheck && isNextENDPGM(SuccessorIt, &Block)) {
2638 BuildMI(Block, SuccessorIt, Inst.getDebugLoc(), TII->get(AMDGPU::S_NOP))
2639 .addImm(0);
2640 }
2641
2642 return Result;
2643}
2644
2645void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
2646 WaitcntBrackets *ScoreBrackets) {
2647 // Now look at the instruction opcode. If it is a memory access
2648 // instruction, update the upper-bound of the appropriate counter's
2649 // bracket and the destination operand scores.
2650 // For architectures with X_CNT, mark the source address operands
2651 // with the appropriate counter values.
2652 // TODO: Use the (TSFlags & SIInstrFlags::DS_CNT) property everywhere.
2653
2654 bool IsVMEMAccess = false;
2655 bool IsSMEMAccess = false;
2656
2657 if (IsExpertMode) {
2658 if (const auto ET = getExpertSchedulingEventType(Inst))
2659 ScoreBrackets->updateByEvent(*ET, Inst);
2660 }
2661
2662 if (TII->isDS(Inst) && TII->usesLGKM_CNT(Inst)) {
2663 if (TII->isAlwaysGDS(Inst.getOpcode()) ||
2664 TII->hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
2665 ScoreBrackets->updateByEvent(GDS_ACCESS, Inst);
2666 ScoreBrackets->updateByEvent(GDS_GPR_LOCK, Inst);
2667 ScoreBrackets->setPendingGDS();
2668 } else {
2669 ScoreBrackets->updateByEvent(LDS_ACCESS, Inst);
2670 }
2671 } else if (TII->isFLAT(Inst)) {
2672 if (!Inst.mayLoadOrStore()) {
2673 ScoreBrackets->updateByEvent(getVmemWaitEventType(Inst), Inst);
2674 return;
2675 }
2676
2677 assert(Inst.mayLoadOrStore());
2678
2679 int FlatASCount = 0;
2680
2681 if (TII->mayAccessVMEMThroughFlat(Inst)) {
2682 ++FlatASCount;
2683 IsVMEMAccess = true;
2684 ScoreBrackets->updateByEvent(getVmemWaitEventType(Inst), Inst);
2685 }
2686
2687 if (TII->mayAccessLDSThroughFlat(Inst)) {
2688 ++FlatASCount;
2689 ScoreBrackets->updateByEvent(LDS_ACCESS, Inst);
2690 }
2691
2692 // Async/LDSDMA operations have FLAT encoding but do not actually use flat
2693 // pointers. They do have two operands that each access global and LDS, thus
2694 // making it appear at this point that they are using a flat pointer. Filter
2695 // them out, and for the rest, generate a dependency on flat pointers so
2696 // that both VM and LGKM counters are flushed.
2697 if (!SIInstrInfo::isLDSDMA(Inst) && FlatASCount > 1)
2698 ScoreBrackets->setPendingFlat();
2699 } else if (SIInstrInfo::isVMEM(Inst) &&
2700 !AMDGPU::getMUBUFIsBufferInv(Inst.getOpcode())) {
2701 IsVMEMAccess = true;
2702 ScoreBrackets->updateByEvent(getVmemWaitEventType(Inst), Inst);
2703
2704 if (ST->vmemWriteNeedsExpWaitcnt() &&
2705 (Inst.mayStore() || SIInstrInfo::isAtomicRet(Inst))) {
2706 ScoreBrackets->updateByEvent(VMW_GPR_LOCK, Inst);
2707 }
2708 } else if (TII->isSMRD(Inst)) {
2709 IsSMEMAccess = true;
2710 ScoreBrackets->updateByEvent(SMEM_ACCESS, Inst);
2711 } else if (Inst.isCall()) {
2712 // Act as a wait on everything
2713 ScoreBrackets->applyWaitcnt(WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/false));
2714 ScoreBrackets->setStateOnFunctionEntryOrReturn();
2715 } else if (SIInstrInfo::isLDSDIR(Inst)) {
2716 ScoreBrackets->updateByEvent(EXP_LDS_ACCESS, Inst);
2717 } else if (TII->isVINTERP(Inst)) {
2718 int64_t Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::waitexp)->getImm();
2719 ScoreBrackets->applyWaitcnt(EXP_CNT, Imm);
2720 } else if (SIInstrInfo::isEXP(Inst)) {
2721 unsigned Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm();
2722 if (Imm >= AMDGPU::Exp::ET_PARAM0 && Imm <= AMDGPU::Exp::ET_PARAM31)
2723 ScoreBrackets->updateByEvent(EXP_PARAM_ACCESS, Inst);
2724 else if (Imm >= AMDGPU::Exp::ET_POS0 && Imm <= AMDGPU::Exp::ET_POS_LAST)
2725 ScoreBrackets->updateByEvent(EXP_POS_ACCESS, Inst);
2726 else
2727 ScoreBrackets->updateByEvent(EXP_GPR_LOCK, Inst);
2728 } else if (SIInstrInfo::isSBarrierSCCWrite(Inst.getOpcode())) {
2729 ScoreBrackets->updateByEvent(SCC_WRITE, Inst);
2730 } else {
2731 switch (Inst.getOpcode()) {
2732 case AMDGPU::S_SENDMSG:
2733 case AMDGPU::S_SENDMSG_RTN_B32:
2734 case AMDGPU::S_SENDMSG_RTN_B64:
2735 case AMDGPU::S_SENDMSGHALT:
2736 ScoreBrackets->updateByEvent(SQ_MESSAGE, Inst);
2737 break;
2738 case AMDGPU::S_MEMTIME:
2739 case AMDGPU::S_MEMREALTIME:
2740 case AMDGPU::S_GET_BARRIER_STATE_M0:
2741 case AMDGPU::S_GET_BARRIER_STATE_IMM:
2742 ScoreBrackets->updateByEvent(SMEM_ACCESS, Inst);
2743 break;
2744 }
2745 }
2746
2747 if (!ST->hasWaitXCnt())
2748 return;
2749
2750 if (IsVMEMAccess)
2751 ScoreBrackets->updateByEvent(VMEM_GROUP, Inst);
2752
2753 if (IsSMEMAccess)
2754 ScoreBrackets->updateByEvent(SMEM_GROUP, Inst);
2755}
2756
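// Worked example for the merge logic below (hypothetical numbers): if this
// block's bracket for a counter is [LB=4, UB=6] (two ops in flight) and the
// other block's is [LB=10, UB=13] (three in flight), the merged UB becomes
// 4 + max(2, 3) = 7. mergeScore() rebases each per-register score by adding
// (NewUB - that side's old UB), so a score of 12 from the other block maps to
// 12 + (7 - 13) = 6 in the merged bracket, while any score at or below a
// side's old lower bound collapses to 0 (nothing left to wait for).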
2757bool WaitcntBrackets::mergeScore(const MergeInfo &M, unsigned &Score,
2758 unsigned OtherScore) {
2759 unsigned MyShifted = Score <= M.OldLB ? 0 : Score + M.MyShift;
2760 unsigned OtherShifted =
2761 OtherScore <= M.OtherLB ? 0 : OtherScore + M.OtherShift;
2762 Score = std::max(MyShifted, OtherShifted);
2763 return OtherShifted > MyShifted;
2764}
2765
2766/// Merge the pending events and associated score brackets of \p Other into
2767/// this bracket's status.
2768///
2769/// Returns whether the merge resulted in a change that requires tighter waits
2770/// (i.e. the merged brackets strictly dominate the original brackets).
2771bool WaitcntBrackets::merge(const WaitcntBrackets &Other) {
2772 bool StrictDom = false;
2773
2774 // Check if "other" has keys we don't have, and create default entries for
2775 // those. If they remain empty after merging, we will clean it up after.
2776 for (auto K : Other.VMem.keys())
2777 VMem.try_emplace(K);
2778 for (auto K : Other.SGPRs.keys())
2779 SGPRs.try_emplace(K);
2780
2781 for (auto T : inst_counter_types(Context->MaxCounter)) {
2782 // Merge event flags for this counter
2783 const unsigned *WaitEventMaskForInst = Context->WaitEventMaskForInst;
2784 const unsigned OldEvents = PendingEvents & WaitEventMaskForInst[T];
2785 const unsigned OtherEvents = Other.PendingEvents & WaitEventMaskForInst[T];
2786 if (OtherEvents & ~OldEvents)
2787 StrictDom = true;
2788 PendingEvents |= OtherEvents;
2789
2790 // Merge scores for this counter
2791 const unsigned MyPending = ScoreUBs[T] - ScoreLBs[T];
2792 const unsigned OtherPending = Other.ScoreUBs[T] - Other.ScoreLBs[T];
2793 const unsigned NewUB = ScoreLBs[T] + std::max(MyPending, OtherPending);
2794 if (NewUB < ScoreLBs[T])
2795 report_fatal_error("waitcnt score overflow");
2796
2797 MergeInfo M;
2798 M.OldLB = ScoreLBs[T];
2799 M.OtherLB = Other.ScoreLBs[T];
2800 M.MyShift = NewUB - ScoreUBs[T];
2801 M.OtherShift = NewUB - Other.ScoreUBs[T];
2802
2803 ScoreUBs[T] = NewUB;
2804
2805 StrictDom |= mergeScore(M, LastFlat[T], Other.LastFlat[T]);
2806
2807 if (T == DS_CNT)
2808 StrictDom |= mergeScore(M, LastGDS, Other.LastGDS);
2809
2810 if (T == KM_CNT) {
2811 StrictDom |= mergeScore(M, SCCScore, Other.SCCScore);
2812 if (Other.hasPendingEvent(SCC_WRITE)) {
2813 unsigned OldEventsHasSCCWrite = OldEvents & (1 << SCC_WRITE);
2814 if (!OldEventsHasSCCWrite) {
2815 PendingSCCWrite = Other.PendingSCCWrite;
2816 } else if (PendingSCCWrite != Other.PendingSCCWrite) {
2817 PendingSCCWrite = nullptr;
2818 }
2819 }
2820 }
2821
2822 for (auto &[RegID, Info] : VMem)
2823 StrictDom |= mergeScore(M, Info.Scores[T], Other.getVMemScore(RegID, T));
2824
2825 if (isSmemCounter(T)) {
2826 unsigned Idx = getSgprScoresIdx(T);
2827 for (auto &[RegID, Info] : SGPRs) {
2828 auto It = Other.SGPRs.find(RegID);
2829 unsigned OtherScore =
2830 (It != Other.SGPRs.end()) ? It->second.Scores[Idx] : 0;
2831 StrictDom |= mergeScore(M, Info.Scores[Idx], OtherScore);
2832 }
2833 }
2834 }
2835
2836 for (auto &[TID, Info] : VMem) {
2837 if (auto It = Other.VMem.find(TID); It != Other.VMem.end()) {
2838 unsigned char NewVmemTypes = Info.VMEMTypes | It->second.VMEMTypes;
2839 StrictDom |= NewVmemTypes != Info.VMEMTypes;
2840 Info.VMEMTypes = NewVmemTypes;
2841 }
2842 }
2843
2844 purgeEmptyTrackingData();
2845 return StrictDom;
2846}
2847
2848static bool isWaitInstr(MachineInstr &Inst) {
2849 unsigned Opcode = SIInstrInfo::getNonSoftWaitcntOpcode(Inst.getOpcode());
2850 return Opcode == AMDGPU::S_WAITCNT ||
2851 (Opcode == AMDGPU::S_WAITCNT_VSCNT && Inst.getOperand(0).isReg() &&
2852 Inst.getOperand(0).getReg() == AMDGPU::SGPR_NULL) ||
2853 Opcode == AMDGPU::S_WAIT_LOADCNT_DSCNT ||
2854 Opcode == AMDGPU::S_WAIT_STORECNT_DSCNT ||
2855 Opcode == AMDGPU::S_WAITCNT_lds_direct ||
2856 counterTypeForInstr(Opcode).has_value();
2857}
2858
2859void SIInsertWaitcnts::setSchedulingMode(MachineBasicBlock &MBB,
2860 MachineBasicBlock::iterator I,
2861 bool ExpertMode) const {
2862 const unsigned EncodedReg = AMDGPU::Hwreg::HwregEncoding::encode(
2864 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_SETREG_IMM32_B32))
2865 .addImm(ExpertMode ? 2 : 0)
2866 .addImm(EncodedReg);
2867}
2868
2869// Generate s_waitcnt instructions where needed.
2870bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
2871 MachineBasicBlock &Block,
2872 WaitcntBrackets &ScoreBrackets) {
2873 bool Modified = false;
2874
2875 LLVM_DEBUG({
2876 dbgs() << "*** Begin Block: ";
2877 Block.printName(dbgs());
2878 ScoreBrackets.dump();
2879 });
2880
2881 // Track the correctness of vccz through this basic block. There are two
2882 // reasons why it might be incorrect; see ST->hasReadVCCZBug() and
2883 // ST->partialVCCWritesUpdateVCCZ().
2884 bool VCCZCorrect = true;
2885 if (ST->hasReadVCCZBug()) {
2886 // vccz could be incorrect at a basic block boundary if a predecessor wrote
2887 // to vcc and then issued an smem load.
2888 VCCZCorrect = false;
2889 } else if (!ST->partialVCCWritesUpdateVCCZ()) {
2890 // vccz could be incorrect at a basic block boundary if a predecessor wrote
2891 // to vcc_lo or vcc_hi.
2892 VCCZCorrect = false;
2893 }
2894
2895 // Walk over the instructions.
2896 MachineInstr *OldWaitcntInstr = nullptr;
2897
2898 for (MachineBasicBlock::instr_iterator Iter = Block.instr_begin(),
2899 E = Block.instr_end();
2900 Iter != E;) {
2901 MachineInstr &Inst = *Iter;
2902 if (Inst.isMetaInstruction()) {
2903 ++Iter;
2904 continue;
2905 }
2906
2907 // Track pre-existing waitcnts that were added in earlier iterations or by
2908 // the memory legalizer.
2909 if (isWaitInstr(Inst) ||
2910 (IsExpertMode && Inst.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR)) {
2911 if (!OldWaitcntInstr)
2912 OldWaitcntInstr = &Inst;
2913 ++Iter;
2914 continue;
2915 }
2916
2917 PreheaderFlushFlags FlushFlags;
2918 if (Block.getFirstTerminator() == Inst)
2919 FlushFlags = isPreheaderToFlush(Block, ScoreBrackets);
2920
2921 // Generate an s_waitcnt instruction to be placed before Inst, if needed.
2922 Modified |= generateWaitcntInstBefore(Inst, ScoreBrackets, OldWaitcntInstr,
2923 FlushFlags);
2924 OldWaitcntInstr = nullptr;
2925
2926 // Restore vccz if it's not known to be correct already.
2927 bool RestoreVCCZ = !VCCZCorrect && SIInstrInfo::isCBranchVCCZRead(Inst);
2928
2929 // Don't examine operands unless we need to track vccz correctness.
2930 if (ST->hasReadVCCZBug() || !ST->partialVCCWritesUpdateVCCZ()) {
2931 if (Inst.definesRegister(AMDGPU::VCC_LO, /*TRI=*/nullptr) ||
2932 Inst.definesRegister(AMDGPU::VCC_HI, /*TRI=*/nullptr)) {
2933 // Up to gfx9, writes to vcc_lo and vcc_hi don't update vccz.
2934 if (!ST->partialVCCWritesUpdateVCCZ())
2935 VCCZCorrect = false;
2936 } else if (Inst.definesRegister(AMDGPU::VCC, /*TRI=*/nullptr)) {
2937 // There is a hardware bug on CI/SI where SMRD instruction may corrupt
2938 // vccz bit, so when we detect that an instruction may read from a
2939 // corrupt vccz bit, we need to:
2940 // 1. Insert s_waitcnt lgkm(0) to wait for all outstanding SMRD
2941 // operations to complete.
2942 // 2. Restore the correct value of vccz by writing the current value
2943 // of vcc back to vcc.
2944 if (ST->hasReadVCCZBug() &&
2945 ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
2946 // Writes to vcc while there's an outstanding smem read may get
2947 // clobbered as soon as any read completes.
2948 VCCZCorrect = false;
2949 } else {
2950 // Writes to vcc will fix any incorrect value in vccz.
2951 VCCZCorrect = true;
2952 }
2953 }
2954 }
2955
2956 if (TII->isSMRD(Inst)) {
2957 for (const MachineMemOperand *Memop : Inst.memoperands()) {
2958 // No need to handle invariant loads when avoiding WAR conflicts, as
2959 // there cannot be a vector store to the same memory location.
2960 if (!Memop->isInvariant()) {
2961 const Value *Ptr = Memop->getValue();
2962 SLoadAddresses.insert(std::pair(Ptr, Inst.getParent()));
2963 }
2964 }
2965 if (ST->hasReadVCCZBug()) {
2966 // This smem read could complete and clobber vccz at any time.
2967 VCCZCorrect = false;
2968 }
2969 }
2970
2971 updateEventWaitcntAfter(Inst, &ScoreBrackets);
2972
2973 Modified |= insertForcedWaitAfter(Inst, Block, ScoreBrackets);
2974
2975 LLVM_DEBUG({
2976 Inst.print(dbgs());
2977 ScoreBrackets.dump();
2978 });
2979
2980 // TODO: Remove this work-around after fixing the scheduler and enable the
2981 // assert above.
2982 if (RestoreVCCZ) {
2983 // Restore the vccz bit. Any time a value is written to vcc, the vcc
2984 // bit is updated, so we can restore the bit by reading the value of
2985 // vcc and then writing it back to the register.
2986 BuildMI(Block, Inst, Inst.getDebugLoc(),
2987 TII->get(ST->isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64),
2988 TRI->getVCC())
2989 .addReg(TRI->getVCC());
2990 VCCZCorrect = true;
2991 Modified = true;
2992 }
2993
2994 ++Iter;
2995 }
2996
2997 // Flush counters at the end of the block if needed (for preheaders with no
2998 // terminator).
2999 AMDGPU::Waitcnt Wait;
3000 if (Block.getFirstTerminator() == Block.end()) {
3001 PreheaderFlushFlags FlushFlags = isPreheaderToFlush(Block, ScoreBrackets);
3002 if (FlushFlags.FlushVmCnt) {
3003 if (ScoreBrackets.hasPendingEvent(LOAD_CNT))
3004 Wait.LoadCnt = 0;
3005 if (ScoreBrackets.hasPendingEvent(SAMPLE_CNT))
3006 Wait.SampleCnt = 0;
3007 if (ScoreBrackets.hasPendingEvent(BVH_CNT))
3008 Wait.BvhCnt = 0;
3009 }
3010 if (FlushFlags.FlushDsCnt && ScoreBrackets.hasPendingEvent(DS_CNT))
3011 Wait.DsCnt = 0;
3012 }
3013
3014 // Combine or remove any redundant waitcnts at the end of the block.
3015 Modified |= generateWaitcnt(Wait, Block.instr_end(), Block, ScoreBrackets,
3016 OldWaitcntInstr);
3017
3018 LLVM_DEBUG({
3019 dbgs() << "*** End Block: ";
3020 Block.printName(dbgs());
3021 ScoreBrackets.dump();
3022 });
3023
3024 return Modified;
3025}
3026
3027// Return flags indicating which counters should be flushed in the preheader.
3028PreheaderFlushFlags
3029SIInsertWaitcnts::isPreheaderToFlush(MachineBasicBlock &MBB,
3030 const WaitcntBrackets &ScoreBrackets) {
3031 auto [Iterator, IsInserted] =
3032 PreheadersToFlush.try_emplace(&MBB, PreheaderFlushFlags());
3033 if (!IsInserted)
3034 return Iterator->second;
3035
3036 MachineBasicBlock *Succ = MBB.getSingleSuccessor();
3037 if (!Succ)
3038 return PreheaderFlushFlags();
3039
3040 MachineLoop *Loop = MLI->getLoopFor(Succ);
3041 if (!Loop)
3042 return PreheaderFlushFlags();
3043
3044 if (Loop->getLoopPreheader() == &MBB) {
3045 Iterator->second = getPreheaderFlushFlags(Loop, ScoreBrackets);
3046 return Iterator->second;
3047 }
3048
3049 return PreheaderFlushFlags();
3050}
3051
3052bool SIInsertWaitcnts::isVMEMOrFlatVMEM(const MachineInstr &MI) const {
3053 if (SIInstrInfo::isFLAT(MI))
3054 return TII->mayAccessVMEMThroughFlat(MI);
3055 return SIInstrInfo::isVMEM(MI);
3056}
3057
3058bool SIInsertWaitcnts::isDSRead(const MachineInstr &MI) const {
3059 return SIInstrInfo::isDS(MI) && MI.mayLoad() && !MI.mayStore();
3060}
3061
3062// Check if instruction is a store to LDS that is counted via DSCNT
3063// (where that counter exists).
3064bool SIInsertWaitcnts::mayStoreIncrementingDSCNT(const MachineInstr &MI) const {
3065 if (!MI.mayStore())
3066 return false;
3067 if (SIInstrInfo::isDS(MI))
3068 return true;
3069 return false;
3070}
3071
3072// Return flags indicating which counters should be flushed in the preheader of
3073// the given loop. We currently decide to flush in a few situations:
3074// For VMEM (FlushVmCnt):
3075// 1. The loop contains vmem store(s), no vmem load and at least one use of a
3076// vgpr containing a value that is loaded outside of the loop. (Only on
3077// targets with no vscnt counter).
3078// 2. The loop contains vmem load(s), but the loaded values are not used in the
3079// loop, and at least one use of a vgpr containing a value that is loaded
3080// outside of the loop.
3081// For DS (FlushDsCnt, GFX12+ only):
3082// 3. The loop contains no DS reads, and at least one use of a vgpr containing
3083// a value that is DS loaded outside of the loop.
3084// 4. The loop contains DS read(s), loaded values are not used in the same
3085// iteration but in the next iteration (prefetch pattern), and at least one
3086// use of a vgpr containing a value that is DS loaded outside of the loop.
3087// Flushing in preheader reduces wait overhead if the wait requirement in
3088// iteration 1 would otherwise be more strict.
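// For example (hypothetical): if the loop body reads a vgpr that was
// global_load'ed in the preheader while the loads it issues itself are only
// consumed after the loop, flushing vmcnt once in the preheader is cheaper
// than making the first iteration wait inside the loop for the out-of-loop
// load.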
3089PreheaderFlushFlags
3090SIInsertWaitcnts::getPreheaderFlushFlags(MachineLoop *ML,
3091 const WaitcntBrackets &Brackets) {
3092 PreheaderFlushFlags Flags;
3093 bool HasVMemLoad = false;
3094 bool HasVMemStore = false;
3095 bool SeenDSStoreInLoop = false;
3096 bool UsesVgprLoadedOutsideVMEM = false;
3097 bool UsesVgprLoadedOutsideDS = false;
3098 bool VMemInvalidated = false;
3099 // DS optimization only applies to GFX12+ where DS_CNT is separate.
3100 bool DSInvalidated = !ST->hasExtendedWaitCounts();
3101 DenseSet<MCRegUnit> VgprUse;
3102 DenseSet<MCRegUnit> VgprDefVMEM;
3103 DenseSet<MCRegUnit> VgprDefDS;
3104
3105 for (MachineBasicBlock *MBB : ML->blocks()) {
3106 bool SeenDSStoreInCurrMBB = false;
3107 for (MachineInstr &MI : *MBB) {
3108 if (isVMEMOrFlatVMEM(MI)) {
3109 HasVMemLoad |= MI.mayLoad();
3110 HasVMemStore |= MI.mayStore();
3111 }
3112 if (mayStoreIncrementingDSCNT(MI))
3113 SeenDSStoreInCurrMBB = true;
3114 // Stores postdominated by a barrier will have a wait at the barrier
3115 // and thus need not be waited on at the loop header. A barrier found
3116 // later in the same MBB during the in-order traversal is used here as a
3117 // cheaper alternative to a postdomination check.
3118 if (MI.getOpcode() == AMDGPU::S_BARRIER)
3119 SeenDSStoreInCurrMBB = false;
3120 for (const MachineOperand &Op : MI.all_uses()) {
3121 if (Op.isDebug() || !TRI->isVectorRegister(*MRI, Op.getReg()))
3122 continue;
3123 // Vgpr use
3124 for (MCRegUnit RU : TRI->regunits(Op.getReg().asMCReg())) {
3125 // If we find a register that is loaded inside the loop, 1. and 2.
3126 // are invalidated.
3127 if (VgprDefVMEM.contains(RU))
3128 VMemInvalidated = true;
3129
3130 // Check for DS loads used inside the loop
3131 if (VgprDefDS.contains(RU))
3132 DSInvalidated = true;
3133
3134 // Early exit if both optimizations are invalidated
3135 if (VMemInvalidated && DSInvalidated)
3136 return Flags;
3137
3138 VgprUse.insert(RU);
3139 // Check if this register has a pending VMEM load from outside the
3140 // loop (value loaded outside and used inside).
3141 VMEMID ID = toVMEMID(RU);
3142 bool HasPendingVMEM =
3143 Brackets.getVMemScore(ID, LOAD_CNT) >
3144 Brackets.getScoreLB(LOAD_CNT) ||
3145 Brackets.getVMemScore(ID, SAMPLE_CNT) >
3146 Brackets.getScoreLB(SAMPLE_CNT) ||
3147 Brackets.getVMemScore(ID, BVH_CNT) > Brackets.getScoreLB(BVH_CNT);
3148 if (HasPendingVMEM)
3149 UsesVgprLoadedOutsideVMEM = true;
3150 // Check if loaded outside the loop via DS (not VMEM/FLAT).
3151 // Only consider it a DS load if there's no pending VMEM load for
3152 // this register, since FLAT can set both counters.
3153 if (!HasPendingVMEM &&
3154 Brackets.getVMemScore(ID, DS_CNT) > Brackets.getScoreLB(DS_CNT))
3155 UsesVgprLoadedOutsideDS = true;
3156 }
3157 }
3158
3159 // VMem load vgpr def
3160 if (isVMEMOrFlatVMEM(MI) && MI.mayLoad()) {
3161 for (const MachineOperand &Op : MI.all_defs()) {
3162 for (MCRegUnit RU : TRI->regunits(Op.getReg().asMCReg())) {
3163 // If we find a register that is loaded inside the loop, 1. and 2.
3164 // are invalidated.
3165 if (VgprUse.contains(RU))
3166 VMemInvalidated = true;
3167 VgprDefVMEM.insert(RU);
3168 }
3169 }
3170 // Early exit if both optimizations are invalidated
3171 if (VMemInvalidated && DSInvalidated)
3172 return Flags;
3173 }
3174
3175 // DS read vgpr def
3176 // Note: Unlike VMEM, we DON'T invalidate when VgprUse.contains(RU).
3177 // If USE comes before DEF, it's the prefetch pattern (use value from
3178 // previous iteration, load for next iteration). We should still flush
3179 // in preheader so iteration 1 doesn't need to wait inside the loop.
3180 // Only invalidate when DEF comes before USE (same-iteration consumption,
3181 // checked above when processing uses).
3182 if (isDSRead(MI)) {
3183 for (const MachineOperand &Op : MI.all_defs()) {
3184 for (MCRegUnit RU : TRI->regunits(Op.getReg().asMCReg())) {
3185 VgprDefDS.insert(RU);
3186 }
3187 }
3188 }
3189 }
3190 // Accumulate unprotected DS stores from this MBB
3191 SeenDSStoreInLoop |= SeenDSStoreInCurrMBB;
3192 }
3193
3194 // VMEM flush decision
3195 if (!VMemInvalidated && UsesVgprLoadedOutsideVMEM &&
3196 ((!ST->hasVscnt() && HasVMemStore && !HasVMemLoad) ||
3197 (HasVMemLoad && ST->hasVmemWriteVgprInOrder())))
3198 Flags.FlushVmCnt = true;
3199
3200 // DS flush decision: flush if the loop uses DS-loaded values from outside,
3201 // has no unprotected DS stores, and either has no DS reads or only DS reads
3202 // whose results are not used in the same iteration of the loop.
3203 // DSInvalidated is pre-set to true on non-GFX12+ targets where DS_CNT
3204 // is LGKM_CNT, which also tracks FLAT/SMEM.
3205 if (!DSInvalidated && !SeenDSStoreInLoop && UsesVgprLoadedOutsideDS)
3206 Flags.FlushDsCnt = true;
3207
3208 return Flags;
3209}
3210
3211bool SIInsertWaitcntsLegacy::runOnMachineFunction(MachineFunction &MF) {
3212 auto *MLI = &getAnalysis<MachineLoopInfoWrapperPass>().getLI();
3213 auto *PDT =
3214 &getAnalysis<MachinePostDominatorTreeWrapperPass>().getPostDomTree();
3215 AliasAnalysis *AA = nullptr;
3216 if (auto *AAR = getAnalysisIfAvailable<AAResultsWrapperPass>())
3217 AA = &AAR->getAAResults();
3218
3219 return SIInsertWaitcnts(MLI, PDT, AA).run(MF);
3220}
3221
3222 PreservedAnalyses
3223 SIInsertWaitcntsPass::run(MachineFunction &MF,
3224 MachineFunctionAnalysisManager &MFAM) {
3225 auto *MLI = &MFAM.getResult<MachineLoopAnalysis>(MF);
3226 auto *PDT = &MFAM.getResult<MachinePostDominatorTreeAnalysis>(MF);
3227 auto *AA = MFAM.getResult<FunctionAnalysisManagerMachineFunctionProxy>(MF)
3228 .getManager()
3229 .getCachedResult<AAManager>(MF.getFunction());
3230
3231 if (!SIInsertWaitcnts(MLI, PDT, AA).run(MF))
3232 return PreservedAnalyses::all();
3233
3234 return getMachineFunctionPassPreservedAnalyses()
3235 .preserveSet<CFGAnalyses>()
3236 .preserve<AAManager>();
3237}
3238
3239bool SIInsertWaitcnts::run(MachineFunction &MF) {
3240 ST = &MF.getSubtarget<GCNSubtarget>();
3241 TII = ST->getInstrInfo();
3242 TRI = &TII->getRegisterInfo();
3243 MRI = &MF.getRegInfo();
3244 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
3245
3246 AMDGPU::IsaVersion IV = AMDGPU::getIsaVersion(ST->getCPU());
3247
3248 // Initialize hardware limits first, as they're needed by the generators.
3249 Limits = AMDGPU::HardwareLimits(IV);
3250
3251 if (ST->hasExtendedWaitCounts()) {
3252 IsExpertMode = ST->hasExpertSchedulingMode() &&
3253 (ExpertSchedulingModeFlag.getNumOccurrences()
3254 ? ExpertSchedulingModeFlag
3255 : MF.getFunction()
3256 .getFnAttribute("amdgpu-expert-scheduling-mode")
3257 .getValueAsBool());
3258 MaxCounter = IsExpertMode ? NUM_EXPERT_INST_CNTS : NUM_EXTENDED_INST_CNTS;
3259 WCGGFX12Plus =
3260 WaitcntGeneratorGFX12Plus(MF, MaxCounter, &Limits, IsExpertMode);
3261 WCG = &WCGGFX12Plus;
3262 } else {
3263 MaxCounter = NUM_NORMAL_INST_CNTS;
3264 WCGPreGFX12 = WaitcntGeneratorPreGFX12(MF, NUM_NORMAL_INST_CNTS, &Limits);
3265 WCG = &WCGPreGFX12;
3266 }
3267
3268 for (auto T : inst_counter_types())
3269 ForceEmitWaitcnt[T] = false;
3270
3271 WaitEventMaskForInst = WCG->getWaitEventMask();
3272
3273 SmemAccessCounter = eventCounter(WaitEventMaskForInst, SMEM_ACCESS);
3274
3275 BlockInfos.clear();
3276 bool Modified = false;
3277
3278 MachineBasicBlock &EntryBB = MF.front();
3279
3280 if (!MFI->isEntryFunction()) {
3281 // Wait for any outstanding memory operations that the input registers may
3282 // depend on. We can't track them and it's better to do the wait after the
3283 // costly call sequence.
3284
3285 // TODO: Could insert earlier and schedule more liberally with operations
3286 // that only use caller preserved registers.
3287 MachineBasicBlock::iterator I = EntryBB.begin();
3288 while (I != EntryBB.end() && I->isMetaInstruction())
3289 ++I;
3290
3291 if (ST->hasExtendedWaitCounts()) {
3292 BuildMI(EntryBB, I, DebugLoc(), TII->get(AMDGPU::S_WAIT_LOADCNT_DSCNT))
3293 .addImm(0);
3294 for (auto CT : inst_counter_types(NUM_EXTENDED_INST_CNTS)) {
3295 if (CT == LOAD_CNT || CT == DS_CNT || CT == STORE_CNT || CT == X_CNT)
3296 continue;
3297
3298 if (!ST->hasImageInsts() &&
3299 (CT == EXP_CNT || CT == SAMPLE_CNT || CT == BVH_CNT))
3300 continue;
3301
3302 BuildMI(EntryBB, I, DebugLoc(),
3303 TII->get(instrsForExtendedCounterTypes[CT]))
3304 .addImm(0);
3305 }
3306 if (IsExpertMode) {
3307 unsigned Enc = AMDGPU::DepCtr::encodeFieldVaVdst(0, *ST);
3308 Enc = AMDGPU::DepCtr::encodeFieldVmVsrc(Enc, 0, *ST);
3309 BuildMI(EntryBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT_DEPCTR))
3310 .addImm(Enc);
3311 }
3312 } else {
3313 BuildMI(EntryBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT)).addImm(0);
3314 }
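// As a rough sketch (assuming a GFX12-class target that has image
// instructions), the code above leaves a non-kernel function entry starting
// with something like:
//
//   s_wait_loadcnt_dscnt 0x0
//   s_wait_expcnt 0x0
//   s_wait_samplecnt 0x0
//   s_wait_bvhcnt 0x0
//   s_wait_kmcnt 0x0
//
// while pre-GFX12 targets get the single combined "s_waitcnt 0" instead.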
3315
3316 auto NonKernelInitialState = std::make_unique<WaitcntBrackets>(this);
3317 NonKernelInitialState->setStateOnFunctionEntryOrReturn();
3318 BlockInfos[&EntryBB].Incoming = std::move(NonKernelInitialState);
3319
3320 Modified = true;
3321 }
3322
3323 // Keep iterating over the blocks in reverse post order, inserting and
3324 // updating s_waitcnt where needed, until a fix point is reached.
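// Convergence note: merge() reports whether a successor's incoming state
// actually changed, and a block is only re-marked Dirty when that happens,
// so the Repeat loop below stops as soon as every block's incoming bracket
// state has stabilized at a fixed point.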
3325 for (auto *MBB : ReversePostOrderTraversal<MachineFunction *>(&MF))
3326 BlockInfos.try_emplace(MBB);
3327
3328 std::unique_ptr<WaitcntBrackets> Brackets;
3329 bool Repeat;
3330 do {
3331 Repeat = false;
3332
3333 for (auto BII = BlockInfos.begin(), BIE = BlockInfos.end(); BII != BIE;
3334 ++BII) {
3335 MachineBasicBlock *MBB = BII->first;
3336 BlockInfo &BI = BII->second;
3337 if (!BI.Dirty)
3338 continue;
3339
3340 if (BI.Incoming) {
3341 if (!Brackets)
3342 Brackets = std::make_unique<WaitcntBrackets>(*BI.Incoming);
3343 else
3344 *Brackets = *BI.Incoming;
3345 } else {
3346 if (!Brackets)
3347 Brackets = std::make_unique<WaitcntBrackets>(this);
3348 else
3349 *Brackets = WaitcntBrackets(this);
3350 }
3351
3352 Modified |= insertWaitcntInBlock(MF, *MBB, *Brackets);
3353 BI.Dirty = false;
3354
3355 if (Brackets->hasPendingEvent()) {
3356 BlockInfo *MoveBracketsToSucc = nullptr;
3357 for (MachineBasicBlock *Succ : MBB->successors()) {
3358 auto *SuccBII = BlockInfos.find(Succ);
3359 BlockInfo &SuccBI = SuccBII->second;
3360 if (!SuccBI.Incoming) {
3361 SuccBI.Dirty = true;
3362 if (SuccBII <= BII) {
3363 LLVM_DEBUG(dbgs() << "repeat on backedge\n");
3364 Repeat = true;
3365 }
3366 if (!MoveBracketsToSucc) {
3367 MoveBracketsToSucc = &SuccBI;
3368 } else {
3369 SuccBI.Incoming = std::make_unique<WaitcntBrackets>(*Brackets);
3370 }
3371 } else if (SuccBI.Incoming->merge(*Brackets)) {
3372 SuccBI.Dirty = true;
3373 if (SuccBII <= BII) {
3374 LLVM_DEBUG(dbgs() << "repeat on backedge\n");
3375 Repeat = true;
3376 }
3377 }
3378 }
3379 if (MoveBracketsToSucc)
3380 MoveBracketsToSucc->Incoming = std::move(Brackets);
3381 }
3382 }
3383 } while (Repeat);
3384
3385 if (ST->hasScalarStores()) {
3386 SmallVector<MachineBasicBlock *, 4> EndPgmBlocks;
3387 bool HaveScalarStores = false;
3388
3389 for (MachineBasicBlock &MBB : MF) {
3390 for (MachineInstr &MI : MBB) {
3391 if (!HaveScalarStores && TII->isScalarStore(MI))
3392 HaveScalarStores = true;
3393
3394 if (MI.getOpcode() == AMDGPU::S_ENDPGM ||
3395 MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG)
3396 EndPgmBlocks.push_back(&MBB);
3397 }
3398 }
3399
3400 if (HaveScalarStores) {
3401 // If scalar writes are used, the cache must be flushed or else the next
3402 // wave to reuse the same scratch memory can be clobbered.
3403 //
3404 // Insert s_dcache_wb at wave termination points if there were any scalar
3405 // stores, and only if the cache hasn't already been flushed. This could
3406 // be improved by looking across blocks for flushes in blocks that
3407 // postdominate the stores, but an explicitly requested flush is probably
3408 // very rare.
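// For illustration (operands are made up; the mnemonics match the ones used
// below), a wave-terminating block such as
//
//   s_store_dword s1, s[2:3], 0x0
//   s_endpgm
//
// is rewritten to
//
//   s_store_dword s1, s[2:3], 0x0
//   s_dcache_wb
//   s_endpgm
//
// unless an s_dcache_wb already appears after the last scalar store.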
3409 for (MachineBasicBlock *MBB : EndPgmBlocks) {
3410 bool SeenDCacheWB = false;
3411
3412 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
3413 I != E; ++I) {
3414 if (I->getOpcode() == AMDGPU::S_DCACHE_WB)
3415 SeenDCacheWB = true;
3416 else if (TII->isScalarStore(*I))
3417 SeenDCacheWB = false;
3418
3419 // FIXME: It would be better to insert this before a waitcnt if any.
3420 if ((I->getOpcode() == AMDGPU::S_ENDPGM ||
3421 I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) &&
3422 !SeenDCacheWB) {
3423 Modified = true;
3424 BuildMI(*MBB, I, I->getDebugLoc(), TII->get(AMDGPU::S_DCACHE_WB));
3425 }
3426 }
3427 }
3428 }
3429 }
3430
3431 if (IsExpertMode) {
3432 // Enable expert scheduling on function entry. To satisfy ABI requirements
3433 // and to allow calls between functions with different expert scheduling
3434 // settings, disable it around calls and before returns.
3435
3436 MachineBasicBlock::iterator I = EntryBB.begin();
3437 while (I != EntryBB.end() && I->isMetaInstruction())
3438 ++I;
3439 setSchedulingMode(EntryBB, I, true);
3440
3441 for (MachineInstr *MI : CallInsts) {
3442 MachineBasicBlock &MBB = *MI->getParent();
3443 setSchedulingMode(MBB, MI, false);
3444 setSchedulingMode(MBB, std::next(MI->getIterator()), true);
3445 }
3446
3447 for (MachineInstr *MI : ReturnInsts)
3448 setSchedulingMode(*MI->getParent(), MI, false);
3449
3450 Modified = true;
3451 }
3452
3453 // Deallocate the VGPRs before previously identified S_ENDPGM instructions.
3454 // This is done in different ways depending on how the VGPRs were allocated
3455 // (i.e. whether we're in dynamic VGPR mode or not).
3456 // Skip deallocation if the kernel is waveslot limited rather than VGPR limited. A short
3457 // waveslot-limited kernel runs slower with the deallocation.
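// As a sketch of the two paths below (exact operand spelling depends on the
// assembler), in dynamic VGPR mode each identified S_ENDPGM is preceded by
//
//   s_alloc_vgpr 0
//
// while otherwise, when deallocation is worthwhile, it becomes
//
//   s_nop 0                                ; only on targets that require it
//   s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
//   s_endpgm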
3458 if (MFI->isDynamicVGPREnabled()) {
3459 for (MachineInstr *MI : ReleaseVGPRInsts) {
3460 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
3461 TII->get(AMDGPU::S_ALLOC_VGPR))
3462 .addImm(0);
3463 Modified = true;
3464 }
3465 } else {
3466 if (!ReleaseVGPRInsts.empty() &&
3467 (MF.getFrameInfo().hasCalls() ||
3468 ST->getOccupancyWithNumVGPRs(
3469 TRI->getNumUsedPhysRegs(*MRI, AMDGPU::VGPR_32RegClass),
3470 /*IsDynamicVGPR=*/false) <
3471 AMDGPU::IsaInfo::getMaxWavesPerEU(ST))) {
3472 for (MachineInstr *MI : ReleaseVGPRInsts) {
3473 if (ST->requiresNopBeforeDeallocVGPRs()) {
3474 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
3475 TII->get(AMDGPU::S_NOP))
3476 .addImm(0);
3477 }
3478 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
3479 TII->get(AMDGPU::S_SENDMSG))
3480 .addImm(AMDGPU::SendMsg::ID_DEALLOC_VGPRS_GFX11Plus);
3481 Modified = true;
3482 }
3483 }
3484 }
3485
3486 CallInsts.clear();
3487 ReturnInsts.clear();
3488 ReleaseVGPRInsts.clear();
3489 PreheadersToFlush.clear();
3490 SLoadAddresses.clear();
3491
3492 return Modified;
3493}