LLVM 23.0.0git
AMDGPULowerVGPREncoding.cpp
Go to the documentation of this file.
1//===- AMDGPULowerVGPREncoding.cpp - lower VGPRs above v255 ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// Lower VGPRs above first 256 on gfx1250.
11///
12/// The pass scans used VGPRs and inserts S_SET_VGPR_MSB instructions to switch
13/// VGPR addressing mode. The mode change is effective until the next change.
14/// This instruction provides high bits of a VGPR address for four of the
15/// operands: vdst, src0, src1, and src2, or other 4 operands depending on the
16/// instruction encoding. If bits are set they are added as MSB to the
17/// corresponding operand VGPR number.
18///
19/// There is no need to replace actual register operands because encoding of the
20/// high and low VGPRs is the same. I.e. v0 has the encoding 0x100, so does
21/// v256. v1 has the encoding 0x101 and v257 has the same encoding. So high
22/// VGPRs will survive until actual encoding and will result in a same actual
23/// bit encoding.
24///
25/// As a result the pass only inserts S_SET_VGPR_MSB to provide an actual offset
26/// to a VGPR address of the subsequent instructions. The InstPrinter will take
27/// care of printing a low VGPR instead of a high one. In principle it would
28/// be viable to print actual high VGPR numbers, but that would disagree
29/// with a disasm printing and create a situation where asm text is not
30/// deterministic.
31///
32/// This pass creates a convention where non-fall through basic blocks shall
33/// start with all 4 MSBs zero. Otherwise a disassembly would not be readable.
34/// An optimization here is possible but deemed not desirable because of the
35/// readability concerns.
36///
37/// Consequently, the ABI is set to expect all 4 MSBs to be zero on entry.
38/// The pass must run very late in the pipeline to make sure no changes to VGPR
39/// operands will be made after it.
40//
41//===----------------------------------------------------------------------===//
42
44#include "AMDGPU.h"
45#include "GCNSubtarget.h"
46#include "SIDefines.h"
47#include "SIInstrInfo.h"
48#include "llvm/ADT/bit.h"
49#include "llvm/Support/Debug.h"
51
52using namespace llvm;
53
54#define DEBUG_TYPE "amdgpu-lower-vgpr-encoding"
55
56namespace {
57
58class AMDGPULowerVGPREncoding {
 // Geometry of the S_SET_VGPR_MSB immediate: four 2-bit fields, one per
 // covered operand, packed into the low ModeWidth bits.
 static constexpr unsigned OpNum = 4;
 static constexpr unsigned BitsPerField = 2;
 static constexpr unsigned NumFields = 4;
 static constexpr unsigned ModeWidth = NumFields * BitsPerField;
 static constexpr unsigned ModeMask = (1 << ModeWidth) - 1;
 // NOTE(review): the initializer is not visible in this excerpt; presumably
 // derived from AMDGPU::Hwreg::VGPR_MSB_MASK (used below) -- confirm against
 // the full source.
 static constexpr unsigned VGPRMSBShift =
66
 struct OpMode {
 // No MSBs set means they are not required to be of a particular value.
 std::optional<unsigned> MSBits;

 // Merge \p New into this operand mode. Returns true if the tracked bits
 // changed; sets \p Rewritten when a previously *known* value is being
 // overwritten (a real mode switch, not just pinning an unknown field).
 bool update(const OpMode &New, bool &Rewritten) {
 bool Updated = false;
 if (New.MSBits) {
 if (*New.MSBits != MSBits.value_or(0)) {
 Updated = true;
 Rewritten |= MSBits.has_value();
 }
 MSBits = New.MSBits;
 }
 return Updated;
 }
 };

 struct ModeTy {
 OpMode Ops[OpNum];

 // Field-wise merge of all OpNum operand modes; see OpMode::update.
 bool update(const ModeTy &New, bool &Rewritten) {
 bool Updated = false;
 for (unsigned I : seq(OpNum))
 Updated |= Ops[I].update(New.Ops[I], Rewritten);
 return Updated;
 }

 // Pack the mode into the low 8 bits; unknown fields encode as 0.
 unsigned encode() const {
 // Layout: [src0 msb, src1 msb, src2 msb, dst msb].
 unsigned V = 0;
 for (const auto &[I, Op] : enumerate(Ops))
 V |= Op.MSBits.value_or(0) << (I * 2);
 return V;
 }

 // Debug-print helper; '?' marks a field with no required value.
 void print(raw_ostream &OS) const {
 static const char *FieldNames[] = {"src0", "src1", "src2", "dst"};
 OS << '{';
 for (const auto &[I, Op] : enumerate(Ops)) {
 if (I)
 OS << ", ";
 OS << FieldNames[I] << '=';
 if (Op.MSBits)
 OS << *Op.MSBits;
 else
 OS << '?';
 }
 OS << '}';
 }

 // Check if this mode is compatible with required \p NewMode without
 // modification. Unknown fields on either side are treated as 0.
 bool isCompatible(const ModeTy NewMode) const {
 for (unsigned I : seq(OpNum)) {
 if (!NewMode.Ops[I].MSBits.has_value())
 continue;
 if (Ops[I].MSBits.value_or(0) != NewMode.Ops[I].MSBits.value_or(0))
 return false;
 }
 return true;
 }
 };

public:
 bool run(MachineFunction &MF);

private:
 const SIInstrInfo *TII;
 const SIRegisterInfo *TRI;

 // Current basic block.
 // NOTE(review): the member declaration (presumably MachineBasicBlock *MBB,
 // given uses below) is missing from this excerpt.
139
 /// Most recent s_set_* instruction.
 MachineInstr *MostRecentModeSet;

 /// Current mode bits.
 ModeTy CurrentMode;

 /// Number of current hard clause instructions.
 unsigned ClauseLen;

 /// Number of hard clause instructions remaining.
 unsigned ClauseRemaining;

 /// Clause group breaks.
 unsigned ClauseBreaks;

 /// Last hard clause instruction.
 // NOTE(review): declaration (presumably MachineInstr *Clause) is missing
 // from this excerpt.
157
 // Remember whether XCNT is known to be zero because of an S_SET_VGPR_MSB
 // instruction that we inserted, which implicitly waits for XCNT==0.
 bool XCntIsZero;

 /// Insert mode change before \p I. \returns true if mode was changed.
 bool setMode(ModeTy NewMode, MachineBasicBlock::instr_iterator I);

 /// Reset mode to default.
 void resetMode(MachineBasicBlock::instr_iterator I) {
 ModeTy Mode;
 for (OpMode &Op : Mode.Ops)
 Op.MSBits = 0;
 setMode(Mode, I);
 }

 /// If \p MO references VGPRs, return the MSBs. Otherwise, return nullopt.
 std::optional<unsigned> getMSBs(const MachineOperand &MO) const;

 /// Handle single \p MI. \return true if changed.
 bool runOnMachineInstr(MachineInstr &MI);

 /// Compute the mode for a single \p MI given \p Ops operands
 /// bit mapping. Optionally takes second array \p Ops2 for VOPD.
 /// If provided and an operand from \p Ops is not a VGPR, then \p Ops2
 /// is checked.
 void computeMode(ModeTy &NewMode, const MachineInstr &MI,
 const AMDGPU::OpName Ops[OpNum],
 const AMDGPU::OpName *Ops2 = nullptr);

 /// Check if an instruction \p I is within a clause and returns a suitable
 /// iterator to insert mode change. It may also modify the S_CLAUSE
 /// instruction to extend it or drop the clause if it cannot be adjusted.
 // NOTE(review): the declaration of handleClause is missing from this
 // excerpt (defined below, returning an instr_iterator).
192
 /// Check if an instruction \p I is immediately after another program state
 /// instruction which it cannot coissue with. If so, insert before that
 /// instruction to encourage more coissuing.
 // NOTE(review): the declaration of handleCoissue is missing from this
 // excerpt (defined below, returning an instr_iterator).
198
 /// S_SET_VGPR_MSB immediately after S_SETREG_IMM32_B32 targeting MODE is
 /// silently dropped on GFX1250. When set, the next S_SET_VGPR_MSB insertion
 /// must be preceded by S_NOP to avoid the hazard.
 bool needNopBeforeSetVGPRMSB(MachineBasicBlock::instr_iterator I);

 /// Handle S_SETREG_IMM32_B32 targeting MODE register. On certain hardware,
 /// this instruction clobbers VGPR MSB bits[12:19], so we need to restore
 /// the current mode. \returns true if the instruction was modified or a
 /// new one was inserted.
 bool handleSetregMode(MachineInstr &MI);

 /// Update bits[12:19] of the imm operand in S_SETREG_IMM32_B32 to contain
 /// the VGPR MSB mode value. \returns true if the immediate was changed.
 bool updateSetregModeImm(MachineInstr &MI, int64_t ModeValue);
};
214
// Insert or update a mode change so \p NewMode is in effect at \p I.
// NOTE(review): the second parameter line of the signature
// (MachineBasicBlock::instr_iterator I) is missing from this excerpt.
215bool AMDGPULowerVGPREncoding::setMode(ModeTy NewMode,
 LLVM_DEBUG({
 dbgs() << " setMode: NewMode=";
 NewMode.print(dbgs());
 dbgs() << " CurrentMode=";
 CurrentMode.print(dbgs());
 dbgs() << " MostRecentModeSet=" << (MostRecentModeSet ? "yes" : "null");
 if (I != MBB->instr_end())
 dbgs() << " before: " << *I;
 else
 dbgs() << " at end\n";
 });

 // Record previous mode into high 8 bits of the immediate.
 int64_t OldModeBits = CurrentMode.encode() << ModeWidth;

 bool Rewritten = false;
 if (!CurrentMode.update(NewMode, Rewritten)) {
 LLVM_DEBUG(dbgs() << " -> no change needed\n");
 return false;
 }

 LLVM_DEBUG(dbgs() << " Rewritten=" << Rewritten << " after update\n");

 // If nothing a prior S_SET_VGPR_MSB established is being overwritten, the
 // new bits can be folded into that existing instruction instead of
 // emitting a new one.
 if (MostRecentModeSet && !Rewritten) {
 // Update MostRecentModeSet with the new mode.
 MachineOperand &Op = MostRecentModeSet->getOperand(0);
 // Carry old mode bits from the existing instruction.
 // (Intentionally shadows the outer OldModeBits: the previous-mode record
 // of the instruction being piggybacked on must be preserved.)
 int64_t OldModeBits = Op.getImm() & (ModeMask << ModeWidth);
 Op.setImm(CurrentMode.encode() | OldModeBits);
 LLVM_DEBUG(dbgs() << " -> piggybacked onto S_SET_VGPR_MSB: "
 << *MostRecentModeSet);
 return true;
 }

 MachineBasicBlock::instr_iterator InsertPt = handleClause(I);
 InsertPt = handleCoissue(InsertPt);
 // Case 2 match in handleSetregMode: the setreg's imm[12:19] matched
 // current MSBs, but the next VALU needs different MSBs, so this
 // S_SET_VGPR_MSB would land right after the setreg. Insert S_NOP to
 // prevent it from being silently dropped.
 if (needNopBeforeSetVGPRMSB(I))
 BuildMI(*MBB, InsertPt, {}, TII->get(AMDGPU::S_NOP)).addImm(0);
 MostRecentModeSet =
 BuildMI(*MBB, InsertPt, {}, TII->get(AMDGPU::S_SET_VGPR_MSB))
 .addImm(NewMode.encode() | OldModeBits);
 LLVM_DEBUG(dbgs() << " -> inserted new S_SET_VGPR_MSB: "
 << *MostRecentModeSet);

 // If we inserted S_SET_VGPR_MSB early then XCNT should remain zero from the
 // insertion point to the current instruction. Remove any redundant
 // S_WAIT_XCNT instructions in that range.
 for (MachineInstr &MI : make_early_inc_range(make_range(InsertPt, I))) {
270
 if (MI.getOpcode() == AMDGPU::S_WAIT_XCNT)
 MI.eraseFromBundle();
 }
 XCntIsZero = true;

 // Track NewMode (not the merged mode): the freshly inserted instruction
 // encodes unknown fields as 0, which matches isCompatible's value_or(0).
 CurrentMode = NewMode;
 return true;
}
278
279std::optional<unsigned>
280AMDGPULowerVGPREncoding::getMSBs(const MachineOperand &MO) const {
281 if (!MO.isReg())
282 return std::nullopt;
283
284 MCRegister Reg = MO.getReg();
285 const TargetRegisterClass *RC = TRI->getPhysRegBaseClass(Reg);
286 if (!RC || !TRI->isVGPRClass(RC))
287 return std::nullopt;
288
289 unsigned Idx = TRI->getHWRegIndex(Reg);
290 return Idx >> 8;
291}
292
// Compute the required VGPR MSB mode for \p MI over the operand-name table
// \p Ops (with optional VOPD fallback table \p Ops2); fields stay unset for
// non-VGPR operands.
293void AMDGPULowerVGPREncoding::computeMode(ModeTy &NewMode,
 const MachineInstr &MI,
 const AMDGPU::OpName Ops[OpNum],
 const AMDGPU::OpName *Ops2) {
 NewMode = {};

 for (unsigned I = 0; I < OpNum; ++I) {
 const MachineOperand *Op = TII->getNamedOperand(MI, Ops[I]);

 std::optional<unsigned> MSBits;
 if (Op)
 MSBits = getMSBs(*Op);

 // Debug-only sanity check: a VOPD pair must agree on MSBs for the paired
 // operand slots.
#if !defined(NDEBUG)
 if (MSBits.has_value() && Ops2) {
 const MachineOperand *Op2 = TII->getNamedOperand(MI, Ops2[I]);
 if (Op2) {
 std::optional<unsigned> MSBits2;
 MSBits2 = getMSBs(*Op2);
 if (MSBits2.has_value() && MSBits != MSBits2)
 llvm_unreachable("Invalid VOPD pair was created");
 }
 }
#endif

 // Fall back to the second (VOPD) operand table when the primary slot is
 // not a VGPR.
 if (!MSBits.has_value() && Ops2) {
 Op = TII->getNamedOperand(MI, Ops2[I]);
 if (Op)
 MSBits = getMSBs(*Op);
 }

 if (!MSBits.has_value())
 continue;

 // Skip tied uses of src2 of VOP2, these will be handled along with defs and
 // only vdst bit affects these operands. We cannot skip tied uses of VOP3,
 // these uses are real even if must match the vdst.
 // NOTE(review): part of this condition (original lines 331-332, presumably
 // the isVOP2/isVOP3 distinction) is missing from this excerpt.
 if (Ops[I] == AMDGPU::OpName::src2 && !Op->isDef() && Op->isTied() &&
 TII->hasVALU32BitEncoding(MI.getOpcode()))))
 continue;

 NewMode.Ops[I].MSBits = MSBits.value();
 }
}
339
// Process one instruction: compute the mode it needs, try commuting to avoid
// a mode switch, and otherwise emit/adjust an S_SET_VGPR_MSB via setMode.
340bool AMDGPULowerVGPREncoding::runOnMachineInstr(MachineInstr &MI) {
 // NOTE(review): the definition of Ops (original line 341, presumably
 // AMDGPU::getVGPRLoweringOperandTables(MI.getDesc())) is missing from this
 // excerpt; Ops.first/Ops.second are the primary and VOPD operand tables.
 if (Ops.first) {
 ModeTy NewMode;
 computeMode(NewMode, MI, Ops.first, Ops.second);
 LLVM_DEBUG({
 dbgs() << " runOnMachineInstr: ";
 MI.print(dbgs());
 dbgs() << " computed NewMode=";
 NewMode.print(dbgs());
 dbgs() << " compatible=" << CurrentMode.isCompatible(NewMode) << '\n';
 });
 // If the required mode conflicts, try commuting the instruction so its
 // VGPR operands fall under the already-active MSBs.
 if (!CurrentMode.isCompatible(NewMode) && MI.isCommutable() &&
 TII->commuteInstruction(MI)) {
 ModeTy NewModeCommuted;
 computeMode(NewModeCommuted, MI, Ops.first, Ops.second);
 LLVM_DEBUG({
 dbgs() << " commuted NewMode=";
 NewModeCommuted.print(dbgs());
 dbgs() << " compatible=" << CurrentMode.isCompatible(NewModeCommuted)
 << '\n';
 });
 if (CurrentMode.isCompatible(NewModeCommuted)) {
 // Update CurrentMode with mode bits the commuted instruction relies on.
 // This prevents later instructions from piggybacking and corrupting
 // those bits (e.g., a nullopt src treated as 0 could be overwritten).
 bool Unused = false;
 CurrentMode.update(NewModeCommuted, Unused);
 // MI was modified by the commute above.
 return true;
 }
 // Commute back.
 if (!TII->commuteInstruction(MI))
 llvm_unreachable("Failed to restore commuted instruction.");
 }
 return setMode(NewMode, MI.getIterator());
 }
 // Instructions without an operand table must not address VGPRs at all.
 assert(!TII->hasVGPRUses(MI) || MI.isMetaInstruction() || MI.isPseudo());
 return false;
}
380
// Pick an insertion point for a mode change relative to the current hard
// clause, adjusting or dropping the S_CLAUSE as needed.
// NOTE(review): the return-type line (MachineBasicBlock::instr_iterator per
// its use in setMode) is missing from this excerpt.
382AMDGPULowerVGPREncoding::handleClause(MachineBasicBlock::instr_iterator I) {
 if (!ClauseRemaining)
 return I;

 // A clause cannot start with a special instruction, place it right before
 // the clause.
 if (ClauseRemaining == ClauseLen) {
 I = Clause->getPrevNode()->getIterator();
 assert(I->isBundle());
 return I;
 }

 // If a clause defines breaks each group cannot start with a mode change.
 // just drop the clause.
 if (ClauseBreaks) {
 Clause->eraseFromBundle();
 ClauseRemaining = 0;
 return I;
 }

 // Otherwise adjust a number of instructions in the clause if it fits.
 // If it does not clause will just become shorter. Since the length
 // recorded in the clause is one less, increment the length after the
 // update. Note that SIMM16[5:0] must be 1-62, not 0 or 63.
 if (ClauseLen < 63)
 Clause->getOperand(0).setImm(ClauseLen | (ClauseBreaks << 8));

 ++ClauseLen;

 return I;
}
413
// Hoist the insertion point above any run of "program state" instructions so
// the S_SET_VGPR_MSB can coissue better.
// NOTE(review): the return-type line (MachineBasicBlock::instr_iterator per
// its use in setMode) is missing from this excerpt.
415AMDGPULowerVGPREncoding::handleCoissue(MachineBasicBlock::instr_iterator I) {
 // "Program State instructions" are instructions which are used to control
 // operation of the GPU rather than performing arithmetic. Such instructions
 // have different coissuing rules w.r.t s_set_vgpr_msb.
 auto isProgramStateInstr = [this](MachineInstr *MI) {
 unsigned Opc = MI->getOpcode();
 return TII->isBarrier(Opc) || TII->isWaitcnt(Opc) ||
 Opc == AMDGPU::S_DELAY_ALU;
 };

 // Walk backwards over consecutive program-state instructions, stopping at
 // the block start or the first ordinary instruction.
 while (I != MBB->begin()) {
 auto Prev = std::prev(I);
 if (!isProgramStateInstr(&*Prev))
 return I;
 I = Prev;
 }

 return I;
}
434
// Scan backwards (over meta instructions only) for an S_SETREG_IMM32_B32
// that targets the MODE register; inserting S_SET_VGPR_MSB right after one
// requires an S_NOP on GFX1250 (see class comment).
// NOTE(review): the parameter line of the signature
// (MachineBasicBlock::instr_iterator I) is missing from this excerpt.
435bool AMDGPULowerVGPREncoding::needNopBeforeSetVGPRMSB(
 while (I != MBB->begin()) {
 I = std::prev(I);
 if (I->getOpcode() == AMDGPU::S_SETREG_IMM32_B32) {
 MachineOperand *SIMM16Op =
 TII->getNamedOperand(*I, AMDGPU::OpName::simm16);
 // NOTE(review): the decode call (original line 443, presumably
 // HwregEncoding::decode(SIMM16Op->getImm()) as in handleSetregMode) is
 // missing from this excerpt.
 auto [HwRegId, Offset, Size] =
 if (HwRegId == AMDGPU::Hwreg::ID_MODE)
 return true;
 }
 if (!I->isMetaInstruction())
 return false;
 }
 // FIXME: Return true if the previous MBB falls through and ends with
 // S_SETREG_IMM32_B32.
 return false;
}
454
/// Convert a mode value from S_SET_VGPR_MSB layout to MODE-register layout.
/// S_SET_VGPR_MSB packs the fields as (src0[0-1], src1[2-3], src2[4-5],
/// dst[6-7]); the MODE register packs them as (dst[0-1], src0[2-3],
/// src1[4-5], src2[6-7]). That is a left rotation by 2 bits of an 8-bit
/// value.
static int64_t convertModeToSetregFormat(int64_t Mode) {
  assert(Mode >= 0 && Mode <= 0xFF && "Mode expected to be 8-bit");
  const uint8_t Bits = static_cast<uint8_t>(Mode);
  // Rotate left by two within 8 bits (equivalent to llvm::rotl<uint8_t>).
  return static_cast<uint8_t>((Bits << 2) | (Bits >> 6));
}
463
464bool AMDGPULowerVGPREncoding::updateSetregModeImm(MachineInstr &MI,
465 int64_t ModeValue) {
466 assert(MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32);
467
468 // Convert from S_SET_VGPR_MSB format to MODE register format
469 int64_t SetregMode = convertModeToSetregFormat(ModeValue);
470
471 MachineOperand *ImmOp = TII->getNamedOperand(MI, AMDGPU::OpName::imm);
472 int64_t OldImm = ImmOp->getImm();
473 // Note that Offset is ignored for mode bits here.
474 int64_t NewImm =
475 (OldImm & ~AMDGPU::Hwreg::VGPR_MSB_MASK) | (SetregMode << VGPRMSBShift);
476 ImmOp->setImm(NewImm);
477 return NewImm != OldImm;
478}
479
// Repair the VGPR MSB state around an S_SETREG_IMM32_B32 that writes the
// MODE register (which clobbers the MSB bits on this hardware).
480bool AMDGPULowerVGPREncoding::handleSetregMode(MachineInstr &MI) {
 using namespace AMDGPU::Hwreg;

 assert(MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 &&
 "only S_SETREG_IMM32_B32 needs to be handled");

 LLVM_DEBUG(dbgs() << " handleSetregMode: " << MI);

 MachineOperand *SIMM16Op = TII->getNamedOperand(MI, AMDGPU::OpName::simm16);
 assert(SIMM16Op && "SIMM16Op must be present");

 auto [HwRegId, Offset, Size] = HwregEncoding::decode(SIMM16Op->getImm());
 LLVM_DEBUG(dbgs() << " HwRegId=" << HwRegId << " Offset=" << Offset
 << " Size=" << Size << '\n');
 if (HwRegId != ID_MODE) {
 LLVM_DEBUG(dbgs() << " -> not ID_MODE, skipping\n");
 return false;
 }

 // MostRecentModeSet is clobbered by SETREG and not relevant anymore.
 MostRecentModeSet = nullptr;

 int64_t ModeValue = CurrentMode.encode();
 LLVM_DEBUG({
 dbgs() << " CurrentMode=";
 CurrentMode.print(dbgs());
 dbgs() << " encoded=0x" << Twine::utohexstr(ModeValue)
 << " VGPRMSBShift=" << VGPRMSBShift << '\n';
 });

 // Case 1: Size <= 12 - the original instruction uses imm32[0:Size-1], so
 // imm32[12:19] is unused, or Offset is zero and it is safe to set
 // imm32[12:19] to the correct VGPR MSBs.
 if (!Offset || Size <= VGPRMSBShift) {
 // Set imm32[12:19] to the correct VGPR MSBs.
 LLVM_DEBUG(dbgs() << " Case 1: Size(" << Size << ") <= VGPRMSBShift("
 << VGPRMSBShift << "), update mode bits[12:19]\n");
 bool Changed = updateSetregModeImm(MI, ModeValue);
 LLVM_DEBUG(dbgs() << " -> " << MI);
 return Changed;
 }

 // Case 2: Size > 12 - the original instruction uses bits beyond 11, so we
 // cannot arbitrarily modify imm32[12:19]. Check if it already matches VGPR
 // MSBs. Note: imm32[12:19] is in MODE register format, while ModeValue is
 // in S_SET_VGPR_MSB format, so we need to convert before comparing.
 MachineOperand *ImmOp = TII->getNamedOperand(MI, AMDGPU::OpName::imm);
 assert(ImmOp && "ImmOp must be present");
 int64_t ImmBits12To19 = (ImmOp->getImm() & VGPR_MSB_MASK) >> VGPRMSBShift;
 int64_t SetregModeValue = convertModeToSetregFormat(ModeValue);
 LLVM_DEBUG(dbgs() << " Case 2: Size(" << Size << ") > VGPRMSBShift, "
 << "ImmBits12To19=0x" << Twine::utohexstr(ImmBits12To19)
 << " SetregModeValue=0x"
 << Twine::utohexstr(SetregModeValue) << '\n');
 if (ImmBits12To19 == SetregModeValue) {
 LLVM_DEBUG(dbgs() << " -> bits[12:19] already correct\n");
 return false;
 }

 // imm32[12:19] doesn't match VGPR MSBs - insert s_set_vgpr_msb after
 // the original instruction to restore the correct value. Insert S_NOP
 // to avoid the GFX1250 hazard where S_SET_VGPR_MSB immediately after
 // S_SETREG_IMM32_B32(MODE) is silently dropped.
 MachineBasicBlock::iterator InsertPt = std::next(MI.getIterator());
 BuildMI(*MBB, InsertPt, MI.getDebugLoc(), TII->get(AMDGPU::S_NOP)).addImm(0);
 // Both halves of the immediate carry ModeValue: the mode is being restored,
 // so the "previous mode" record equals the new mode.
 MostRecentModeSet = BuildMI(*MBB, InsertPt, MI.getDebugLoc(),
 TII->get(AMDGPU::S_SET_VGPR_MSB))
 .addImm(ModeValue | (ModeValue << ModeWidth));
 LLVM_DEBUG(dbgs() << " -> inserted S_SET_VGPR_MSB after setreg: "
 << *MostRecentModeSet);
 return true;
}
552
// Pass driver: walk every instruction of every block, maintaining the VGPR
// MSB mode and the ABI convention that each non-fall-through block entry and
// each call/terminator sees all MSBs zero.
553bool AMDGPULowerVGPREncoding::run(MachineFunction &MF) {
 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
 if (!ST.has1024AddressableVGPRs())
 return false;

 TII = ST.getInstrInfo();
 TRI = ST.getRegisterInfo();

 LLVM_DEBUG(dbgs() << "*** AMDGPULowerVGPREncoding on " << MF.getName()
 << " ***\n");

 bool Changed = false;
 ClauseLen = ClauseRemaining = 0;
 CurrentMode = {};
 for (auto &MBB : MF) {
 MostRecentModeSet = nullptr;
 XCntIsZero = false;
 this->MBB = &MBB;

 LLVM_DEBUG(dbgs() << "BB#" << MBB.getNumber() << ' ' << MBB.getName()
 << ":\n");

 for (auto &MI : llvm::make_early_inc_range(MBB.instrs())) {
 if (MI.isMetaInstruction())
 continue;

 if (MI.isTerminator() || MI.isCall()) {
 LLVM_DEBUG(dbgs() << " terminator/call: " << MI);
 // S_ENDPGM needs no reset instruction; just forget the state.
 if (MI.getOpcode() == AMDGPU::S_ENDPGM ||
 MI.getOpcode() == AMDGPU::S_ENDPGM_SAVED)
 CurrentMode = {};
 else
 resetMode(MI.getIterator());
 continue;
 }

 if (MI.isInlineAsm()) {
 LLVM_DEBUG(dbgs() << " inline asm: " << MI);
 if (TII->hasVGPRUses(MI))
 resetMode(MI.getIterator());
 continue;
 }

 if (MI.getOpcode() == AMDGPU::S_CLAUSE) {
 assert(!ClauseRemaining && "Nested clauses are not supported");
 ClauseLen = MI.getOperand(0).getImm();
 ClauseBreaks = (ClauseLen >> 8) & 15;
 ClauseLen = ClauseRemaining = (ClauseLen & 63) + 1;
 Clause = &MI;
 LLVM_DEBUG(dbgs() << " clause: len=" << ClauseLen
 << " breaks=" << ClauseBreaks << '\n');
 continue;
 }

 if (MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 &&
 ST.hasSetregVGPRMSBFixup()) {
 Changed |= handleSetregMode(MI);
 continue;
 }

 // If XCNT is known to be zero then any S_WAIT_XCNT instruction is
 // redundant and can be removed.
 if (MI.getOpcode() == AMDGPU::S_WAIT_XCNT && XCntIsZero) {
 MI.eraseFromBundle();
 Changed = true;
 continue;
 }

 Changed |= runOnMachineInstr(MI);

 // Any VMEM or SMEM instruction may increment XCNT.
 // NOTE(review): the guarding condition (original line 624, presumably a
 // SIInstrInfo::isVMEM/isSMRD check) is missing from this excerpt.
 XCntIsZero = false;

 if (ClauseRemaining)
 --ClauseRemaining;
 }

 // Reset the mode if we are falling through.
 LLVM_DEBUG(dbgs() << " end of BB, resetting mode\n");
 resetMode(MBB.instr_end());
 }

 return Changed;
}
638
// Legacy pass-manager wrapper around the AMDGPULowerVGPREncoding
// implementation above.
639class AMDGPULowerVGPREncodingLegacy : public MachineFunctionPass {
public:
 static char ID;

 AMDGPULowerVGPREncodingLegacy() : MachineFunctionPass(ID) {}

 bool runOnMachineFunction(MachineFunction &MF) override {
 return AMDGPULowerVGPREncoding().run(MF);
 }

 void getAnalysisUsage(AnalysisUsage &AU) const override {
 AU.setPreservesCFG();
 // NOTE(review): original line 651 is missing from this excerpt --
 // presumably the MachineFunctionPass::getAnalysisUsage(AU) chain call.
652 }
};
654
655} // namespace
656
// Pass identifier, its externally visible alias, and legacy-PM registration.
657char AMDGPULowerVGPREncodingLegacy::ID = 0;

659char &llvm::AMDGPULowerVGPREncodingLegacyID = AMDGPULowerVGPREncodingLegacy::ID;

661INITIALIZE_PASS(AMDGPULowerVGPREncodingLegacy, DEBUG_TYPE,
 "AMDGPU Lower VGPR Encoding", false, false)
663
667 if (!AMDGPULowerVGPREncoding().run(MF))
668 return PreservedAnalyses::all();
669
671}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
AMD GCN specific subclass of TargetSubtarget.
#define DEBUG_TYPE
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
Register const TargetRegisterInfo * TRI
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
Interface definition for SIInstrInfo.
#define LLVM_DEBUG(...)
Definition Debug.h:114
This file implements the C++20 <bit> header.
Represent the analysis usage information of a pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
Instructions::iterator instr_iterator
MachineInstrBundleIterator< MachineInstr > iterator
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Representation of each machine instruction.
const MachineOperand & getOperand(unsigned i) const
MachineOperand class - Representation of each machine instruction operand.
void setImm(int64_t immVal)
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Register getReg() const
getReg - Returns the register number.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
static bool isVMEM(const MachineInstr &MI)
static bool isSMRD(const MachineInstr &MI)
static bool isVOP2(const MachineInstr &MI)
static bool isVOP3(const MCInstrDesc &Desc)
static Twine utohexstr(uint64_t Val)
Definition Twine.h:385
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
std::pair< const AMDGPU::OpName *, const AMDGPU::OpName * > getVGPRLoweringOperandTables(const MCInstrDesc &Desc)
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:557
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
unsigned encode(MaybeAlign A)
Returns a representation of the alignment that encodes undefined as 0.
Definition Alignment.h:206
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2553
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:633
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
LLVM_ABI PreservedAnalyses getMachineFunctionPassPreservedAnalyses()
Returns the minimum set of Analyses that all machine function passes must preserve.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
DWARFExpression::Operation Op
constexpr int countr_zero_constexpr(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:190
char & AMDGPULowerVGPREncodingLegacyID
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition Sequence.h:305
constexpr T rotl(T V, int R)
Definition bit.h:386
static std::tuple< typename Fields::ValueType... > decode(uint64_t Encoded)