AMDGPUInstPrinter.cpp
1//===-- AMDGPUInstPrinter.cpp - AMDGPU MC Inst -> ASM ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7// \file
8//===----------------------------------------------------------------------===//
9
10#include "AMDGPUInstPrinter.h"
11#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
12#include "SIDefines.h"
13#include "SIRegisterInfo.h"
14#include "Utils/AMDGPUAsmUtils.h"
15#include "Utils/AMDGPUBaseInfo.h"
16#include "llvm/MC/MCExpr.h"
17#include "llvm/MC/MCInst.h"
18#include "llvm/MC/MCInstrDesc.h"
19#include "llvm/MC/MCInstrInfo.h"
23
24using namespace llvm;
25using namespace llvm::AMDGPU;
26
27static cl::opt<bool> Keep16BitSuffixes(
28 "amdgpu-keep-16-bit-reg-suffixes",
29 cl::desc("Keep .l and .h suffixes in asm for debugging purposes"),
30 cl::init(false),
31 cl::ReallyHidden);
32
33void AMDGPUInstPrinter::printRegName(raw_ostream &OS, MCRegister Reg) const {
34 // FIXME: The current implementation of
35 // AsmParser::parseRegisterOrRegisterNumber in MC implies we either emit this
36 // as an integer or we provide a name which represents a physical register.
37 // For CFI instructions we really want to emit a name for the DWARF register
38 // instead, because there may be multiple DWARF registers corresponding to a
39 // single physical register. One case where this problem manifests is with
40 // wave32/wave64 where using the physical register name is ambiguous: if we
41 // write e.g. `.cfi_undefined v0` we lose information about the wavefront
42 // size which we need to encode the register in the final DWARF. Ideally we
43 // would extend MC to support parsing DWARF register names so we could do
44 // something like `.cfi_undefined dwarf_wave32_v0`. For now we just live with
45 // non-pretty DWARF register names in assembly text.
46 OS << Reg.id();
47}
48
49void AMDGPUInstPrinter::printInst(const MCInst *MI, uint64_t Address,
50 StringRef Annot, const MCSubtargetInfo &STI,
51 raw_ostream &OS) {
52 OS.flush();
53 printInstruction(MI, Address, STI, OS);
54 printAnnotation(OS, Annot);
55}
56
57void AMDGPUInstPrinter::printU4ImmOperand(const MCInst *MI, unsigned OpNo,
58 const MCSubtargetInfo &STI,
59 raw_ostream &O) {
60 O << formatHex(MI->getOperand(OpNo).getImm() & 0xf);
61}
62
63void AMDGPUInstPrinter::printU8ImmOperand(const MCInst *MI, unsigned OpNo,
64 raw_ostream &O) {
65 O << formatHex(MI->getOperand(OpNo).getImm() & 0xff);
66}
67
68void AMDGPUInstPrinter::printU16ImmOperand(const MCInst *MI, unsigned OpNo,
69 const MCSubtargetInfo &STI,
70 raw_ostream &O) {
71 // It's possible to end up with a 32-bit literal used with a 16-bit operand
72 // with ignored high bits. Print as 32-bit anyway in that case.
73 int64_t Imm = MI->getOperand(OpNo).getImm();
74 if (isInt<16>(Imm) || isUInt<16>(Imm))
75 O << formatHex(static_cast<uint64_t>(Imm & 0xffff));
76 else
77 printU32ImmOperand(MI, OpNo, STI, O);
78}
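// Illustrative sketch, not part of the original file: the check above keeps a
// value in 16-bit form only when it fits a signed or unsigned 16-bit range;
// anything wider falls through to the 32-bit printer. Equivalent plain C++:
static bool fitsIn16BitsExample(long long Imm) {
  return (Imm >= -32768 && Imm <= 32767)   // isInt<16>
      || (Imm >= 0 && Imm <= 65535);       // isUInt<16>
}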
79
80void AMDGPUInstPrinter::printU4ImmDecOperand(const MCInst *MI, unsigned OpNo,
81 raw_ostream &O) {
82 O << formatDec(MI->getOperand(OpNo).getImm() & 0xf);
83}
84
85void AMDGPUInstPrinter::printU8ImmDecOperand(const MCInst *MI, unsigned OpNo,
86 raw_ostream &O) {
87 O << formatDec(MI->getOperand(OpNo).getImm() & 0xff);
88}
89
90void AMDGPUInstPrinter::printU16ImmDecOperand(const MCInst *MI, unsigned OpNo,
91 raw_ostream &O) {
92 O << formatDec(MI->getOperand(OpNo).getImm() & 0xffff);
93}
94
95void AMDGPUInstPrinter::printU32ImmOperand(const MCInst *MI, unsigned OpNo,
96 const MCSubtargetInfo &STI,
97 raw_ostream &O) {
98 O << formatHex(MI->getOperand(OpNo).getImm() & 0xffffffff);
99}
100
101void AMDGPUInstPrinter::printNamedBit(const MCInst *MI, unsigned OpNo,
102 raw_ostream &O, StringRef BitName) {
103 if (MI->getOperand(OpNo).getImm()) {
104 O << ' ' << BitName;
105 }
106}
107
108void AMDGPUInstPrinter::printOffen(const MCInst *MI, unsigned OpNo,
109 raw_ostream &O) {
110 printNamedBit(MI, OpNo, O, "offen");
111}
112
113void AMDGPUInstPrinter::printIdxen(const MCInst *MI, unsigned OpNo,
114 raw_ostream &O) {
115 printNamedBit(MI, OpNo, O, "idxen");
116}
117
118void AMDGPUInstPrinter::printAddr64(const MCInst *MI, unsigned OpNo,
119 raw_ostream &O) {
120 printNamedBit(MI, OpNo, O, "addr64");
121}
122
123void AMDGPUInstPrinter::printOffset(const MCInst *MI, unsigned OpNo,
124 const MCSubtargetInfo &STI,
125 raw_ostream &O) {
126 uint16_t Imm = MI->getOperand(OpNo).getImm();
127 if (Imm != 0) {
128 O << " offset:";
129 printU16ImmDecOperand(MI, OpNo, O);
130 }
131}
132
133void AMDGPUInstPrinter::printFlatOffset(const MCInst *MI, unsigned OpNo,
134 const MCSubtargetInfo &STI,
135 raw_ostream &O) {
136 uint16_t Imm = MI->getOperand(OpNo).getImm();
137 if (Imm != 0) {
138 O << " offset:";
139
140 const MCInstrDesc &Desc = MII.get(MI->getOpcode());
141 bool IsFlatSeg = !(Desc.TSFlags &
142 (SIInstrFlags::FlatGlobal | SIInstrFlags::FlatScratch));
143
144 if (IsFlatSeg) // Unsigned offset
145 printU16ImmDecOperand(MI, OpNo, O);
146 else // Signed offset
147 O << formatDec(SignExtend32(Imm, AMDGPU::getNumFlatOffsetBits(STI)));
148 }
149}
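// Illustrative sketch, not part of the original file: for segments that take a
// signed offset, only the low N bits of the immediate are meaningful, so the
// printer sign-extends them before formatting (N comes from
// AMDGPU::getNumFlatOffsetBits in the real code). A minimal version, assuming
// two's-complement representation and an arithmetic right shift:
static int signExtendFlatOffsetExample(unsigned Imm, unsigned NumBits) {
  unsigned Shift = 32u - NumBits;      // NumBits is assumed to be in 1..32
  return (int)(Imm << Shift) >> Shift; // shift up, arithmetic shift back down
}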
150
151void AMDGPUInstPrinter::printOffset0(const MCInst *MI, unsigned OpNo,
152 const MCSubtargetInfo &STI,
153 raw_ostream &O) {
154 if (MI->getOperand(OpNo).getImm()) {
155 O << " offset0:";
156 printU8ImmDecOperand(MI, OpNo, O);
157 }
158}
159
160void AMDGPUInstPrinter::printOffset1(const MCInst *MI, unsigned OpNo,
161 const MCSubtargetInfo &STI,
162 raw_ostream &O) {
163 if (MI->getOperand(OpNo).getImm()) {
164 O << " offset1:";
165 printU8ImmDecOperand(MI, OpNo, O);
166 }
167}
168
169void AMDGPUInstPrinter::printSMRDOffset8(const MCInst *MI, unsigned OpNo,
170 const MCSubtargetInfo &STI,
171 raw_ostream &O) {
172 printU32ImmOperand(MI, OpNo, STI, O);
173}
174
175void AMDGPUInstPrinter::printSMEMOffset(const MCInst *MI, unsigned OpNo,
176 const MCSubtargetInfo &STI,
177 raw_ostream &O) {
178 O << formatHex(MI->getOperand(OpNo).getImm());
179}
180
181void AMDGPUInstPrinter::printSMEMOffsetMod(const MCInst *MI, unsigned OpNo,
182 const MCSubtargetInfo &STI,
183 raw_ostream &O) {
184 O << " offset:";
185 printSMEMOffset(MI, OpNo, STI, O);
186}
187
188void AMDGPUInstPrinter::printSMRDLiteralOffset(const MCInst *MI, unsigned OpNo,
189 const MCSubtargetInfo &STI,
190 raw_ostream &O) {
191 printU32ImmOperand(MI, OpNo, STI, O);
192}
193
194void AMDGPUInstPrinter::printGDS(const MCInst *MI, unsigned OpNo,
195 const MCSubtargetInfo &STI, raw_ostream &O) {
196 printNamedBit(MI, OpNo, O, "gds");
197}
198
199void AMDGPUInstPrinter::printCPol(const MCInst *MI, unsigned OpNo,
200 const MCSubtargetInfo &STI, raw_ostream &O) {
201 auto Imm = MI->getOperand(OpNo).getImm();
202 if (Imm & CPol::GLC)
203 O << ((AMDGPU::isGFX940(STI) &&
204 !(MII.get(MI->getOpcode()).TSFlags & SIInstrFlags::SMRD)) ? " sc0"
205 : " glc");
206 if (Imm & CPol::SLC)
207 O << (AMDGPU::isGFX940(STI) ? " nt" : " slc");
208 if ((Imm & CPol::DLC) && AMDGPU::isGFX10Plus(STI))
209 O << " dlc";
210 if ((Imm & CPol::SCC) && AMDGPU::isGFX90A(STI))
211 O << (AMDGPU::isGFX940(STI) ? " sc1" : " scc");
212 if (Imm & ~CPol::ALL)
213 O << " /* unexpected cache policy bit */";
214}
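// Illustrative sketch, not part of the original file: the cache-policy operand
// is a small bitmask whose set bits each map to an independent assembly
// suffix; the bit values below are assumptions for the example only (the real
// masks are the CPol::* constants in SIDefines.h, and the spelling also
// depends on the subtarget, as shown above).
static const char *cpolSuffixExample(unsigned Bit) {
  switch (Bit) {
  case 0x1: return " glc"; // assumed GLC bit
  case 0x2: return " slc"; // assumed SLC bit
  case 0x4: return " dlc"; // assumed DLC bit (GFX10+)
  default:  return "";
  }
}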
215
216void AMDGPUInstPrinter::printSWZ(const MCInst *MI, unsigned OpNo,
217 const MCSubtargetInfo &STI, raw_ostream &O) {
218}
219
220void AMDGPUInstPrinter::printTFE(const MCInst *MI, unsigned OpNo,
221 const MCSubtargetInfo &STI, raw_ostream &O) {
222 printNamedBit(MI, OpNo, O, "tfe");
223}
224
225void AMDGPUInstPrinter::printDMask(const MCInst *MI, unsigned OpNo,
226 const MCSubtargetInfo &STI, raw_ostream &O) {
227 if (MI->getOperand(OpNo).getImm()) {
228 O << " dmask:";
229 printU16ImmOperand(MI, OpNo, STI, O);
230 }
231}
232
233void AMDGPUInstPrinter::printDim(const MCInst *MI, unsigned OpNo,
234 const MCSubtargetInfo &STI, raw_ostream &O) {
235 unsigned Dim = MI->getOperand(OpNo).getImm();
236 O << " dim:SQ_RSRC_IMG_";
237
238 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfoByEncoding(Dim);
239 if (DimInfo)
240 O << DimInfo->AsmSuffix;
241 else
242 O << Dim;
243}
244
245void AMDGPUInstPrinter::printUNorm(const MCInst *MI, unsigned OpNo,
246 const MCSubtargetInfo &STI, raw_ostream &O) {
247 printNamedBit(MI, OpNo, O, "unorm");
248}
249
250void AMDGPUInstPrinter::printDA(const MCInst *MI, unsigned OpNo,
251 const MCSubtargetInfo &STI, raw_ostream &O) {
252 printNamedBit(MI, OpNo, O, "da");
253}
254
255void AMDGPUInstPrinter::printR128A16(const MCInst *MI, unsigned OpNo,
256 const MCSubtargetInfo &STI, raw_ostream &O) {
257 if (STI.hasFeature(AMDGPU::FeatureR128A16))
258 printNamedBit(MI, OpNo, O, "a16");
259 else
260 printNamedBit(MI, OpNo, O, "r128");
261}
262
263void AMDGPUInstPrinter::printA16(const MCInst *MI, unsigned OpNo,
264 const MCSubtargetInfo &STI, raw_ostream &O) {
265 printNamedBit(MI, OpNo, O, "a16");
266}
267
268void AMDGPUInstPrinter::printLWE(const MCInst *MI, unsigned OpNo,
269 const MCSubtargetInfo &STI, raw_ostream &O) {
270 printNamedBit(MI, OpNo, O, "lwe");
271}
272
273void AMDGPUInstPrinter::printD16(const MCInst *MI, unsigned OpNo,
274 const MCSubtargetInfo &STI, raw_ostream &O) {
275 printNamedBit(MI, OpNo, O, "d16");
276}
277
278void AMDGPUInstPrinter::printExpCompr(const MCInst *MI, unsigned OpNo,
279 const MCSubtargetInfo &STI,
280 raw_ostream &O) {
281 printNamedBit(MI, OpNo, O, "compr");
282}
283
284void AMDGPUInstPrinter::printExpVM(const MCInst *MI, unsigned OpNo,
285 const MCSubtargetInfo &STI,
286 raw_ostream &O) {
287 printNamedBit(MI, OpNo, O, "vm");
288}
289
290void AMDGPUInstPrinter::printFORMAT(const MCInst *MI, unsigned OpNo,
291 const MCSubtargetInfo &STI,
292 raw_ostream &O) {
293}
294
295void AMDGPUInstPrinter::printSymbolicFormat(const MCInst *MI,
296 const MCSubtargetInfo &STI,
297 raw_ostream &O) {
298 using namespace llvm::AMDGPU::MTBUFFormat;
299
300 int OpNo =
301 AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::format);
302 assert(OpNo != -1);
303
304 unsigned Val = MI->getOperand(OpNo).getImm();
305 if (AMDGPU::isGFX10Plus(STI)) {
306 if (Val == UFMT_DEFAULT)
307 return;
308 if (isValidUnifiedFormat(Val, STI)) {
309 O << " format:[" << getUnifiedFormatName(Val, STI) << ']';
310 } else {
311 O << " format:" << Val;
312 }
313 } else {
314 if (Val == DFMT_NFMT_DEFAULT)
315 return;
316 if (isValidDfmtNfmt(Val, STI)) {
317 unsigned Dfmt;
318 unsigned Nfmt;
319 decodeDfmtNfmt(Val, Dfmt, Nfmt);
320 O << " format:[";
321 if (Dfmt != DFMT_DEFAULT) {
322 O << getDfmtName(Dfmt);
323 if (Nfmt != NFMT_DEFAULT) {
324 O << ',';
325 }
326 }
327 if (Nfmt != NFMT_DEFAULT) {
328 O << getNfmtName(Nfmt, STI);
329 }
330 O << ']';
331 } else {
332 O << " format:" << Val;
333 }
334 }
335}
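// Illustrative sketch, not part of the original file: on pre-GFX10 targets the
// MTBUF format operand packs a data format and a numeric format into one
// value, which decodeDfmtNfmt splits back apart. The 4-bit/3-bit split shown
// here is an assumption for the example; the real field layout lives in
// SIDefines.h.
static void splitDfmtNfmtExample(unsigned Val, unsigned &Dfmt, unsigned &Nfmt) {
  Dfmt = Val & 0xF;        // assumed data-format field
  Nfmt = (Val >> 4) & 0x7; // assumed numeric-format field
}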
336
337void AMDGPUInstPrinter::printRegOperand(unsigned RegNo, raw_ostream &O,
338 const MCRegisterInfo &MRI) {
339#if !defined(NDEBUG)
340 switch (RegNo) {
341 case AMDGPU::FP_REG:
342 case AMDGPU::SP_REG:
343 case AMDGPU::PRIVATE_RSRC_REG:
344 llvm_unreachable("pseudo-register should not ever be emitted");
345 case AMDGPU::SCC:
346 llvm_unreachable("pseudo scc should not ever be emitted");
347 default:
348 break;
349 }
350#endif
351
352 StringRef RegName(getRegisterName(RegNo));
353 if (!Keep16BitSuffixes)
354 if (!RegName.consume_back(".l"))
355 RegName.consume_back(".h");
356
357 O << RegName;
358}
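// Note (editorial comment, not in the original file): 16-bit subregisters
// carry a ".l" or ".h" suffix in their generated names; unless
// -amdgpu-keep-16-bit-reg-suffixes is set, the suffix is dropped above, so a
// low-half name such as "v1.l" prints simply as "v1".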
359
360void AMDGPUInstPrinter::printVOPDst(const MCInst *MI, unsigned OpNo,
361 const MCSubtargetInfo &STI, raw_ostream &O) {
362 auto Opcode = MI->getOpcode();
363 auto Flags = MII.get(Opcode).TSFlags;
364 if (OpNo == 0) {
365 if (Flags & SIInstrFlags::VOP3 && Flags & SIInstrFlags::DPP)
366 O << "_e64_dpp";
367 else if (Flags & SIInstrFlags::VOP3) {
368 if (!getVOP3IsSingle(Opcode))
369 O << "_e64";
370 } else if (Flags & SIInstrFlags::DPP)
371 O << "_dpp";
372 else if (Flags & SIInstrFlags::SDWA)
373 O << "_sdwa";
374 else if (((Flags & SIInstrFlags::VOP1) && !getVOP1IsSingle(Opcode)) ||
375 ((Flags & SIInstrFlags::VOP2) && !getVOP2IsSingle(Opcode)))
376 O << "_e32";
377 O << " ";
378 }
379
380 printRegularOperand(MI, OpNo, STI, O);
381
382 // Print default vcc/vcc_lo operand.
383 switch (Opcode) {
384 default: break;
385
386 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx10:
387 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx10:
388 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
389 case AMDGPU::V_ADD_CO_CI_U32_sdwa_gfx10:
390 case AMDGPU::V_SUB_CO_CI_U32_sdwa_gfx10:
391 case AMDGPU::V_SUBREV_CO_CI_U32_sdwa_gfx10:
392 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx10:
393 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx10:
394 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx10:
395 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx10:
396 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx10:
397 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx10:
398 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx11:
399 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx11:
400 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx11:
401 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx11:
402 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx11:
403 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx11:
404 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx11:
405 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx11:
406 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx11:
407 printDefaultVccOperand(false, STI, O);
408 break;
409 }
410}
411
412void AMDGPUInstPrinter::printVINTRPDst(const MCInst *MI, unsigned OpNo,
413 const MCSubtargetInfo &STI, raw_ostream &O) {
414 if (AMDGPU::isSI(STI) || AMDGPU::isCI(STI))
415 O << " ";
416 else
417 O << "_e32 ";
418
419 printRegularOperand(MI, OpNo, STI, O);
420}
421
422void AMDGPUInstPrinter::printImmediateInt16(uint32_t Imm,
423 const MCSubtargetInfo &STI,
424 raw_ostream &O) {
425 int16_t SImm = static_cast<int16_t>(Imm);
426 if (isInlinableIntLiteral(SImm)) {
427 O << SImm;
428 } else {
429 uint64_t Imm16 = static_cast<uint16_t>(Imm);
430 O << formatHex(Imm16);
431 }
432}
433
434void AMDGPUInstPrinter::printImmediate16(uint32_t Imm,
435 const MCSubtargetInfo &STI,
436 raw_ostream &O) {
437 int16_t SImm = static_cast<int16_t>(Imm);
438 if (isInlinableIntLiteral(SImm)) {
439 O << SImm;
440 return;
441 }
442
443 if (Imm == 0x3C00)
444 O << "1.0";
445 else if (Imm == 0xBC00)
446 O << "-1.0";
447 else if (Imm == 0x3800)
448 O << "0.5";
449 else if (Imm == 0xB800)
450 O << "-0.5";
451 else if (Imm == 0x4000)
452 O << "2.0";
453 else if (Imm == 0xC000)
454 O << "-2.0";
455 else if (Imm == 0x4400)
456 O << "4.0";
457 else if (Imm == 0xC400)
458 O << "-4.0";
459 else if (Imm == 0x3118 &&
460 STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm]) {
461 O << "0.15915494";
462 } else {
463 uint64_t Imm16 = static_cast<uint16_t>(Imm);
464 O << formatHex(Imm16);
465 }
466}
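// Note (editorial comment, not in the original file): the magic constants
// above are the IEEE half-precision bit patterns of the AMDGPU inline float
// constants, e.g. 0x3C00 = 1.0, 0xB800 = -0.5, 0xC400 = -4.0; 0x3118 is
// approximately 1/(2*pi) and is only treated as inline when
// FeatureInv2PiInlineImm is present.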
467
468void AMDGPUInstPrinter::printImmediateV216(uint32_t Imm,
469 const MCSubtargetInfo &STI,
470 raw_ostream &O) {
471 uint16_t Lo16 = static_cast<uint16_t>(Imm);
472 printImmediate16(Lo16, STI, O);
473}
474
475void AMDGPUInstPrinter::printImmediate32(uint32_t Imm,
476 const MCSubtargetInfo &STI,
477 raw_ostream &O) {
478 int32_t SImm = static_cast<int32_t>(Imm);
479 if (SImm >= -16 && SImm <= 64) {
480 O << SImm;
481 return;
482 }
483
484 if (Imm == FloatToBits(0.0f))
485 O << "0.0";
486 else if (Imm == FloatToBits(1.0f))
487 O << "1.0";
488 else if (Imm == FloatToBits(-1.0f))
489 O << "-1.0";
490 else if (Imm == FloatToBits(0.5f))
491 O << "0.5";
492 else if (Imm == FloatToBits(-0.5f))
493 O << "-0.5";
494 else if (Imm == FloatToBits(2.0f))
495 O << "2.0";
496 else if (Imm == FloatToBits(-2.0f))
497 O << "-2.0";
498 else if (Imm == FloatToBits(4.0f))
499 O << "4.0";
500 else if (Imm == FloatToBits(-4.0f))
501 O << "-4.0";
502 else if (Imm == 0x3e22f983 &&
503 STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
504 O << "0.15915494";
505 else
506 O << formatHex(static_cast<uint64_t>(Imm));
507}
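// Note (editorial comment, not in the original file): 0x3e22f983 is the
// single-precision bit pattern of ~0.15915494 (1/(2*pi)), the extra inline
// constant gated by FeatureInv2PiInlineImm, mirroring the 16-bit case above.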
508
509void AMDGPUInstPrinter::printImmediate64(uint64_t Imm,
510 const MCSubtargetInfo &STI,
511 raw_ostream &O) {
512 int64_t SImm = static_cast<int64_t>(Imm);
513 if (SImm >= -16 && SImm <= 64) {
514 O << SImm;
515 return;
516 }
517
518 if (Imm == DoubleToBits(0.0))
519 O << "0.0";
520 else if (Imm == DoubleToBits(1.0))
521 O << "1.0";
522 else if (Imm == DoubleToBits(-1.0))
523 O << "-1.0";
524 else if (Imm == DoubleToBits(0.5))
525 O << "0.5";
526 else if (Imm == DoubleToBits(-0.5))
527 O << "-0.5";
528 else if (Imm == DoubleToBits(2.0))
529 O << "2.0";
530 else if (Imm == DoubleToBits(-2.0))
531 O << "-2.0";
532 else if (Imm == DoubleToBits(4.0))
533 O << "4.0";
534 else if (Imm == DoubleToBits(-4.0))
535 O << "-4.0";
536 else if (Imm == 0x3fc45f306dc9c882 &&
537 STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
538 O << "0.15915494309189532";
539 else {
540 assert(isUInt<32>(Imm) || isInt<32>(Imm));
541
542 // In rare situations, we will have a 32-bit literal in a 64-bit
543 // operand. This is technically allowed for the encoding of s_mov_b64.
544 O << formatHex(static_cast<uint64_t>(Imm));
545 }
546}
547
548void AMDGPUInstPrinter::printBLGP(const MCInst *MI, unsigned OpNo,
549 const MCSubtargetInfo &STI,
550 raw_ostream &O) {
551 unsigned Imm = MI->getOperand(OpNo).getImm();
552 if (!Imm)
553 return;
554
555 if (AMDGPU::isGFX940(STI)) {
556 switch (MI->getOpcode()) {
557 case AMDGPU::V_MFMA_F64_16X16X4F64_gfx940_acd:
558 case AMDGPU::V_MFMA_F64_16X16X4F64_gfx940_vcd:
559 case AMDGPU::V_MFMA_F64_4X4X4F64_gfx940_acd:
560 case AMDGPU::V_MFMA_F64_4X4X4F64_gfx940_vcd:
561 O << " neg:[" << (Imm & 1) << ',' << ((Imm >> 1) & 1) << ','
562 << ((Imm >> 2) & 1) << ']';
563 return;
564 }
565 }
566
567 O << " blgp:" << Imm;
568}
569
570void AMDGPUInstPrinter::printCBSZ(const MCInst *MI, unsigned OpNo,
571 const MCSubtargetInfo &STI,
572 raw_ostream &O) {
573 unsigned Imm = MI->getOperand(OpNo).getImm();
574 if (!Imm)
575 return;
576
577 O << " cbsz:" << Imm;
578}
579
580void AMDGPUInstPrinter::printABID(const MCInst *MI, unsigned OpNo,
581 const MCSubtargetInfo &STI,
582 raw_ostream &O) {
583 unsigned Imm = MI->getOperand(OpNo).getImm();
584 if (!Imm)
585 return;
586
587 O << " abid:" << Imm;
588}
589
590void AMDGPUInstPrinter::printDefaultVccOperand(bool FirstOperand,
591 const MCSubtargetInfo &STI,
592 raw_ostream &O) {
593 if (!FirstOperand)
594 O << ", ";
595 printRegOperand(STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64]
596 ? AMDGPU::VCC
597 : AMDGPU::VCC_LO,
598 O, MRI);
599 if (FirstOperand)
600 O << ", ";
601}
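// Note (editorial comment, not in the original file): the implied carry
// operand is the full 64-bit vcc in wave64 mode and vcc_lo in wave32 mode;
// the FirstOperand flag only controls whether the separating ", " is printed
// after or before it.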
602
603void AMDGPUInstPrinter::printWaitVDST(const MCInst *MI, unsigned OpNo,
604 const MCSubtargetInfo &STI,
605 raw_ostream &O) {
606 uint8_t Imm = MI->getOperand(OpNo).getImm();
607 if (Imm != 0) {
608 O << " wait_vdst:";
609 printU4ImmDecOperand(MI, OpNo, O);
610 }
611}
612
613void AMDGPUInstPrinter::printWaitEXP(const MCInst *MI, unsigned OpNo,
614 const MCSubtargetInfo &STI,
615 raw_ostream &O) {
616 uint8_t Imm = MI->getOperand(OpNo).getImm();
617 if (Imm != 0) {
618 O << " wait_exp:";
619 printU4ImmDecOperand(MI, OpNo, O);
620 }
621}
622
623bool AMDGPUInstPrinter::needsImpliedVcc(const MCInstrDesc &Desc,
624 unsigned OpNo) const {
625 return OpNo == 0 && (Desc.TSFlags & SIInstrFlags::DPP) &&
626 (Desc.TSFlags & SIInstrFlags::VOPC) &&
627 (Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC) ||
628 Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC_LO));
629}
630
631// Print default vcc/vcc_lo operand of VOPC.
632void AMDGPUInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
633 const MCSubtargetInfo &STI,
634 raw_ostream &O) {
635 unsigned Opc = MI->getOpcode();
636 const MCInstrDesc &Desc = MII.get(Opc);
637 int ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
638 // 0, 1 and 2 are the first printed operands in different cases
639 // If there are printed modifiers, printOperandAndFPInputMods or
640 // printOperandAndIntInputMods will be called instead
641 if ((OpNo == 0 ||
642 (OpNo == 1 && (Desc.TSFlags & SIInstrFlags::DPP) && ModIdx != -1)) &&
643 (Desc.TSFlags & SIInstrFlags::VOPC) &&
644 (Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC) ||
645 Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC_LO)))
646 printDefaultVccOperand(true, STI, O);
647
648 printRegularOperand(MI, OpNo, STI, O);
649}
650
651// Print operands after vcc or modifier handling.
652void AMDGPUInstPrinter::printRegularOperand(const MCInst *MI, unsigned OpNo,
653 const MCSubtargetInfo &STI,
654 raw_ostream &O) {
655 const MCInstrDesc &Desc = MII.get(MI->getOpcode());
656
657 if (OpNo >= MI->getNumOperands()) {
658 O << "/*Missing OP" << OpNo << "*/";
659 return;
660 }
661
662 const MCOperand &Op = MI->getOperand(OpNo);
663 if (Op.isReg()) {
664 printRegOperand(Op.getReg(), O, MRI);
665
666 // Check if operand register class contains register used.
667 // Intention: print disassembler message when invalid code is decoded,
668 // for example sgpr register used in VReg or VISrc(VReg or imm) operand.
669 int RCID = Desc.operands()[OpNo].RegClass;
670 if (RCID != -1) {
671 const MCRegisterClass RC = MRI.getRegClass(RCID);
672 auto Reg = mc2PseudoReg(Op.getReg());
673 if (!RC.contains(Reg) && !isInlineValue(Reg)) {
674 O << "/*Invalid register, operand has \'" << MRI.getRegClassName(&RC)
675 << "\' register class*/";
676 }
677 }
678 } else if (Op.isImm()) {
679 const uint8_t OpTy = Desc.operands()[OpNo].OperandType;
680 switch (OpTy) {
681 case AMDGPU::OPERAND_REG_IMM_INT32:
682 case AMDGPU::OPERAND_REG_IMM_FP32:
683 case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
684 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
685 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
686 case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
687 case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
688 case AMDGPU::OPERAND_REG_IMM_V2INT32:
689 case AMDGPU::OPERAND_REG_IMM_V2FP32:
690 case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
691 case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
692 case MCOI::OPERAND_IMMEDIATE:
693 printImmediate32(Op.getImm(), STI, O);
694 break;
695 case AMDGPU::OPERAND_REG_IMM_INT64:
696 case AMDGPU::OPERAND_REG_IMM_FP64:
697 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
698 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
699 case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
700 printImmediate64(Op.getImm(), STI, O);
701 break;
702 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
703 case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
704 case AMDGPU::OPERAND_REG_IMM_INT16:
705 printImmediateInt16(Op.getImm(), STI, O);
706 break;
707 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
708 case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
709 case AMDGPU::OPERAND_REG_IMM_FP16:
710 case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
711 printImmediate16(Op.getImm(), STI, O);
712 break;
713 case AMDGPU::OPERAND_REG_IMM_V2INT16:
714 case AMDGPU::OPERAND_REG_IMM_V2FP16:
715 if (!isUInt<16>(Op.getImm()) &&
716 STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal]) {
717 printImmediate32(Op.getImm(), STI, O);
718 break;
719 }
720
721 // Deal with 16-bit FP inline immediates not working.
722 if (OpTy == AMDGPU::OPERAND_REG_IMM_V2FP16) {
723 printImmediate16(static_cast<uint16_t>(Op.getImm()), STI, O);
724 break;
725 }
726 [[fallthrough]];
727 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
728 case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
729 printImmediateInt16(static_cast<uint16_t>(Op.getImm()), STI, O);
730 break;
731 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
732 case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
733 printImmediateV216(Op.getImm(), STI, O);
734 break;
735 case MCOI::OPERAND_UNKNOWN:
736 case MCOI::OPERAND_PCREL:
737 O << formatDec(Op.getImm());
738 break;
739 case MCOI::OPERAND_REGISTER:
740 // Disassembler does not fail when operand should not allow immediate
741 // operands but decodes them into 32bit immediate operand.
742 printImmediate32(Op.getImm(), STI, O);
743 O << "/*Invalid immediate*/";
744 break;
745 default:
746 // We hit this for the immediate instruction bits that don't yet have a
747 // custom printer.
748 llvm_unreachable("unexpected immediate operand type");
749 }
750 } else if (Op.isDFPImm()) {
751 double Value = bit_cast<double>(Op.getDFPImm());
752 // We special case 0.0 because otherwise it will be printed as an integer.
753 if (Value == 0.0)
754 O << "0.0";
755 else {
756 const MCInstrDesc &Desc = MII.get(MI->getOpcode());
757 int RCID = Desc.operands()[OpNo].RegClass;
758 unsigned RCBits = AMDGPU::getRegBitWidth(MRI.getRegClass(RCID));
759 if (RCBits == 32)
760 printImmediate32(FloatToBits(Value), STI, O);
761 else if (RCBits == 64)
762 printImmediate64(DoubleToBits(Value), STI, O);
763 else
764 llvm_unreachable("Invalid register class size");
765 }
766 } else if (Op.isExpr()) {
767 const MCExpr *Exp = Op.getExpr();
768 Exp->print(O, &MAI);
769 } else {
770 O << "/*INV_OP*/";
771 }
772
773 // Print default vcc/vcc_lo operand of v_cndmask_b32_e32.
774 switch (MI->getOpcode()) {
775 default: break;
776
777 case AMDGPU::V_CNDMASK_B32_e32_gfx10:
778 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx10:
779 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx10:
780 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
781 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx10:
782 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx10:
783 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx10:
784 case AMDGPU::V_CNDMASK_B32_dpp8_gfx10:
785 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx10:
786 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx10:
787 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx10:
788 case AMDGPU::V_CNDMASK_B32_e32_gfx11:
789 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx11:
790 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx11:
791 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx11:
792 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx11:
793 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx11:
794 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx11:
795 case AMDGPU::V_CNDMASK_B32_dpp8_gfx11:
796 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx11:
797 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx11:
798 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx11:
799
800 case AMDGPU::V_CNDMASK_B32_e32_gfx6_gfx7:
801 case AMDGPU::V_CNDMASK_B32_e32_vi:
802 if ((int)OpNo == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
803 AMDGPU::OpName::src1))
804 printDefaultVccOperand(OpNo == 0, STI, O);
805 break;
806 }
807
808 if (Desc.TSFlags & SIInstrFlags::MTBUF) {
809 int SOffsetIdx =
810 AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::soffset);
811 assert(SOffsetIdx != -1);
812 if ((int)OpNo == SOffsetIdx)
813 printSymbolicFormat(MI, STI, O);
814 }
815}
816
817void AMDGPUInstPrinter::printOperandAndFPInputMods(const MCInst *MI,
818 unsigned OpNo,
819 const MCSubtargetInfo &STI,
820 raw_ostream &O) {
821 const MCInstrDesc &Desc = MII.get(MI->getOpcode());
822 if (needsImpliedVcc(Desc, OpNo))
823 printDefaultVccOperand(true, STI, O);
824
825 unsigned InputModifiers = MI->getOperand(OpNo).getImm();
826
827 // Use 'neg(...)' instead of '-' to avoid ambiguity.
828 // This is important for integer literals because
829 // -1 is not the same value as neg(1).
830 bool NegMnemo = false;
831
832 if (InputModifiers & SISrcMods::NEG) {
833 if (OpNo + 1 < MI->getNumOperands() &&
834 (InputModifiers & SISrcMods::ABS) == 0) {
835 const MCOperand &Op = MI->getOperand(OpNo + 1);
836 NegMnemo = Op.isImm() || Op.isDFPImm();
837 }
838 if (NegMnemo) {
839 O << "neg(";
840 } else {
841 O << '-';
842 }
843 }
844
845 if (InputModifiers & SISrcMods::ABS)
846 O << '|';
847 printRegularOperand(MI, OpNo + 1, STI, O);
848 if (InputModifiers & SISrcMods::ABS)
849 O << '|';
850
851 if (NegMnemo) {
852 O << ')';
853 }
854
855 // Print default vcc/vcc_lo operand of VOP2b.
856 switch (MI->getOpcode()) {
857 default:
858 break;
859
860 case AMDGPU::V_CNDMASK_B32_sdwa_gfx10:
861 case AMDGPU::V_CNDMASK_B32_dpp_gfx10:
862 case AMDGPU::V_CNDMASK_B32_dpp_gfx11:
863 if ((int)OpNo + 1 ==
864 AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::src1))
865 printDefaultVccOperand(OpNo == 0, STI, O);
866 break;
867 }
868}
869
870void AMDGPUInstPrinter::printOperandAndIntInputMods(const MCInst *MI,
871 unsigned OpNo,
872 const MCSubtargetInfo &STI,
873 raw_ostream &O) {
874 const MCInstrDesc &Desc = MII.get(MI->getOpcode());
875 if (needsImpliedVcc(Desc, OpNo))
876 printDefaultVccOperand(true, STI, O);
877
878 unsigned InputModifiers = MI->getOperand(OpNo).getImm();
879 if (InputModifiers & SISrcMods::SEXT)
880 O << "sext(";
881 printRegularOperand(MI, OpNo + 1, STI, O);
882 if (InputModifiers & SISrcMods::SEXT)
883 O << ')';
884
885 // Print default vcc/vcc_lo operand of VOP2b.
886 switch (MI->getOpcode()) {
887 default: break;
888
889 case AMDGPU::V_ADD_CO_CI_U32_sdwa_gfx10:
890 case AMDGPU::V_SUB_CO_CI_U32_sdwa_gfx10:
891 case AMDGPU::V_SUBREV_CO_CI_U32_sdwa_gfx10:
892 if ((int)OpNo + 1 == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
893 AMDGPU::OpName::src1))
894 printDefaultVccOperand(OpNo == 0, STI, O);
895 break;
896 }
897}
898
899void AMDGPUInstPrinter::printDPP8(const MCInst *MI, unsigned OpNo,
900 const MCSubtargetInfo &STI,
901 raw_ostream &O) {
902 if (!AMDGPU::isGFX10Plus(STI))
903 llvm_unreachable("dpp8 is not supported on ASICs earlier than GFX10");
904
905 unsigned Imm = MI->getOperand(OpNo).getImm();
906 O << "dpp8:[" << formatDec(Imm & 0x7);
907 for (size_t i = 1; i < 8; ++i) {
908 O << ',' << formatDec((Imm >> (3 * i)) & 0x7);
909 }
910 O << ']';
911}
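// Illustrative sketch, not part of the original file: a dpp8 immediate packs
// eight 3-bit lane selects, lane i occupying bits [3*i+2 : 3*i], which is
// exactly what the loop above prints.
static unsigned dpp8LaneSelExample(unsigned Imm, unsigned Lane /*0..7*/) {
  return (Imm >> (3 * Lane)) & 0x7;
}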
912
913void AMDGPUInstPrinter::printDPPCtrl(const MCInst *MI, unsigned OpNo,
914 const MCSubtargetInfo &STI,
915 raw_ostream &O) {
916 using namespace AMDGPU::DPP;
917
918 unsigned Imm = MI->getOperand(OpNo).getImm();
919 const MCInstrDesc &Desc = MII.get(MI->getOpcode());
920 int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
921 AMDGPU::OpName::src0);
922
923 if (Src0Idx >= 0 &&
924 Desc.operands()[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID &&
925 !AMDGPU::isLegal64BitDPPControl(Imm)) {
926 O << " /* 64 bit dpp only supports row_newbcast */";
927 return;
928 } else if (Imm <= DppCtrl::QUAD_PERM_LAST) {
929 O << "quad_perm:[";
930 O << formatDec(Imm & 0x3) << ',';
931 O << formatDec((Imm & 0xc) >> 2) << ',';
932 O << formatDec((Imm & 0x30) >> 4) << ',';
933 O << formatDec((Imm & 0xc0) >> 6) << ']';
934 } else if ((Imm >= DppCtrl::ROW_SHL_FIRST) &&
935 (Imm <= DppCtrl::ROW_SHL_LAST)) {
936 O << "row_shl:";
937 printU4ImmDecOperand(MI, OpNo, O);
938 } else if ((Imm >= DppCtrl::ROW_SHR_FIRST) &&
939 (Imm <= DppCtrl::ROW_SHR_LAST)) {
940 O << "row_shr:";
941 printU4ImmDecOperand(MI, OpNo, O);
942 } else if ((Imm >= DppCtrl::ROW_ROR_FIRST) &&
943 (Imm <= DppCtrl::ROW_ROR_LAST)) {
944 O << "row_ror:";
945 printU4ImmDecOperand(MI, OpNo, O);
946 } else if (Imm == DppCtrl::WAVE_SHL1) {
947 if (AMDGPU::isGFX10Plus(STI)) {
948 O << "/* wave_shl is not supported starting from GFX10 */";
949 return;
950 }
951 O << "wave_shl:1";
952 } else if (Imm == DppCtrl::WAVE_ROL1) {
953 if (AMDGPU::isGFX10Plus(STI)) {
954 O << "/* wave_rol is not supported starting from GFX10 */";
955 return;
956 }
957 O << "wave_rol:1";
958 } else if (Imm == DppCtrl::WAVE_SHR1) {
959 if (AMDGPU::isGFX10Plus(STI)) {
960 O << "/* wave_shr is not supported starting from GFX10 */";
961 return;
962 }
963 O << "wave_shr:1";
964 } else if (Imm == DppCtrl::WAVE_ROR1) {
965 if (AMDGPU::isGFX10Plus(STI)) {
966 O << "/* wave_ror is not supported starting from GFX10 */";
967 return;
968 }
969 O << "wave_ror:1";
970 } else if (Imm == DppCtrl::ROW_MIRROR) {
971 O << "row_mirror";
972 } else if (Imm == DppCtrl::ROW_HALF_MIRROR) {
973 O << "row_half_mirror";
974 } else if (Imm == DppCtrl::BCAST15) {
975 if (AMDGPU::isGFX10Plus(STI)) {
976 O << "/* row_bcast is not supported starting from GFX10 */";
977 return;
978 }
979 O << "row_bcast:15";
980 } else if (Imm == DppCtrl::BCAST31) {
981 if (AMDGPU::isGFX10Plus(STI)) {
982 O << "/* row_bcast is not supported starting from GFX10 */";
983 return;
984 }
985 O << "row_bcast:31";
986 } else if ((Imm >= DppCtrl::ROW_SHARE_FIRST) &&
987 (Imm <= DppCtrl::ROW_SHARE_LAST)) {
988 if (AMDGPU::isGFX90A(STI)) {
989 O << "row_newbcast:";
990 } else if (AMDGPU::isGFX10Plus(STI)) {
991 O << "row_share:";
992 } else {
993 O << " /* row_newbcast/row_share is not supported on ASICs earlier "
994 "than GFX90A/GFX10 */";
995 return;
996 }
997 printU4ImmDecOperand(MI, OpNo, O);
998 } else if ((Imm >= DppCtrl::ROW_XMASK_FIRST) &&
999 (Imm <= DppCtrl::ROW_XMASK_LAST)) {
1000 if (!AMDGPU::isGFX10Plus(STI)) {
1001 O << "/* row_xmask is not supported on ASICs earlier than GFX10 */";
1002 return;
1003 }
1004 O << "row_xmask:";
1005 printU4ImmDecOperand(MI, OpNo, O);
1006 } else {
1007 O << "/* Invalid dpp_ctrl value */";
1008 }
1009}
1010
1011void AMDGPUInstPrinter::printRowMask(const MCInst *MI, unsigned OpNo,
1012 const MCSubtargetInfo &STI,
1013 raw_ostream &O) {
1014 O << " row_mask:";
1015 printU4ImmOperand(MI, OpNo, STI, O);
1016}
1017
1018void AMDGPUInstPrinter::printBankMask(const MCInst *MI, unsigned OpNo,
1019 const MCSubtargetInfo &STI,
1020 raw_ostream &O) {
1021 O << " bank_mask:";
1022 printU4ImmOperand(MI, OpNo, STI, O);
1023}
1024
1025void AMDGPUInstPrinter::printDppBoundCtrl(const MCInst *MI, unsigned OpNo,
1026 const MCSubtargetInfo &STI,
1027 raw_ostream &O) {
1028 unsigned Imm = MI->getOperand(OpNo).getImm();
1029 if (Imm) {
1030 O << " bound_ctrl:1";
1031 }
1032}
1033
1034void AMDGPUInstPrinter::printFI(const MCInst *MI, unsigned OpNo,
1035 const MCSubtargetInfo &STI,
1036 raw_ostream &O) {
1037 using namespace llvm::AMDGPU::DPP;
1038 unsigned Imm = MI->getOperand(OpNo).getImm();
1039 if (Imm == DPP_FI_1 || Imm == DPP8_FI_1) {
1040 O << " fi:1";
1041 }
1042}
1043
1044void AMDGPUInstPrinter::printSDWASel(const MCInst *MI, unsigned OpNo,
1045 raw_ostream &O) {
1046 using namespace llvm::AMDGPU::SDWA;
1047
1048 unsigned Imm = MI->getOperand(OpNo).getImm();
1049 switch (Imm) {
1050 case SdwaSel::BYTE_0: O << "BYTE_0"; break;
1051 case SdwaSel::BYTE_1: O << "BYTE_1"; break;
1052 case SdwaSel::BYTE_2: O << "BYTE_2"; break;
1053 case SdwaSel::BYTE_3: O << "BYTE_3"; break;
1054 case SdwaSel::WORD_0: O << "WORD_0"; break;
1055 case SdwaSel::WORD_1: O << "WORD_1"; break;
1056 case SdwaSel::DWORD: O << "DWORD"; break;
1057 default: llvm_unreachable("Invalid SDWA data select operand");
1058 }
1059}
1060
1061void AMDGPUInstPrinter::printSDWADstSel(const MCInst *MI, unsigned OpNo,
1062 const MCSubtargetInfo &STI,
1063 raw_ostream &O) {
1064 O << "dst_sel:";
1065 printSDWASel(MI, OpNo, O);
1066}
1067
1068void AMDGPUInstPrinter::printSDWASrc0Sel(const MCInst *MI, unsigned OpNo,
1069 const MCSubtargetInfo &STI,
1070 raw_ostream &O) {
1071 O << "src0_sel:";
1072 printSDWASel(MI, OpNo, O);
1073}
1074
1075void AMDGPUInstPrinter::printSDWASrc1Sel(const MCInst *MI, unsigned OpNo,
1076 const MCSubtargetInfo &STI,
1077 raw_ostream &O) {
1078 O << "src1_sel:";
1079 printSDWASel(MI, OpNo, O);
1080}
1081
1082void AMDGPUInstPrinter::printSDWADstUnused(const MCInst *MI, unsigned OpNo,
1083 const MCSubtargetInfo &STI,
1084 raw_ostream &O) {
1085 using namespace llvm::AMDGPU::SDWA;
1086
1087 O << "dst_unused:";
1088 unsigned Imm = MI->getOperand(OpNo).getImm();
1089 switch (Imm) {
1090 case DstUnused::UNUSED_PAD: O << "UNUSED_PAD"; break;
1091 case DstUnused::UNUSED_SEXT: O << "UNUSED_SEXT"; break;
1092 case DstUnused::UNUSED_PRESERVE: O << "UNUSED_PRESERVE"; break;
1093 default: llvm_unreachable("Invalid SDWA dest_unused operand");
1094 }
1095}
1096
1097void AMDGPUInstPrinter::printExpSrcN(const MCInst *MI, unsigned OpNo,
1098 const MCSubtargetInfo &STI, raw_ostream &O,
1099 unsigned N) {
1100 unsigned Opc = MI->getOpcode();
1101 int EnIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::en);
1102 unsigned En = MI->getOperand(EnIdx).getImm();
1103
1104 int ComprIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::compr);
1105
1106 // If compr is set, print as src0, src0, src1, src1
1107 if (MI->getOperand(ComprIdx).getImm())
1108 OpNo = OpNo - N + N / 2;
1109
1110 if (En & (1 << N))
1111 printRegOperand(MI->getOperand(OpNo).getReg(), O, MRI);
1112 else
1113 O << "off";
1114}
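// Note (editorial comment, not in the original file): with compr set, the
// export uses packed 16-bit data, so the four logical sources come from two
// registers; remapping OpNo by N/2 makes the sources print as
// "src0, src0, src1, src1", and lanes disabled in the en mask print as "off".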
1115
1116void AMDGPUInstPrinter::printExpSrc0(const MCInst *MI, unsigned OpNo,
1117 const MCSubtargetInfo &STI,
1118 raw_ostream &O) {
1119 printExpSrcN(MI, OpNo, STI, O, 0);
1120}
1121
1122void AMDGPUInstPrinter::printExpSrc1(const MCInst *MI, unsigned OpNo,
1123 const MCSubtargetInfo &STI,
1124 raw_ostream &O) {
1125 printExpSrcN(MI, OpNo, STI, O, 1);
1126}
1127
1128void AMDGPUInstPrinter::printExpSrc2(const MCInst *MI, unsigned OpNo,
1129 const MCSubtargetInfo &STI,
1130 raw_ostream &O) {
1131 printExpSrcN(MI, OpNo, STI, O, 2);
1132}
1133
1134void AMDGPUInstPrinter::printExpSrc3(const MCInst *MI, unsigned OpNo,
1135 const MCSubtargetInfo &STI,
1136 raw_ostream &O) {
1137 printExpSrcN(MI, OpNo, STI, O, 3);
1138}
1139
1140void AMDGPUInstPrinter::printExpTgt(const MCInst *MI, unsigned OpNo,
1141 const MCSubtargetInfo &STI,
1142 raw_ostream &O) {
1143 using namespace llvm::AMDGPU::Exp;
1144
1145 // This is really a 6 bit field.
1146 unsigned Id = MI->getOperand(OpNo).getImm() & ((1 << 6) - 1);
1147
1148 int Index;
1149 StringRef TgtName;
1150 if (getTgtName(Id, TgtName, Index) && isSupportedTgtId(Id, STI)) {
1151 O << ' ' << TgtName;
1152 if (Index >= 0)
1153 O << Index;
1154 } else {
1155 O << " invalid_target_" << Id;
1156 }
1157}
1158
1159static bool allOpsDefaultValue(const int* Ops, int NumOps, int Mod,
1160 bool IsPacked, bool HasDstSel) {
1161 int DefaultValue = IsPacked && (Mod == SISrcMods::OP_SEL_1);
1162
1163 for (int I = 0; I < NumOps; ++I) {
1164 if (!!(Ops[I] & Mod) != DefaultValue)
1165 return false;
1166 }
1167
1168 if (HasDstSel && (Ops[0] & SISrcMods::DST_OP_SEL) != 0)
1169 return false;
1170
1171 return true;
1172}
1173
1174void AMDGPUInstPrinter::printPackedModifier(const MCInst *MI,
1175 StringRef Name,
1176 unsigned Mod,
1177 raw_ostream &O) {
1178 unsigned Opc = MI->getOpcode();
1179 int NumOps = 0;
1180 int Ops[3];
1181
1182 for (int OpName : { AMDGPU::OpName::src0_modifiers,
1183 AMDGPU::OpName::src1_modifiers,
1184 AMDGPU::OpName::src2_modifiers }) {
1185 int Idx = AMDGPU::getNamedOperandIdx(Opc, OpName);
1186 if (Idx == -1)
1187 break;
1188
1189 Ops[NumOps++] = MI->getOperand(Idx).getImm();
1190 }
1191
1192 const bool HasDstSel =
1193 NumOps > 0 &&
1194 Mod == SISrcMods::OP_SEL_0 &&
1195 MII.get(MI->getOpcode()).TSFlags & SIInstrFlags::VOP3_OPSEL;
1196
1197 const bool IsPacked =
1198 MII.get(MI->getOpcode()).TSFlags & SIInstrFlags::IsPacked;
1199
1200 if (allOpsDefaultValue(Ops, NumOps, Mod, IsPacked, HasDstSel))
1201 return;
1202
1203 O << Name;
1204 for (int I = 0; I < NumOps; ++I) {
1205 if (I != 0)
1206 O << ',';
1207
1208 O << !!(Ops[I] & Mod);
1209 }
1210
1211 if (HasDstSel) {
1212 O << ',' << !!(Ops[0] & SISrcMods::DST_OP_SEL);
1213 }
1214
1215 O << ']';
1216}
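// Note (editorial comment, not in the original file): the modifier is omitted
// entirely when every source carries the default bit (and the dst bit is
// clear), so typical output is either nothing or something like
// " op_sel:[1,0,0]"; the extra trailing entry appears only for VOP3_OPSEL
// instructions, where it reports the destination op_sel bit.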
1217
1218void AMDGPUInstPrinter::printOpSel(const MCInst *MI, unsigned,
1219 const MCSubtargetInfo &STI,
1220 raw_ostream &O) {
1221 unsigned Opc = MI->getOpcode();
1222 if (isPermlane16(Opc)) {
1223 auto FIN = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
1224 auto BCN = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
1225 unsigned FI = !!(MI->getOperand(FIN).getImm() & SISrcMods::OP_SEL_0);
1226 unsigned BC = !!(MI->getOperand(BCN).getImm() & SISrcMods::OP_SEL_0);
1227 if (FI || BC)
1228 O << " op_sel:[" << FI << ',' << BC << ']';
1229 return;
1230 }
1231
1232 printPackedModifier(MI, " op_sel:[", SISrcMods::OP_SEL_0, O);
1233}
1234
1235void AMDGPUInstPrinter::printOpSelHi(const MCInst *MI, unsigned OpNo,
1236 const MCSubtargetInfo &STI,
1237 raw_ostream &O) {
1238 printPackedModifier(MI, " op_sel_hi:[", SISrcMods::OP_SEL_1, O);
1239}
1240
1241void AMDGPUInstPrinter::printNegLo(const MCInst *MI, unsigned OpNo,
1242 const MCSubtargetInfo &STI,
1243 raw_ostream &O) {
1244 printPackedModifier(MI, " neg_lo:[", SISrcMods::NEG, O);
1245}
1246
1247void AMDGPUInstPrinter::printNegHi(const MCInst *MI, unsigned OpNo,
1248 const MCSubtargetInfo &STI,
1249 raw_ostream &O) {
1250 printPackedModifier(MI, " neg_hi:[", SISrcMods::NEG_HI, O);
1251}
1252
1253void AMDGPUInstPrinter::printInterpSlot(const MCInst *MI, unsigned OpNum,
1254 const MCSubtargetInfo &STI,
1255 raw_ostream &O) {
1256 unsigned Imm = MI->getOperand(OpNum).getImm();
1257 switch (Imm) {
1258 case 0:
1259 O << "p10";
1260 break;
1261 case 1:
1262 O << "p20";
1263 break;
1264 case 2:
1265 O << "p0";
1266 break;
1267 default:
1268 O << "invalid_param_" << Imm;
1269 }
1270}
1271
1272void AMDGPUInstPrinter::printInterpAttr(const MCInst *MI, unsigned OpNum,
1273 const MCSubtargetInfo &STI,
1274 raw_ostream &O) {
1275 unsigned Attr = MI->getOperand(OpNum).getImm();
1276 O << "attr" << Attr;
1277}
1278
1279void AMDGPUInstPrinter::printInterpAttrChan(const MCInst *MI, unsigned OpNum,
1280 const MCSubtargetInfo &STI,
1281 raw_ostream &O) {
1282 unsigned Chan = MI->getOperand(OpNum).getImm();
1283 O << '.' << "xyzw"[Chan & 0x3];
1284}
1285
1286void AMDGPUInstPrinter::printVGPRIndexMode(const MCInst *MI, unsigned OpNo,
1287 const MCSubtargetInfo &STI,
1288 raw_ostream &O) {
1289 using namespace llvm::AMDGPU::VGPRIndexMode;
1290 unsigned Val = MI->getOperand(OpNo).getImm();
1291
1292 if ((Val & ~ENABLE_MASK) != 0) {
1293 O << formatHex(static_cast<uint64_t>(Val));
1294 } else {
1295 O << "gpr_idx(";
1296 bool NeedComma = false;
1297 for (unsigned ModeId = ID_MIN; ModeId <= ID_MAX; ++ModeId) {
1298 if (Val & (1 << ModeId)) {
1299 if (NeedComma)
1300 O << ',';
1301 O << IdSymbolic[ModeId];
1302 NeedComma = true;
1303 }
1304 }
1305 O << ')';
1306 }
1307}
1308
1309void AMDGPUInstPrinter::printMemOperand(const MCInst *MI, unsigned OpNo,
1310 const MCSubtargetInfo &STI,
1311 raw_ostream &O) {
1312 printRegularOperand(MI, OpNo, STI, O);
1313 O << ", ";
1314 printRegularOperand(MI, OpNo + 1, STI, O);
1315}
1316
1317void AMDGPUInstPrinter::printIfSet(const MCInst *MI, unsigned OpNo,
1318 raw_ostream &O, StringRef Asm,
1319 StringRef Default) {
1320 const MCOperand &Op = MI->getOperand(OpNo);
1321 assert(Op.isImm());
1322 if (Op.getImm() == 1) {
1323 O << Asm;
1324 } else {
1325 O << Default;
1326 }
1327}
1328
1329void AMDGPUInstPrinter::printIfSet(const MCInst *MI, unsigned OpNo,
1330 raw_ostream &O, char Asm) {
1331 const MCOperand &Op = MI->getOperand(OpNo);
1332 assert(Op.isImm());
1333 if (Op.getImm() == 1)
1334 O << Asm;
1335}
1336
1337void AMDGPUInstPrinter::printHigh(const MCInst *MI, unsigned OpNo,
1338 const MCSubtargetInfo &STI,
1339 raw_ostream &O) {
1340 printNamedBit(MI, OpNo, O, "high");
1341}
1342
1343void AMDGPUInstPrinter::printClampSI(const MCInst *MI, unsigned OpNo,
1344 const MCSubtargetInfo &STI,
1345 raw_ostream &O) {
1346 printNamedBit(MI, OpNo, O, "clamp");
1347}
1348
1349void AMDGPUInstPrinter::printOModSI(const MCInst *MI, unsigned OpNo,
1350 const MCSubtargetInfo &STI,
1351 raw_ostream &O) {
1352 int Imm = MI->getOperand(OpNo).getImm();
1353 if (Imm == SIOutMods::MUL2)
1354 O << " mul:2";
1355 else if (Imm == SIOutMods::MUL4)
1356 O << " mul:4";
1357 else if (Imm == SIOutMods::DIV2)
1358 O << " div:2";
1359}
1360
1361void AMDGPUInstPrinter::printSendMsg(const MCInst *MI, unsigned OpNo,
1362 const MCSubtargetInfo &STI,
1363 raw_ostream &O) {
1364 using namespace llvm::AMDGPU::SendMsg;
1365
1366 const unsigned Imm16 = MI->getOperand(OpNo).getImm();
1367
1368 uint16_t MsgId;
1369 uint16_t OpId;
1370 uint16_t StreamId;
1371 decodeMsg(Imm16, MsgId, OpId, StreamId, STI);
1372
1373 StringRef MsgName = getMsgName(MsgId, STI);
1374
1375 if (!MsgName.empty() && isValidMsgOp(MsgId, OpId, STI) &&
1376 isValidMsgStream(MsgId, OpId, StreamId, STI)) {
1377 O << "sendmsg(" << MsgName;
1378 if (msgRequiresOp(MsgId, STI)) {
1379 O << ", " << getMsgOpName(MsgId, OpId, STI);
1380 if (msgSupportsStream(MsgId, OpId, STI)) {
1381 O << ", " << StreamId;
1382 }
1383 }
1384 O << ')';
1385 } else if (encodeMsg(MsgId, OpId, StreamId) == Imm16) {
1386 O << "sendmsg(" << MsgId << ", " << OpId << ", " << StreamId << ')';
1387 } else {
1388 O << Imm16; // Unknown imm16 code.
1389 }
1390}
1391
1392static void printSwizzleBitmask(const uint16_t AndMask,
1393 const uint16_t OrMask,
1394 const uint16_t XorMask,
1395 raw_ostream &O) {
1396 using namespace llvm::AMDGPU::Swizzle;
1397
1398 uint16_t Probe0 = ((0 & AndMask) | OrMask) ^ XorMask;
1399 uint16_t Probe1 = ((BITMASK_MASK & AndMask) | OrMask) ^ XorMask;
1400
1401 O << "\"";
1402
1403 for (unsigned Mask = 1 << (BITMASK_WIDTH - 1); Mask > 0; Mask >>= 1) {
1404 uint16_t p0 = Probe0 & Mask;
1405 uint16_t p1 = Probe1 & Mask;
1406
1407 if (p0 == p1) {
1408 if (p0 == 0) {
1409 O << "0";
1410 } else {
1411 O << "1";
1412 }
1413 } else {
1414 if (p0 == 0) {
1415 O << "p";
1416 } else {
1417 O << "i";
1418 }
1419 }
1420 }
1421
1422 O << "\"";
1423}
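// Note (editorial comment, not in the original file): the two probes feed the
// and/or/xor network with all-zero and all-one lane-id bits. Where both
// probes agree, that bit of the result is forced to a constant and prints as
// '0' or '1'; where they differ, the bit tracks the original lane id, either
// preserved ('p') or inverted ('i').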
1424
1425void AMDGPUInstPrinter::printSwizzle(const MCInst *MI, unsigned OpNo,
1426 const MCSubtargetInfo &STI,
1427 raw_ostream &O) {
1428 using namespace llvm::AMDGPU::Swizzle;
1429
1430 uint16_t Imm = MI->getOperand(OpNo).getImm();
1431 if (Imm == 0) {
1432 return;
1433 }
1434
1435 O << " offset:";
1436
1437 if ((Imm & QUAD_PERM_ENC_MASK) == QUAD_PERM_ENC) {
1438
1439 O << "swizzle(" << IdSymbolic[ID_QUAD_PERM];
1440 for (unsigned I = 0; I < LANE_NUM; ++I) {
1441 O << ",";
1442 O << formatDec(Imm & LANE_MASK);
1443 Imm >>= LANE_SHIFT;
1444 }
1445 O << ")";
1446
1447 } else if ((Imm & BITMASK_PERM_ENC_MASK) == BITMASK_PERM_ENC) {
1448
1449 uint16_t AndMask = (Imm >> BITMASK_AND_SHIFT) & BITMASK_MASK;
1450 uint16_t OrMask = (Imm >> BITMASK_OR_SHIFT) & BITMASK_MASK;
1451 uint16_t XorMask = (Imm >> BITMASK_XOR_SHIFT) & BITMASK_MASK;
1452
1453 if (AndMask == BITMASK_MAX && OrMask == 0 && llvm::popcount(XorMask) == 1) {
1454
1455 O << "swizzle(" << IdSymbolic[ID_SWAP];
1456 O << ",";
1457 O << formatDec(XorMask);
1458 O << ")";
1459
1460 } else if (AndMask == BITMASK_MAX && OrMask == 0 && XorMask > 0 &&
1461 isPowerOf2_64(XorMask + 1)) {
1462
1463 O << "swizzle(" << IdSymbolic[ID_REVERSE];
1464 O << ",";
1465 O << formatDec(XorMask + 1);
1466 O << ")";
1467
1468 } else {
1469
1470 uint16_t GroupSize = BITMASK_MAX - AndMask + 1;
1471 if (GroupSize > 1 &&
1472 isPowerOf2_64(GroupSize) &&
1473 OrMask < GroupSize &&
1474 XorMask == 0) {
1475
1476 O << "swizzle(" << IdSymbolic[ID_BROADCAST];
1477 O << ",";
1478 O << formatDec(GroupSize);
1479 O << ",";
1480 O << formatDec(OrMask);
1481 O << ")";
1482
1483 } else {
1484 O << "swizzle(" << IdSymbolic[ID_BITMASK_PERM];
1485 O << ",";
1486 printSwizzleBitmask(AndMask, OrMask, XorMask, O);
1487 O << ")";
1488 }
1489 }
1490 } else {
1491 printU16ImmDecOperand(MI, OpNo, O);
1492 }
1493}
1494
1495void AMDGPUInstPrinter::printWaitFlag(const MCInst *MI, unsigned OpNo,
1496 const MCSubtargetInfo &STI,
1497 raw_ostream &O) {
1498 AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(STI.getCPU());
1499
1500 unsigned SImm16 = MI->getOperand(OpNo).getImm();
1501 unsigned Vmcnt, Expcnt, Lgkmcnt;
1502 decodeWaitcnt(ISA, SImm16, Vmcnt, Expcnt, Lgkmcnt);
1503
1504 bool IsDefaultVmcnt = Vmcnt == getVmcntBitMask(ISA);
1505 bool IsDefaultExpcnt = Expcnt == getExpcntBitMask(ISA);
1506 bool IsDefaultLgkmcnt = Lgkmcnt == getLgkmcntBitMask(ISA);
1507 bool PrintAll = IsDefaultVmcnt && IsDefaultExpcnt && IsDefaultLgkmcnt;
1508
1509 bool NeedSpace = false;
1510
1511 if (!IsDefaultVmcnt || PrintAll) {
1512 O << "vmcnt(" << Vmcnt << ')';
1513 NeedSpace = true;
1514 }
1515
1516 if (!IsDefaultExpcnt || PrintAll) {
1517 if (NeedSpace)
1518 O << ' ';
1519 O << "expcnt(" << Expcnt << ')';
1520 NeedSpace = true;
1521 }
1522
1523 if (!IsDefaultLgkmcnt || PrintAll) {
1524 if (NeedSpace)
1525 O << ' ';
1526 O << "lgkmcnt(" << Lgkmcnt << ')';
1527 }
1528}
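// Illustrative sketch, not part of the original file: decodeWaitcnt pulls the
// vmcnt/expcnt/lgkmcnt counters out of the s_waitcnt immediate; the field
// positions and widths vary with the ISA version, but each extraction is just
// the usual shift-and-mask step shown here.
static unsigned waitcntFieldExample(unsigned Encoded, unsigned Shift,
                                    unsigned Width /* assumed < 32 */) {
  return (Encoded >> Shift) & ((1u << Width) - 1u);
}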
1529
1530void AMDGPUInstPrinter::printDepCtr(const MCInst *MI, unsigned OpNo,
1531 const MCSubtargetInfo &STI,
1532 raw_ostream &O) {
1533 using namespace llvm::AMDGPU::DepCtr;
1534
1535 uint64_t Imm16 = MI->getOperand(OpNo).getImm() & 0xffff;
1536
1537 bool HasNonDefaultVal = false;
1538 if (isSymbolicDepCtrEncoding(Imm16, HasNonDefaultVal, STI)) {
1539 int Id = 0;
1540 StringRef Name;
1541 unsigned Val;
1542 bool IsDefault;
1543 bool NeedSpace = false;
1544 while (decodeDepCtr(Imm16, Id, Name, Val, IsDefault, STI)) {
1545 if (!IsDefault || !HasNonDefaultVal) {
1546 if (NeedSpace)
1547 O << ' ';
1548 O << Name << '(' << Val << ')';
1549 NeedSpace = true;
1550 }
1551 }
1552 } else {
1553 O << formatHex(Imm16);
1554 }
1555}
1556
1557void AMDGPUInstPrinter::printDelayFlag(const MCInst *MI, unsigned OpNo,
1558 const MCSubtargetInfo &STI,
1559 raw_ostream &O) {
1560 const char *BadInstId = "/* invalid instid value */";
1561 static const std::array<const char *, 12> InstIds = {
1562 "NO_DEP", "VALU_DEP_1", "VALU_DEP_2",
1563 "VALU_DEP_3", "VALU_DEP_4", "TRANS32_DEP_1",
1564 "TRANS32_DEP_2", "TRANS32_DEP_3", "FMA_ACCUM_CYCLE_1",
1565 "SALU_CYCLE_1", "SALU_CYCLE_2", "SALU_CYCLE_3"};
1566
1567 const char *BadInstSkip = "/* invalid instskip value */";
1568 static const std::array<const char *, 6> InstSkips = {
1569 "SAME", "NEXT", "SKIP_1", "SKIP_2", "SKIP_3", "SKIP_4"};
1570
1571 unsigned SImm16 = MI->getOperand(OpNo).getImm();
1572 const char *Prefix = "";
1573
1574 unsigned Value = SImm16 & 0xF;
1575 if (Value) {
1576 const char *Name = Value < InstIds.size() ? InstIds[Value] : BadInstId;
1577 O << Prefix << "instid0(" << Name << ')';
1578 Prefix = " | ";
1579 }
1580
1581 Value = (SImm16 >> 4) & 7;
1582 if (Value) {
1583 const char *Name =
1584 Value < InstSkips.size() ? InstSkips[Value] : BadInstSkip;
1585 O << Prefix << "instskip(" << Name << ')';
1586 Prefix = " | ";
1587 }
1588
1589 Value = (SImm16 >> 7) & 0xF;
1590 if (Value) {
1591 const char *Name = Value < InstIds.size() ? InstIds[Value] : BadInstId;
1592 O << Prefix << "instid1(" << Name << ')';
1593 Prefix = " | ";
1594 }
1595
1596 if (!*Prefix)
1597 O << "0";
1598}
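// Note (editorial comment, not in the original file): as decoded above, the
// s_delay_alu immediate holds instid0 in bits [3:0], instskip in bits [6:4]
// and instid1 in bits [10:7]; fields left at zero are skipped, and a fully
// zero operand prints as "0".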
1599
1600void AMDGPUInstPrinter::printHwreg(const MCInst *MI, unsigned OpNo,
1601 const MCSubtargetInfo &STI, raw_ostream &O) {
1602 unsigned Id;
1603 unsigned Offset;
1604 unsigned Width;
1605
1606 using namespace llvm::AMDGPU::Hwreg;
1607 unsigned Val = MI->getOperand(OpNo).getImm();
1608 decodeHwreg(Val, Id, Offset, Width);
1609 StringRef HwRegName = getHwreg(Id, STI);
1610
1611 O << "hwreg(";
1612 if (!HwRegName.empty()) {
1613 O << HwRegName;
1614 } else {
1615 O << Id;
1616 }
1617 if (Width != WIDTH_DEFAULT_ || Offset != OFFSET_DEFAULT_) {
1618 O << ", " << Offset << ", " << Width;
1619 }
1620 O << ')';
1621}
1622
1623void AMDGPUInstPrinter::printEndpgm(const MCInst *MI, unsigned OpNo,
1624 const MCSubtargetInfo &STI,
1625 raw_ostream &O) {
1626 uint16_t Imm = MI->getOperand(OpNo).getImm();
1627 if (Imm == 0) {
1628 return;
1629 }
1630
1631 O << ' ' << formatDec(Imm);
1632}
1633
1634#include "AMDGPUGenAsmWriter.inc"