LLVM 18.0.0git
AMDGPUInstPrinter.cpp
Go to the documentation of this file.
1//===-- AMDGPUInstPrinter.cpp - AMDGPU MC Inst -> ASM ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7// \file
8//===----------------------------------------------------------------------===//
9
10#include "AMDGPUInstPrinter.h"
12#include "SIDefines.h"
15#include "llvm/MC/MCExpr.h"
16#include "llvm/MC/MCInst.h"
17#include "llvm/MC/MCInstrDesc.h"
18#include "llvm/MC/MCInstrInfo.h"
23
24using namespace llvm;
25using namespace llvm::AMDGPU;
26
28 // FIXME: The current implementation of
29 // AsmParser::parseRegisterOrRegisterNumber in MC implies we either emit this
30 // as an integer or we provide a name which represents a physical register.
31 // For CFI instructions we really want to emit a name for the DWARF register
32 // instead, because there may be multiple DWARF registers corresponding to a
33 // single physical register. One case where this problem manifests is with
34 // wave32/wave64 where using the physical register name is ambiguous: if we
35 // write e.g. `.cfi_undefined v0` we lose information about the wavefront
36 // size which we need to encode the register in the final DWARF. Ideally we
37 // would extend MC to support parsing DWARF register names so we could do
38 // something like `.cfi_undefined dwarf_wave32_v0`. For now we just live with
39 // non-pretty DWARF register names in assembly text.
40 OS << Reg.id();
41}
42
44 StringRef Annot, const MCSubtargetInfo &STI,
45 raw_ostream &OS) {
46 OS.flush();
48 printAnnotation(OS, Annot);
49}
50
51void AMDGPUInstPrinter::printU4ImmOperand(const MCInst *MI, unsigned OpNo,
52 const MCSubtargetInfo &STI,
53 raw_ostream &O) {
54 O << formatHex(MI->getOperand(OpNo).getImm() & 0xf);
55}
56
57void AMDGPUInstPrinter::printU16ImmOperand(const MCInst *MI, unsigned OpNo,
58 const MCSubtargetInfo &STI,
59 raw_ostream &O) {
60 // It's possible to end up with a 32-bit literal used with a 16-bit operand
61 // with ignored high bits. Print as 32-bit anyway in that case.
62 int64_t Imm = MI->getOperand(OpNo).getImm();
63 if (isInt<16>(Imm) || isUInt<16>(Imm))
64 O << formatHex(static_cast<uint64_t>(Imm & 0xffff));
65 else
66 printU32ImmOperand(MI, OpNo, STI, O);
67}
68
69void AMDGPUInstPrinter::printU4ImmDecOperand(const MCInst *MI, unsigned OpNo,
70 raw_ostream &O) {
71 O << formatDec(MI->getOperand(OpNo).getImm() & 0xf);
72}
73
74void AMDGPUInstPrinter::printU8ImmDecOperand(const MCInst *MI, unsigned OpNo,
75 raw_ostream &O) {
76 O << formatDec(MI->getOperand(OpNo).getImm() & 0xff);
77}
78
79void AMDGPUInstPrinter::printU16ImmDecOperand(const MCInst *MI, unsigned OpNo,
80 raw_ostream &O) {
81 O << formatDec(MI->getOperand(OpNo).getImm() & 0xffff);
82}
83
84void AMDGPUInstPrinter::printU32ImmOperand(const MCInst *MI, unsigned OpNo,
85 const MCSubtargetInfo &STI,
86 raw_ostream &O) {
87 O << formatHex(MI->getOperand(OpNo).getImm() & 0xffffffff);
88}
89
90void AMDGPUInstPrinter::printNamedBit(const MCInst *MI, unsigned OpNo,
91 raw_ostream &O, StringRef BitName) {
92 if (MI->getOperand(OpNo).getImm()) {
93 O << ' ' << BitName;
94 }
95}
96
97void AMDGPUInstPrinter::printOffset(const MCInst *MI, unsigned OpNo,
98 const MCSubtargetInfo &STI,
99 raw_ostream &O) {
100 uint16_t Imm = MI->getOperand(OpNo).getImm();
101 if (Imm != 0) {
102 O << " offset:";
103 printU16ImmDecOperand(MI, OpNo, O);
104 }
105}
106
void AMDGPUInstPrinter::printFlatOffset(const MCInst *MI, unsigned OpNo,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  // Prints the flat-instruction offset modifier; omitted when zero.
  uint16_t Imm = MI->getOperand(OpNo).getImm();
  if (Imm != 0) {
    O << " offset:";

    const MCInstrDesc &Desc = MII.get(MI->getOpcode());
    // NOTE(review): the TSFlags constant tested here and the else-branch
    // print call are missing from this excerpt — presumably the flag
    // distinguishes flat-segment (unsigned offset) from global/scratch
    // (signed offset) forms; confirm against the full source.
    bool IsFlatSeg = !(Desc.TSFlags &

    if (IsFlatSeg) // Unsigned offset
      printU16ImmDecOperand(MI, OpNo, O);
    else // Signed offset
  }
}
124
125void AMDGPUInstPrinter::printOffset0(const MCInst *MI, unsigned OpNo,
126 const MCSubtargetInfo &STI,
127 raw_ostream &O) {
128 if (MI->getOperand(OpNo).getImm()) {
129 O << " offset0:";
130 printU8ImmDecOperand(MI, OpNo, O);
131 }
132}
133
134void AMDGPUInstPrinter::printOffset1(const MCInst *MI, unsigned OpNo,
135 const MCSubtargetInfo &STI,
136 raw_ostream &O) {
137 if (MI->getOperand(OpNo).getImm()) {
138 O << " offset1:";
139 printU8ImmDecOperand(MI, OpNo, O);
140 }
141}
142
void AMDGPUInstPrinter::printSMRDOffset8(const MCInst *MI, unsigned OpNo,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  // SMRD 8-bit offsets are printed as 32-bit hex immediates.
  printU32ImmOperand(MI, OpNo, STI, O);
}
148
149void AMDGPUInstPrinter::printSMEMOffset(const MCInst *MI, unsigned OpNo,
150 const MCSubtargetInfo &STI,
151 raw_ostream &O) {
152 O << formatHex(MI->getOperand(OpNo).getImm());
153}
154
void AMDGPUInstPrinter::printSMEMOffsetMod(const MCInst *MI, unsigned OpNo,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  // Same as printSMEMOffset, but preceded by the assembler modifier name.
  O << " offset:";
  printSMEMOffset(MI, OpNo, STI, O);
}
161
void AMDGPUInstPrinter::printSMRDLiteralOffset(const MCInst *MI, unsigned OpNo,
                                               const MCSubtargetInfo &STI,
                                               raw_ostream &O) {
  // SMRD literal offsets are printed as 32-bit hex immediates.
  printU32ImmOperand(MI, OpNo, STI, O);
}
167
168void AMDGPUInstPrinter::printCPol(const MCInst *MI, unsigned OpNo,
169 const MCSubtargetInfo &STI, raw_ostream &O) {
170 auto Imm = MI->getOperand(OpNo).getImm();
171 if (Imm & CPol::GLC)
172 O << ((AMDGPU::isGFX940(STI) &&
173 !(MII.get(MI->getOpcode()).TSFlags & SIInstrFlags::SMRD)) ? " sc0"
174 : " glc");
175 if (Imm & CPol::SLC)
176 O << (AMDGPU::isGFX940(STI) ? " nt" : " slc");
177 if ((Imm & CPol::DLC) && AMDGPU::isGFX10Plus(STI))
178 O << " dlc";
179 if ((Imm & CPol::SCC) && AMDGPU::isGFX90A(STI))
180 O << (AMDGPU::isGFX940(STI) ? " sc1" : " scc");
181 if (Imm & ~CPol::ALL)
182 O << " /* unexpected cache policy bit */";
183}
184
185void AMDGPUInstPrinter::printDMask(const MCInst *MI, unsigned OpNo,
186 const MCSubtargetInfo &STI, raw_ostream &O) {
187 if (MI->getOperand(OpNo).getImm()) {
188 O << " dmask:";
189 printU16ImmOperand(MI, OpNo, STI, O);
190 }
191}
192
void AMDGPUInstPrinter::printDim(const MCInst *MI, unsigned OpNo,
                                 const MCSubtargetInfo &STI, raw_ostream &O) {
  // Prints the image dimension as a SQ_RSRC_IMG_* suffix, falling back to the
  // raw encoded value when it is not recognized.
  unsigned Dim = MI->getOperand(OpNo).getImm();
  O << " dim:SQ_RSRC_IMG_";

  // NOTE(review): the declaration of DimInfo is missing from this excerpt —
  // presumably a table lookup of Dim; confirm against the full source.
  if (DimInfo)
    O << DimInfo->AsmSuffix;
  else
    O << Dim;
}
204
205void AMDGPUInstPrinter::printR128A16(const MCInst *MI, unsigned OpNo,
206 const MCSubtargetInfo &STI, raw_ostream &O) {
207 if (STI.hasFeature(AMDGPU::FeatureR128A16))
208 printNamedBit(MI, OpNo, O, "a16");
209 else
210 printNamedBit(MI, OpNo, O, "r128");
211}
212
void AMDGPUInstPrinter::printFORMAT(const MCInst *MI, unsigned OpNo,
                                    const MCSubtargetInfo &STI,
                                    raw_ostream &O) {
  // Intentionally empty: the symbolic format is emitted by
  // printSymbolicFormat, invoked from printRegularOperand once the MTBUF
  // soffset operand has been printed.
}
217
218void AMDGPUInstPrinter::printSymbolicFormat(const MCInst *MI,
219 const MCSubtargetInfo &STI,
220 raw_ostream &O) {
221 using namespace llvm::AMDGPU::MTBUFFormat;
222
223 int OpNo =
224 AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::format);
225 assert(OpNo != -1);
226
227 unsigned Val = MI->getOperand(OpNo).getImm();
228 if (AMDGPU::isGFX10Plus(STI)) {
229 if (Val == UFMT_DEFAULT)
230 return;
231 if (isValidUnifiedFormat(Val, STI)) {
232 O << " format:[" << getUnifiedFormatName(Val, STI) << ']';
233 } else {
234 O << " format:" << Val;
235 }
236 } else {
237 if (Val == DFMT_NFMT_DEFAULT)
238 return;
239 if (isValidDfmtNfmt(Val, STI)) {
240 unsigned Dfmt;
241 unsigned Nfmt;
242 decodeDfmtNfmt(Val, Dfmt, Nfmt);
243 O << " format:[";
244 if (Dfmt != DFMT_DEFAULT) {
245 O << getDfmtName(Dfmt);
246 if (Nfmt != NFMT_DEFAULT) {
247 O << ',';
248 }
249 }
250 if (Nfmt != NFMT_DEFAULT) {
251 O << getNfmtName(Nfmt, STI);
252 }
253 O << ']';
254 } else {
255 O << " format:" << Val;
256 }
257 }
258}
259
261 const MCRegisterInfo &MRI) {
262#if !defined(NDEBUG)
263 switch (RegNo) {
264 case AMDGPU::FP_REG:
265 case AMDGPU::SP_REG:
266 case AMDGPU::PRIVATE_RSRC_REG:
267 llvm_unreachable("pseudo-register should not ever be emitted");
268 case AMDGPU::SCC:
269 llvm_unreachable("pseudo scc should not ever be emitted");
270 default:
271 break;
272 }
273#endif
274
275 O << getRegisterName(RegNo);
276}
277
void AMDGPUInstPrinter::printVOPDst(const MCInst *MI, unsigned OpNo,
                                    const MCSubtargetInfo &STI, raw_ostream &O) {
  // Prints a VOP destination operand. When it is the first operand, the
  // mnemonic is first completed with the encoding suffix
  // (_e64_dpp/_e64/_dpp/_sdwa/_e32) implied by the instruction's TSFlags.
  auto Opcode = MI->getOpcode();
  auto Flags = MII.get(Opcode).TSFlags;
  if (OpNo == 0) {
    if (Flags & SIInstrFlags::VOP3 && Flags & SIInstrFlags::DPP)
      O << "_e64_dpp";
    else if (Flags & SIInstrFlags::VOP3) {
      // No suffix for opcodes that exist only in VOP3 form.
      if (!getVOP3IsSingle(Opcode))
        O << "_e64";
    } else if (Flags & SIInstrFlags::DPP)
      O << "_dpp";
    else if (Flags & SIInstrFlags::SDWA)
      O << "_sdwa";
    else if (((Flags & SIInstrFlags::VOP1) && !getVOP1IsSingle(Opcode)) ||
             ((Flags & SIInstrFlags::VOP2) && !getVOP2IsSingle(Opcode)))
      O << "_e32";
    O << " ";
  }

  printRegularOperand(MI, OpNo, STI, O);

  // Print default vcc/vcc_lo operand.
  // These carry-producing opcodes take vcc implicitly in the encoding but it
  // must still appear in the printed assembly.
  switch (Opcode) {
  default: break;

  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_sdwa_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_sdwa_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_sdwa_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx11:
    printDefaultVccOperand(false, STI, O);
    break;
  }
}
329
330void AMDGPUInstPrinter::printVINTRPDst(const MCInst *MI, unsigned OpNo,
331 const MCSubtargetInfo &STI, raw_ostream &O) {
332 if (AMDGPU::isSI(STI) || AMDGPU::isCI(STI))
333 O << " ";
334 else
335 O << "_e32 ";
336
337 printRegularOperand(MI, OpNo, STI, O);
338}
339
340void AMDGPUInstPrinter::printImmediateInt16(uint32_t Imm,
341 const MCSubtargetInfo &STI,
342 raw_ostream &O) {
343 int16_t SImm = static_cast<int16_t>(Imm);
344 if (isInlinableIntLiteral(SImm)) {
345 O << SImm;
346 } else {
347 uint64_t Imm16 = static_cast<uint16_t>(Imm);
348 O << formatHex(Imm16);
349 }
350}
351
352void AMDGPUInstPrinter::printImmediate16(uint32_t Imm,
353 const MCSubtargetInfo &STI,
354 raw_ostream &O) {
355 int16_t SImm = static_cast<int16_t>(Imm);
356 if (isInlinableIntLiteral(SImm)) {
357 O << SImm;
358 return;
359 }
360
361 if (Imm == 0x3C00)
362 O<< "1.0";
363 else if (Imm == 0xBC00)
364 O<< "-1.0";
365 else if (Imm == 0x3800)
366 O<< "0.5";
367 else if (Imm == 0xB800)
368 O<< "-0.5";
369 else if (Imm == 0x4000)
370 O<< "2.0";
371 else if (Imm == 0xC000)
372 O<< "-2.0";
373 else if (Imm == 0x4400)
374 O<< "4.0";
375 else if (Imm == 0xC400)
376 O<< "-4.0";
377 else if (Imm == 0x3118 &&
378 STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm)) {
379 O << "0.15915494";
380 } else {
381 uint64_t Imm16 = static_cast<uint16_t>(Imm);
382 O << formatHex(Imm16);
383 }
384}
385
386void AMDGPUInstPrinter::printImmediateV216(uint32_t Imm,
387 const MCSubtargetInfo &STI,
388 raw_ostream &O) {
389 uint16_t Lo16 = static_cast<uint16_t>(Imm);
390 printImmediate16(Lo16, STI, O);
391}
392
393void AMDGPUInstPrinter::printImmediate32(uint32_t Imm,
394 const MCSubtargetInfo &STI,
395 raw_ostream &O) {
396 int32_t SImm = static_cast<int32_t>(Imm);
397 if (SImm >= -16 && SImm <= 64) {
398 O << SImm;
399 return;
400 }
401
402 if (Imm == llvm::bit_cast<uint32_t>(0.0f))
403 O << "0.0";
404 else if (Imm == llvm::bit_cast<uint32_t>(1.0f))
405 O << "1.0";
406 else if (Imm == llvm::bit_cast<uint32_t>(-1.0f))
407 O << "-1.0";
408 else if (Imm == llvm::bit_cast<uint32_t>(0.5f))
409 O << "0.5";
410 else if (Imm == llvm::bit_cast<uint32_t>(-0.5f))
411 O << "-0.5";
412 else if (Imm == llvm::bit_cast<uint32_t>(2.0f))
413 O << "2.0";
414 else if (Imm == llvm::bit_cast<uint32_t>(-2.0f))
415 O << "-2.0";
416 else if (Imm == llvm::bit_cast<uint32_t>(4.0f))
417 O << "4.0";
418 else if (Imm == llvm::bit_cast<uint32_t>(-4.0f))
419 O << "-4.0";
420 else if (Imm == 0x3e22f983 &&
421 STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
422 O << "0.15915494";
423 else
424 O << formatHex(static_cast<uint64_t>(Imm));
425}
426
427void AMDGPUInstPrinter::printImmediate64(uint64_t Imm,
428 const MCSubtargetInfo &STI,
429 raw_ostream &O) {
430 int64_t SImm = static_cast<int64_t>(Imm);
431 if (SImm >= -16 && SImm <= 64) {
432 O << SImm;
433 return;
434 }
435
436 if (Imm == llvm::bit_cast<uint64_t>(0.0))
437 O << "0.0";
438 else if (Imm == llvm::bit_cast<uint64_t>(1.0))
439 O << "1.0";
440 else if (Imm == llvm::bit_cast<uint64_t>(-1.0))
441 O << "-1.0";
442 else if (Imm == llvm::bit_cast<uint64_t>(0.5))
443 O << "0.5";
444 else if (Imm == llvm::bit_cast<uint64_t>(-0.5))
445 O << "-0.5";
446 else if (Imm == llvm::bit_cast<uint64_t>(2.0))
447 O << "2.0";
448 else if (Imm == llvm::bit_cast<uint64_t>(-2.0))
449 O << "-2.0";
450 else if (Imm == llvm::bit_cast<uint64_t>(4.0))
451 O << "4.0";
452 else if (Imm == llvm::bit_cast<uint64_t>(-4.0))
453 O << "-4.0";
454 else if (Imm == 0x3fc45f306dc9c882 &&
455 STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
456 O << "0.15915494309189532";
457 else {
458 assert(isUInt<32>(Imm) || isInt<32>(Imm));
459
460 // In rare situations, we will have a 32-bit literal in a 64-bit
461 // operand. This is technically allowed for the encoding of s_mov_b64.
462 O << formatHex(static_cast<uint64_t>(Imm));
463 }
464}
465
466void AMDGPUInstPrinter::printBLGP(const MCInst *MI, unsigned OpNo,
467 const MCSubtargetInfo &STI,
468 raw_ostream &O) {
469 unsigned Imm = MI->getOperand(OpNo).getImm();
470 if (!Imm)
471 return;
472
473 if (AMDGPU::isGFX940(STI)) {
474 switch (MI->getOpcode()) {
475 case AMDGPU::V_MFMA_F64_16X16X4F64_gfx940_acd:
476 case AMDGPU::V_MFMA_F64_16X16X4F64_gfx940_vcd:
477 case AMDGPU::V_MFMA_F64_4X4X4F64_gfx940_acd:
478 case AMDGPU::V_MFMA_F64_4X4X4F64_gfx940_vcd:
479 O << " neg:[" << (Imm & 1) << ',' << ((Imm >> 1) & 1) << ','
480 << ((Imm >> 2) & 1) << ']';
481 return;
482 }
483 }
484
485 O << " blgp:" << Imm;
486}
487
488void AMDGPUInstPrinter::printCBSZ(const MCInst *MI, unsigned OpNo,
489 const MCSubtargetInfo &STI,
490 raw_ostream &O) {
491 unsigned Imm = MI->getOperand(OpNo).getImm();
492 if (!Imm)
493 return;
494
495 O << " cbsz:" << Imm;
496}
497
498void AMDGPUInstPrinter::printABID(const MCInst *MI, unsigned OpNo,
499 const MCSubtargetInfo &STI,
500 raw_ostream &O) {
501 unsigned Imm = MI->getOperand(OpNo).getImm();
502 if (!Imm)
503 return;
504
505 O << " abid:" << Imm;
506}
507
508void AMDGPUInstPrinter::printDefaultVccOperand(bool FirstOperand,
509 const MCSubtargetInfo &STI,
510 raw_ostream &O) {
511 if (!FirstOperand)
512 O << ", ";
513 printRegOperand(STI.hasFeature(AMDGPU::FeatureWavefrontSize64)
514 ? AMDGPU::VCC
515 : AMDGPU::VCC_LO,
516 O, MRI);
517 if (FirstOperand)
518 O << ", ";
519}
520
void AMDGPUInstPrinter::printWaitVDST(const MCInst *MI, unsigned OpNo,
                                      const MCSubtargetInfo &STI,
                                      raw_ostream &O) {
  // wait_vdst is a 4-bit count, printed in decimal.
  O << " wait_vdst:";
  printU4ImmDecOperand(MI, OpNo, O);
}
527
void AMDGPUInstPrinter::printWaitEXP(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  // wait_exp is a 4-bit count, printed in decimal.
  O << " wait_exp:";
  printU4ImmDecOperand(MI, OpNo, O);
}
534
535bool AMDGPUInstPrinter::needsImpliedVcc(const MCInstrDesc &Desc,
536 unsigned OpNo) const {
537 return OpNo == 0 && (Desc.TSFlags & SIInstrFlags::DPP) &&
538 (Desc.TSFlags & SIInstrFlags::VOPC) &&
539 (Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC) ||
540 Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC_LO));
541}
542
543// Print default vcc/vcc_lo operand of VOPC.
544void AMDGPUInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
545 const MCSubtargetInfo &STI,
546 raw_ostream &O) {
547 unsigned Opc = MI->getOpcode();
548 const MCInstrDesc &Desc = MII.get(Opc);
549 int ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
550 // 0, 1 and 2 are the first printed operands in different cases
551 // If there are printed modifiers, printOperandAndFPInputMods or
552 // printOperandAndIntInputMods will be called instead
553 if ((OpNo == 0 ||
554 (OpNo == 1 && (Desc.TSFlags & SIInstrFlags::DPP) && ModIdx != -1)) &&
555 (Desc.TSFlags & SIInstrFlags::VOPC) &&
556 (Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC) ||
557 Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC_LO)))
558 printDefaultVccOperand(true, STI, O);
559
560 printRegularOperand(MI, OpNo, STI, O);
561}
562
563// Print operands after vcc or modifier handling.
// Print operands after vcc or modifier handling.
void AMDGPUInstPrinter::printRegularOperand(const MCInst *MI, unsigned OpNo,
                                            const MCSubtargetInfo &STI,
                                            raw_ostream &O) {
  const MCInstrDesc &Desc = MII.get(MI->getOpcode());

  // Guard against malformed MCInsts with fewer operands than expected.
  if (OpNo >= MI->getNumOperands()) {
    O << "/*Missing OP" << OpNo << "*/";
    return;
  }

  const MCOperand &Op = MI->getOperand(OpNo);
  if (Op.isReg()) {
    printRegOperand(Op.getReg(), O, MRI);

    // Check if operand register class contains register used.
    // Intention: print disassembler message when invalid code is decoded,
    // for example sgpr register used in VReg or VISrc(VReg or imm) operand.
    int RCID = Desc.operands()[OpNo].RegClass;
    if (RCID != -1) {
      const MCRegisterClass RC = MRI.getRegClass(RCID);
      auto Reg = mc2PseudoReg(Op.getReg());
      if (!RC.contains(Reg) && !isInlineValue(Reg)) {
        O << "/*Invalid register, operand has \'" << MRI.getRegClassName(&RC)
          << "\' register class*/";
      }
    }
  } else if (Op.isImm()) {
    // Dispatch on the declared operand type to pick the right immediate
    // printer.
    // NOTE(review): the OPERAND_* case labels of this switch are missing
    // from this excerpt; each statement group below belongs to one or more
    // such labels — confirm against the full source.
    const uint8_t OpTy = Desc.operands()[OpNo].OperandType;
    switch (OpTy) {
      printImmediate32(Op.getImm(), STI, O);
      break;
      printImmediate64(Op.getImm(), STI, O);
      break;
      printImmediateInt16(Op.getImm(), STI, O);
      break;
      printImmediate16(Op.getImm(), STI, O);
      break;
      // Wide literals are printed as 32-bit values when the target supports
      // VOP3 literals.
      if (!isUInt<16>(Op.getImm()) &&
          STI.hasFeature(AMDGPU::FeatureVOP3Literal)) {
        printImmediate32(Op.getImm(), STI, O);
        break;
      }

      // Deal with 16-bit FP inline immediates not working.
      if (OpTy == AMDGPU::OPERAND_REG_IMM_V2FP16) {
        printImmediate16(static_cast<uint16_t>(Op.getImm()), STI, O);
        break;
      }
      [[fallthrough]];
      printImmediateInt16(static_cast<uint16_t>(Op.getImm()), STI, O);
      break;
      printImmediateV216(Op.getImm(), STI, O);
      break;
      O << formatDec(Op.getImm());
      break;
      // Disassembler does not fail when operand should not allow immediate
      // operands but decodes them into 32bit immediate operand.
      printImmediate32(Op.getImm(), STI, O);
      O << "/*Invalid immediate*/";
      break;
    default:
      // We hit this for the immediate instruction bits that don't yet have a
      // custom printer.
      llvm_unreachable("unexpected immediate operand type");
    }
  } else if (Op.isDFPImm()) {
    double Value = bit_cast<double>(Op.getDFPImm());
    // We special case 0.0 because otherwise it will be printed as an integer.
    if (Value == 0.0)
      O << "0.0";
    else {
      // Pick the 32- or 64-bit FP printer based on the register class width
      // of the operand slot.
      const MCInstrDesc &Desc = MII.get(MI->getOpcode());
      int RCID = Desc.operands()[OpNo].RegClass;
      unsigned RCBits = AMDGPU::getRegBitWidth(MRI.getRegClass(RCID));
      if (RCBits == 32)
        printImmediate32(llvm::bit_cast<uint32_t>((float)Value), STI, O);
      else if (RCBits == 64)
        printImmediate64(llvm::bit_cast<uint64_t>(Value), STI, O);
      else
        llvm_unreachable("Invalid register class size");
    }
  } else if (Op.isExpr()) {
    const MCExpr *Exp = Op.getExpr();
    Exp->print(O, &MAI);
  } else {
    O << "/*INV_OP*/";
  }

  // Print default vcc/vcc_lo operand of v_cndmask_b32_e32.
  switch (MI->getOpcode()) {
  default: break;

  case AMDGPU::V_CNDMASK_B32_e32_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_CNDMASK_B32_dpp8_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_CNDMASK_B32_e32_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_CNDMASK_B32_dpp8_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx11:

  case AMDGPU::V_CNDMASK_B32_e32_gfx6_gfx7:
  case AMDGPU::V_CNDMASK_B32_e32_vi:
    // The implicit vcc appears right after src1 in the printed form.
    if ((int)OpNo == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                                AMDGPU::OpName::src1))
      printDefaultVccOperand(OpNo == 0, STI, O);
    break;
  }

  // MTBUF instructions print their symbolic format once the soffset operand
  // has been emitted.
  if (Desc.TSFlags & SIInstrFlags::MTBUF) {
    int SOffsetIdx =
        AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::soffset);
    assert(SOffsetIdx != -1);
    if ((int)OpNo == SOffsetIdx)
      printSymbolicFormat(MI, STI, O);
  }
}
728
void AMDGPUInstPrinter::printOperandAndFPInputMods(const MCInst *MI,
                                                   unsigned OpNo,
                                                   const MCSubtargetInfo &STI,
                                                   raw_ostream &O) {
  // Prints an FP source modifier operand (neg/abs) followed by the value
  // operand it applies to (at OpNo + 1).
  const MCInstrDesc &Desc = MII.get(MI->getOpcode());
  if (needsImpliedVcc(Desc, OpNo))
    printDefaultVccOperand(true, STI, O);

  unsigned InputModifiers = MI->getOperand(OpNo).getImm();

  // Use 'neg(...)' instead of '-' to avoid ambiguity.
  // This is important for integer literals because
  // -1 is not the same value as neg(1).
  bool NegMnemo = false;

  if (InputModifiers & SISrcMods::NEG) {
    // Only spell neg(...) for a bare immediate without abs; "-|x|" is
    // unambiguous for registers and abs-wrapped values.
    if (OpNo + 1 < MI->getNumOperands() &&
        (InputModifiers & SISrcMods::ABS) == 0) {
      const MCOperand &Op = MI->getOperand(OpNo + 1);
      NegMnemo = Op.isImm() || Op.isDFPImm();
    }
    if (NegMnemo) {
      O << "neg(";
    } else {
      O << '-';
    }
  }

  if (InputModifiers & SISrcMods::ABS)
    O << '|';
  printRegularOperand(MI, OpNo + 1, STI, O);
  if (InputModifiers & SISrcMods::ABS)
    O << '|';

  if (NegMnemo) {
    O << ')';
  }

  // Print default vcc/vcc_lo operand of VOP2b.
  switch (MI->getOpcode()) {
  default:
    break;

  case AMDGPU::V_CNDMASK_B32_sdwa_gfx10:
  case AMDGPU::V_CNDMASK_B32_dpp_gfx10:
  case AMDGPU::V_CNDMASK_B32_dpp_gfx11:
    // The implicit vcc follows immediately after the printed src1.
    if ((int)OpNo + 1 ==
        AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::src1))
      printDefaultVccOperand(OpNo == 0, STI, O);
    break;
  }
}
781
782void AMDGPUInstPrinter::printOperandAndIntInputMods(const MCInst *MI,
783 unsigned OpNo,
784 const MCSubtargetInfo &STI,
785 raw_ostream &O) {
786 const MCInstrDesc &Desc = MII.get(MI->getOpcode());
787 if (needsImpliedVcc(Desc, OpNo))
788 printDefaultVccOperand(true, STI, O);
789
790 unsigned InputModifiers = MI->getOperand(OpNo).getImm();
791 if (InputModifiers & SISrcMods::SEXT)
792 O << "sext(";
793 printRegularOperand(MI, OpNo + 1, STI, O);
794 if (InputModifiers & SISrcMods::SEXT)
795 O << ')';
796
797 // Print default vcc/vcc_lo operand of VOP2b.
798 switch (MI->getOpcode()) {
799 default: break;
800
801 case AMDGPU::V_ADD_CO_CI_U32_sdwa_gfx10:
802 case AMDGPU::V_SUB_CO_CI_U32_sdwa_gfx10:
803 case AMDGPU::V_SUBREV_CO_CI_U32_sdwa_gfx10:
804 if ((int)OpNo + 1 == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
805 AMDGPU::OpName::src1))
806 printDefaultVccOperand(OpNo == 0, STI, O);
807 break;
808 }
809}
810
811void AMDGPUInstPrinter::printDPP8(const MCInst *MI, unsigned OpNo,
812 const MCSubtargetInfo &STI,
813 raw_ostream &O) {
814 if (!AMDGPU::isGFX10Plus(STI))
815 llvm_unreachable("dpp8 is not supported on ASICs earlier than GFX10");
816
817 unsigned Imm = MI->getOperand(OpNo).getImm();
818 O << "dpp8:[" << formatDec(Imm & 0x7);
819 for (size_t i = 1; i < 8; ++i) {
820 O << ',' << formatDec((Imm >> (3 * i)) & 0x7);
821 }
822 O << ']';
823}
824
void AMDGPUInstPrinter::printDPPCtrl(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  using namespace AMDGPU::DPP;

  // Decodes the dpp_ctrl immediate into its assembler spelling, emitting a
  // comment instead when the control is illegal for the target.
  unsigned Imm = MI->getOperand(OpNo).getImm();
  const MCInstrDesc &Desc = MII.get(MI->getOpcode());

  // NOTE(review): the condition guarding this first branch is missing from
  // this excerpt — presumably a DP-ALU legality check on Imm/Desc; confirm
  // against the full source.
    O << " /* DP ALU dpp only supports row_newbcast */";
    return;
  } else if (Imm <= DppCtrl::QUAD_PERM_LAST) {
    // Four 2-bit selectors packed into the low byte.
    O << "quad_perm:[";
    O << formatDec(Imm & 0x3) << ',';
    O << formatDec((Imm & 0xc) >> 2) << ',';
    O << formatDec((Imm & 0x30) >> 4) << ',';
    O << formatDec((Imm & 0xc0) >> 6) << ']';
  } else if ((Imm >= DppCtrl::ROW_SHL_FIRST) &&
             (Imm <= DppCtrl::ROW_SHL_LAST)) {
    O << "row_shl:";
    printU4ImmDecOperand(MI, OpNo, O);
  } else if ((Imm >= DppCtrl::ROW_SHR_FIRST) &&
             (Imm <= DppCtrl::ROW_SHR_LAST)) {
    O << "row_shr:";
    printU4ImmDecOperand(MI, OpNo, O);
  } else if ((Imm >= DppCtrl::ROW_ROR_FIRST) &&
             (Imm <= DppCtrl::ROW_ROR_LAST)) {
    O << "row_ror:";
    printU4ImmDecOperand(MI, OpNo, O);
  } else if (Imm == DppCtrl::WAVE_SHL1) {
    // Wave-wide controls were removed in GFX10.
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* wave_shl is not supported starting from GFX10 */";
      return;
    }
    O << "wave_shl:1";
  } else if (Imm == DppCtrl::WAVE_ROL1) {
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* wave_rol is not supported starting from GFX10 */";
      return;
    }
    O << "wave_rol:1";
  } else if (Imm == DppCtrl::WAVE_SHR1) {
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* wave_shr is not supported starting from GFX10 */";
      return;
    }
    O << "wave_shr:1";
  } else if (Imm == DppCtrl::WAVE_ROR1) {
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* wave_ror is not supported starting from GFX10 */";
      return;
    }
    O << "wave_ror:1";
  } else if (Imm == DppCtrl::ROW_MIRROR) {
    O << "row_mirror";
  } else if (Imm == DppCtrl::ROW_HALF_MIRROR) {
    O << "row_half_mirror";
  } else if (Imm == DppCtrl::BCAST15) {
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* row_bcast is not supported starting from GFX10 */";
      return;
    }
    O << "row_bcast:15";
  } else if (Imm == DppCtrl::BCAST31) {
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* row_bcast is not supported starting from GFX10 */";
      return;
    }
    O << "row_bcast:31";
  } else if ((Imm >= DppCtrl::ROW_SHARE_FIRST) &&
             (Imm <= DppCtrl::ROW_SHARE_LAST)) {
    // The same encoding range is spelled differently per generation.
    if (AMDGPU::isGFX90A(STI)) {
      O << "row_newbcast:";
    } else if (AMDGPU::isGFX10Plus(STI)) {
      O << "row_share:";
    } else {
      O << " /* row_newbcast/row_share is not supported on ASICs earlier "
           "than GFX90A/GFX10 */";
      return;
    }
    printU4ImmDecOperand(MI, OpNo, O);
  } else if ((Imm >= DppCtrl::ROW_XMASK_FIRST) &&
             (Imm <= DppCtrl::ROW_XMASK_LAST)) {
    if (!AMDGPU::isGFX10Plus(STI)) {
      O << "/* row_xmask is not supported on ASICs earlier than GFX10 */";
      return;
    }
    O << "row_xmask:";
    printU4ImmDecOperand(MI, OpNo, O);
  } else {
    O << "/* Invalid dpp_ctrl value */";
  }
}
918
void AMDGPUInstPrinter::printDppRowMask(const MCInst *MI, unsigned OpNo,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  // row_mask is a 4-bit value, printed in hex.
  O << " row_mask:";
  printU4ImmOperand(MI, OpNo, STI, O);
}
925
void AMDGPUInstPrinter::printDppBankMask(const MCInst *MI, unsigned OpNo,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  // bank_mask is a 4-bit value, printed in hex.
  O << " bank_mask:";
  printU4ImmOperand(MI, OpNo, STI, O);
}
932
933void AMDGPUInstPrinter::printDppBoundCtrl(const MCInst *MI, unsigned OpNo,
934 const MCSubtargetInfo &STI,
935 raw_ostream &O) {
936 unsigned Imm = MI->getOperand(OpNo).getImm();
937 if (Imm) {
938 O << " bound_ctrl:1";
939 }
940}
941
942void AMDGPUInstPrinter::printDppFI(const MCInst *MI, unsigned OpNo,
943 const MCSubtargetInfo &STI, raw_ostream &O) {
944 using namespace llvm::AMDGPU::DPP;
945 unsigned Imm = MI->getOperand(OpNo).getImm();
946 if (Imm == DPP_FI_1 || Imm == DPP8_FI_1) {
947 O << " fi:1";
948 }
949}
950
void AMDGPUInstPrinter::printSDWASel(const MCInst *MI, unsigned OpNo,
                                     raw_ostream &O) {
  using namespace llvm::AMDGPU::SDWA;

  // Translate an SDWA byte/word select encoding into its assembler keyword.
  unsigned Imm = MI->getOperand(OpNo).getImm();
  switch (Imm) {
  case SdwaSel::BYTE_0: O << "BYTE_0"; break;
  case SdwaSel::BYTE_1: O << "BYTE_1"; break;
  case SdwaSel::BYTE_2: O << "BYTE_2"; break;
  case SdwaSel::BYTE_3: O << "BYTE_3"; break;
  case SdwaSel::WORD_0: O << "WORD_0"; break;
  case SdwaSel::WORD_1: O << "WORD_1"; break;
  case SdwaSel::DWORD: O << "DWORD"; break;
  default: llvm_unreachable("Invalid SDWA data select operand");
  }
}
967
void AMDGPUInstPrinter::printSDWADstSel(const MCInst *MI, unsigned OpNo,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  // Destination select modifier; keyword is shared with the source selects.
  O << "dst_sel:";
  printSDWASel(MI, OpNo, O);
}
974
void AMDGPUInstPrinter::printSDWASrc0Sel(const MCInst *MI, unsigned OpNo,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  // src0 select modifier; keyword is shared with the other selects.
  O << "src0_sel:";
  printSDWASel(MI, OpNo, O);
}
981
void AMDGPUInstPrinter::printSDWASrc1Sel(const MCInst *MI, unsigned OpNo,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  // src1 select modifier; keyword is shared with the other selects.
  O << "src1_sel:";
  printSDWASel(MI, OpNo, O);
}
988
void AMDGPUInstPrinter::printSDWADstUnused(const MCInst *MI, unsigned OpNo,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  using namespace llvm::AMDGPU::SDWA;

  // Translate the dst_unused encoding into its assembler keyword.
  O << "dst_unused:";
  unsigned Imm = MI->getOperand(OpNo).getImm();
  switch (Imm) {
  case DstUnused::UNUSED_PAD: O << "UNUSED_PAD"; break;
  case DstUnused::UNUSED_SEXT: O << "UNUSED_SEXT"; break;
  case DstUnused::UNUSED_PRESERVE: O << "UNUSED_PRESERVE"; break;
  default: llvm_unreachable("Invalid SDWA dest_unused operand");
  }
}
1003
1004void AMDGPUInstPrinter::printExpSrcN(const MCInst *MI, unsigned OpNo,
1005 const MCSubtargetInfo &STI, raw_ostream &O,
1006 unsigned N) {
1007 unsigned Opc = MI->getOpcode();
1008 int EnIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::en);
1009 unsigned En = MI->getOperand(EnIdx).getImm();
1010
1011 int ComprIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::compr);
1012
1013 // If compr is set, print as src0, src0, src1, src1
1014 if (MI->getOperand(ComprIdx).getImm())
1015 OpNo = OpNo - N + N / 2;
1016
1017 if (En & (1 << N))
1018 printRegOperand(MI->getOperand(OpNo).getReg(), O, MRI);
1019 else
1020 O << "off";
1021}
1022
void AMDGPUInstPrinter::printExpSrc0(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  // Export source channel 0.
  printExpSrcN(MI, OpNo, STI, O, 0);
}
1028
void AMDGPUInstPrinter::printExpSrc1(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  // Export source channel 1.
  printExpSrcN(MI, OpNo, STI, O, 1);
}
1034
void AMDGPUInstPrinter::printExpSrc2(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  // Export source channel 2.
  printExpSrcN(MI, OpNo, STI, O, 2);
}
1040
void AMDGPUInstPrinter::printExpSrc3(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  // Export source channel 3.
  printExpSrcN(MI, OpNo, STI, O, 3);
}
1046
1047void AMDGPUInstPrinter::printExpTgt(const MCInst *MI, unsigned OpNo,
1048 const MCSubtargetInfo &STI,
1049 raw_ostream &O) {
1050 using namespace llvm::AMDGPU::Exp;
1051
1052 // This is really a 6 bit field.
1053 unsigned Id = MI->getOperand(OpNo).getImm() & ((1 << 6) - 1);
1054
1055 int Index;
1056 StringRef TgtName;
1057 if (getTgtName(Id, TgtName, Index) && isSupportedTgtId(Id, STI)) {
1058 O << ' ' << TgtName;
1059 if (Index >= 0)
1060 O << Index;
1061 } else {
1062 O << " invalid_target_" << Id;
1063 }
1064}
1065
1066static bool allOpsDefaultValue(const int* Ops, int NumOps, int Mod,
1067 bool IsPacked, bool HasDstSel) {
1068 int DefaultValue = IsPacked && (Mod == SISrcMods::OP_SEL_1);
1069
1070 for (int I = 0; I < NumOps; ++I) {
1071 if (!!(Ops[I] & Mod) != DefaultValue)
1072 return false;
1073 }
1074
1075 if (HasDstSel && (Ops[0] & SISrcMods::DST_OP_SEL) != 0)
1076 return false;
1077
1078 return true;
1079}
1080
1081void AMDGPUInstPrinter::printPackedModifier(const MCInst *MI,
1083 unsigned Mod,
1084 raw_ostream &O) {
1085 unsigned Opc = MI->getOpcode();
1086 int NumOps = 0;
1087 int Ops[3];
1088
1089 for (int OpName : { AMDGPU::OpName::src0_modifiers,
1090 AMDGPU::OpName::src1_modifiers,
1091 AMDGPU::OpName::src2_modifiers }) {
1093 if (Idx == -1)
1094 break;
1095
1096 Ops[NumOps++] = MI->getOperand(Idx).getImm();
1097 }
1098
1099 const bool HasDstSel =
1100 NumOps > 0 &&
1102 MII.get(MI->getOpcode()).TSFlags & SIInstrFlags::VOP3_OPSEL;
1103
1104 const bool IsPacked =
1105 MII.get(MI->getOpcode()).TSFlags & SIInstrFlags::IsPacked;
1106
1107 if (allOpsDefaultValue(Ops, NumOps, Mod, IsPacked, HasDstSel))
1108 return;
1109
1110 O << Name;
1111 for (int I = 0; I < NumOps; ++I) {
1112 if (I != 0)
1113 O << ',';
1114
1115 O << !!(Ops[I] & Mod);
1116 }
1117
1118 if (HasDstSel) {
1119 O << ',' << !!(Ops[0] & SISrcMods::DST_OP_SEL);
1120 }
1121
1122 O << ']';
1123}
1124
1125void AMDGPUInstPrinter::printOpSel(const MCInst *MI, unsigned,
1126 const MCSubtargetInfo &STI,
1127 raw_ostream &O) {
1128 unsigned Opc = MI->getOpcode();
1129 if (isPermlane16(Opc)) {
1130 auto FIN = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
1131 auto BCN = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
1132 unsigned FI = !!(MI->getOperand(FIN).getImm() & SISrcMods::OP_SEL_0);
1133 unsigned BC = !!(MI->getOperand(BCN).getImm() & SISrcMods::OP_SEL_0);
1134 if (FI || BC)
1135 O << " op_sel:[" << FI << ',' << BC << ']';
1136 return;
1137 }
1138
1139 printPackedModifier(MI, " op_sel:[", SISrcMods::OP_SEL_0, O);
1140}
1141
// Print the packed op_sel_hi modifier list, e.g. " op_sel_hi:[1,0]".
void AMDGPUInstPrinter::printOpSelHi(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  printPackedModifier(MI, " op_sel_hi:[", SISrcMods::OP_SEL_1, O);
}
1147
// Print the packed neg_lo modifier list, e.g. " neg_lo:[0,1]".
void AMDGPUInstPrinter::printNegLo(const MCInst *MI, unsigned OpNo,
                                   const MCSubtargetInfo &STI,
                                   raw_ostream &O) {
  printPackedModifier(MI, " neg_lo:[", SISrcMods::NEG, O);
}
1153
// Print the packed neg_hi modifier list, e.g. " neg_hi:[0,1]".
void AMDGPUInstPrinter::printNegHi(const MCInst *MI, unsigned OpNo,
                                   const MCSubtargetInfo &STI,
                                   raw_ostream &O) {
  printPackedModifier(MI, " neg_hi:[", SISrcMods::NEG_HI, O);
}
1159
1160void AMDGPUInstPrinter::printInterpSlot(const MCInst *MI, unsigned OpNum,
1161 const MCSubtargetInfo &STI,
1162 raw_ostream &O) {
1163 unsigned Imm = MI->getOperand(OpNum).getImm();
1164 switch (Imm) {
1165 case 0:
1166 O << "p10";
1167 break;
1168 case 1:
1169 O << "p20";
1170 break;
1171 case 2:
1172 O << "p0";
1173 break;
1174 default:
1175 O << "invalid_param_" << Imm;
1176 }
1177}
1178
// Print an interpolation attribute index as "attrN".
void AMDGPUInstPrinter::printInterpAttr(const MCInst *MI, unsigned OpNum,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  unsigned Attr = MI->getOperand(OpNum).getImm();
  O << "attr" << Attr;
}
1185
// Print an interpolation attribute channel as ".x", ".y", ".z" or ".w".
void AMDGPUInstPrinter::printInterpAttrChan(const MCInst *MI, unsigned OpNum,
                                            const MCSubtargetInfo &STI,
                                            raw_ostream &O) {
  unsigned Chan = MI->getOperand(OpNum).getImm();
  // Only the low two bits select the channel.
  O << '.' << "xyzw"[Chan & 0x3];
}
1192
1193void AMDGPUInstPrinter::printGPRIdxMode(const MCInst *MI, unsigned OpNo,
1194 const MCSubtargetInfo &STI,
1195 raw_ostream &O) {
1196 using namespace llvm::AMDGPU::VGPRIndexMode;
1197 unsigned Val = MI->getOperand(OpNo).getImm();
1198
1199 if ((Val & ~ENABLE_MASK) != 0) {
1200 O << formatHex(static_cast<uint64_t>(Val));
1201 } else {
1202 O << "gpr_idx(";
1203 bool NeedComma = false;
1204 for (unsigned ModeId = ID_MIN; ModeId <= ID_MAX; ++ModeId) {
1205 if (Val & (1 << ModeId)) {
1206 if (NeedComma)
1207 O << ',';
1208 O << IdSymbolic[ModeId];
1209 NeedComma = true;
1210 }
1211 }
1212 O << ')';
1213 }
1214}
1215
// Print a two-operand memory reference as "<op>, <op+1>".
void AMDGPUInstPrinter::printMemOperand(const MCInst *MI, unsigned OpNo,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  printRegularOperand(MI, OpNo, STI, O);
  O << ", ";
  printRegularOperand(MI, OpNo + 1, STI, O);
}
1223
1224void AMDGPUInstPrinter::printIfSet(const MCInst *MI, unsigned OpNo,
1225 raw_ostream &O, StringRef Asm,
1227 const MCOperand &Op = MI->getOperand(OpNo);
1228 assert(Op.isImm());
1229 if (Op.getImm() == 1) {
1230 O << Asm;
1231 } else {
1232 O << Default;
1233 }
1234}
1235
1236void AMDGPUInstPrinter::printIfSet(const MCInst *MI, unsigned OpNo,
1237 raw_ostream &O, char Asm) {
1238 const MCOperand &Op = MI->getOperand(OpNo);
1239 assert(Op.isImm());
1240 if (Op.getImm() == 1)
1241 O << Asm;
1242}
1243
1244void AMDGPUInstPrinter::printOModSI(const MCInst *MI, unsigned OpNo,
1245 const MCSubtargetInfo &STI,
1246 raw_ostream &O) {
1247 int Imm = MI->getOperand(OpNo).getImm();
1248 if (Imm == SIOutMods::MUL2)
1249 O << " mul:2";
1250 else if (Imm == SIOutMods::MUL4)
1251 O << " mul:4";
1252 else if (Imm == SIOutMods::DIV2)
1253 O << " div:2";
1254}
1255
1256void AMDGPUInstPrinter::printSendMsg(const MCInst *MI, unsigned OpNo,
1257 const MCSubtargetInfo &STI,
1258 raw_ostream &O) {
1259 using namespace llvm::AMDGPU::SendMsg;
1260
1261 const unsigned Imm16 = MI->getOperand(OpNo).getImm();
1262
1263 uint16_t MsgId;
1264 uint16_t OpId;
1266 decodeMsg(Imm16, MsgId, OpId, StreamId, STI);
1267
1268 StringRef MsgName = getMsgName(MsgId, STI);
1269
1270 if (!MsgName.empty() && isValidMsgOp(MsgId, OpId, STI) &&
1271 isValidMsgStream(MsgId, OpId, StreamId, STI)) {
1272 O << "sendmsg(" << MsgName;
1273 if (msgRequiresOp(MsgId, STI)) {
1274 O << ", " << getMsgOpName(MsgId, OpId, STI);
1275 if (msgSupportsStream(MsgId, OpId, STI)) {
1276 O << ", " << StreamId;
1277 }
1278 }
1279 O << ')';
1280 } else if (encodeMsg(MsgId, OpId, StreamId) == Imm16) {
1281 O << "sendmsg(" << MsgId << ", " << OpId << ", " << StreamId << ')';
1282 } else {
1283 O << Imm16; // Unknown imm16 code.
1284 }
1285}
1286
1287static void printSwizzleBitmask(const uint16_t AndMask,
1288 const uint16_t OrMask,
1289 const uint16_t XorMask,
1290 raw_ostream &O) {
1291 using namespace llvm::AMDGPU::Swizzle;
1292
1293 uint16_t Probe0 = ((0 & AndMask) | OrMask) ^ XorMask;
1294 uint16_t Probe1 = ((BITMASK_MASK & AndMask) | OrMask) ^ XorMask;
1295
1296 O << "\"";
1297
1298 for (unsigned Mask = 1 << (BITMASK_WIDTH - 1); Mask > 0; Mask >>= 1) {
1299 uint16_t p0 = Probe0 & Mask;
1300 uint16_t p1 = Probe1 & Mask;
1301
1302 if (p0 == p1) {
1303 if (p0 == 0) {
1304 O << "0";
1305 } else {
1306 O << "1";
1307 }
1308 } else {
1309 if (p0 == 0) {
1310 O << "p";
1311 } else {
1312 O << "i";
1313 }
1314 }
1315 }
1316
1317 O << "\"";
1318}
1319
1320void AMDGPUInstPrinter::printSwizzle(const MCInst *MI, unsigned OpNo,
1321 const MCSubtargetInfo &STI,
1322 raw_ostream &O) {
1323 using namespace llvm::AMDGPU::Swizzle;
1324
1325 uint16_t Imm = MI->getOperand(OpNo).getImm();
1326 if (Imm == 0) {
1327 return;
1328 }
1329
1330 O << " offset:";
1331
1332 if ((Imm & QUAD_PERM_ENC_MASK) == QUAD_PERM_ENC) {
1333
1334 O << "swizzle(" << IdSymbolic[ID_QUAD_PERM];
1335 for (unsigned I = 0; I < LANE_NUM; ++I) {
1336 O << ",";
1337 O << formatDec(Imm & LANE_MASK);
1338 Imm >>= LANE_SHIFT;
1339 }
1340 O << ")";
1341
1342 } else if ((Imm & BITMASK_PERM_ENC_MASK) == BITMASK_PERM_ENC) {
1343
1344 uint16_t AndMask = (Imm >> BITMASK_AND_SHIFT) & BITMASK_MASK;
1345 uint16_t OrMask = (Imm >> BITMASK_OR_SHIFT) & BITMASK_MASK;
1346 uint16_t XorMask = (Imm >> BITMASK_XOR_SHIFT) & BITMASK_MASK;
1347
1348 if (AndMask == BITMASK_MAX && OrMask == 0 && llvm::popcount(XorMask) == 1) {
1349
1350 O << "swizzle(" << IdSymbolic[ID_SWAP];
1351 O << ",";
1352 O << formatDec(XorMask);
1353 O << ")";
1354
1355 } else if (AndMask == BITMASK_MAX && OrMask == 0 && XorMask > 0 &&
1356 isPowerOf2_64(XorMask + 1)) {
1357
1358 O << "swizzle(" << IdSymbolic[ID_REVERSE];
1359 O << ",";
1360 O << formatDec(XorMask + 1);
1361 O << ")";
1362
1363 } else {
1364
1365 uint16_t GroupSize = BITMASK_MAX - AndMask + 1;
1366 if (GroupSize > 1 &&
1367 isPowerOf2_64(GroupSize) &&
1368 OrMask < GroupSize &&
1369 XorMask == 0) {
1370
1371 O << "swizzle(" << IdSymbolic[ID_BROADCAST];
1372 O << ",";
1373 O << formatDec(GroupSize);
1374 O << ",";
1375 O << formatDec(OrMask);
1376 O << ")";
1377
1378 } else {
1379 O << "swizzle(" << IdSymbolic[ID_BITMASK_PERM];
1380 O << ",";
1381 printSwizzleBitmask(AndMask, OrMask, XorMask, O);
1382 O << ")";
1383 }
1384 }
1385 } else {
1386 printU16ImmDecOperand(MI, OpNo, O);
1387 }
1388}
1389
1390void AMDGPUInstPrinter::printSWaitCnt(const MCInst *MI, unsigned OpNo,
1391 const MCSubtargetInfo &STI,
1392 raw_ostream &O) {
1394
1395 unsigned SImm16 = MI->getOperand(OpNo).getImm();
1396 unsigned Vmcnt, Expcnt, Lgkmcnt;
1397 decodeWaitcnt(ISA, SImm16, Vmcnt, Expcnt, Lgkmcnt);
1398
1399 bool IsDefaultVmcnt = Vmcnt == getVmcntBitMask(ISA);
1400 bool IsDefaultExpcnt = Expcnt == getExpcntBitMask(ISA);
1401 bool IsDefaultLgkmcnt = Lgkmcnt == getLgkmcntBitMask(ISA);
1402 bool PrintAll = IsDefaultVmcnt && IsDefaultExpcnt && IsDefaultLgkmcnt;
1403
1404 bool NeedSpace = false;
1405
1406 if (!IsDefaultVmcnt || PrintAll) {
1407 O << "vmcnt(" << Vmcnt << ')';
1408 NeedSpace = true;
1409 }
1410
1411 if (!IsDefaultExpcnt || PrintAll) {
1412 if (NeedSpace)
1413 O << ' ';
1414 O << "expcnt(" << Expcnt << ')';
1415 NeedSpace = true;
1416 }
1417
1418 if (!IsDefaultLgkmcnt || PrintAll) {
1419 if (NeedSpace)
1420 O << ' ';
1421 O << "lgkmcnt(" << Lgkmcnt << ')';
1422 }
1423}
1424
1425void AMDGPUInstPrinter::printDepCtr(const MCInst *MI, unsigned OpNo,
1426 const MCSubtargetInfo &STI,
1427 raw_ostream &O) {
1428 using namespace llvm::AMDGPU::DepCtr;
1429
1430 uint64_t Imm16 = MI->getOperand(OpNo).getImm() & 0xffff;
1431
1432 bool HasNonDefaultVal = false;
1433 if (isSymbolicDepCtrEncoding(Imm16, HasNonDefaultVal, STI)) {
1434 int Id = 0;
1436 unsigned Val;
1437 bool IsDefault;
1438 bool NeedSpace = false;
1439 while (decodeDepCtr(Imm16, Id, Name, Val, IsDefault, STI)) {
1440 if (!IsDefault || !HasNonDefaultVal) {
1441 if (NeedSpace)
1442 O << ' ';
1443 O << Name << '(' << Val << ')';
1444 NeedSpace = true;
1445 }
1446 }
1447 } else {
1448 O << formatHex(Imm16);
1449 }
1450}
1451
1453 const MCSubtargetInfo &STI,
1454 raw_ostream &O) {
1455 const char *BadInstId = "/* invalid instid value */";
1456 static const std::array<const char *, 12> InstIds = {
1457 "NO_DEP", "VALU_DEP_1", "VALU_DEP_2",
1458 "VALU_DEP_3", "VALU_DEP_4", "TRANS32_DEP_1",
1459 "TRANS32_DEP_2", "TRANS32_DEP_3", "FMA_ACCUM_CYCLE_1",
1460 "SALU_CYCLE_1", "SALU_CYCLE_2", "SALU_CYCLE_3"};
1461
1462 const char *BadInstSkip = "/* invalid instskip value */";
1463 static const std::array<const char *, 6> InstSkips = {
1464 "SAME", "NEXT", "SKIP_1", "SKIP_2", "SKIP_3", "SKIP_4"};
1465
1466 unsigned SImm16 = MI->getOperand(OpNo).getImm();
1467 const char *Prefix = "";
1468
1469 unsigned Value = SImm16 & 0xF;
1470 if (Value) {
1471 const char *Name = Value < InstIds.size() ? InstIds[Value] : BadInstId;
1472 O << Prefix << "instid0(" << Name << ')';
1473 Prefix = " | ";
1474 }
1475
1476 Value = (SImm16 >> 4) & 7;
1477 if (Value) {
1478 const char *Name =
1479 Value < InstSkips.size() ? InstSkips[Value] : BadInstSkip;
1480 O << Prefix << "instskip(" << Name << ')';
1481 Prefix = " | ";
1482 }
1483
1484 Value = (SImm16 >> 7) & 0xF;
1485 if (Value) {
1486 const char *Name = Value < InstIds.size() ? InstIds[Value] : BadInstId;
1487 O << Prefix << "instid1(" << Name << ')';
1488 Prefix = " | ";
1489 }
1490
1491 if (!*Prefix)
1492 O << "0";
1493}
1494
1495void AMDGPUInstPrinter::printHwreg(const MCInst *MI, unsigned OpNo,
1496 const MCSubtargetInfo &STI, raw_ostream &O) {
1497 unsigned Id;
1498 unsigned Offset;
1499 unsigned Width;
1500
1501 using namespace llvm::AMDGPU::Hwreg;
1502 unsigned Val = MI->getOperand(OpNo).getImm();
1503 decodeHwreg(Val, Id, Offset, Width);
1504 StringRef HwRegName = getHwreg(Id, STI);
1505
1506 O << "hwreg(";
1507 if (!HwRegName.empty()) {
1508 O << HwRegName;
1509 } else {
1510 O << Id;
1511 }
1512 if (Width != WIDTH_DEFAULT_ || Offset != OFFSET_DEFAULT_) {
1513 O << ", " << Offset << ", " << Width;
1514 }
1515 O << ')';
1516}
1517
1518void AMDGPUInstPrinter::printEndpgm(const MCInst *MI, unsigned OpNo,
1519 const MCSubtargetInfo &STI,
1520 raw_ostream &O) {
1521 uint16_t Imm = MI->getOperand(OpNo).getImm();
1522 if (Imm == 0) {
1523 return;
1524 }
1525
1526 O << ' ' << formatDec(Imm);
1527}
1528
1529#include "AMDGPUGenAsmWriter.inc"
unsigned const MachineRegisterInfo * MRI
static void printSwizzleBitmask(const uint16_t AndMask, const uint16_t OrMask, const uint16_t XorMask, raw_ostream &O)
static bool allOpsDefaultValue(const int *Ops, int NumOps, int Mod, bool IsPacked, bool HasDstSel)
Provides AMDGPU specific target descriptions.
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
std::string Name
IRTranslator LLVM IR MI
#define I(x, y, z)
Definition: MD5.cpp:58
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
void printSwizzle(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printEndpgm(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
static const char * getRegisterName(MCRegister Reg)
static void printIfSet(const MCInst *MI, unsigned OpNo, raw_ostream &O, StringRef Asm, StringRef Default="")
void printDepCtr(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printHwreg(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printSendMsg(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printRegName(raw_ostream &OS, MCRegister Reg) const override
Print the assembler register name.
static void printRegOperand(unsigned RegNo, raw_ostream &O, const MCRegisterInfo &MRI)
void printInst(const MCInst *MI, uint64_t Address, StringRef Annot, const MCSubtargetInfo &STI, raw_ostream &O) override
Print the specified MCInst to the specified raw_ostream.
void printInstruction(const MCInst *MI, uint64_t Address, const MCSubtargetInfo &STI, raw_ostream &O)
void printSWaitCnt(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printOModSI(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printSDelayALU(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
This class represents an Operation in the Expression.
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:35
format_object< int64_t > formatHex(int64_t Value) const
const MCInstrInfo & MII
Definition: MCInstPrinter.h:52
format_object< int64_t > formatDec(int64_t Value) const
Utility functions to print decimal/hexadecimal values.
const MCRegisterInfo & MRI
Definition: MCInstPrinter.h:53
void printAnnotation(raw_ostream &OS, StringRef Annot)
Utility function for printing annotations.
const MCAsmInfo & MAI
Definition: MCInstPrinter.h:51
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
Instances of this class represent operands of the MCInst class.
Definition: MCInst.h:36
MCRegisterClass - Base class of TargetRegisterClass.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
const char * getRegClassName(const MCRegisterClass *Class) const
const MCRegisterClass & getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
StringRef getCPU() const
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:134
LLVM Value Representation.
Definition: Value.h:74
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI)
bool getTgtName(unsigned Id, StringRef &Name, int &Index)
bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI)
StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI)
bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI)
StringRef getDfmtName(unsigned Id)
StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI)
void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt)
const char *const IdSymbolic[]
void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt)
Decodes Vmcnt, Expcnt and Lgkmcnt from given Waitcnt for given isa Version, and writes decoded values...
LLVM_READNONE bool isLegalDPALU_DPPControl(unsigned DC)
LLVM_READONLY int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx)
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST)
For FLAT segment the offset must be positive; MSB is ignored and forced to zero.
unsigned mc2PseudoReg(unsigned Reg)
Convert hardware register Reg to a pseudo register.
bool isGFX940(const MCSubtargetInfo &STI)
IsaVersion getIsaVersion(StringRef GPU)
bool isDPALU_DPP(const MCInstrDesc &OpDesc)
bool isSI(const MCSubtargetInfo &STI)
bool getVOP3IsSingle(unsigned Opc)
bool getVOP1IsSingle(unsigned Opc)
bool isGFX90A(const MCSubtargetInfo &STI)
LLVM_READONLY const MIMGDimInfo * getMIMGDimInfoByEncoding(uint8_t DimEnc)
unsigned getVmcntBitMask(const IsaVersion &Version)
LLVM_READNONE bool isInlinableIntLiteral(int64_t Literal)
Is this literal inlinable, and not one of the values intended for floating point values.
unsigned getLgkmcntBitMask(const IsaVersion &Version)
unsigned getExpcntBitMask(const IsaVersion &Version)
bool isInlineValue(unsigned Reg)
bool isGFX10Plus(const MCSubtargetInfo &STI)
@ OPERAND_REG_IMM_INT64
Definition: SIDefines.h:189
@ OPERAND_REG_IMM_V2FP16
Definition: SIDefines.h:196
@ OPERAND_REG_INLINE_C_V2INT32
Definition: SIDefines.h:210
@ OPERAND_REG_INLINE_C_FP64
Definition: SIDefines.h:207
@ OPERAND_REG_IMM_V2INT16
Definition: SIDefines.h:197
@ OPERAND_REG_INLINE_AC_V2FP16
Definition: SIDefines.h:224
@ OPERAND_REG_IMM_INT32
Operands with register or 32-bit immediate.
Definition: SIDefines.h:188
@ OPERAND_REG_IMM_FP16
Definition: SIDefines.h:193
@ OPERAND_REG_INLINE_C_INT64
Definition: SIDefines.h:204
@ OPERAND_REG_INLINE_C_INT16
Operands with register or inline constant.
Definition: SIDefines.h:202
@ OPERAND_REG_INLINE_AC_INT16
Operands with an AccVGPR register or inline constant.
Definition: SIDefines.h:218
@ OPERAND_REG_IMM_FP64
Definition: SIDefines.h:192
@ OPERAND_REG_INLINE_C_V2FP16
Definition: SIDefines.h:209
@ OPERAND_REG_INLINE_AC_V2INT16
Definition: SIDefines.h:223
@ OPERAND_REG_INLINE_AC_FP16
Definition: SIDefines.h:220
@ OPERAND_REG_INLINE_AC_INT32
Definition: SIDefines.h:219
@ OPERAND_REG_INLINE_AC_FP32
Definition: SIDefines.h:221
@ OPERAND_REG_IMM_V2INT32
Definition: SIDefines.h:198
@ OPERAND_REG_IMM_FP32
Definition: SIDefines.h:191
@ OPERAND_REG_INLINE_C_FP32
Definition: SIDefines.h:206
@ OPERAND_REG_INLINE_C_INT32
Definition: SIDefines.h:203
@ OPERAND_REG_INLINE_C_V2INT16
Definition: SIDefines.h:208
@ OPERAND_REG_IMM_V2FP32
Definition: SIDefines.h:199
@ OPERAND_REG_INLINE_AC_FP64
Definition: SIDefines.h:222
@ OPERAND_REG_INLINE_C_FP16
Definition: SIDefines.h:205
@ OPERAND_REG_IMM_INT16
Definition: SIDefines.h:190
@ OPERAND_REG_INLINE_C_V2FP32
Definition: SIDefines.h:211
@ OPERAND_REG_IMM_FP32_DEFERRED
Definition: SIDefines.h:195
@ OPERAND_REG_IMM_FP16_DEFERRED
Definition: SIDefines.h:194
unsigned getRegBitWidth(const TargetRegisterClass &RC)
Get the size in bits of a register from the register class RC.
bool isCI(const MCSubtargetInfo &STI)
bool getVOP2IsSingle(unsigned Opc)
bool isPermlane16(unsigned Opc)
@ OPERAND_REGISTER
Definition: MCInstrDesc.h:61
@ OPERAND_IMMEDIATE
Definition: MCInstrDesc.h:60
@ OPERAND_UNKNOWN
Definition: MCInstrDesc.h:59
Reg
All possible values of the reg field in the ModR/M byte.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:440
int popcount(T Value) noexcept
Count the number of set bits in a value.
Definition: bit.h:349
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:269
@ Mod
The access may modify the value stored in memory.
constexpr int32_t SignExtend32(uint32_t X)
Sign-extend the number in the bottom B bits of X to a 32-bit integer.
Definition: MathExtras.h:433
@ Default
The result values are uniform if and only if all operands are uniform.
#define N
Instruction set architecture version.
Definition: TargetParser.h:114
Description of the encoding of one expression Op.