50void AMDGPUInstPrinter::printU16ImmOperand(
const MCInst *
MI,
unsigned OpNo,
61 int64_t
Imm =
Op.getImm();
63 O << formatHex(static_cast<uint64_t>(
Imm & 0xffff));
65 printU32ImmOperand(
MI, OpNo, STI, O);
68void AMDGPUInstPrinter::printU16ImmDecOperand(
const MCInst *
MI,
unsigned OpNo,
73void AMDGPUInstPrinter::printU32ImmOperand(
const MCInst *
MI,
unsigned OpNo,
76 const MCOperand &
Op =
MI->getOperand(OpNo);
78 MAI.printExpr(O, *
Op.getExpr());
85void AMDGPUInstPrinter::printFP64ImmOperand(
const MCInst *
MI,
unsigned OpNo,
89 const MCOperand &
Op =
MI->getOperand(OpNo);
91 MAI.printExpr(O, *
Op.getExpr());
95 printLiteral64(
Op.getImm(), O,
true);
98void AMDGPUInstPrinter::printNamedBit(
const MCInst *
MI,
unsigned OpNo,
100 if (
MI->getOperand(OpNo).getImm()) {
105void AMDGPUInstPrinter::printOffset(
const MCInst *
MI,
unsigned OpNo,
108 uint32_t
Imm =
MI->getOperand(OpNo).getImm();
113 const MCInstrDesc &
Desc =
MII.get(
MI->getOpcode());
116 O << formatDec(SignExtend32<24>(
Imm));
118 printU16ImmDecOperand(
MI, OpNo, O);
122void AMDGPUInstPrinter::printFlatOffset(
const MCInst *
MI,
unsigned OpNo,
125 uint32_t
Imm =
MI->getOperand(OpNo).getImm();
129 const MCInstrDesc &
Desc =
MII.get(
MI->getOpcode());
137 printU16ImmDecOperand(
MI, OpNo, O);
141void AMDGPUInstPrinter::printSMRDOffset8(
const MCInst *
MI,
unsigned OpNo,
144 printU32ImmOperand(
MI, OpNo, STI, O);
147void AMDGPUInstPrinter::printSMEMOffset(
const MCInst *
MI,
unsigned OpNo,
153void AMDGPUInstPrinter::printSMRDLiteralOffset(
const MCInst *
MI,
unsigned OpNo,
156 printU32ImmOperand(
MI, OpNo, STI, O);
159void AMDGPUInstPrinter::printCPol(
const MCInst *
MI,
unsigned OpNo,
161 auto Imm =
MI->getOperand(OpNo).getImm();
168 O <<
" scale_offset";
170 printTH(
MI, TH, Scope, O);
171 printScope(Scope, O);
190 O <<
" /* unexpected cache policy bit */";
193void AMDGPUInstPrinter::printTH(
const MCInst *
MI, int64_t TH, int64_t Scope,
199 const unsigned Opcode =
MI->getOpcode();
200 const MCInstrDesc &TID =
MII.get(Opcode);
223 O << (IsStore ?
"TH_STORE_" :
"TH_LOAD_");
233 : (IsStore ?
"WB" :
"LU"));
254void AMDGPUInstPrinter::printScope(int64_t Scope,
raw_ostream &O) {
270void AMDGPUInstPrinter::printDim(
const MCInst *
MI,
unsigned OpNo,
272 unsigned Dim =
MI->getOperand(OpNo).getImm();
273 O <<
" dim:SQ_RSRC_IMG_";
282void AMDGPUInstPrinter::printR128A16(
const MCInst *
MI,
unsigned OpNo,
285 printNamedBit(
MI, OpNo, O,
"a16");
287 printNamedBit(
MI, OpNo, O,
"r128");
290void AMDGPUInstPrinter::printFORMAT(
const MCInst *
MI,
unsigned OpNo,
295void AMDGPUInstPrinter::printSymbolicFormat(
const MCInst *
MI,
298 using namespace llvm::AMDGPU::MTBUFFormat;
301 AMDGPU::getNamedOperandIdx(
MI->getOpcode(), AMDGPU::OpName::format);
304 unsigned Val =
MI->getOperand(OpNo).getImm();
306 if (Val == UFMT_DEFAULT)
311 O <<
" format:" << Val;
314 if (Val == DFMT_NFMT_DEFAULT)
321 if (Dfmt != DFMT_DEFAULT) {
323 if (Nfmt != NFMT_DEFAULT) {
327 if (Nfmt != NFMT_DEFAULT) {
332 O <<
" format:" << Val;
340 unsigned Enc =
MRI.getEncodingValue(
Reg);
345 unsigned RegNo = Idx % 0x100;
347 if (RC->
getID() == AMDGPU::VGPR_16RegClassID) {
366 unsigned Enc =
MRI.getEncodingValue(
Reg);
373 unsigned Opc =
Desc.getOpcode();
375 for (
I = 0;
I < 4; ++
I) {
376 if (
Ops.first[
I] != AMDGPU::OpName::NUM_OPERAND_NAMES &&
377 (
unsigned)AMDGPU::getNamedOperandIdx(
Opc,
Ops.first[
I]) == OpNo)
379 if (
Ops.second &&
Ops.second[
I] != AMDGPU::OpName::NUM_OPERAND_NAMES &&
380 (
unsigned)AMDGPU::getNamedOperandIdx(
Opc,
Ops.second[
I]) == OpNo)
385 unsigned OpMSBs = (VgprMSBs >> (
I * 2)) & 3;
399 case AMDGPU::PRIVATE_RSRC_REG:
422void AMDGPUInstPrinter::printVOPDst(
const MCInst *
MI,
unsigned OpNo,
424 auto Opcode =
MI->getOpcode();
442 printRegularOperand(
MI, OpNo, STI, O);
448 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx10:
449 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx10:
450 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
451 case AMDGPU::V_ADD_CO_CI_U32_sdwa_gfx10:
452 case AMDGPU::V_SUB_CO_CI_U32_sdwa_gfx10:
453 case AMDGPU::V_SUBREV_CO_CI_U32_sdwa_gfx10:
454 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx10:
455 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx10:
456 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx10:
457 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx10:
458 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx10:
459 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx10:
460 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx11:
461 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx11:
462 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx11:
463 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx11:
464 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx11:
465 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx11:
466 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx11:
467 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx11:
468 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx11:
469 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx12:
470 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx12:
471 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx12:
472 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx12:
473 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx12:
474 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx12:
475 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx12:
476 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx12:
477 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx12:
478 printDefaultVccOperand(
false, STI, O);
483void AMDGPUInstPrinter::printVINTRPDst(
const MCInst *
MI,
unsigned OpNo,
490 printRegularOperand(
MI, OpNo, STI, O);
493void AMDGPUInstPrinter::printImmediateInt16(uint32_t
Imm,
496 int32_t SImm =
static_cast<int32_t
>(
Imm);
502 if (printImmediateFloat32(
Imm, STI, O))
505 O << formatHex(static_cast<uint64_t>(
Imm & 0xffff));
512 else if (
Imm == 0xBC00)
514 else if (
Imm == 0x3800)
516 else if (
Imm == 0xB800)
518 else if (
Imm == 0x4000)
520 else if (
Imm == 0xC000)
522 else if (
Imm == 0x4400)
524 else if (
Imm == 0xC400)
526 else if (
Imm == 0x3118 && STI.
hasFeature(AMDGPU::FeatureInv2PiInlineImm))
538 else if (
Imm == 0xBF80)
540 else if (
Imm == 0x3F00)
542 else if (
Imm == 0xBF00)
544 else if (
Imm == 0x4000)
546 else if (
Imm == 0xC000)
548 else if (
Imm == 0x4080)
550 else if (
Imm == 0xC080)
552 else if (
Imm == 0x3E22 && STI.
hasFeature(AMDGPU::FeatureInv2PiInlineImm))
560void AMDGPUInstPrinter::printImmediateBF16(uint32_t
Imm,
563 int16_t SImm =
static_cast<int16_t
>(
Imm);
572 O << formatHex(static_cast<uint64_t>(
Imm));
575void AMDGPUInstPrinter::printImmediateF16(uint32_t
Imm,
578 int16_t SImm =
static_cast<int16_t
>(
Imm);
584 uint16_t HImm =
static_cast<uint16_t
>(
Imm);
588 uint64_t
Imm16 =
static_cast<uint16_t
>(
Imm);
592void AMDGPUInstPrinter::printImmediateV216(uint32_t
Imm, uint8_t OpType,
595 int32_t SImm =
static_cast<int32_t
>(
Imm);
604 if (printImmediateFloat32(
Imm, STI, O))
625 O << formatHex(static_cast<uint64_t>(
Imm));
628bool AMDGPUInstPrinter::printImmediateFloat32(uint32_t
Imm,
649 else if (
Imm == 0x3e22f983 &&
650 STI.
hasFeature(AMDGPU::FeatureInv2PiInlineImm))
658void AMDGPUInstPrinter::printImmediate32(uint32_t
Imm,
661 int32_t SImm =
static_cast<int32_t
>(
Imm);
667 if (printImmediateFloat32(
Imm, STI, O))
670 O << formatHex(static_cast<uint64_t>(
Imm));
673void AMDGPUInstPrinter::printImmediate64(uint64_t
Imm,
676 int64_t SImm =
static_cast<int64_t
>(
Imm);
677 if (SImm >= -16 && SImm <= 64) {
700 else if (
Imm == 0x3fc45f306dc9c882 &&
701 STI.
hasFeature(AMDGPU::FeatureInv2PiInlineImm))
702 O <<
"0.15915494309189532";
704 printLiteral64(
Imm, O, IsFP);
707void AMDGPUInstPrinter::printLiteral64(uint64_t
Imm,
raw_ostream &O,
710 O << formatHex(static_cast<uint64_t>(
Hi_32(
Imm)));
715void AMDGPUInstPrinter::printBLGP(
const MCInst *
MI,
unsigned OpNo,
718 unsigned Imm =
MI->getOperand(OpNo).getImm();
723 switch (
MI->getOpcode()) {
724 case AMDGPU::V_MFMA_F64_16X16X4F64_gfx940_acd:
725 case AMDGPU::V_MFMA_F64_16X16X4F64_gfx940_vcd:
726 case AMDGPU::V_MFMA_F64_4X4X4F64_gfx940_acd:
727 case AMDGPU::V_MFMA_F64_4X4X4F64_gfx940_vcd:
728 O <<
" neg:[" << (
Imm & 1) <<
',' << ((
Imm >> 1) & 1) <<
','
729 << ((
Imm >> 2) & 1) <<
']';
734 O <<
" blgp:" <<
Imm;
737void AMDGPUInstPrinter::printDefaultVccOperand(
bool FirstOperand,
751 unsigned OpNo)
const {
755 (
Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC) ||
756 Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC_LO));
760void AMDGPUInstPrinter::printOperand(
const MCInst *
MI,
unsigned OpNo,
763 unsigned Opc =
MI->getOpcode();
765 int ModIdx = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src0_modifiers);
772 (
Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC) ||
773 Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC_LO)))
774 printDefaultVccOperand(
true, STI, O);
776 printRegularOperand(
MI, OpNo, STI, O);
780void AMDGPUInstPrinter::printRegularOperand(
const MCInst *
MI,
unsigned OpNo,
783 const MCInstrDesc &
Desc =
MII.get(
MI->getOpcode());
785 if (OpNo >=
MI->getNumOperands()) {
786 O <<
"/*Missing OP" << OpNo <<
"*/";
790 const MCOperand &
Op =
MI->getOperand(OpNo);
797 const MCOperandInfo &OpInfo =
Desc.operands()[OpNo];
799 int16_t RCID =
MII.getOpRegClassID(
801 const MCRegisterClass &RC =
MRI.getRegClass(RCID);
805 (OpInfo.
RegClass == AMDGPU::SReg_1 ||
806 OpInfo.
RegClass == AMDGPU::SReg_1_XEXEC);
813 O <<
"/*Invalid register, operand has \'" <<
MRI.getRegClassName(&RC)
814 <<
"\' register class*/";
818 }
else if (
Op.isImm()) {
819 const uint8_t OpTy =
Desc.operands()[OpNo].OperandType;
831 printImmediate32(
Op.getImm(), STI, O);
835 printImmediate64(
Op.getImm(), STI, O,
false);
840 printImmediate64(
Op.getImm(), STI, O,
true);
844 printImmediateInt16(
Op.getImm(), STI, O);
848 printImmediateF16(
Op.getImm(), STI, O);
852 printImmediateBF16(
Op.getImm(), STI, O);
861 printImmediateV216(
Op.getImm(), OpTy, STI, O);
870 printImmediate32(
Op.getImm(), STI, O);
871 O <<
"/*Invalid immediate*/";
878 }
else if (
Op.isExpr()) {
879 const MCExpr *
Exp =
Op.getExpr();
880 MAI.printExpr(O, *Exp);
886 switch (
MI->getOpcode()) {
889 case AMDGPU::V_CNDMASK_B32_e32_gfx10:
890 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx10:
891 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx10:
892 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
893 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx10:
894 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx10:
895 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx10:
896 case AMDGPU::V_CNDMASK_B32_dpp8_gfx10:
897 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx10:
898 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx10:
899 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx10:
900 case AMDGPU::V_CNDMASK_B32_e32_gfx11:
901 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx11:
902 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx11:
903 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx11:
904 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx11:
905 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx11:
906 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx11:
907 case AMDGPU::V_CNDMASK_B32_dpp8_gfx11:
908 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx11:
909 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx11:
910 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx11:
911 case AMDGPU::V_CNDMASK_B32_e32_gfx12:
912 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx12:
913 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx12:
914 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx12:
915 case AMDGPU::V_CNDMASK_B32_dpp_gfx12:
916 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx12:
917 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx12:
918 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx12:
919 case AMDGPU::V_CNDMASK_B32_dpp8_gfx12:
920 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx12:
921 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx12:
922 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx12:
924 case AMDGPU::V_CNDMASK_B32_e32_gfx6_gfx7:
925 case AMDGPU::V_CNDMASK_B32_e32_vi:
926 if ((
int)OpNo == AMDGPU::getNamedOperandIdx(
MI->getOpcode(),
927 AMDGPU::OpName::src1))
928 printDefaultVccOperand(OpNo == 0, STI, O);
934 AMDGPU::getNamedOperandIdx(
MI->getOpcode(), AMDGPU::OpName::soffset);
936 if ((
int)OpNo == SOffsetIdx)
937 printSymbolicFormat(
MI, STI, O);
941void AMDGPUInstPrinter::printOperandAndFPInputMods(
const MCInst *
MI,
945 const MCInstrDesc &
Desc =
MII.get(
MI->getOpcode());
946 if (needsImpliedVcc(
Desc, OpNo))
947 printDefaultVccOperand(
true, STI, O);
949 unsigned InputModifiers =
MI->getOperand(OpNo).getImm();
954 bool NegMnemo =
false;
957 if (OpNo + 1 <
MI->getNumOperands() &&
959 const MCOperand &
Op =
MI->getOperand(OpNo + 1);
960 NegMnemo =
Op.isImm();
971 printRegularOperand(
MI, OpNo + 1, STI, O);
980 switch (
MI->getOpcode()) {
984 case AMDGPU::V_CNDMASK_B32_sdwa_gfx10:
985 case AMDGPU::V_CNDMASK_B32_dpp_gfx10:
986 case AMDGPU::V_CNDMASK_B32_dpp_gfx11:
988 AMDGPU::getNamedOperandIdx(
MI->getOpcode(), AMDGPU::OpName::src1))
989 printDefaultVccOperand(OpNo == 0, STI, O);
994void AMDGPUInstPrinter::printOperandAndIntInputMods(
const MCInst *
MI,
998 const MCInstrDesc &
Desc =
MII.get(
MI->getOpcode());
999 if (needsImpliedVcc(
Desc, OpNo))
1000 printDefaultVccOperand(
true, STI, O);
1002 unsigned InputModifiers =
MI->getOperand(OpNo).getImm();
1005 printRegularOperand(
MI, OpNo + 1, STI, O);
1010 switch (
MI->getOpcode()) {
1013 case AMDGPU::V_ADD_CO_CI_U32_sdwa_gfx10:
1014 case AMDGPU::V_SUB_CO_CI_U32_sdwa_gfx10:
1015 case AMDGPU::V_SUBREV_CO_CI_U32_sdwa_gfx10:
1016 if ((
int)OpNo + 1 == AMDGPU::getNamedOperandIdx(
MI->getOpcode(),
1017 AMDGPU::OpName::src1))
1018 printDefaultVccOperand(OpNo == 0, STI, O);
1023void AMDGPUInstPrinter::printDPP8(
const MCInst *
MI,
unsigned OpNo,
1029 unsigned Imm =
MI->getOperand(OpNo).getImm();
1031 for (
size_t i = 1; i < 8; ++i) {
1037void AMDGPUInstPrinter::printDPPCtrl(
const MCInst *
MI,
unsigned OpNo,
1040 using namespace AMDGPU::DPP;
1042 unsigned Imm =
MI->getOperand(OpNo).getImm();
1043 const MCInstrDesc &
Desc =
MII.get(
MI->getOpcode());
1047 O <<
" /* DP ALU dpp only supports "
1048 << (
isGFX12(STI) ?
"row_share" :
"row_newbcast") <<
" */";
1051 if (
Imm <= DppCtrl::QUAD_PERM_LAST) {
1057 }
else if ((
Imm >= DppCtrl::ROW_SHL_FIRST) &&
1058 (
Imm <= DppCtrl::ROW_SHL_LAST)) {
1060 }
else if ((
Imm >= DppCtrl::ROW_SHR_FIRST) &&
1061 (
Imm <= DppCtrl::ROW_SHR_LAST)) {
1063 }
else if ((
Imm >= DppCtrl::ROW_ROR_FIRST) &&
1064 (
Imm <= DppCtrl::ROW_ROR_LAST)) {
1066 }
else if (
Imm == DppCtrl::WAVE_SHL1) {
1068 O <<
"/* wave_shl is not supported starting from GFX10 */";
1072 }
else if (
Imm == DppCtrl::WAVE_ROL1) {
1074 O <<
"/* wave_rol is not supported starting from GFX10 */";
1078 }
else if (
Imm == DppCtrl::WAVE_SHR1) {
1080 O <<
"/* wave_shr is not supported starting from GFX10 */";
1084 }
else if (
Imm == DppCtrl::WAVE_ROR1) {
1086 O <<
"/* wave_ror is not supported starting from GFX10 */";
1090 }
else if (
Imm == DppCtrl::ROW_MIRROR) {
1092 }
else if (
Imm == DppCtrl::ROW_HALF_MIRROR) {
1093 O <<
"row_half_mirror";
1094 }
else if (
Imm == DppCtrl::BCAST15) {
1096 O <<
"/* row_bcast is not supported starting from GFX10 */";
1099 O <<
"row_bcast:15";
1100 }
else if (
Imm == DppCtrl::BCAST31) {
1102 O <<
"/* row_bcast is not supported starting from GFX10 */";
1105 O <<
"row_bcast:31";
1106 }
else if ((
Imm >= DppCtrl::ROW_SHARE_FIRST) &&
1107 (
Imm <= DppCtrl::ROW_SHARE_LAST)) {
1109 O <<
"row_newbcast:";
1113 O <<
" /* row_newbcast/row_share is not supported on ASICs earlier "
1114 "than GFX90A/GFX10 */";
1118 }
else if ((
Imm >= DppCtrl::ROW_XMASK_FIRST) &&
1119 (
Imm <= DppCtrl::ROW_XMASK_LAST)) {
1121 O <<
"/* row_xmask is not supported on ASICs earlier than GFX10 */";
1124 O <<
"row_xmask:" <<
formatDec(
Imm - DppCtrl::ROW_XMASK_FIRST);
1126 O <<
"/* Invalid dpp_ctrl value */";
1130void AMDGPUInstPrinter::printDppBoundCtrl(
const MCInst *
MI,
unsigned OpNo,
1133 unsigned Imm =
MI->getOperand(OpNo).getImm();
1135 O <<
" bound_ctrl:1";
1139void AMDGPUInstPrinter::printDppFI(
const MCInst *
MI,
unsigned OpNo,
1141 using namespace llvm::AMDGPU::DPP;
1142 unsigned Imm =
MI->getOperand(OpNo).getImm();
1143 if (
Imm == DPP_FI_1 ||
Imm == DPP8_FI_1) {
1148void AMDGPUInstPrinter::printSDWASel(
const MCInst *
MI,
unsigned OpNo,
1150 using namespace llvm::AMDGPU::SDWA;
1152 unsigned Imm =
MI->getOperand(OpNo).getImm();
1154 case SdwaSel::BYTE_0:
O <<
"BYTE_0";
break;
1155 case SdwaSel::BYTE_1:
O <<
"BYTE_1";
break;
1156 case SdwaSel::BYTE_2:
O <<
"BYTE_2";
break;
1157 case SdwaSel::BYTE_3:
O <<
"BYTE_3";
break;
1158 case SdwaSel::WORD_0:
O <<
"WORD_0";
break;
1159 case SdwaSel::WORD_1:
O <<
"WORD_1";
break;
1160 case SdwaSel::DWORD:
O <<
"DWORD";
break;
1165void AMDGPUInstPrinter::printSDWADstSel(
const MCInst *
MI,
unsigned OpNo,
1169 printSDWASel(
MI, OpNo, O);
1172void AMDGPUInstPrinter::printSDWASrc0Sel(
const MCInst *
MI,
unsigned OpNo,
1176 printSDWASel(
MI, OpNo, O);
1179void AMDGPUInstPrinter::printSDWASrc1Sel(
const MCInst *
MI,
unsigned OpNo,
1183 printSDWASel(
MI, OpNo, O);
1186void AMDGPUInstPrinter::printSDWADstUnused(
const MCInst *
MI,
unsigned OpNo,
1189 using namespace llvm::AMDGPU::SDWA;
1192 unsigned Imm =
MI->getOperand(OpNo).getImm();
1194 case DstUnused::UNUSED_PAD:
O <<
"UNUSED_PAD";
break;
1195 case DstUnused::UNUSED_SEXT:
O <<
"UNUSED_SEXT";
break;
1196 case DstUnused::UNUSED_PRESERVE:
O <<
"UNUSED_PRESERVE";
break;
1201void AMDGPUInstPrinter::printExpSrcN(
const MCInst *
MI,
unsigned OpNo,
1204 unsigned Opc =
MI->getOpcode();
1205 int EnIdx = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::en);
1206 unsigned En =
MI->getOperand(EnIdx).getImm();
1208 int ComprIdx = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::compr);
1211 if (
MI->getOperand(ComprIdx).getImm())
1212 OpNo = OpNo -
N +
N / 2;
1220void AMDGPUInstPrinter::printExpSrc0(
const MCInst *
MI,
unsigned OpNo,
1223 printExpSrcN(
MI, OpNo, STI, O, 0);
1226void AMDGPUInstPrinter::printExpSrc1(
const MCInst *
MI,
unsigned OpNo,
1229 printExpSrcN(
MI, OpNo, STI, O, 1);
1232void AMDGPUInstPrinter::printExpSrc2(
const MCInst *
MI,
unsigned OpNo,
1235 printExpSrcN(
MI, OpNo, STI, O, 2);
1238void AMDGPUInstPrinter::printExpSrc3(
const MCInst *
MI,
unsigned OpNo,
1241 printExpSrcN(
MI, OpNo, STI, O, 3);
1244void AMDGPUInstPrinter::printExpTgt(
const MCInst *
MI,
unsigned OpNo,
1247 using namespace llvm::AMDGPU::Exp;
1250 unsigned Id =
MI->getOperand(OpNo).getImm() & ((1 << 6) - 1);
1255 O <<
' ' << TgtName;
1259 O <<
" invalid_target_" <<
Id;
1264 bool IsPacked,
bool HasDstSel) {
1268 if (!!(
Ops[
I] &
Mod) != DefaultValue)
1278void AMDGPUInstPrinter::printPackedModifier(
const MCInst *
MI,
1282 unsigned Opc =
MI->getOpcode();
1286 std::pair<AMDGPU::OpName, AMDGPU::OpName> MOps[] = {
1287 {AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src0},
1288 {AMDGPU::OpName::src1_modifiers, AMDGPU::OpName::src1},
1289 {AMDGPU::OpName::src2_modifiers, AMDGPU::OpName::src2}};
1292 for (
auto [SrcMod, Src] : MOps) {
1296 int ModIdx = AMDGPU::getNamedOperandIdx(
Opc, SrcMod);
1298 (ModIdx != -1) ?
MI->getOperand(ModIdx).getImm() : DefaultValue;
1307 AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src2_modifiers);
1313 (AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::vdst) != -1) ||
1314 (AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::sdst) != -1);
1322 for (AMDGPU::OpName OpName :
1323 {AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src1_modifiers,
1324 AMDGPU::OpName::src2_modifiers}) {
1325 int Idx = AMDGPU::getNamedOperandIdx(
Opc, OpName);
1333 const bool HasDstSel =
1358void AMDGPUInstPrinter::printOpSel(
const MCInst *
MI,
unsigned,
1361 unsigned Opc =
MI->getOpcode();
1364 AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src0_modifiers);
1365 unsigned Mod =
MI->getOperand(SrcMod).getImm();
1368 if (Index0 || Index1)
1369 O <<
" op_sel:[" << Index0 <<
',' << Index1 <<
']';
1373 auto FIN = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src0_modifiers);
1374 auto BCN = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src1_modifiers);
1378 O <<
" op_sel:[" << FI <<
',' << BC <<
']';
1385void AMDGPUInstPrinter::printOpSelHi(
const MCInst *
MI,
unsigned OpNo,
1391void AMDGPUInstPrinter::printNegLo(
const MCInst *
MI,
unsigned OpNo,
1397void AMDGPUInstPrinter::printNegHi(
const MCInst *
MI,
unsigned OpNo,
1403void AMDGPUInstPrinter::printIndexKey8bit(
const MCInst *
MI,
unsigned OpNo,
1406 auto Imm =
MI->getOperand(OpNo).getImm() & 0x7;
1410 O <<
" index_key:" <<
Imm;
1413void AMDGPUInstPrinter::printIndexKey16bit(
const MCInst *
MI,
unsigned OpNo,
1416 auto Imm =
MI->getOperand(OpNo).getImm() & 0x7;
1420 O <<
" index_key:" <<
Imm;
1423void AMDGPUInstPrinter::printIndexKey32bit(
const MCInst *
MI,
unsigned OpNo,
1426 auto Imm =
MI->getOperand(OpNo).getImm() & 0x7;
1430 O <<
" index_key:" <<
Imm;
1433void AMDGPUInstPrinter::printMatrixFMT(
const MCInst *
MI,
unsigned OpNo,
1436 auto Imm =
MI->getOperand(OpNo).getImm() & 0x7;
1440 O <<
" matrix_" << AorB <<
"_fmt:";
1445 case WMMA::MatrixFMT::MATRIX_FMT_FP8:
1446 O <<
"MATRIX_FMT_FP8";
1448 case WMMA::MatrixFMT::MATRIX_FMT_BF8:
1449 O <<
"MATRIX_FMT_BF8";
1451 case WMMA::MatrixFMT::MATRIX_FMT_FP6:
1452 O <<
"MATRIX_FMT_FP6";
1454 case WMMA::MatrixFMT::MATRIX_FMT_BF6:
1455 O <<
"MATRIX_FMT_BF6";
1457 case WMMA::MatrixFMT::MATRIX_FMT_FP4:
1458 O <<
"MATRIX_FMT_FP4";
1463void AMDGPUInstPrinter::printMatrixAFMT(
const MCInst *
MI,
unsigned OpNo,
1466 printMatrixFMT(
MI, OpNo, STI, O,
'a');
1469void AMDGPUInstPrinter::printMatrixBFMT(
const MCInst *
MI,
unsigned OpNo,
1472 printMatrixFMT(
MI, OpNo, STI, O,
'b');
1475void AMDGPUInstPrinter::printMatrixScale(
const MCInst *
MI,
unsigned OpNo,
1478 auto Imm =
MI->getOperand(OpNo).getImm() & 1;
1482 O <<
" matrix_" << AorB <<
"_scale:";
1487 case WMMA::MatrixScale::MATRIX_SCALE_ROW0:
1488 O <<
"MATRIX_SCALE_ROW0";
1490 case WMMA::MatrixScale::MATRIX_SCALE_ROW1:
1491 O <<
"MATRIX_SCALE_ROW1";
1496void AMDGPUInstPrinter::printMatrixAScale(
const MCInst *
MI,
unsigned OpNo,
1499 printMatrixScale(
MI, OpNo, STI, O,
'a');
1502void AMDGPUInstPrinter::printMatrixBScale(
const MCInst *
MI,
unsigned OpNo,
1505 printMatrixScale(
MI, OpNo, STI, O,
'b');
1508void AMDGPUInstPrinter::printMatrixScaleFmt(
const MCInst *
MI,
unsigned OpNo,
1511 auto Imm =
MI->getOperand(OpNo).getImm() & 3;
1515 O <<
" matrix_" << AorB <<
"_scale_fmt:";
1520 case WMMA::MatrixScaleFmt::MATRIX_SCALE_FMT_E8:
1521 O <<
"MATRIX_SCALE_FMT_E8";
1523 case WMMA::MatrixScaleFmt::MATRIX_SCALE_FMT_E5M3:
1524 O <<
"MATRIX_SCALE_FMT_E5M3";
1526 case WMMA::MatrixScaleFmt::MATRIX_SCALE_FMT_E4M3:
1527 O <<
"MATRIX_SCALE_FMT_E4M3";
1532void AMDGPUInstPrinter::printMatrixAScaleFmt(
const MCInst *
MI,
unsigned OpNo,
1535 printMatrixScaleFmt(
MI, OpNo, STI, O,
'a');
1538void AMDGPUInstPrinter::printMatrixBScaleFmt(
const MCInst *
MI,
unsigned OpNo,
1541 printMatrixScaleFmt(
MI, OpNo, STI, O,
'b');
1544void AMDGPUInstPrinter::printInterpSlot(
const MCInst *
MI,
unsigned OpNum,
1547 unsigned Imm =
MI->getOperand(OpNum).getImm();
1559 O <<
"invalid_param_" <<
Imm;
1563void AMDGPUInstPrinter::printInterpAttr(
const MCInst *
MI,
unsigned OpNum,
1566 unsigned Attr =
MI->getOperand(OpNum).getImm();
1567 O <<
"attr" << Attr;
1570void AMDGPUInstPrinter::printInterpAttrChan(
const MCInst *
MI,
unsigned OpNum,
1573 unsigned Chan =
MI->getOperand(OpNum).getImm();
1574 O <<
'.' <<
"xyzw"[Chan & 0x3];
1577void AMDGPUInstPrinter::printGPRIdxMode(
const MCInst *
MI,
unsigned OpNo,
1580 using namespace llvm::AMDGPU::VGPRIndexMode;
1581 unsigned Val =
MI->getOperand(OpNo).getImm();
1583 if ((Val & ~ENABLE_MASK) != 0) {
1584 O << formatHex(static_cast<uint64_t>(Val));
1587 bool NeedComma =
false;
1588 for (
unsigned ModeId = ID_MIN; ModeId <=
ID_MAX; ++ModeId) {
1589 if (Val & (1 << ModeId)) {
1600void AMDGPUInstPrinter::printMemOperand(
const MCInst *
MI,
unsigned OpNo,
1603 printRegularOperand(
MI, OpNo, STI, O);
1605 printRegularOperand(
MI, OpNo + 1, STI, O);
1613 if (
Op.getImm() == 1) {
1624 if (
Op.getImm() == 1)
1631 int Imm =
MI->getOperand(OpNo).getImm();
1645 const unsigned Imm16 =
MI->getOperand(OpNo).getImm();
1656 O <<
"sendmsg(" << MsgName;
1665 O <<
"sendmsg(" << MsgId <<
", " << OpId <<
", " <<
StreamId <<
')';
1677 uint16_t Probe0 = ((0 & AndMask) | OrMask) ^ XorMask;
1682 for (
unsigned Mask = 1 << (
BITMASK_WIDTH - 1); Mask > 0; Mask >>= 1) {
1752 }
else if (AndMask ==
BITMASK_MAX && OrMask == 0 && XorMask > 0 &&
1763 if (GroupSize > 1 &&
1765 OrMask < GroupSize &&
1783 printU16ImmDecOperand(
MI, OpNo, O);
1792 unsigned SImm16 =
MI->getOperand(OpNo).getImm();
1793 unsigned Vmcnt, Expcnt, Lgkmcnt;
1799 bool PrintAll = IsDefaultVmcnt && IsDefaultExpcnt && IsDefaultLgkmcnt;
1801 bool NeedSpace =
false;
1803 if (!IsDefaultVmcnt || PrintAll) {
1804 O <<
"vmcnt(" << Vmcnt <<
')';
1808 if (!IsDefaultExpcnt || PrintAll) {
1811 O <<
"expcnt(" << Expcnt <<
')';
1815 if (!IsDefaultLgkmcnt || PrintAll) {
1818 O <<
"lgkmcnt(" << Lgkmcnt <<
')';
1827 uint64_t Imm16 =
MI->getOperand(OpNo).getImm() & 0xffff;
1829 bool HasNonDefaultVal =
false;
1835 bool NeedSpace =
false;
1837 if (!IsDefault || !HasNonDefaultVal) {
1840 O << Name <<
'(' << Val <<
')';
1852 const char *BadInstId =
"/* invalid instid value */";
1853 static const std::array<const char *, 12> InstIds = {
1854 "NO_DEP",
"VALU_DEP_1",
"VALU_DEP_2",
1855 "VALU_DEP_3",
"VALU_DEP_4",
"TRANS32_DEP_1",
1856 "TRANS32_DEP_2",
"TRANS32_DEP_3",
"FMA_ACCUM_CYCLE_1",
1857 "SALU_CYCLE_1",
"SALU_CYCLE_2",
"SALU_CYCLE_3"};
1859 const char *BadInstSkip =
"/* invalid instskip value */";
1860 static const std::array<const char *, 6> InstSkips = {
1861 "SAME",
"NEXT",
"SKIP_1",
"SKIP_2",
"SKIP_3",
"SKIP_4"};
1863 unsigned SImm16 =
MI->getOperand(OpNo).getImm();
1864 const char *Prefix =
"";
1866 unsigned Value = SImm16 & 0xF;
1868 const char *Name =
Value < InstIds.size() ? InstIds[
Value] : BadInstId;
1869 O << Prefix <<
"instid0(" << Name <<
')';
1873 Value = (SImm16 >> 4) & 7;
1876 Value < InstSkips.size() ? InstSkips[
Value] : BadInstSkip;
1877 O << Prefix <<
"instskip(" << Name <<
')';
1881 Value = (SImm16 >> 7) & 0xF;
1883 const char *Name =
Value < InstIds.size() ? InstIds[
Value] : BadInstId;
1884 O << Prefix <<
"instid1(" << Name <<
')';
1895 unsigned Val =
MI->getOperand(OpNo).getImm();
1900 if (!HwRegName.
empty()) {
1906 O <<
", " <<
Offset <<
", " << Width;
1921void AMDGPUInstPrinter::printNamedInt(
const MCInst *
MI,
unsigned OpNo,
1924 bool PrintInHex,
bool AlwaysPrint) {
1925 int64_t V =
MI->getOperand(OpNo).getImm();
1926 if (AlwaysPrint || V != 0)
1930void AMDGPUInstPrinter::printBitOp3(
const MCInst *
MI,
unsigned OpNo,
1941 O << formatHex(static_cast<uint64_t>(
Imm));
1944void AMDGPUInstPrinter::printScaleSel(
const MCInst *
MI,
unsigned OpNo,
1947 uint8_t
Imm =
MI->getOperand(OpNo).getImm();
1954#include "AMDGPUGenAsmWriter.inc"
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static void printSwizzleBitmask(const uint16_t AndMask, const uint16_t OrMask, const uint16_t XorMask, raw_ostream &O)
static bool printImmediateBFloat16(uint32_t Imm, const MCSubtargetInfo &STI, raw_ostream &O)
static bool allOpsDefaultValue(const int *Ops, int NumOps, int Mod, bool IsPacked, bool HasDstSel)
static MCRegister getRegForPrinting(MCRegister Reg, const MCRegisterInfo &MRI)
static MCRegister getRegFromMIA(MCRegister Reg, unsigned OpNo, const MCInstrDesc &Desc, const MCRegisterInfo &MRI, const AMDGPUMCInstrAnalysis &MIA)
static bool printImmediateFP16(uint32_t Imm, const MCSubtargetInfo &STI, raw_ostream &O)
Provides AMDGPU specific target descriptions.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
if(auto Err=PB.parsePassPipeline(MPM, Passes)) return wrap(std MPM run * Mod
void printSwizzle(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printEndpgm(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
static const char * getRegisterName(MCRegister Reg)
static void printIfSet(const MCInst *MI, unsigned OpNo, raw_ostream &O, StringRef Asm, StringRef Default="")
void printDepCtr(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printHwreg(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printSendMsg(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
static void printRegOperand(MCRegister Reg, raw_ostream &O, const MCRegisterInfo &MRI)
void printRegName(raw_ostream &OS, MCRegister Reg) override
Print the assembler register name.
void printInst(const MCInst *MI, uint64_t Address, StringRef Annot, const MCSubtargetInfo &STI, raw_ostream &O) override
Print the specified MCInst to the specified raw_ostream.
void printInstruction(const MCInst *MI, uint64_t Address, const MCSubtargetInfo &STI, raw_ostream &O)
void printSWaitCnt(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printOModSI(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printSDelayALU(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
unsigned getVgprMSBs() const
void printExpr(raw_ostream &, const MCExpr &) const
format_object< int64_t > formatHex(int64_t Value) const
format_object< int64_t > formatDec(int64_t Value) const
Utility functions to print decimal/hexadecimal values.
const MCRegisterInfo & MRI
void printAnnotation(raw_ostream &OS, StringRef Annot)
Utility function for printing annotations.
const MCInstrAnalysis * MIA
Instances of this class represent a single low-level machine instruction.
Describe properties that are true of each instruction in the target description file.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
bool isLookupRegClassByHwMode() const
Set if this operand is a value that requires the current hwmode to look up its register class.
int16_t RegClass
This specifies the register class enumeration of the operand if the operand is a register.
Instances of this class represent operands of the MCInst class.
MCRegisterClass - Base class of TargetRegisterClass.
unsigned getID() const
getID() - Return the register class ID number.
MCRegister getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
virtual unsigned getHwMode(enum HwModeType type=HwMode_Default) const
HwMode ID corresponding to the 'type' parameter is retrieved from the HwMode bit set of the current s...
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
LLVM Value Representation.
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI)
bool getTgtName(unsigned Id, StringRef &Name, int &Index)
StringRef getHwreg(uint64_t Encoding, const MCSubtargetInfo &STI)
uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId)
bool msgSupportsStream(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI)
void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId, uint16_t &StreamId, const MCSubtargetInfo &STI)
StringRef getMsgName(uint64_t Encoding, const MCSubtargetInfo &STI)
Map from an encoding to the symbolic name for a msg_id immediate.
bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId, const MCSubtargetInfo &STI, bool Strict)
StringRef getMsgOpName(int64_t MsgId, uint64_t Encoding, const MCSubtargetInfo &STI)
Map from an encoding to the symbolic name for a sendmsg operation.
bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI)
bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI, bool Strict)
const char *const IdSymbolic[]
bool isInlineValue(MCRegister Reg)
void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt)
Decodes Vmcnt, Expcnt and Lgkmcnt from given Waitcnt for given isa Version, and writes the decoded values into Vmcnt, Expcnt and Lgkmcnt respectively.
bool isVOPCAsmOnly(unsigned Opc)
unsigned getTemporalHintType(const MCInstrDesc TID)
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST)
For pre-GFX12 FLAT instructions the offset must be positive; MSB is ignored and forced to zero.
bool isGFX12Plus(const MCSubtargetInfo &STI)
const MCRegisterClass * getVGPRPhysRegClass(MCRegister Reg, const MCRegisterInfo &MRI)
bool isGFX940(const MCSubtargetInfo &STI)
LLVM_ABI IsaVersion getIsaVersion(StringRef GPU)
LLVM_READNONE bool isLegalDPALU_DPPControl(const MCSubtargetInfo &ST, unsigned DC)
bool isSI(const MCSubtargetInfo &STI)
LLVM_READONLY bool hasNamedOperand(uint64_t Opcode, OpName NamedIdx)
bool getVOP3IsSingle(unsigned Opc)
bool getVOP1IsSingle(unsigned Opc)
bool isGFX90A(const MCSubtargetInfo &STI)
LLVM_READONLY const MIMGDimInfo * getMIMGDimInfoByEncoding(uint8_t DimEnc)
bool isGFX12(const MCSubtargetInfo &STI)
MCRegister getVGPRWithMSBs(MCRegister Reg, unsigned MSBs, const MCRegisterInfo &MRI)
If Reg is a low VGPR return a corresponding high VGPR with MSBs set.
unsigned getVmcntBitMask(const IsaVersion &Version)
LLVM_READNONE bool isInlinableIntLiteral(int64_t Literal)
Is this literal inlinable, and not one of the values intended for floating point values.
unsigned getLgkmcntBitMask(const IsaVersion &Version)
std::pair< const AMDGPU::OpName *, const AMDGPU::OpName * > getVGPRLoweringOperandTables(const MCInstrDesc &Desc)
unsigned getExpcntBitMask(const IsaVersion &Version)
bool isGFX10Plus(const MCSubtargetInfo &STI)
@ OPERAND_REG_INLINE_C_FP64
@ OPERAND_REG_INLINE_C_BF16
@ OPERAND_REG_INLINE_C_V2BF16
@ OPERAND_REG_IMM_V2INT16
@ OPERAND_REG_IMM_INT32
Operands with register, 32-bit, or 64-bit immediate.
@ OPERAND_REG_INLINE_C_INT64
@ OPERAND_REG_INLINE_C_INT16
Operands with register or inline constant.
@ OPERAND_REG_IMM_NOINLINE_V2FP16
@ OPERAND_REG_INLINE_C_V2FP16
@ OPERAND_REG_INLINE_AC_INT32
Operands with an AccVGPR register or inline constant.
@ OPERAND_REG_INLINE_AC_FP32
@ OPERAND_REG_IMM_V2INT32
@ OPERAND_REG_INLINE_C_FP32
@ OPERAND_REG_INLINE_C_INT32
@ OPERAND_REG_INLINE_C_V2INT16
@ OPERAND_REG_INLINE_AC_FP64
@ OPERAND_REG_INLINE_C_FP16
@ OPERAND_INLINE_SPLIT_BARRIER_INT32
bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
bool isGFX9Plus(const MCSubtargetInfo &STI)
bool isCvt_F32_Fp8_Bf8_e64(unsigned Opc)
MCRegister mc2PseudoReg(MCRegister Reg)
Convert hardware register Reg to a pseudo register.
bool isCI(const MCSubtargetInfo &STI)
bool getVOP2IsSingle(unsigned Opc)
bool isPermlane16(unsigned Opc)
Scope
Defines the scope in which this symbol should be visible: Default – Visible in the public interface of the translation unit.
This is an optimization pass for GlobalISel generic memory operations.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64 bit value.
@ Mod
The access may modify the value stored in memory.
To bit_cast(const From &from) noexcept
DWARFExpression::Operation Op
constexpr int32_t SignExtend32(uint32_t X)
Sign-extend the number in the bottom B bits of X to a 32-bit integer.
@ Default
The result values are uniform if and only if all operands are uniform.
static constexpr ValueType Default
static std::tuple< typename Fields::ValueType... > decode(uint64_t Encoded)
Instruction set architecture version.