void AMDGPUInstPrinter::printU16ImmOperand(const MCInst *MI, unsigned OpNo,
  int64_t Imm = Op.getImm();
    O << formatHex(static_cast<uint64_t>(Imm & 0xffff));
    printU32ImmOperand(MI, OpNo, STI, O);
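// A 16-bit operand may still carry a 32-bit literal whose high bits are
// ignored; the width check is elided in this listing, but the intent is that
// values fitting in 16 bits are printed as a masked hex literal while wider
// ones fall through to printU32ImmOperand.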
void AMDGPUInstPrinter::printU16ImmDecOperand(const MCInst *MI, unsigned OpNo,

void AMDGPUInstPrinter::printU32ImmOperand(const MCInst *MI, unsigned OpNo,
  const MCOperand &Op = MI->getOperand(OpNo);
    MAI.printExpr(O, *Op.getExpr());
void AMDGPUInstPrinter::printFP64ImmOperand(const MCInst *MI, unsigned OpNo,
  const MCOperand &Op = MI->getOperand(OpNo);
    MAI.printExpr(O, *Op.getExpr());
    printLiteral64(Op.getImm(), O, true);

void AMDGPUInstPrinter::printNamedBit(const MCInst *MI, unsigned OpNo,
  if (MI->getOperand(OpNo).getImm()) {
void AMDGPUInstPrinter::printOffset(const MCInst *MI, unsigned OpNo,
  uint32_t Imm = MI->getOperand(OpNo).getImm();
    const MCInstrDesc &Desc = MII.get(MI->getOpcode());
      O << formatDec(SignExtend32<24>(Imm));
      printU16ImmDecOperand(MI, OpNo, O);
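// The offset here is a 24-bit field: SignExtend32<24>(Imm) recovers the
// signed value before printing, so a raw encoding of 0xFFFFFF is emitted
// as -1.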
void AMDGPUInstPrinter::printFlatOffset(const MCInst *MI, unsigned OpNo,
  uint32_t Imm = MI->getOperand(OpNo).getImm();
    const MCInstrDesc &Desc = MII.get(MI->getOpcode());
      printU16ImmDecOperand(MI, OpNo, O);

void AMDGPUInstPrinter::printSMRDOffset8(const MCInst *MI, unsigned OpNo,
  printU32ImmOperand(MI, OpNo, STI, O);

void AMDGPUInstPrinter::printSMEMOffset(const MCInst *MI, unsigned OpNo,

void AMDGPUInstPrinter::printSMRDLiteralOffset(const MCInst *MI, unsigned OpNo,
  printU32ImmOperand(MI, OpNo, STI, O);
void AMDGPUInstPrinter::printCPol(const MCInst *MI, unsigned OpNo,
  auto Imm = MI->getOperand(OpNo).getImm();
    O << " scale_offset";
    printTH(MI, TH, Scope, O);
    printScope(Scope, O);
    O << " /* unexpected cache policy bit */";

void AMDGPUInstPrinter::printTH(const MCInst *MI, int64_t TH, int64_t Scope,
  const unsigned Opcode = MI->getOpcode();
  const MCInstrDesc &TID = MII.get(Opcode);
    O << (IsStore ? "TH_STORE_" : "TH_LOAD_");
               : (IsStore ? "WB" : "LU"));

void AMDGPUInstPrinter::printScope(int64_t Scope, raw_ostream &O) {
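// The cache-policy (cpol) operand is printed as separate assembler tokens:
// printTH builds a temporal-hint token from the "TH_LOAD_"/"TH_STORE_"
// prefix, printScope appends the scope, and any unrecognised bit is flagged
// with the " /* unexpected cache policy bit */" comment emitted above.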
void AMDGPUInstPrinter::printDim(const MCInst *MI, unsigned OpNo,
  unsigned Dim = MI->getOperand(OpNo).getImm();
  O << " dim:SQ_RSRC_IMG_";

void AMDGPUInstPrinter::printR128A16(const MCInst *MI, unsigned OpNo,
    printNamedBit(MI, OpNo, O, "a16");
    printNamedBit(MI, OpNo, O, "r128");

void AMDGPUInstPrinter::printFORMAT(const MCInst *MI, unsigned OpNo,
void AMDGPUInstPrinter::printSymbolicFormat(const MCInst *MI,
  using namespace llvm::AMDGPU::MTBUFFormat;

      AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::format);
  unsigned Val = MI->getOperand(OpNo).getImm();
    if (Val == UFMT_DEFAULT)
      O << " format:" << Val;
    if (Val == DFMT_NFMT_DEFAULT)
      if (Dfmt != DFMT_DEFAULT) {
        if (Nfmt != NFMT_DEFAULT) {
      if (Nfmt != NFMT_DEFAULT) {
      O << " format:" << Val;
  unsigned Enc = MRI.getEncodingValue(Reg);
  unsigned RegNo = Idx % 0x100;
  if (RC->getID() == AMDGPU::VGPR_16RegClassID) {

  unsigned Enc = MRI.getEncodingValue(Reg);
  unsigned Opc = Desc.getOpcode();
  for (I = 0; I < 4; ++I) {
    if (Ops.first[I] != AMDGPU::OpName::NUM_OPERAND_NAMES &&
        (unsigned)AMDGPU::getNamedOperandIdx(Opc, Ops.first[I]) == OpNo)
    if (Ops.second && Ops.second[I] != AMDGPU::OpName::NUM_OPERAND_NAMES &&
        (unsigned)AMDGPU::getNamedOperandIdx(Opc, Ops.second[I]) == OpNo)
  unsigned OpMSBs = (VgprMSBs >> (I * 2)) & 3;
  case AMDGPU::PRIVATE_RSRC_REG:
  if (PrintReg != Reg.id())
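// The fragments above appear to belong to the register-printing helpers
// (getVGPRPhysRegClass / getRegFromMIA / printRegOperand): they recover the
// hardware encoding of a VGPR, apply any VGPR MSB bits recorded by the MC
// instruction analysis (VgprMSBs), and map the result back to a register
// that has a printable assembler name.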
void AMDGPUInstPrinter::printVOPDst(const MCInst *MI, unsigned OpNo,
  auto Opcode = MI->getOpcode();
  printRegularOperand(MI, OpNo, STI, O);
  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_sdwa_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_sdwa_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_sdwa_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx12:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx12:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx12:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx12:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx12:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx12:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx12:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx12:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx12:
    printDefaultVccOperand(false, STI, O);
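// For the carry-out forms listed above (v_add_co_ci_u32 and friends on
// gfx10/11/12), vcc/vcc_lo is an implicit definition rather than an explicit
// MC operand, so printDefaultVccOperand(false, ...) prints it right after
// the destination register.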
void AMDGPUInstPrinter::printVINTRPDst(const MCInst *MI, unsigned OpNo,
  printRegularOperand(MI, OpNo, STI, O);

void AMDGPUInstPrinter::printImmediateInt16(uint32_t Imm,
  int32_t SImm = static_cast<int32_t>(Imm);
    if (printImmediateFloat32(Imm, STI, O))
  O << formatHex(static_cast<uint64_t>(Imm & 0xffff));
  else if (Imm == 0xBC00)
  else if (Imm == 0x3800)
  else if (Imm == 0xB800)
  else if (Imm == 0x4000)
  else if (Imm == 0xC000)
  else if (Imm == 0x4400)
  else if (Imm == 0xC400)
  else if (Imm == 0x3118 && STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
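// These hex patterns are the IEEE half-precision encodings of the AMDGPU
// inline constants: 0xBC00 = -1.0, 0x3800 = 0.5, 0xB800 = -0.5,
// 0x4000 = 2.0, 0xC000 = -2.0, 0x4400 = 4.0, 0xC400 = -4.0, and
// 0x3118 ~= 0.15915494 (1/(2*pi)), which is only an inline constant when
// FeatureInv2PiInlineImm is available.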
  else if (Imm == 0xBF80)
  else if (Imm == 0x3F00)
  else if (Imm == 0xBF00)
  else if (Imm == 0x4000)
  else if (Imm == 0xC000)
  else if (Imm == 0x4080)
  else if (Imm == 0xC080)
  else if (Imm == 0x3E22 && STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
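// Same scheme for bfloat16 (the upper 16 bits of the binary32 pattern):
// 0xBF80 = -1.0, 0x3F00 = 0.5, 0xBF00 = -0.5, 0x4000 = 2.0, 0xC000 = -2.0,
// 0x4080 = 4.0, 0xC080 = -4.0, and 0x3E22 ~= 0.15915494 (1/(2*pi)) behind
// FeatureInv2PiInlineImm.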
void AMDGPUInstPrinter::printImmediateBF16(uint32_t Imm,
  int16_t SImm = static_cast<int16_t>(Imm);
  O << formatHex(static_cast<uint64_t>(Imm));

void AMDGPUInstPrinter::printImmediateF16(uint32_t Imm,
  int16_t SImm = static_cast<int16_t>(Imm);
  uint16_t HImm = static_cast<uint16_t>(Imm);
  uint64_t Imm16 = static_cast<uint16_t>(Imm);

void AMDGPUInstPrinter::printImmediateV216(uint32_t Imm, uint8_t OpType,
  int32_t SImm = static_cast<int32_t>(Imm);
      if (printImmediateFloat32(Imm, STI, O))
  O << formatHex(static_cast<uint64_t>(Imm));
bool AMDGPUInstPrinter::printImmediateFloat32(uint32_t Imm,
  else if (Imm == 0x3e22f983 &&
           STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))

void AMDGPUInstPrinter::printImmediate32(uint32_t Imm,
  int32_t SImm = static_cast<int32_t>(Imm);
  if (printImmediateFloat32(Imm, STI, O))
  O << formatHex(static_cast<uint64_t>(Imm));

void AMDGPUInstPrinter::printImmediate64(uint64_t Imm,
  int64_t SImm = static_cast<int64_t>(Imm);
  if (SImm >= -16 && SImm <= 64) {
  else if (Imm == 0x3fc45f306dc9c882 &&
           STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    O << "0.15915494309189532";
    printLiteral64(Imm, O, IsFP);
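// 32- and 64-bit operands follow the same pattern: integers in the inline
// range -16..64 and a small set of floats are printed symbolically,
// 0x3e22f983 / 0x3fc45f306dc9c882 become the 1/(2*pi) literal when the
// subtarget supports it, and everything else falls back to a hex literal
// (via printLiteral64 in the 64-bit case).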
void AMDGPUInstPrinter::printLiteral64(uint64_t Imm, raw_ostream &O,
    O << formatHex(static_cast<uint64_t>(Hi_32(Imm)));
void AMDGPUInstPrinter::printBLGP(const MCInst *MI, unsigned OpNo,
  unsigned Imm = MI->getOperand(OpNo).getImm();
  switch (MI->getOpcode()) {
  case AMDGPU::V_MFMA_F64_16X16X4F64_gfx940_acd:
  case AMDGPU::V_MFMA_F64_16X16X4F64_gfx940_vcd:
  case AMDGPU::V_MFMA_F64_4X4X4F64_gfx940_acd:
  case AMDGPU::V_MFMA_F64_4X4X4F64_gfx940_vcd:
    O << " neg:[" << (Imm & 1) << ',' << ((Imm >> 1) & 1) << ','
      << ((Imm >> 2) & 1) << ']';

  O << " blgp:" << Imm;
void AMDGPUInstPrinter::printDefaultVccOperand(bool FirstOperand,

                                        unsigned OpNo) const {
         (Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC) ||
          Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC_LO));

void AMDGPUInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
  unsigned Opc = MI->getOpcode();
  int ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
      (Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC) ||
       Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC_LO)))
    printDefaultVccOperand(true, STI, O);
  printRegularOperand(MI, OpNo, STI, O);
void AMDGPUInstPrinter::printRegularOperand(const MCInst *MI, unsigned OpNo,
  const MCInstrDesc &Desc = MII.get(MI->getOpcode());
  if (OpNo >= MI->getNumOperands()) {
    O << "/*Missing OP" << OpNo << "*/";

  const MCOperand &Op = MI->getOperand(OpNo);
    const MCOperandInfo &OpInfo = Desc.operands()[OpNo];
    int16_t RCID = MII.getOpRegClassID(
      const MCRegisterClass &RC = MRI.getRegClass(RCID);
        O << "/*Invalid register, operand has \'" << MRI.getRegClassName(&RC)
          << "\' register class*/";
  } else if (Op.isImm()) {
    const uint8_t OpTy = Desc.operands()[OpNo].OperandType;
      printImmediate32(Op.getImm(), STI, O);
      printImmediate64(Op.getImm(), STI, O, false);
      printImmediate64(Op.getImm(), STI, O, true);
      printImmediateInt16(Op.getImm(), STI, O);
      printImmediateF16(Op.getImm(), STI, O);
      printImmediateBF16(Op.getImm(), STI, O);
      printImmediateV216(Op.getImm(), OpTy, STI, O);
        printImmediate32(Op.getImm(), STI, O);
      O << "/*Invalid immediate*/";
  } else if (Op.isExpr()) {
    const MCExpr *Exp = Op.getExpr();
    MAI.printExpr(O, *Exp);

  switch (MI->getOpcode()) {
  case AMDGPU::V_CNDMASK_B32_e32_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_CNDMASK_B32_dpp8_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_CNDMASK_B32_e32_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_CNDMASK_B32_dpp8_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx11:
  case AMDGPU::V_CNDMASK_B32_e32_gfx12:
  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx12:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx12:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx12:
  case AMDGPU::V_CNDMASK_B32_dpp_gfx12:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx12:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx12:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx12:
  case AMDGPU::V_CNDMASK_B32_dpp8_gfx12:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx12:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx12:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx12:
  case AMDGPU::V_CNDMASK_B32_e32_gfx6_gfx7:
  case AMDGPU::V_CNDMASK_B32_e32_vi:
    if ((int)OpNo == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                                AMDGPU::OpName::src1))
      printDefaultVccOperand(OpNo == 0, STI, O);

        AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::soffset);
    if ((int)OpNo == SOffsetIdx)
      printSymbolicFormat(MI, STI, O);
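// printRegularOperand is the central dispatcher: register operands are
// checked against the operand's register class, immediates are routed by
// their MCOI OperandType to the width-specific printers above
// (printImmediate32/64, printImmediateInt16/F16/BF16, printImmediateV216),
// and the trailing opcode switch re-inserts the implicit vcc source after
// src1 for the carry-in VOP2 forms, e.g.
// "v_add_co_ci_u32_e32 v0, vcc_lo, v1, v2, vcc_lo" in wave32 disassembly.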
void AMDGPUInstPrinter::printOperandAndFPInputMods(const MCInst *MI,
  const MCInstrDesc &Desc = MII.get(MI->getOpcode());
  if (needsImpliedVcc(Desc, OpNo))
    printDefaultVccOperand(true, STI, O);

  unsigned InputModifiers = MI->getOperand(OpNo).getImm();
  bool NegMnemo = false;
    if (OpNo + 1 < MI->getNumOperands() &&
      const MCOperand &Op = MI->getOperand(OpNo + 1);
      NegMnemo = Op.isImm();
  printRegularOperand(MI, OpNo + 1, STI, O);

  switch (MI->getOpcode()) {
  case AMDGPU::V_CNDMASK_B32_sdwa_gfx10:
  case AMDGPU::V_CNDMASK_B32_dpp_gfx10:
  case AMDGPU::V_CNDMASK_B32_dpp_gfx11:
        AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::src1))
      printDefaultVccOperand(OpNo == 0, STI, O);
void AMDGPUInstPrinter::printOperandAndIntInputMods(const MCInst *MI,
  const MCInstrDesc &Desc = MII.get(MI->getOpcode());
  if (needsImpliedVcc(Desc, OpNo))
    printDefaultVccOperand(true, STI, O);

  unsigned InputModifiers = MI->getOperand(OpNo).getImm();
  printRegularOperand(MI, OpNo + 1, STI, O);

  switch (MI->getOpcode()) {
  case AMDGPU::V_ADD_CO_CI_U32_sdwa_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_sdwa_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_sdwa_gfx10:
    if ((int)OpNo + 1 == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                                    AMDGPU::OpName::src1))
      printDefaultVccOperand(OpNo == 0, STI, O);
void AMDGPUInstPrinter::printDPP8(const MCInst *MI, unsigned OpNo,
  unsigned Imm = MI->getOperand(OpNo).getImm();
  for (size_t i = 1; i < 8; ++i) {
void AMDGPUInstPrinter::printDPPCtrl(const MCInst *MI, unsigned OpNo,
  using namespace AMDGPU::DPP;

  unsigned Imm = MI->getOperand(OpNo).getImm();
  const MCInstrDesc &Desc = MII.get(MI->getOpcode());
    O << " /* DP ALU dpp only supports "
      << (isGFX12(STI) ? "row_share" : "row_newbcast") << " */";

  if (Imm <= DppCtrl::QUAD_PERM_LAST) {
  } else if ((Imm >= DppCtrl::ROW_SHL_FIRST) &&
             (Imm <= DppCtrl::ROW_SHL_LAST)) {
  } else if ((Imm >= DppCtrl::ROW_SHR_FIRST) &&
             (Imm <= DppCtrl::ROW_SHR_LAST)) {
  } else if ((Imm >= DppCtrl::ROW_ROR_FIRST) &&
             (Imm <= DppCtrl::ROW_ROR_LAST)) {
  } else if (Imm == DppCtrl::WAVE_SHL1) {
      O << "/* wave_shl is not supported starting from GFX10 */";
  } else if (Imm == DppCtrl::WAVE_ROL1) {
      O << "/* wave_rol is not supported starting from GFX10 */";
  } else if (Imm == DppCtrl::WAVE_SHR1) {
      O << "/* wave_shr is not supported starting from GFX10 */";
  } else if (Imm == DppCtrl::WAVE_ROR1) {
      O << "/* wave_ror is not supported starting from GFX10 */";
  } else if (Imm == DppCtrl::ROW_MIRROR) {
  } else if (Imm == DppCtrl::ROW_HALF_MIRROR) {
    O << "row_half_mirror";
  } else if (Imm == DppCtrl::BCAST15) {
      O << "/* row_bcast is not supported starting from GFX10 */";
    O << "row_bcast:15";
  } else if (Imm == DppCtrl::BCAST31) {
      O << "/* row_bcast is not supported starting from GFX10 */";
    O << "row_bcast:31";
  } else if ((Imm >= DppCtrl::ROW_SHARE_FIRST) &&
             (Imm <= DppCtrl::ROW_SHARE_LAST)) {
        O << "row_newbcast:";
      O << " /* row_newbcast/row_share is not supported on ASICs earlier "
           "than GFX90A/GFX10 */";
  } else if ((Imm >= DppCtrl::ROW_XMASK_FIRST) &&
             (Imm <= DppCtrl::ROW_XMASK_LAST)) {
      O << "/* row_xmask is not supported on ASICs earlier than GFX10 */";
    O << "row_xmask:" << formatDec(Imm - DppCtrl::ROW_XMASK_FIRST);
    O << "/* Invalid dpp_ctrl value */";
void AMDGPUInstPrinter::printDppBoundCtrl(const MCInst *MI, unsigned OpNo,
  unsigned Imm = MI->getOperand(OpNo).getImm();
    O << " bound_ctrl:1";

void AMDGPUInstPrinter::printDppFI(const MCInst *MI, unsigned OpNo,
  using namespace llvm::AMDGPU::DPP;
  unsigned Imm = MI->getOperand(OpNo).getImm();
  if (Imm == DPP_FI_1 || Imm == DPP8_FI_1) {
void AMDGPUInstPrinter::printSDWASel(const MCInst *MI, unsigned OpNo,
  using namespace llvm::AMDGPU::SDWA;

  unsigned Imm = MI->getOperand(OpNo).getImm();
  case SdwaSel::BYTE_0: O << "BYTE_0"; break;
  case SdwaSel::BYTE_1: O << "BYTE_1"; break;
  case SdwaSel::BYTE_2: O << "BYTE_2"; break;
  case SdwaSel::BYTE_3: O << "BYTE_3"; break;
  case SdwaSel::WORD_0: O << "WORD_0"; break;
  case SdwaSel::WORD_1: O << "WORD_1"; break;
  case SdwaSel::DWORD: O << "DWORD"; break;
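// These selector names appear verbatim in SDWA assembler syntax, e.g.
// "dst_sel:BYTE_0" or "src0_sel:WORD_1" on a v_*_sdwa instruction.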
void AMDGPUInstPrinter::printSDWADstSel(const MCInst *MI, unsigned OpNo,
  printSDWASel(MI, OpNo, O);

void AMDGPUInstPrinter::printSDWASrc0Sel(const MCInst *MI, unsigned OpNo,
  printSDWASel(MI, OpNo, O);

void AMDGPUInstPrinter::printSDWASrc1Sel(const MCInst *MI, unsigned OpNo,
  printSDWASel(MI, OpNo, O);

void AMDGPUInstPrinter::printSDWADstUnused(const MCInst *MI, unsigned OpNo,
  using namespace llvm::AMDGPU::SDWA;

  unsigned Imm = MI->getOperand(OpNo).getImm();
  case DstUnused::UNUSED_PAD: O << "UNUSED_PAD"; break;
  case DstUnused::UNUSED_SEXT: O << "UNUSED_SEXT"; break;
  case DstUnused::UNUSED_PRESERVE: O << "UNUSED_PRESERVE"; break;
void AMDGPUInstPrinter::printExpSrcN(const MCInst *MI, unsigned OpNo,
  unsigned Opc = MI->getOpcode();
  int EnIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::en);
  unsigned En = MI->getOperand(EnIdx).getImm();

  int ComprIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::compr);
  if (MI->getOperand(ComprIdx).getImm())
    OpNo = OpNo - N + N / 2;

void AMDGPUInstPrinter::printExpSrc0(const MCInst *MI, unsigned OpNo,
  printExpSrcN(MI, OpNo, STI, O, 0);

void AMDGPUInstPrinter::printExpSrc1(const MCInst *MI, unsigned OpNo,
  printExpSrcN(MI, OpNo, STI, O, 1);

void AMDGPUInstPrinter::printExpSrc2(const MCInst *MI, unsigned OpNo,
  printExpSrcN(MI, OpNo, STI, O, 2);

void AMDGPUInstPrinter::printExpSrc3(const MCInst *MI, unsigned OpNo,
  printExpSrcN(MI, OpNo, STI, O, 3);

void AMDGPUInstPrinter::printExpTgt(const MCInst *MI, unsigned OpNo,
  using namespace llvm::AMDGPU::Exp;

  unsigned Id = MI->getOperand(OpNo).getImm() & ((1 << 6) - 1);
    O << ' ' << TgtName;
    O << " invalid_target_" << Id;
                               bool IsPacked, bool HasDstSel) {
    if (!!(Ops[I] & Mod) != DefaultValue)

void AMDGPUInstPrinter::printPackedModifier(const MCInst *MI,
  unsigned Opc = MI->getOpcode();

  std::pair<AMDGPU::OpName, AMDGPU::OpName> MOps[] = {
      {AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src0},
      {AMDGPU::OpName::src1_modifiers, AMDGPU::OpName::src1},
      {AMDGPU::OpName::src2_modifiers, AMDGPU::OpName::src2}};

  for (auto [SrcMod, Src] : MOps) {
    int ModIdx = AMDGPU::getNamedOperandIdx(Opc, SrcMod);
        (ModIdx != -1) ? MI->getOperand(ModIdx).getImm() : DefaultValue;

      AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2_modifiers);
      (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst) != -1) ||
      (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sdst) != -1);

  for (AMDGPU::OpName OpName :
       {AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src1_modifiers,
        AMDGPU::OpName::src2_modifiers}) {
    int Idx = AMDGPU::getNamedOperandIdx(Opc, OpName);

  const bool HasDstSel =
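// printPackedModifier appears to back the op_sel / op_sel_hi / neg_lo /
// neg_hi printing for packed (VOP3P) instructions: it gathers the per-source
// *_modifiers immediates and only emits a bracketed list such as
// " op_sel:[0,1,0]" when some bit differs from the default
// (cf. allOpsDefaultValue above).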
void AMDGPUInstPrinter::printOpSel(const MCInst *MI, unsigned,
  unsigned Opc = MI->getOpcode();
        AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    unsigned Mod = MI->getOperand(SrcMod).getImm();
    if (Index0 || Index1)
      O << " op_sel:[" << Index0 << ',' << Index1 << ']';

    auto FIN = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    auto BCN = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
      O << " op_sel:[" << FI << ',' << BC << ']';

void AMDGPUInstPrinter::printOpSelHi(const MCInst *MI, unsigned OpNo,

void AMDGPUInstPrinter::printNegLo(const MCInst *MI, unsigned OpNo,

void AMDGPUInstPrinter::printNegHi(const MCInst *MI, unsigned OpNo,
void AMDGPUInstPrinter::printIndexKey8bit(const MCInst *MI, unsigned OpNo,
  auto Imm = MI->getOperand(OpNo).getImm() & 0x7;
  O << " index_key:" << Imm;

void AMDGPUInstPrinter::printIndexKey16bit(const MCInst *MI, unsigned OpNo,
  auto Imm = MI->getOperand(OpNo).getImm() & 0x7;
  O << " index_key:" << Imm;

void AMDGPUInstPrinter::printIndexKey32bit(const MCInst *MI, unsigned OpNo,
  auto Imm = MI->getOperand(OpNo).getImm() & 0x7;
  O << " index_key:" << Imm;
void AMDGPUInstPrinter::printMatrixFMT(const MCInst *MI, unsigned OpNo,
  auto Imm = MI->getOperand(OpNo).getImm() & 0x7;
  O << " matrix_" << AorB << "_fmt:";
  case WMMA::MatrixFMT::MATRIX_FMT_FP8:
    O << "MATRIX_FMT_FP8";
  case WMMA::MatrixFMT::MATRIX_FMT_BF8:
    O << "MATRIX_FMT_BF8";
  case WMMA::MatrixFMT::MATRIX_FMT_FP6:
    O << "MATRIX_FMT_FP6";
  case WMMA::MatrixFMT::MATRIX_FMT_BF6:
    O << "MATRIX_FMT_BF6";
  case WMMA::MatrixFMT::MATRIX_FMT_FP4:
    O << "MATRIX_FMT_FP4";

void AMDGPUInstPrinter::printMatrixAFMT(const MCInst *MI, unsigned OpNo,
  printMatrixFMT(MI, OpNo, STI, O, 'a');

void AMDGPUInstPrinter::printMatrixBFMT(const MCInst *MI, unsigned OpNo,
  printMatrixFMT(MI, OpNo, STI, O, 'b');
void AMDGPUInstPrinter::printMatrixScale(const MCInst *MI, unsigned OpNo,
  auto Imm = MI->getOperand(OpNo).getImm() & 1;
  O << " matrix_" << AorB << "_scale:";
  case WMMA::MatrixScale::MATRIX_SCALE_ROW0:
    O << "MATRIX_SCALE_ROW0";
  case WMMA::MatrixScale::MATRIX_SCALE_ROW1:
    O << "MATRIX_SCALE_ROW1";

void AMDGPUInstPrinter::printMatrixAScale(const MCInst *MI, unsigned OpNo,
  printMatrixScale(MI, OpNo, STI, O, 'a');

void AMDGPUInstPrinter::printMatrixBScale(const MCInst *MI, unsigned OpNo,
  printMatrixScale(MI, OpNo, STI, O, 'b');
void AMDGPUInstPrinter::printMatrixScaleFmt(const MCInst *MI, unsigned OpNo,
  auto Imm = MI->getOperand(OpNo).getImm() & 3;
  O << " matrix_" << AorB << "_scale_fmt:";
  case WMMA::MatrixScaleFmt::MATRIX_SCALE_FMT_E8:
    O << "MATRIX_SCALE_FMT_E8";
  case WMMA::MatrixScaleFmt::MATRIX_SCALE_FMT_E5M3:
    O << "MATRIX_SCALE_FMT_E5M3";
  case WMMA::MatrixScaleFmt::MATRIX_SCALE_FMT_E4M3:
    O << "MATRIX_SCALE_FMT_E4M3";

void AMDGPUInstPrinter::printMatrixAScaleFmt(const MCInst *MI, unsigned OpNo,
  printMatrixScaleFmt(MI, OpNo, STI, O, 'a');

void AMDGPUInstPrinter::printMatrixBScaleFmt(const MCInst *MI, unsigned OpNo,
  printMatrixScaleFmt(MI, OpNo, STI, O, 'b');
void AMDGPUInstPrinter::printInterpSlot(const MCInst *MI, unsigned OpNum,
  unsigned Imm = MI->getOperand(OpNum).getImm();
    O << "invalid_param_" << Imm;

void AMDGPUInstPrinter::printInterpAttr(const MCInst *MI, unsigned OpNum,
  unsigned Attr = MI->getOperand(OpNum).getImm();
  O << "attr" << Attr;

void AMDGPUInstPrinter::printInterpAttrChan(const MCInst *MI, unsigned OpNum,
  unsigned Chan = MI->getOperand(OpNum).getImm();
  O << '.' << "xyzw"[Chan & 0x3];
void AMDGPUInstPrinter::printGPRIdxMode(const MCInst *MI, unsigned OpNo,
  using namespace llvm::AMDGPU::VGPRIndexMode;
  unsigned Val = MI->getOperand(OpNo).getImm();

  if ((Val & ~ENABLE_MASK) != 0) {
    O << formatHex(static_cast<uint64_t>(Val));

  bool NeedComma = false;
  for (unsigned ModeId = ID_MIN; ModeId <= ID_MAX; ++ModeId) {
    if (Val & (1 << ModeId)) {
void AMDGPUInstPrinter::printMemOperand(const MCInst *MI, unsigned OpNo,
  printRegularOperand(MI, OpNo, STI, O);
  printRegularOperand(MI, OpNo + 1, STI, O);

  if (Op.getImm() == 1) {

  if (Op.getImm() == 1)

  int Imm = MI->getOperand(OpNo).getImm();
  const unsigned Imm16 = MI->getOperand(OpNo).getImm();
    O << "sendmsg(" << MsgName;
    O << "sendmsg(" << MsgId << ", " << OpId << ", " << StreamId << ')';
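// printSendMsg prefers the symbolic form when the message id decodes to a
// known name, e.g. "sendmsg(MSG_GS_DONE, GS_OP_NOP)"; otherwise it falls back
// to the raw "sendmsg(msgId, opId, streamId)" triple.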
  uint16_t Probe0 = ((0 & AndMask) | OrMask) ^ XorMask;

  for (unsigned Mask = 1 << (BITMASK_WIDTH - 1); Mask > 0; Mask >>= 1) {

  } else if (AndMask == BITMASK_MAX && OrMask == 0 && XorMask > 0 &&

  if (GroupSize > 1 &&
      OrMask < GroupSize &&

    printU16ImmDecOperand(MI, OpNo, O);
  unsigned SImm16 = MI->getOperand(OpNo).getImm();
  unsigned Vmcnt, Expcnt, Lgkmcnt;

  bool PrintAll = IsDefaultVmcnt && IsDefaultExpcnt && IsDefaultLgkmcnt;

  bool NeedSpace = false;

  if (!IsDefaultVmcnt || PrintAll) {
    O << "vmcnt(" << Vmcnt << ')';

  if (!IsDefaultExpcnt || PrintAll) {
    O << "expcnt(" << Expcnt << ')';

  if (!IsDefaultLgkmcnt || PrintAll) {
    O << "lgkmcnt(" << Lgkmcnt << ')';
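// Only counters with a non-default value are printed, unless every counter
// is at its default, in which case all three are shown; a typical result is
// "s_waitcnt vmcnt(0) lgkmcnt(0)".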
  uint64_t Imm16 = MI->getOperand(OpNo).getImm() & 0xffff;

  bool HasNonDefaultVal = false;

  bool NeedSpace = false;

  if (!IsDefault || !HasNonDefaultVal) {
    O << Name << '(' << Val << ')';
  const char *BadInstId = "/* invalid instid value */";
  static const std::array<const char *, 12> InstIds = {
      "NO_DEP",        "VALU_DEP_1",    "VALU_DEP_2",
      "VALU_DEP_3",    "VALU_DEP_4",    "TRANS32_DEP_1",
      "TRANS32_DEP_2", "TRANS32_DEP_3", "FMA_ACCUM_CYCLE_1",
      "SALU_CYCLE_1",  "SALU_CYCLE_2",  "SALU_CYCLE_3"};

  const char *BadInstSkip = "/* invalid instskip value */";
  static const std::array<const char *, 6> InstSkips = {
      "SAME", "NEXT", "SKIP_1", "SKIP_2", "SKIP_3", "SKIP_4"};

  unsigned SImm16 = MI->getOperand(OpNo).getImm();
  const char *Prefix = "";

  unsigned Value = SImm16 & 0xF;
    const char *Name = Value < InstIds.size() ? InstIds[Value] : BadInstId;
    O << Prefix << "instid0(" << Name << ')';

  Value = (SImm16 >> 4) & 7;
        Value < InstSkips.size() ? InstSkips[Value] : BadInstSkip;
    O << Prefix << "instskip(" << Name << ')';

  Value = (SImm16 >> 7) & 0xF;
    const char *Name = Value < InstIds.size() ? InstIds[Value] : BadInstId;
    O << Prefix << "instid1(" << Name << ')';
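// s_delay_alu packs instid0 (bits 0-3), instskip (bits 4-6) and instid1
// (bits 7-10) into one immediate; a decoded operand typically reads
// "instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)" (the '|'
// separator is assumed here from the usual assembler syntax).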
  unsigned Val = MI->getOperand(OpNo).getImm();

  if (!HwRegName.empty()) {

  O << ", " << Offset << ", " << Width;
void AMDGPUInstPrinter::printNamedInt(const MCInst *MI, unsigned OpNo,
                                      bool PrintInHex, bool AlwaysPrint) {
  int64_t V = MI->getOperand(OpNo).getImm();
  if (AlwaysPrint || V != 0)

void AMDGPUInstPrinter::printBitOp3(const MCInst *MI, unsigned OpNo,
  O << formatHex(static_cast<uint64_t>(Imm));

void AMDGPUInstPrinter::printScaleSel(const MCInst *MI, unsigned OpNo,
  uint8_t Imm = MI->getOperand(OpNo).getImm();

#include "AMDGPUGenAsmWriter.inc"
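// The table-generated part of the printer (printInstruction and
// getRegisterName) is pulled in from AMDGPUGenAsmWriter.inc, which TableGen
// produces from the AMDGPU instruction definitions.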