51void AMDGPUInstPrinter::printU16ImmOperand(
const MCInst *
MI,
unsigned OpNo,
62 int64_t
Imm =
Op.getImm();
64 O << formatHex(static_cast<uint64_t>(
Imm & 0xffff));
66 printU32ImmOperand(
MI, OpNo, STI, O);
69void AMDGPUInstPrinter::printU16ImmDecOperand(
const MCInst *
MI,
unsigned OpNo,
74void AMDGPUInstPrinter::printU32ImmOperand(
const MCInst *
MI,
unsigned OpNo,
77 const MCOperand &
Op =
MI->getOperand(OpNo);
79 MAI.printExpr(O, *
Op.getExpr());
86void AMDGPUInstPrinter::printFP64ImmOperand(
const MCInst *
MI,
unsigned OpNo,
90 const MCOperand &
Op =
MI->getOperand(OpNo);
92 MAI.printExpr(O, *
Op.getExpr());
96 printLiteral64(
Op.getImm(), O,
true);
99void AMDGPUInstPrinter::printNamedBit(
const MCInst *
MI,
unsigned OpNo,
101 if (
MI->getOperand(OpNo).getImm()) {
106void AMDGPUInstPrinter::printOffset(
const MCInst *
MI,
unsigned OpNo,
109 uint32_t
Imm =
MI->getOperand(OpNo).getImm();
114 const MCInstrDesc &
Desc =
MII.get(
MI->getOpcode());
117 O << formatDec(SignExtend32<24>(
Imm));
119 printU16ImmDecOperand(
MI, OpNo, O);
123void AMDGPUInstPrinter::printFlatOffset(
const MCInst *
MI,
unsigned OpNo,
126 uint32_t
Imm =
MI->getOperand(OpNo).getImm();
130 const MCInstrDesc &
Desc =
MII.get(
MI->getOpcode());
138 printU16ImmDecOperand(
MI, OpNo, O);
142void AMDGPUInstPrinter::printSMRDOffset8(
const MCInst *
MI,
unsigned OpNo,
145 printU32ImmOperand(
MI, OpNo, STI, O);
148void AMDGPUInstPrinter::printSMEMOffset(
const MCInst *
MI,
unsigned OpNo,
154void AMDGPUInstPrinter::printSMRDLiteralOffset(
const MCInst *
MI,
unsigned OpNo,
157 printU32ImmOperand(
MI, OpNo, STI, O);
160void AMDGPUInstPrinter::printCPol(
const MCInst *
MI,
unsigned OpNo,
162 auto Imm =
MI->getOperand(OpNo).getImm();
169 O <<
" scale_offset";
171 printTH(
MI, TH, Scope, O);
172 printScope(Scope, O);
191 O <<
" /* unexpected cache policy bit */";
194void AMDGPUInstPrinter::printTH(
const MCInst *
MI, int64_t TH, int64_t Scope,
200 const unsigned Opcode =
MI->getOpcode();
201 const MCInstrDesc &TID =
MII.get(Opcode);
224 O << (IsStore ?
"TH_STORE_" :
"TH_LOAD_");
234 : (IsStore ?
"WB" :
"LU"));
255void AMDGPUInstPrinter::printScope(int64_t Scope,
raw_ostream &O) {
271void AMDGPUInstPrinter::printDim(
const MCInst *
MI,
unsigned OpNo,
273 unsigned Dim =
MI->getOperand(OpNo).getImm();
274 O <<
" dim:SQ_RSRC_IMG_";
283void AMDGPUInstPrinter::printR128A16(
const MCInst *
MI,
unsigned OpNo,
286 printNamedBit(
MI, OpNo, O,
"a16");
288 printNamedBit(
MI, OpNo, O,
"r128");
291void AMDGPUInstPrinter::printFORMAT(
const MCInst *
MI,
unsigned OpNo,
296void AMDGPUInstPrinter::printSymbolicFormat(
const MCInst *
MI,
299 using namespace llvm::AMDGPU::MTBUFFormat;
302 AMDGPU::getNamedOperandIdx(
MI->getOpcode(), AMDGPU::OpName::format);
305 unsigned Val =
MI->getOperand(OpNo).getImm();
307 if (Val == UFMT_DEFAULT)
312 O <<
" format:" << Val;
315 if (Val == DFMT_NFMT_DEFAULT)
322 if (Dfmt != DFMT_DEFAULT) {
324 if (Nfmt != NFMT_DEFAULT) {
328 if (Nfmt != NFMT_DEFAULT) {
333 O <<
" format:" << Val;
341 unsigned Enc =
MRI.getEncodingValue(
Reg);
346 unsigned RegNo = Idx % 0x100;
348 if (RC->
getID() == AMDGPU::VGPR_16RegClassID) {
367 unsigned Enc =
MRI.getEncodingValue(
Reg);
374 unsigned Opc =
Desc.getOpcode();
376 for (
I = 0;
I < 4; ++
I) {
377 if (
Ops.first[
I] != AMDGPU::OpName::NUM_OPERAND_NAMES &&
378 (
unsigned)AMDGPU::getNamedOperandIdx(
Opc,
Ops.first[
I]) == OpNo)
380 if (
Ops.second &&
Ops.second[
I] != AMDGPU::OpName::NUM_OPERAND_NAMES &&
381 (
unsigned)AMDGPU::getNamedOperandIdx(
Opc,
Ops.second[
I]) == OpNo)
386 unsigned OpMSBs = (VgprMSBs >> (
I * 2)) & 3;
400 case AMDGPU::PRIVATE_RSRC_REG:
423void AMDGPUInstPrinter::printVOPDst(
const MCInst *
MI,
unsigned OpNo,
425 auto Opcode =
MI->getOpcode();
443 printRegularOperand(
MI, OpNo, STI, O);
449 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx10:
450 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx10:
451 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
452 case AMDGPU::V_ADD_CO_CI_U32_sdwa_gfx10:
453 case AMDGPU::V_SUB_CO_CI_U32_sdwa_gfx10:
454 case AMDGPU::V_SUBREV_CO_CI_U32_sdwa_gfx10:
455 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx10:
456 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx10:
457 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx10:
458 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx10:
459 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx10:
460 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx10:
461 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx11:
462 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx11:
463 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx11:
464 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx11:
465 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx11:
466 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx11:
467 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx11:
468 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx11:
469 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx11:
470 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx12:
471 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx12:
472 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx12:
473 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx12:
474 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx12:
475 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx12:
476 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx12:
477 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx12:
478 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx12:
479 printDefaultVccOperand(
false, STI, O);
484void AMDGPUInstPrinter::printVINTRPDst(
const MCInst *
MI,
unsigned OpNo,
491 printRegularOperand(
MI, OpNo, STI, O);
494void AMDGPUInstPrinter::printAVLdSt32Align2RegOp(
const MCInst *
MI,
498 MCRegister
Reg =
MI->getOperand(OpNo).getReg();
501 if (MCRegister
SubReg =
MRI.getSubReg(
Reg, AMDGPU::sub0))
506void AMDGPUInstPrinter::printImmediateInt16(uint32_t
Imm,
509 int32_t SImm =
static_cast<int32_t
>(
Imm);
515 if (printImmediateFloat32(
Imm, STI, O))
518 O << formatHex(static_cast<uint64_t>(
Imm & 0xffff));
525 else if (
Imm == 0xBC00)
527 else if (
Imm == 0x3800)
529 else if (
Imm == 0xB800)
531 else if (
Imm == 0x4000)
533 else if (
Imm == 0xC000)
535 else if (
Imm == 0x4400)
537 else if (
Imm == 0xC400)
539 else if (
Imm == 0x3118 && STI.
hasFeature(AMDGPU::FeatureInv2PiInlineImm))
551 else if (
Imm == 0xBF80)
553 else if (
Imm == 0x3F00)
555 else if (
Imm == 0xBF00)
557 else if (
Imm == 0x4000)
559 else if (
Imm == 0xC000)
561 else if (
Imm == 0x4080)
563 else if (
Imm == 0xC080)
565 else if (
Imm == 0x3E22 && STI.
hasFeature(AMDGPU::FeatureInv2PiInlineImm))
573void AMDGPUInstPrinter::printImmediateBF16(uint32_t
Imm,
576 int16_t SImm =
static_cast<int16_t
>(
Imm);
585 O << formatHex(static_cast<uint64_t>(
Imm));
588void AMDGPUInstPrinter::printImmediateF16(uint32_t
Imm,
591 int16_t SImm =
static_cast<int16_t
>(
Imm);
597 uint16_t HImm =
static_cast<uint16_t
>(
Imm);
601 uint64_t
Imm16 =
static_cast<uint16_t
>(
Imm);
605void AMDGPUInstPrinter::printImmediateV216(uint32_t
Imm, uint8_t OpType,
608 int32_t SImm =
static_cast<int32_t
>(
Imm);
617 if (printImmediateFloat32(
Imm, STI, O))
631 uint16_t Lo16 =
static_cast<uint16_t
>(
Imm & 0xFFFF);
632 uint16_t Hi16 =
static_cast<uint16_t
>((
Imm >> 16) & 0xFFFF);
657 O << formatHex(static_cast<uint64_t>(
Imm));
660bool AMDGPUInstPrinter::printImmediateFloat32(uint32_t
Imm,
681 else if (
Imm == 0x3e22f983 &&
682 STI.
hasFeature(AMDGPU::FeatureInv2PiInlineImm))
690void AMDGPUInstPrinter::printImmediate32(uint32_t
Imm,
693 int32_t SImm =
static_cast<int32_t
>(
Imm);
699 if (printImmediateFloat32(
Imm, STI, O))
702 O << formatHex(static_cast<uint64_t>(
Imm));
705void AMDGPUInstPrinter::printImmediate64(uint64_t
Imm,
708 int64_t SImm =
static_cast<int64_t
>(
Imm);
709 if (SImm >= -16 && SImm <= 64) {
732 else if (
Imm == 0x3fc45f306dc9c882 &&
733 STI.
hasFeature(AMDGPU::FeatureInv2PiInlineImm))
734 O <<
"0.15915494309189532";
736 printLiteral64(
Imm, O, IsFP);
739void AMDGPUInstPrinter::printLiteral64(uint64_t
Imm,
raw_ostream &O,
742 O << formatHex(static_cast<uint64_t>(
Hi_32(
Imm)));
747void AMDGPUInstPrinter::printBLGP(
const MCInst *
MI,
unsigned OpNo,
750 unsigned Imm =
MI->getOperand(OpNo).getImm();
755 switch (
MI->getOpcode()) {
756 case AMDGPU::V_MFMA_F64_16X16X4F64_gfx940_acd:
757 case AMDGPU::V_MFMA_F64_16X16X4F64_gfx940_vcd:
758 case AMDGPU::V_MFMA_F64_4X4X4F64_gfx940_acd:
759 case AMDGPU::V_MFMA_F64_4X4X4F64_gfx940_vcd:
760 O <<
" neg:[" << (
Imm & 1) <<
',' << ((
Imm >> 1) & 1) <<
','
761 << ((
Imm >> 2) & 1) <<
']';
766 O <<
" blgp:" <<
Imm;
769void AMDGPUInstPrinter::printDefaultVccOperand(
bool FirstOperand,
783 unsigned OpNo)
const {
787 (
Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC) ||
788 Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC_LO));
792void AMDGPUInstPrinter::printOperand(
const MCInst *
MI,
unsigned OpNo,
795 unsigned Opc =
MI->getOpcode();
797 int ModIdx = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src0_modifiers);
804 (
Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC) ||
805 Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC_LO)))
806 printDefaultVccOperand(
true, STI, O);
808 printRegularOperand(
MI, OpNo, STI, O);
812void AMDGPUInstPrinter::printRegularOperand(
const MCInst *
MI,
unsigned OpNo,
815 const MCInstrDesc &
Desc =
MII.get(
MI->getOpcode());
817 if (OpNo >=
MI->getNumOperands()) {
818 O <<
"/*Missing OP" << OpNo <<
"*/";
822 const MCOperand &
Op =
MI->getOperand(OpNo);
829 const MCOperandInfo &OpInfo =
Desc.operands()[OpNo];
831 int16_t RCID =
MII.getOpRegClassID(
833 const MCRegisterClass &RC =
MRI.getRegClass(RCID);
837 (OpInfo.
RegClass == AMDGPU::SReg_1 ||
838 OpInfo.
RegClass == AMDGPU::SReg_1_XEXEC);
845 O <<
"/*Invalid register, operand has \'" <<
MRI.getRegClassName(&RC)
846 <<
"\' register class*/";
850 }
else if (
Op.isImm()) {
851 const uint8_t OpTy =
Desc.operands()[OpNo].OperandType;
863 printImmediate32(
Op.getImm(), STI, O);
867 printImmediate64(
Op.getImm(), STI, O,
false);
872 printImmediate64(
Op.getImm(), STI, O,
true);
876 printImmediateInt16(
Op.getImm(), STI, O);
880 printImmediateF16(
Op.getImm(), STI, O);
884 printImmediateBF16(
Op.getImm(), STI, O);
894 printImmediateV216(
Op.getImm(), OpTy, STI, O);
903 printImmediate32(
Op.getImm(), STI, O);
904 O <<
"/*Invalid immediate*/";
911 }
else if (
Op.isExpr()) {
912 const MCExpr *
Exp =
Op.getExpr();
913 MAI.printExpr(O, *Exp);
919 switch (
MI->getOpcode()) {
922 case AMDGPU::V_CNDMASK_B32_e32_gfx10:
923 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx10:
924 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx10:
925 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
926 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx10:
927 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx10:
928 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx10:
929 case AMDGPU::V_CNDMASK_B32_dpp8_gfx10:
930 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx10:
931 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx10:
932 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx10:
933 case AMDGPU::V_CNDMASK_B32_e32_gfx11:
934 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx11:
935 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx11:
936 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx11:
937 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx11:
938 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx11:
939 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx11:
940 case AMDGPU::V_CNDMASK_B32_dpp8_gfx11:
941 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx11:
942 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx11:
943 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx11:
944 case AMDGPU::V_CNDMASK_B32_e32_gfx12:
945 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx12:
946 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx12:
947 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx12:
948 case AMDGPU::V_CNDMASK_B32_dpp_gfx12:
949 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx12:
950 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx12:
951 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx12:
952 case AMDGPU::V_CNDMASK_B32_dpp8_gfx12:
953 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx12:
954 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx12:
955 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx12:
957 case AMDGPU::V_CNDMASK_B32_e32_gfx6_gfx7:
958 case AMDGPU::V_CNDMASK_B32_e32_vi:
959 if ((
int)OpNo == AMDGPU::getNamedOperandIdx(
MI->getOpcode(),
960 AMDGPU::OpName::src1))
961 printDefaultVccOperand(OpNo == 0, STI, O);
967 AMDGPU::getNamedOperandIdx(
MI->getOpcode(), AMDGPU::OpName::soffset);
969 if ((
int)OpNo == SOffsetIdx)
970 printSymbolicFormat(
MI, STI, O);
974void AMDGPUInstPrinter::printOperandAndFPInputMods(
const MCInst *
MI,
978 const MCInstrDesc &
Desc =
MII.get(
MI->getOpcode());
979 if (needsImpliedVcc(
Desc, OpNo))
980 printDefaultVccOperand(
true, STI, O);
982 unsigned InputModifiers =
MI->getOperand(OpNo).getImm();
987 bool NegMnemo =
false;
990 if (OpNo + 1 <
MI->getNumOperands() &&
992 const MCOperand &
Op =
MI->getOperand(OpNo + 1);
993 NegMnemo =
Op.isImm();
1004 printRegularOperand(
MI, OpNo + 1, STI, O);
1013 switch (
MI->getOpcode()) {
1017 case AMDGPU::V_CNDMASK_B32_sdwa_gfx10:
1018 case AMDGPU::V_CNDMASK_B32_dpp_gfx10:
1019 case AMDGPU::V_CNDMASK_B32_dpp_gfx11:
1020 if ((
int)OpNo + 1 ==
1021 AMDGPU::getNamedOperandIdx(
MI->getOpcode(), AMDGPU::OpName::src1))
1022 printDefaultVccOperand(OpNo == 0, STI, O);
1027void AMDGPUInstPrinter::printOperandAndIntInputMods(
const MCInst *
MI,
1031 const MCInstrDesc &
Desc =
MII.get(
MI->getOpcode());
1032 if (needsImpliedVcc(
Desc, OpNo))
1033 printDefaultVccOperand(
true, STI, O);
1035 unsigned InputModifiers =
MI->getOperand(OpNo).getImm();
1038 printRegularOperand(
MI, OpNo + 1, STI, O);
1043 switch (
MI->getOpcode()) {
1046 case AMDGPU::V_ADD_CO_CI_U32_sdwa_gfx10:
1047 case AMDGPU::V_SUB_CO_CI_U32_sdwa_gfx10:
1048 case AMDGPU::V_SUBREV_CO_CI_U32_sdwa_gfx10:
1049 if ((
int)OpNo + 1 == AMDGPU::getNamedOperandIdx(
MI->getOpcode(),
1050 AMDGPU::OpName::src1))
1051 printDefaultVccOperand(OpNo == 0, STI, O);
1056void AMDGPUInstPrinter::printDPP8(
const MCInst *
MI,
unsigned OpNo,
1062 unsigned Imm =
MI->getOperand(OpNo).getImm();
1064 for (
size_t i = 1; i < 8; ++i) {
1070void AMDGPUInstPrinter::printDPPCtrl(
const MCInst *
MI,
unsigned OpNo,
1073 using namespace AMDGPU::DPP;
1075 unsigned Imm =
MI->getOperand(OpNo).getImm();
1076 const MCInstrDesc &
Desc =
MII.get(
MI->getOpcode());
1080 O <<
" /* DP ALU dpp only supports "
1081 << (
isGFX12(STI) ?
"row_share" :
"row_newbcast") <<
" */";
1084 if (
Imm <= DppCtrl::QUAD_PERM_LAST) {
1090 }
else if ((
Imm >= DppCtrl::ROW_SHL_FIRST) &&
1091 (
Imm <= DppCtrl::ROW_SHL_LAST)) {
1093 }
else if ((
Imm >= DppCtrl::ROW_SHR_FIRST) &&
1094 (
Imm <= DppCtrl::ROW_SHR_LAST)) {
1096 }
else if ((
Imm >= DppCtrl::ROW_ROR_FIRST) &&
1097 (
Imm <= DppCtrl::ROW_ROR_LAST)) {
1099 }
else if (
Imm == DppCtrl::WAVE_SHL1) {
1101 O <<
"/* wave_shl is not supported starting from GFX10 */";
1105 }
else if (
Imm == DppCtrl::WAVE_ROL1) {
1107 O <<
"/* wave_rol is not supported starting from GFX10 */";
1111 }
else if (
Imm == DppCtrl::WAVE_SHR1) {
1113 O <<
"/* wave_shr is not supported starting from GFX10 */";
1117 }
else if (
Imm == DppCtrl::WAVE_ROR1) {
1119 O <<
"/* wave_ror is not supported starting from GFX10 */";
1123 }
else if (
Imm == DppCtrl::ROW_MIRROR) {
1125 }
else if (
Imm == DppCtrl::ROW_HALF_MIRROR) {
1126 O <<
"row_half_mirror";
1127 }
else if (
Imm == DppCtrl::BCAST15) {
1129 O <<
"/* row_bcast is not supported starting from GFX10 */";
1132 O <<
"row_bcast:15";
1133 }
else if (
Imm == DppCtrl::BCAST31) {
1135 O <<
"/* row_bcast is not supported starting from GFX10 */";
1138 O <<
"row_bcast:31";
1139 }
else if ((
Imm >= DppCtrl::ROW_SHARE_FIRST) &&
1140 (
Imm <= DppCtrl::ROW_SHARE_LAST)) {
1142 O <<
"row_newbcast:";
1146 O <<
" /* row_newbcast/row_share is not supported on ASICs earlier "
1147 "than GFX90A/GFX10 */";
1151 }
else if ((
Imm >= DppCtrl::ROW_XMASK_FIRST) &&
1152 (
Imm <= DppCtrl::ROW_XMASK_LAST)) {
1154 O <<
"/* row_xmask is not supported on ASICs earlier than GFX10 */";
1157 O <<
"row_xmask:" <<
formatDec(
Imm - DppCtrl::ROW_XMASK_FIRST);
1159 O <<
"/* Invalid dpp_ctrl value */";
1163void AMDGPUInstPrinter::printDppBoundCtrl(
const MCInst *
MI,
unsigned OpNo,
1166 unsigned Imm =
MI->getOperand(OpNo).getImm();
1168 O <<
" bound_ctrl:1";
1172void AMDGPUInstPrinter::printDppFI(
const MCInst *
MI,
unsigned OpNo,
1174 using namespace llvm::AMDGPU::DPP;
1175 unsigned Imm =
MI->getOperand(OpNo).getImm();
1176 if (
Imm == DPP_FI_1 ||
Imm == DPP8_FI_1) {
1181void AMDGPUInstPrinter::printSDWASel(
const MCInst *
MI,
unsigned OpNo,
1183 using namespace llvm::AMDGPU::SDWA;
1185 unsigned Imm =
MI->getOperand(OpNo).getImm();
1187 case SdwaSel::BYTE_0:
O <<
"BYTE_0";
break;
1188 case SdwaSel::BYTE_1:
O <<
"BYTE_1";
break;
1189 case SdwaSel::BYTE_2:
O <<
"BYTE_2";
break;
1190 case SdwaSel::BYTE_3:
O <<
"BYTE_3";
break;
1191 case SdwaSel::WORD_0:
O <<
"WORD_0";
break;
1192 case SdwaSel::WORD_1:
O <<
"WORD_1";
break;
1193 case SdwaSel::DWORD:
O <<
"DWORD";
break;
1198void AMDGPUInstPrinter::printSDWADstSel(
const MCInst *
MI,
unsigned OpNo,
1202 printSDWASel(
MI, OpNo, O);
1205void AMDGPUInstPrinter::printSDWASrc0Sel(
const MCInst *
MI,
unsigned OpNo,
1209 printSDWASel(
MI, OpNo, O);
1212void AMDGPUInstPrinter::printSDWASrc1Sel(
const MCInst *
MI,
unsigned OpNo,
1216 printSDWASel(
MI, OpNo, O);
1219void AMDGPUInstPrinter::printSDWADstUnused(
const MCInst *
MI,
unsigned OpNo,
1222 using namespace llvm::AMDGPU::SDWA;
1225 unsigned Imm =
MI->getOperand(OpNo).getImm();
1227 case DstUnused::UNUSED_PAD:
O <<
"UNUSED_PAD";
break;
1228 case DstUnused::UNUSED_SEXT:
O <<
"UNUSED_SEXT";
break;
1229 case DstUnused::UNUSED_PRESERVE:
O <<
"UNUSED_PRESERVE";
break;
1234void AMDGPUInstPrinter::printExpSrcN(
const MCInst *
MI,
unsigned OpNo,
1237 unsigned Opc =
MI->getOpcode();
1238 int EnIdx = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::en);
1239 unsigned En =
MI->getOperand(EnIdx).getImm();
1241 int ComprIdx = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::compr);
1244 if (
MI->getOperand(ComprIdx).getImm())
1245 OpNo = OpNo -
N +
N / 2;
1253void AMDGPUInstPrinter::printExpSrc0(
const MCInst *
MI,
unsigned OpNo,
1256 printExpSrcN(
MI, OpNo, STI, O, 0);
1259void AMDGPUInstPrinter::printExpSrc1(
const MCInst *
MI,
unsigned OpNo,
1262 printExpSrcN(
MI, OpNo, STI, O, 1);
1265void AMDGPUInstPrinter::printExpSrc2(
const MCInst *
MI,
unsigned OpNo,
1268 printExpSrcN(
MI, OpNo, STI, O, 2);
1271void AMDGPUInstPrinter::printExpSrc3(
const MCInst *
MI,
unsigned OpNo,
1274 printExpSrcN(
MI, OpNo, STI, O, 3);
1277void AMDGPUInstPrinter::printExpTgt(
const MCInst *
MI,
unsigned OpNo,
1280 using namespace llvm::AMDGPU::Exp;
1283 unsigned Id =
MI->getOperand(OpNo).getImm() & ((1 << 6) - 1);
1288 O <<
' ' << TgtName;
1292 O <<
" invalid_target_" <<
Id;
1297 bool IsPacked,
bool HasDstSel) {
1301 if (!!(
Ops[
I] &
Mod) != DefaultValue)
1311void AMDGPUInstPrinter::printPackedModifier(
const MCInst *
MI,
1315 unsigned Opc =
MI->getOpcode();
1319 std::pair<AMDGPU::OpName, AMDGPU::OpName> MOps[] = {
1320 {AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src0},
1321 {AMDGPU::OpName::src1_modifiers, AMDGPU::OpName::src1},
1322 {AMDGPU::OpName::src2_modifiers, AMDGPU::OpName::src2}};
1325 for (
auto [SrcMod, Src] : MOps) {
1329 int ModIdx = AMDGPU::getNamedOperandIdx(
Opc, SrcMod);
1331 (ModIdx != -1) ?
MI->getOperand(ModIdx).getImm() : DefaultValue;
1340 AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src2_modifiers);
1346 (AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::vdst) != -1) ||
1347 (AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::sdst) != -1);
1355 for (AMDGPU::OpName OpName :
1356 {AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src1_modifiers,
1357 AMDGPU::OpName::src2_modifiers}) {
1358 int Idx = AMDGPU::getNamedOperandIdx(
Opc, OpName);
1366 const bool HasDstSel =
1377 ListSeparator Sep(
",");
1388void AMDGPUInstPrinter::printOpSel(
const MCInst *
MI,
unsigned,
1391 unsigned Opc =
MI->getOpcode();
1394 AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src0_modifiers);
1395 unsigned Mod =
MI->getOperand(SrcMod).getImm();
1398 if (Index0 || Index1)
1399 O <<
" op_sel:[" << Index0 <<
',' << Index1 <<
']';
1403 auto FIN = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src0_modifiers);
1404 auto BCN = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src1_modifiers);
1408 O <<
" op_sel:[" << FI <<
',' << BC <<
']';
1415void AMDGPUInstPrinter::printOpSelHi(
const MCInst *
MI,
unsigned OpNo,
1421void AMDGPUInstPrinter::printNegLo(
const MCInst *
MI,
unsigned OpNo,
1427void AMDGPUInstPrinter::printNegHi(
const MCInst *
MI,
unsigned OpNo,
1433void AMDGPUInstPrinter::printIndexKey8bit(
const MCInst *
MI,
unsigned OpNo,
1436 auto Imm =
MI->getOperand(OpNo).getImm() & 0x7;
1440 O <<
" index_key:" <<
Imm;
1443void AMDGPUInstPrinter::printIndexKey16bit(
const MCInst *
MI,
unsigned OpNo,
1446 auto Imm =
MI->getOperand(OpNo).getImm() & 0x7;
1450 O <<
" index_key:" <<
Imm;
1453void AMDGPUInstPrinter::printIndexKey32bit(
const MCInst *
MI,
unsigned OpNo,
1456 auto Imm =
MI->getOperand(OpNo).getImm() & 0x7;
1460 O <<
" index_key:" <<
Imm;
1463void AMDGPUInstPrinter::printMatrixFMT(
const MCInst *
MI,
unsigned OpNo,
1466 auto Imm =
MI->getOperand(OpNo).getImm() & 0x7;
1470 O <<
" matrix_" << AorB <<
"_fmt:";
1475 case WMMA::MatrixFMT::MATRIX_FMT_FP8:
1476 O <<
"MATRIX_FMT_FP8";
1478 case WMMA::MatrixFMT::MATRIX_FMT_BF8:
1479 O <<
"MATRIX_FMT_BF8";
1481 case WMMA::MatrixFMT::MATRIX_FMT_FP6:
1482 O <<
"MATRIX_FMT_FP6";
1484 case WMMA::MatrixFMT::MATRIX_FMT_BF6:
1485 O <<
"MATRIX_FMT_BF6";
1487 case WMMA::MatrixFMT::MATRIX_FMT_FP4:
1488 O <<
"MATRIX_FMT_FP4";
1493void AMDGPUInstPrinter::printMatrixAFMT(
const MCInst *
MI,
unsigned OpNo,
1496 printMatrixFMT(
MI, OpNo, STI, O,
'a');
1499void AMDGPUInstPrinter::printMatrixBFMT(
const MCInst *
MI,
unsigned OpNo,
1502 printMatrixFMT(
MI, OpNo, STI, O,
'b');
1505void AMDGPUInstPrinter::printMatrixScale(
const MCInst *
MI,
unsigned OpNo,
1508 auto Imm =
MI->getOperand(OpNo).getImm() & 1;
1512 O <<
" matrix_" << AorB <<
"_scale:";
1517 case WMMA::MatrixScale::MATRIX_SCALE_ROW0:
1518 O <<
"MATRIX_SCALE_ROW0";
1520 case WMMA::MatrixScale::MATRIX_SCALE_ROW1:
1521 O <<
"MATRIX_SCALE_ROW1";
1526void AMDGPUInstPrinter::printMatrixAScale(
const MCInst *
MI,
unsigned OpNo,
1529 printMatrixScale(
MI, OpNo, STI, O,
'a');
1532void AMDGPUInstPrinter::printMatrixBScale(
const MCInst *
MI,
unsigned OpNo,
1535 printMatrixScale(
MI, OpNo, STI, O,
'b');
1538void AMDGPUInstPrinter::printMatrixScaleFmt(
const MCInst *
MI,
unsigned OpNo,
1541 auto Imm =
MI->getOperand(OpNo).getImm() & 3;
1545 O <<
" matrix_" << AorB <<
"_scale_fmt:";
1550 case WMMA::MatrixScaleFmt::MATRIX_SCALE_FMT_E8:
1551 O <<
"MATRIX_SCALE_FMT_E8";
1553 case WMMA::MatrixScaleFmt::MATRIX_SCALE_FMT_E5M3:
1554 O <<
"MATRIX_SCALE_FMT_E5M3";
1556 case WMMA::MatrixScaleFmt::MATRIX_SCALE_FMT_E4M3:
1557 O <<
"MATRIX_SCALE_FMT_E4M3";
1562void AMDGPUInstPrinter::printMatrixAScaleFmt(
const MCInst *
MI,
unsigned OpNo,
1565 printMatrixScaleFmt(
MI, OpNo, STI, O,
'a');
1568void AMDGPUInstPrinter::printMatrixBScaleFmt(
const MCInst *
MI,
unsigned OpNo,
1571 printMatrixScaleFmt(
MI, OpNo, STI, O,
'b');
1574void AMDGPUInstPrinter::printInterpSlot(
const MCInst *
MI,
unsigned OpNum,
1577 unsigned Imm =
MI->getOperand(OpNum).getImm();
1589 O <<
"invalid_param_" <<
Imm;
1593void AMDGPUInstPrinter::printInterpAttr(
const MCInst *
MI,
unsigned OpNum,
1596 unsigned Attr =
MI->getOperand(OpNum).getImm();
1597 O <<
"attr" << Attr;
1600void AMDGPUInstPrinter::printInterpAttrChan(
const MCInst *
MI,
unsigned OpNum,
1603 unsigned Chan =
MI->getOperand(OpNum).getImm();
1604 O <<
'.' <<
"xyzw"[Chan & 0x3];
1607void AMDGPUInstPrinter::printGPRIdxMode(
const MCInst *
MI,
unsigned OpNo,
1610 using namespace llvm::AMDGPU::VGPRIndexMode;
1611 unsigned Val =
MI->getOperand(OpNo).getImm();
1613 if ((Val & ~ENABLE_MASK) != 0) {
1614 O << formatHex(static_cast<uint64_t>(Val));
1617 ListSeparator Sep(
",");
1618 for (
unsigned ModeId = ID_MIN; ModeId <=
ID_MAX; ++ModeId) {
1619 if (Val & (1 << ModeId))
1626void AMDGPUInstPrinter::printMemOperand(
const MCInst *
MI,
unsigned OpNo,
1629 printRegularOperand(
MI, OpNo, STI, O);
1631 printRegularOperand(
MI, OpNo + 1, STI, O);
1639 if (
Op.getImm() == 1) {
1650 if (
Op.getImm() == 1)
1657 int Imm =
MI->getOperand(OpNo).getImm();
1671 const unsigned Imm16 =
MI->getOperand(OpNo).getImm();
1682 O <<
"sendmsg(" << MsgName;
1691 O <<
"sendmsg(" << MsgId <<
", " << OpId <<
", " <<
StreamId <<
')';
1703 uint16_t Probe0 = ((0 & AndMask) | OrMask) ^ XorMask;
1708 for (
unsigned Mask = 1 << (
BITMASK_WIDTH - 1); Mask > 0; Mask >>= 1) {
1778 }
else if (AndMask ==
BITMASK_MAX && OrMask == 0 && XorMask > 0 &&
1789 if (GroupSize > 1 &&
1791 OrMask < GroupSize &&
1809 printU16ImmDecOperand(
MI, OpNo, O);
1818 unsigned SImm16 =
MI->getOperand(OpNo).getImm();
1819 unsigned Vmcnt, Expcnt, Lgkmcnt;
1825 bool PrintAll = IsDefaultVmcnt && IsDefaultExpcnt && IsDefaultLgkmcnt;
1829 if (!IsDefaultVmcnt || PrintAll)
1830 O << Sep <<
"vmcnt(" << Vmcnt <<
')';
1832 if (!IsDefaultExpcnt || PrintAll)
1833 O << Sep <<
"expcnt(" << Expcnt <<
')';
1835 if (!IsDefaultLgkmcnt || PrintAll)
1836 O << Sep <<
"lgkmcnt(" << Lgkmcnt <<
')';
1844 uint64_t Imm16 =
MI->getOperand(OpNo).getImm() & 0xffff;
1846 bool HasNonDefaultVal =
false;
1854 if (!IsDefault || !HasNonDefaultVal)
1855 O << Sep << Name <<
'(' << Val <<
')';
1865 const char *BadInstId =
"/* invalid instid value */";
1866 static const std::array<const char *, 12> InstIds = {
1867 "NO_DEP",
"VALU_DEP_1",
"VALU_DEP_2",
1868 "VALU_DEP_3",
"VALU_DEP_4",
"TRANS32_DEP_1",
1869 "TRANS32_DEP_2",
"TRANS32_DEP_3",
"FMA_ACCUM_CYCLE_1",
1870 "SALU_CYCLE_1",
"SALU_CYCLE_2",
"SALU_CYCLE_3"};
1872 const char *BadInstSkip =
"/* invalid instskip value */";
1873 static const std::array<const char *, 6> InstSkips = {
1874 "SAME",
"NEXT",
"SKIP_1",
"SKIP_2",
"SKIP_3",
"SKIP_4"};
1876 unsigned SImm16 =
MI->getOperand(OpNo).getImm();
1877 const char *Prefix =
"";
1879 unsigned Value = SImm16 & 0xF;
1881 const char *Name =
Value < InstIds.size() ? InstIds[
Value] : BadInstId;
1882 O << Prefix <<
"instid0(" << Name <<
')';
1886 Value = (SImm16 >> 4) & 7;
1889 Value < InstSkips.size() ? InstSkips[
Value] : BadInstSkip;
1890 O << Prefix <<
"instskip(" << Name <<
')';
1894 Value = (SImm16 >> 7) & 0xF;
1896 const char *Name =
Value < InstIds.size() ? InstIds[
Value] : BadInstId;
1897 O << Prefix <<
"instid1(" << Name <<
')';
1908 unsigned Val =
MI->getOperand(OpNo).getImm();
1913 if (!HwRegName.
empty()) {
1919 O <<
", " <<
Offset <<
", " << Width;
1934void AMDGPUInstPrinter::printNamedInt(
const MCInst *
MI,
unsigned OpNo,
1937 bool PrintInHex,
bool AlwaysPrint) {
1938 int64_t V =
MI->getOperand(OpNo).getImm();
1939 if (AlwaysPrint || V != 0)
1943void AMDGPUInstPrinter::printBitOp3(
const MCInst *
MI,
unsigned OpNo,
1954 O << formatHex(static_cast<uint64_t>(
Imm));
1957void AMDGPUInstPrinter::printScaleSel(
const MCInst *
MI,
unsigned OpNo,
1960 uint8_t
Imm =
MI->getOperand(OpNo).getImm();
1967#include "AMDGPUGenAsmWriter.inc"
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static void printSwizzleBitmask(const uint16_t AndMask, const uint16_t OrMask, const uint16_t XorMask, raw_ostream &O)
static bool printImmediateBFloat16(uint32_t Imm, const MCSubtargetInfo &STI, raw_ostream &O)
static bool allOpsDefaultValue(const int *Ops, int NumOps, int Mod, bool IsPacked, bool HasDstSel)
static MCRegister getRegForPrinting(MCRegister Reg, const MCRegisterInfo &MRI)
static MCRegister getRegFromMIA(MCRegister Reg, unsigned OpNo, const MCInstrDesc &Desc, const MCRegisterInfo &MRI, const AMDGPUMCInstrAnalysis &MIA)
static bool printImmediateFP16(uint32_t Imm, const MCSubtargetInfo &STI, raw_ostream &O)
Provides AMDGPU specific target descriptions.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
if(auto Err=PB.parsePassPipeline(MPM, Passes)) return wrap(std MPM run * Mod
void printSwizzle(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printEndpgm(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
static const char * getRegisterName(MCRegister Reg)
static void printIfSet(const MCInst *MI, unsigned OpNo, raw_ostream &O, StringRef Asm, StringRef Default="")
void printDepCtr(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printHwreg(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printSendMsg(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
static void printRegOperand(MCRegister Reg, raw_ostream &O, const MCRegisterInfo &MRI)
void printRegName(raw_ostream &OS, MCRegister Reg) override
Print the assembler register name.
void printInst(const MCInst *MI, uint64_t Address, StringRef Annot, const MCSubtargetInfo &STI, raw_ostream &O) override
Print the specified MCInst to the specified raw_ostream.
void printInstruction(const MCInst *MI, uint64_t Address, const MCSubtargetInfo &STI, raw_ostream &O)
void printSWaitCnt(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printOModSI(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printSDelayALU(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
unsigned getVgprMSBs() const
A helper class to return the specified delimiter string after the first invocation of operator String...
void printExpr(raw_ostream &, const MCExpr &) const
format_object< int64_t > formatHex(int64_t Value) const
format_object< int64_t > formatDec(int64_t Value) const
Utility functions to print decimal/hexadecimal values.
const MCRegisterInfo & MRI
void printAnnotation(raw_ostream &OS, StringRef Annot)
Utility function for printing annotations.
const MCInstrAnalysis * MIA
Instances of this class represent a single low-level machine instruction.
Describe properties that are true of each instruction in the target description file.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
bool isLookupRegClassByHwMode() const
Set if this operand is a value that requires the current hwmode to look up its register class.
int16_t RegClass
This specifies the register class enumeration of the operand if the operand is a register.
Instances of this class represent operands of the MCInst class.
MCRegisterClass - Base class of TargetRegisterClass.
unsigned getID() const
getID() - Return the register class ID number.
MCRegister getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
virtual unsigned getHwMode(enum HwModeType type=HwMode_Default) const
HwMode ID corresponding to the 'type' parameter is retrieved from the HwMode bit set of the current s...
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
LLVM Value Representation.
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI)
bool getTgtName(unsigned Id, StringRef &Name, int &Index)
StringRef getHwreg(uint64_t Encoding, const MCSubtargetInfo &STI)
uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId)
bool msgSupportsStream(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI)
void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId, uint16_t &StreamId, const MCSubtargetInfo &STI)
StringRef getMsgName(uint64_t Encoding, const MCSubtargetInfo &STI)
Map from an encoding to the symbolic name for a msg_id immediate.
bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId, const MCSubtargetInfo &STI, bool Strict)
StringRef getMsgOpName(int64_t MsgId, uint64_t Encoding, const MCSubtargetInfo &STI)
Map from an encoding to the symbolic name for a sendmsg operation.
bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI)
bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI, bool Strict)
const char *const IdSymbolic[]
bool isInlineValue(MCRegister Reg)
void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt)
Decodes Vmcnt, Expcnt and Lgkmcnt from given Waitcnt for given isa Version, and writes decoded values...
bool isVOPCAsmOnly(unsigned Opc)
unsigned getTemporalHintType(const MCInstrDesc TID)
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST)
For pre-GFX12 FLAT instructions the offset must be positive; MSB is ignored and forced to zero.
bool isGFX12Plus(const MCSubtargetInfo &STI)
const MCRegisterClass * getVGPRPhysRegClass(MCRegister Reg, const MCRegisterInfo &MRI)
bool isGFX940(const MCSubtargetInfo &STI)
LLVM_ABI IsaVersion getIsaVersion(StringRef GPU)
LLVM_READNONE bool isLegalDPALU_DPPControl(const MCSubtargetInfo &ST, unsigned DC)
bool isSI(const MCSubtargetInfo &STI)
LLVM_READONLY bool hasNamedOperand(uint64_t Opcode, OpName NamedIdx)
bool getVOP3IsSingle(unsigned Opc)
bool getVOP1IsSingle(unsigned Opc)
bool isGFX90A(const MCSubtargetInfo &STI)
LLVM_READONLY const MIMGDimInfo * getMIMGDimInfoByEncoding(uint8_t DimEnc)
bool isGFX12(const MCSubtargetInfo &STI)
MCRegister getVGPRWithMSBs(MCRegister Reg, unsigned MSBs, const MCRegisterInfo &MRI)
If Reg is a low VGPR, return a corresponding high VGPR with MSBs set.
unsigned getVmcntBitMask(const IsaVersion &Version)
LLVM_READNONE bool isInlinableIntLiteral(int64_t Literal)
Is this literal inlinable, and not one of the values intended for floating point values.
unsigned getLgkmcntBitMask(const IsaVersion &Version)
std::pair< const AMDGPU::OpName *, const AMDGPU::OpName * > getVGPRLoweringOperandTables(const MCInstrDesc &Desc)
unsigned getExpcntBitMask(const IsaVersion &Version)
bool isGFX11Plus(const MCSubtargetInfo &STI)
bool isGFX10Plus(const MCSubtargetInfo &STI)
@ OPERAND_REG_INLINE_C_FP64
@ OPERAND_REG_INLINE_C_BF16
@ OPERAND_REG_INLINE_C_V2BF16
@ OPERAND_REG_IMM_V2INT16
@ OPERAND_REG_IMM_INT32
Operands with register, 32-bit, or 64-bit immediate.
@ OPERAND_REG_IMM_V2FP16_SPLAT
@ OPERAND_REG_INLINE_C_INT64
@ OPERAND_REG_INLINE_C_INT16
Operands with register or inline constant.
@ OPERAND_REG_IMM_NOINLINE_V2FP16
@ OPERAND_REG_INLINE_C_V2FP16
@ OPERAND_REG_INLINE_AC_INT32
Operands with an AccVGPR register or inline constant.
@ OPERAND_REG_INLINE_AC_FP32
@ OPERAND_REG_IMM_V2INT32
@ OPERAND_REG_INLINE_C_FP32
@ OPERAND_REG_INLINE_C_INT32
@ OPERAND_REG_INLINE_C_V2INT16
@ OPERAND_REG_INLINE_AC_FP64
@ OPERAND_REG_INLINE_C_FP16
@ OPERAND_INLINE_SPLIT_BARRIER_INT32
bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
bool isGFX9Plus(const MCSubtargetInfo &STI)
bool isCvt_F32_Fp8_Bf8_e64(unsigned Opc)
MCRegister mc2PseudoReg(MCRegister Reg)
Convert hardware register Reg to a pseudo register.
bool isCI(const MCSubtargetInfo &STI)
bool getVOP2IsSingle(unsigned Opc)
bool isPermlane16(unsigned Opc)
Scope
Defines the scope in which this symbol should be visible: Default – Visible in the public interface o...
This is an optimization pass for GlobalISel generic memory operations.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64 bit value.
@ Mod
The access may modify the value stored in memory.
To bit_cast(const From &from) noexcept
DWARFExpression::Operation Op
constexpr int32_t SignExtend32(uint32_t X)
Sign-extend the number in the bottom B bits of X to a 32-bit integer.
@ Default
The result values are uniform if and only if all operands are uniform.
static constexpr ValueType Default
static std::tuple< typename Fields::ValueType... > decode(uint64_t Encoded)
Instruction set architecture version.