51void AMDGPUInstPrinter::printU16ImmOperand(
const MCInst *
MI,
unsigned OpNo,
62 int64_t
Imm =
Op.getImm();
64 O << formatHex(static_cast<uint64_t>(
Imm & 0xffff));
66 printU32ImmOperand(
MI, OpNo, STI, O);
69void AMDGPUInstPrinter::printU16ImmDecOperand(
const MCInst *
MI,
unsigned OpNo,
74void AMDGPUInstPrinter::printU32ImmOperand(
const MCInst *
MI,
unsigned OpNo,
77 const MCOperand &
Op =
MI->getOperand(OpNo);
79 MAI.printExpr(O, *
Op.getExpr());
86void AMDGPUInstPrinter::printFP64ImmOperand(
const MCInst *
MI,
unsigned OpNo,
90 const MCOperand &
Op =
MI->getOperand(OpNo);
92 MAI.printExpr(O, *
Op.getExpr());
96 printLiteral64(
Op.getImm(), O,
true);
99void AMDGPUInstPrinter::printNamedBit(
const MCInst *
MI,
unsigned OpNo,
101 if (
MI->getOperand(OpNo).getImm()) {
106void AMDGPUInstPrinter::printOffset(
const MCInst *
MI,
unsigned OpNo,
109 uint32_t
Imm =
MI->getOperand(OpNo).getImm();
114 const MCInstrDesc &
Desc =
MII.get(
MI->getOpcode());
117 O << formatDec(SignExtend32<24>(
Imm));
119 printU16ImmDecOperand(
MI, OpNo, O);
123void AMDGPUInstPrinter::printFlatOffset(
const MCInst *
MI,
unsigned OpNo,
126 uint32_t
Imm =
MI->getOperand(OpNo).getImm();
130 const MCInstrDesc &
Desc =
MII.get(
MI->getOpcode());
138 printU16ImmDecOperand(
MI, OpNo, O);
142void AMDGPUInstPrinter::printSMRDOffset8(
const MCInst *
MI,
unsigned OpNo,
145 printU32ImmOperand(
MI, OpNo, STI, O);
148void AMDGPUInstPrinter::printSMEMOffset(
const MCInst *
MI,
unsigned OpNo,
154void AMDGPUInstPrinter::printSMRDLiteralOffset(
const MCInst *
MI,
unsigned OpNo,
157 printU32ImmOperand(
MI, OpNo, STI, O);
160void AMDGPUInstPrinter::printCPol(
const MCInst *
MI,
unsigned OpNo,
162 auto Imm =
MI->getOperand(OpNo).getImm();
169 O <<
" scale_offset";
171 printTH(
MI, TH, Scope, O);
172 printScope(Scope, O);
191 O <<
" /* unexpected cache policy bit */";
194void AMDGPUInstPrinter::printTH(
const MCInst *
MI, int64_t TH, int64_t Scope,
200 const unsigned Opcode =
MI->getOpcode();
201 const MCInstrDesc &TID =
MII.get(Opcode);
224 O << (IsStore ?
"TH_STORE_" :
"TH_LOAD_");
234 : (IsStore ?
"WB" :
"LU"));
255void AMDGPUInstPrinter::printScope(int64_t Scope,
raw_ostream &O) {
271void AMDGPUInstPrinter::printDim(
const MCInst *
MI,
unsigned OpNo,
273 unsigned Dim =
MI->getOperand(OpNo).getImm();
274 O <<
" dim:SQ_RSRC_IMG_";
283void AMDGPUInstPrinter::printR128A16(
const MCInst *
MI,
unsigned OpNo,
286 printNamedBit(
MI, OpNo, O,
"a16");
288 printNamedBit(
MI, OpNo, O,
"r128");
291void AMDGPUInstPrinter::printFORMAT(
const MCInst *
MI,
unsigned OpNo,
296void AMDGPUInstPrinter::printSymbolicFormat(
const MCInst *
MI,
299 using namespace llvm::AMDGPU::MTBUFFormat;
302 AMDGPU::getNamedOperandIdx(
MI->getOpcode(), AMDGPU::OpName::format);
305 unsigned Val =
MI->getOperand(OpNo).getImm();
307 if (Val == UFMT_DEFAULT)
312 O <<
" format:" << Val;
315 if (Val == DFMT_NFMT_DEFAULT)
322 if (Dfmt != DFMT_DEFAULT) {
324 if (Nfmt != NFMT_DEFAULT) {
328 if (Nfmt != NFMT_DEFAULT) {
333 O <<
" format:" << Val;
341 unsigned Enc =
MRI.getEncodingValue(
Reg);
346 unsigned RegNo = Idx % 0x100;
348 if (RC->
getID() == AMDGPU::VGPR_16RegClassID) {
367 unsigned Enc =
MRI.getEncodingValue(
Reg);
374 unsigned Opc =
Desc.getOpcode();
376 for (
I = 0;
I < 4; ++
I) {
377 if (
Ops.first[
I] != AMDGPU::OpName::NUM_OPERAND_NAMES &&
378 (
unsigned)AMDGPU::getNamedOperandIdx(
Opc,
Ops.first[
I]) == OpNo)
380 if (
Ops.second &&
Ops.second[
I] != AMDGPU::OpName::NUM_OPERAND_NAMES &&
381 (
unsigned)AMDGPU::getNamedOperandIdx(
Opc,
Ops.second[
I]) == OpNo)
386 unsigned OpMSBs = (VgprMSBs >> (
I * 2)) & 3;
400 case AMDGPU::PRIVATE_RSRC_REG:
423void AMDGPUInstPrinter::printVOPDst(
const MCInst *
MI,
unsigned OpNo,
425 auto Opcode =
MI->getOpcode();
443 printRegularOperand(
MI, OpNo, STI, O);
449 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx10:
450 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx10:
451 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
452 case AMDGPU::V_ADD_CO_CI_U32_sdwa_gfx10:
453 case AMDGPU::V_SUB_CO_CI_U32_sdwa_gfx10:
454 case AMDGPU::V_SUBREV_CO_CI_U32_sdwa_gfx10:
455 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx10:
456 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx10:
457 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx10:
458 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx10:
459 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx10:
460 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx10:
461 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx11:
462 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx11:
463 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx11:
464 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx11:
465 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx11:
466 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx11:
467 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx11:
468 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx11:
469 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx11:
470 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx12:
471 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx12:
472 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx12:
473 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx12:
474 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx12:
475 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx12:
476 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx12:
477 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx12:
478 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx12:
479 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx13:
480 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx13:
481 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx13:
482 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx13:
483 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx13:
484 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx13:
485 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx13:
486 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx13:
487 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx13:
488 printDefaultVccOperand(
false, STI, O);
493void AMDGPUInstPrinter::printVINTRPDst(
const MCInst *
MI,
unsigned OpNo,
500 printRegularOperand(
MI, OpNo, STI, O);
503void AMDGPUInstPrinter::printAVLdSt32Align2RegOp(
const MCInst *
MI,
507 MCRegister
Reg =
MI->getOperand(OpNo).getReg();
510 if (MCRegister
SubReg =
MRI.getSubReg(
Reg, AMDGPU::sub0))
515void AMDGPUInstPrinter::printImmediateInt16(uint32_t
Imm,
518 int32_t SImm =
static_cast<int32_t
>(
Imm);
524 if (printImmediateFloat32(
Imm, STI, O))
527 O << formatHex(static_cast<uint64_t>(
Imm & 0xffff));
534 else if (
Imm == 0xBC00)
536 else if (
Imm == 0x3800)
538 else if (
Imm == 0xB800)
540 else if (
Imm == 0x4000)
542 else if (
Imm == 0xC000)
544 else if (
Imm == 0x4400)
546 else if (
Imm == 0xC400)
548 else if (
Imm == 0x3118 && STI.
hasFeature(AMDGPU::FeatureInv2PiInlineImm))
560 else if (
Imm == 0xBF80)
562 else if (
Imm == 0x3F00)
564 else if (
Imm == 0xBF00)
566 else if (
Imm == 0x4000)
568 else if (
Imm == 0xC000)
570 else if (
Imm == 0x4080)
572 else if (
Imm == 0xC080)
574 else if (
Imm == 0x3E22 && STI.
hasFeature(AMDGPU::FeatureInv2PiInlineImm))
582void AMDGPUInstPrinter::printImmediateBF16(uint32_t
Imm,
585 int16_t SImm =
static_cast<int16_t
>(
Imm);
594 O << formatHex(static_cast<uint64_t>(
Imm));
597void AMDGPUInstPrinter::printImmediateF16(uint32_t
Imm,
600 int16_t SImm =
static_cast<int16_t
>(
Imm);
606 uint16_t HImm =
static_cast<uint16_t
>(
Imm);
610 uint64_t
Imm16 =
static_cast<uint16_t
>(
Imm);
614void AMDGPUInstPrinter::printImmediateV216(uint32_t
Imm, uint8_t OpType,
617 int32_t SImm =
static_cast<int32_t
>(
Imm);
626 if (printImmediateFloat32(
Imm, STI, O))
640 uint16_t Lo16 =
static_cast<uint16_t
>(
Imm & 0xFFFF);
641 uint16_t Hi16 =
static_cast<uint16_t
>((
Imm >> 16) & 0xFFFF);
666 O << formatHex(static_cast<uint64_t>(
Imm));
669bool AMDGPUInstPrinter::printImmediateFloat32(uint32_t
Imm,
690 else if (
Imm == 0x3e22f983 &&
691 STI.
hasFeature(AMDGPU::FeatureInv2PiInlineImm))
699void AMDGPUInstPrinter::printImmediate32(uint32_t
Imm,
702 int32_t SImm =
static_cast<int32_t
>(
Imm);
708 if (printImmediateFloat32(
Imm, STI, O))
711 O << formatHex(static_cast<uint64_t>(
Imm));
714void AMDGPUInstPrinter::printImmediate64(uint64_t
Imm,
717 int64_t SImm =
static_cast<int64_t
>(
Imm);
718 if (SImm >= -16 && SImm <= 64) {
741 else if (
Imm == 0x3fc45f306dc9c882 &&
742 STI.
hasFeature(AMDGPU::FeatureInv2PiInlineImm))
743 O <<
"0.15915494309189532";
745 printLiteral64(
Imm, O, IsFP);
748void AMDGPUInstPrinter::printLiteral64(uint64_t
Imm,
raw_ostream &O,
751 O << formatHex(static_cast<uint64_t>(
Hi_32(
Imm)));
756void AMDGPUInstPrinter::printBLGP(
const MCInst *
MI,
unsigned OpNo,
759 unsigned Imm =
MI->getOperand(OpNo).getImm();
764 switch (
MI->getOpcode()) {
765 case AMDGPU::V_MFMA_F64_16X16X4F64_gfx940_acd:
766 case AMDGPU::V_MFMA_F64_16X16X4F64_gfx940_vcd:
767 case AMDGPU::V_MFMA_F64_4X4X4F64_gfx940_acd:
768 case AMDGPU::V_MFMA_F64_4X4X4F64_gfx940_vcd:
769 O <<
" neg:[" << (
Imm & 1) <<
',' << ((
Imm >> 1) & 1) <<
','
770 << ((
Imm >> 2) & 1) <<
']';
775 O <<
" blgp:" <<
Imm;
778void AMDGPUInstPrinter::printDefaultVccOperand(
bool FirstOperand,
792 unsigned OpNo)
const {
796 (
Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC) ||
797 Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC_LO));
801void AMDGPUInstPrinter::printOperand(
const MCInst *
MI,
unsigned OpNo,
804 unsigned Opc =
MI->getOpcode();
806 int ModIdx = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src0_modifiers);
813 (
Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC) ||
814 Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC_LO)))
815 printDefaultVccOperand(
true, STI, O);
817 printRegularOperand(
MI, OpNo, STI, O);
821void AMDGPUInstPrinter::printRegularOperand(
const MCInst *
MI,
unsigned OpNo,
824 const MCInstrDesc &
Desc =
MII.get(
MI->getOpcode());
826 if (OpNo >=
MI->getNumOperands()) {
827 O <<
"/*Missing OP" << OpNo <<
"*/";
831 const MCOperand &
Op =
MI->getOperand(OpNo);
838 const MCOperandInfo &OpInfo =
Desc.operands()[OpNo];
840 int16_t RCID =
MII.getOpRegClassID(
842 const MCRegisterClass &RC =
MRI.getRegClass(RCID);
846 (OpInfo.
RegClass == AMDGPU::SReg_1 ||
847 OpInfo.
RegClass == AMDGPU::SReg_1_XEXEC);
854 O <<
"/*Invalid register, operand has \'" <<
MRI.getRegClassName(&RC)
855 <<
"\' register class*/";
859 }
else if (
Op.isImm()) {
860 const uint8_t OpTy =
Desc.operands()[OpNo].OperandType;
872 printImmediate32(
Op.getImm(), STI, O);
876 printImmediate64(
Op.getImm(), STI, O,
false);
881 printImmediate64(
Op.getImm(), STI, O,
true);
885 printImmediateInt16(
Op.getImm(), STI, O);
889 printImmediateF16(
Op.getImm(), STI, O);
893 printImmediateBF16(
Op.getImm(), STI, O);
903 printImmediateV216(
Op.getImm(), OpTy, STI, O);
912 printImmediate32(
Op.getImm(), STI, O);
913 O <<
"/*Invalid immediate*/";
920 }
else if (
Op.isExpr()) {
921 const MCExpr *
Exp =
Op.getExpr();
922 MAI.printExpr(O, *Exp);
928 switch (
MI->getOpcode()) {
931 case AMDGPU::V_CNDMASK_B32_e32_gfx10:
932 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx10:
933 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx10:
934 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
935 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx10:
936 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx10:
937 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx10:
938 case AMDGPU::V_CNDMASK_B32_dpp8_gfx10:
939 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx10:
940 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx10:
941 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx10:
942 case AMDGPU::V_CNDMASK_B32_e32_gfx11:
943 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx11:
944 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx11:
945 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx11:
946 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx11:
947 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx11:
948 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx11:
949 case AMDGPU::V_CNDMASK_B32_dpp8_gfx11:
950 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx11:
951 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx11:
952 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx11:
953 case AMDGPU::V_CNDMASK_B32_e32_gfx12:
954 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx12:
955 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx12:
956 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx12:
957 case AMDGPU::V_CNDMASK_B32_dpp_gfx12:
958 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx12:
959 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx12:
960 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx12:
961 case AMDGPU::V_CNDMASK_B32_dpp8_gfx12:
962 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx12:
963 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx12:
964 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx12:
965 case AMDGPU::V_CNDMASK_B32_e32_gfx13:
966 case AMDGPU::V_ADD_CO_CI_U32_e32_gfx13:
967 case AMDGPU::V_SUB_CO_CI_U32_e32_gfx13:
968 case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx13:
969 case AMDGPU::V_CNDMASK_B32_dpp_gfx13:
970 case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx13:
971 case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx13:
972 case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx13:
973 case AMDGPU::V_CNDMASK_B32_dpp8_gfx13:
974 case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx13:
975 case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx13:
976 case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx13:
978 case AMDGPU::V_CNDMASK_B32_e32_gfx6_gfx7:
979 case AMDGPU::V_CNDMASK_B32_e32_vi:
980 if ((
int)OpNo == AMDGPU::getNamedOperandIdx(
MI->getOpcode(),
981 AMDGPU::OpName::src1))
982 printDefaultVccOperand(OpNo == 0, STI, O);
988 AMDGPU::getNamedOperandIdx(
MI->getOpcode(), AMDGPU::OpName::soffset);
990 if ((
int)OpNo == SOffsetIdx)
991 printSymbolicFormat(
MI, STI, O);
995void AMDGPUInstPrinter::printOperandAndFPInputMods(
const MCInst *
MI,
999 const MCInstrDesc &
Desc =
MII.get(
MI->getOpcode());
1000 if (needsImpliedVcc(
Desc, OpNo))
1001 printDefaultVccOperand(
true, STI, O);
1003 unsigned InputModifiers =
MI->getOperand(OpNo).getImm();
1008 bool NegMnemo =
false;
1011 if (OpNo + 1 <
MI->getNumOperands() &&
1013 const MCOperand &
Op =
MI->getOperand(OpNo + 1);
1014 NegMnemo =
Op.isImm();
1025 printRegularOperand(
MI, OpNo + 1, STI, O);
1034 switch (
MI->getOpcode()) {
1038 case AMDGPU::V_CNDMASK_B32_sdwa_gfx10:
1039 case AMDGPU::V_CNDMASK_B32_dpp_gfx10:
1040 case AMDGPU::V_CNDMASK_B32_dpp_gfx11:
1041 if ((
int)OpNo + 1 ==
1042 AMDGPU::getNamedOperandIdx(
MI->getOpcode(), AMDGPU::OpName::src1))
1043 printDefaultVccOperand(OpNo == 0, STI, O);
1048void AMDGPUInstPrinter::printOperandAndIntInputMods(
const MCInst *
MI,
1052 const MCInstrDesc &
Desc =
MII.get(
MI->getOpcode());
1053 if (needsImpliedVcc(
Desc, OpNo))
1054 printDefaultVccOperand(
true, STI, O);
1056 unsigned InputModifiers =
MI->getOperand(OpNo).getImm();
1059 printRegularOperand(
MI, OpNo + 1, STI, O);
1064 switch (
MI->getOpcode()) {
1067 case AMDGPU::V_ADD_CO_CI_U32_sdwa_gfx10:
1068 case AMDGPU::V_SUB_CO_CI_U32_sdwa_gfx10:
1069 case AMDGPU::V_SUBREV_CO_CI_U32_sdwa_gfx10:
1070 if ((
int)OpNo + 1 == AMDGPU::getNamedOperandIdx(
MI->getOpcode(),
1071 AMDGPU::OpName::src1))
1072 printDefaultVccOperand(OpNo == 0, STI, O);
1077void AMDGPUInstPrinter::printDPP8(
const MCInst *
MI,
unsigned OpNo,
1083 unsigned Imm =
MI->getOperand(OpNo).getImm();
1085 for (
size_t i = 1; i < 8; ++i) {
1091void AMDGPUInstPrinter::printDPPCtrl(
const MCInst *
MI,
unsigned OpNo,
1094 using namespace AMDGPU::DPP;
1096 unsigned Imm =
MI->getOperand(OpNo).getImm();
1097 const MCInstrDesc &
Desc =
MII.get(
MI->getOpcode());
1101 O <<
" /* DP ALU dpp only supports "
1102 << (
isGFX12(STI) ?
"row_share" :
"row_newbcast") <<
" */";
1105 if (
Imm <= DppCtrl::QUAD_PERM_LAST) {
1111 }
else if ((
Imm >= DppCtrl::ROW_SHL_FIRST) &&
1112 (
Imm <= DppCtrl::ROW_SHL_LAST)) {
1114 }
else if ((
Imm >= DppCtrl::ROW_SHR_FIRST) &&
1115 (
Imm <= DppCtrl::ROW_SHR_LAST)) {
1117 }
else if ((
Imm >= DppCtrl::ROW_ROR_FIRST) &&
1118 (
Imm <= DppCtrl::ROW_ROR_LAST)) {
1120 }
else if (
Imm == DppCtrl::WAVE_SHL1) {
1122 O <<
"/* wave_shl is not supported starting from GFX10 */";
1126 }
else if (
Imm == DppCtrl::WAVE_ROL1) {
1128 O <<
"/* wave_rol is not supported starting from GFX10 */";
1132 }
else if (
Imm == DppCtrl::WAVE_SHR1) {
1134 O <<
"/* wave_shr is not supported starting from GFX10 */";
1138 }
else if (
Imm == DppCtrl::WAVE_ROR1) {
1140 O <<
"/* wave_ror is not supported starting from GFX10 */";
1144 }
else if (
Imm == DppCtrl::ROW_MIRROR) {
1146 }
else if (
Imm == DppCtrl::ROW_HALF_MIRROR) {
1147 O <<
"row_half_mirror";
1148 }
else if (
Imm == DppCtrl::BCAST15) {
1150 O <<
"/* row_bcast is not supported starting from GFX10 */";
1153 O <<
"row_bcast:15";
1154 }
else if (
Imm == DppCtrl::BCAST31) {
1156 O <<
"/* row_bcast is not supported starting from GFX10 */";
1159 O <<
"row_bcast:31";
1160 }
else if ((
Imm >= DppCtrl::ROW_SHARE_FIRST) &&
1161 (
Imm <= DppCtrl::ROW_SHARE_LAST)) {
1163 O <<
"row_newbcast:";
1167 O <<
" /* row_newbcast/row_share is not supported on ASICs earlier "
1168 "than GFX90A/GFX10 */";
1172 }
else if ((
Imm >= DppCtrl::ROW_XMASK_FIRST) &&
1173 (
Imm <= DppCtrl::ROW_XMASK_LAST)) {
1175 O <<
"/* row_xmask is not supported on ASICs earlier than GFX10 */";
1178 O <<
"row_xmask:" <<
formatDec(
Imm - DppCtrl::ROW_XMASK_FIRST);
1180 O <<
"/* Invalid dpp_ctrl value */";
1184void AMDGPUInstPrinter::printDppBoundCtrl(
const MCInst *
MI,
unsigned OpNo,
1187 unsigned Imm =
MI->getOperand(OpNo).getImm();
1189 O <<
" bound_ctrl:1";
1193void AMDGPUInstPrinter::printDppFI(
const MCInst *
MI,
unsigned OpNo,
1195 using namespace llvm::AMDGPU::DPP;
1196 unsigned Imm =
MI->getOperand(OpNo).getImm();
1197 if (
Imm == DPP_FI_1 ||
Imm == DPP8_FI_1) {
1202void AMDGPUInstPrinter::printSDWASel(
const MCInst *
MI,
unsigned OpNo,
1204 using namespace llvm::AMDGPU::SDWA;
1206 unsigned Imm =
MI->getOperand(OpNo).getImm();
1208 case SdwaSel::BYTE_0:
O <<
"BYTE_0";
break;
1209 case SdwaSel::BYTE_1:
O <<
"BYTE_1";
break;
1210 case SdwaSel::BYTE_2:
O <<
"BYTE_2";
break;
1211 case SdwaSel::BYTE_3:
O <<
"BYTE_3";
break;
1212 case SdwaSel::WORD_0:
O <<
"WORD_0";
break;
1213 case SdwaSel::WORD_1:
O <<
"WORD_1";
break;
1214 case SdwaSel::DWORD:
O <<
"DWORD";
break;
1219void AMDGPUInstPrinter::printSDWADstSel(
const MCInst *
MI,
unsigned OpNo,
1223 printSDWASel(
MI, OpNo, O);
1226void AMDGPUInstPrinter::printSDWASrc0Sel(
const MCInst *
MI,
unsigned OpNo,
1230 printSDWASel(
MI, OpNo, O);
1233void AMDGPUInstPrinter::printSDWASrc1Sel(
const MCInst *
MI,
unsigned OpNo,
1237 printSDWASel(
MI, OpNo, O);
1240void AMDGPUInstPrinter::printSDWADstUnused(
const MCInst *
MI,
unsigned OpNo,
1243 using namespace llvm::AMDGPU::SDWA;
1246 unsigned Imm =
MI->getOperand(OpNo).getImm();
1248 case DstUnused::UNUSED_PAD:
O <<
"UNUSED_PAD";
break;
1249 case DstUnused::UNUSED_SEXT:
O <<
"UNUSED_SEXT";
break;
1250 case DstUnused::UNUSED_PRESERVE:
O <<
"UNUSED_PRESERVE";
break;
1255void AMDGPUInstPrinter::printExpSrcN(
const MCInst *
MI,
unsigned OpNo,
1258 unsigned Opc =
MI->getOpcode();
1259 int EnIdx = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::en);
1260 unsigned En =
MI->getOperand(EnIdx).getImm();
1262 int ComprIdx = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::compr);
1265 if (
MI->getOperand(ComprIdx).getImm())
1266 OpNo = OpNo -
N +
N / 2;
1274void AMDGPUInstPrinter::printExpSrc0(
const MCInst *
MI,
unsigned OpNo,
1277 printExpSrcN(
MI, OpNo, STI, O, 0);
1280void AMDGPUInstPrinter::printExpSrc1(
const MCInst *
MI,
unsigned OpNo,
1283 printExpSrcN(
MI, OpNo, STI, O, 1);
1286void AMDGPUInstPrinter::printExpSrc2(
const MCInst *
MI,
unsigned OpNo,
1289 printExpSrcN(
MI, OpNo, STI, O, 2);
1292void AMDGPUInstPrinter::printExpSrc3(
const MCInst *
MI,
unsigned OpNo,
1295 printExpSrcN(
MI, OpNo, STI, O, 3);
1298void AMDGPUInstPrinter::printExpTgt(
const MCInst *
MI,
unsigned OpNo,
1301 using namespace llvm::AMDGPU::Exp;
1304 unsigned Id =
MI->getOperand(OpNo).getImm() & ((1 << 6) - 1);
1309 O <<
' ' << TgtName;
1313 O <<
" invalid_target_" <<
Id;
1318 bool IsPacked,
bool HasDstSel) {
1322 if (!!(
Ops[
I] &
Mod) != DefaultValue)
1332void AMDGPUInstPrinter::printPackedModifier(
const MCInst *
MI,
1336 unsigned Opc =
MI->getOpcode();
1340 std::pair<AMDGPU::OpName, AMDGPU::OpName> MOps[] = {
1341 {AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src0},
1342 {AMDGPU::OpName::src1_modifiers, AMDGPU::OpName::src1},
1343 {AMDGPU::OpName::src2_modifiers, AMDGPU::OpName::src2}};
1346 for (
auto [SrcMod, Src] : MOps) {
1350 int ModIdx = AMDGPU::getNamedOperandIdx(
Opc, SrcMod);
1352 (ModIdx != -1) ?
MI->getOperand(ModIdx).getImm() : DefaultValue;
1361 AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src2_modifiers);
1367 (AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::vdst) != -1) ||
1368 (AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::sdst) != -1);
1376 for (AMDGPU::OpName OpName :
1377 {AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src1_modifiers,
1378 AMDGPU::OpName::src2_modifiers}) {
1379 int Idx = AMDGPU::getNamedOperandIdx(
Opc, OpName);
1387 const bool HasDstSel =
1398 ListSeparator Sep(
",");
1409void AMDGPUInstPrinter::printOpSel(
const MCInst *
MI,
unsigned,
1412 unsigned Opc =
MI->getOpcode();
1415 AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src0_modifiers);
1416 unsigned Mod =
MI->getOperand(SrcMod).getImm();
1419 if (Index0 || Index1)
1420 O <<
" op_sel:[" << Index0 <<
',' << Index1 <<
']';
1424 auto FIN = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src0_modifiers);
1425 auto BCN = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src1_modifiers);
1429 O <<
" op_sel:[" << FI <<
',' << BC <<
']';
1436void AMDGPUInstPrinter::printOpSelHi(
const MCInst *
MI,
unsigned OpNo,
1442void AMDGPUInstPrinter::printNegLo(
const MCInst *
MI,
unsigned OpNo,
1448void AMDGPUInstPrinter::printNegHi(
const MCInst *
MI,
unsigned OpNo,
1454void AMDGPUInstPrinter::printIndexKey8bit(
const MCInst *
MI,
unsigned OpNo,
1457 auto Imm =
MI->getOperand(OpNo).getImm() & 0x7;
1461 O <<
" index_key:" <<
Imm;
1464void AMDGPUInstPrinter::printIndexKey16bit(
const MCInst *
MI,
unsigned OpNo,
1467 auto Imm =
MI->getOperand(OpNo).getImm() & 0x7;
1471 O <<
" index_key:" <<
Imm;
1474void AMDGPUInstPrinter::printIndexKey32bit(
const MCInst *
MI,
unsigned OpNo,
1477 auto Imm =
MI->getOperand(OpNo).getImm() & 0x7;
1481 O <<
" index_key:" <<
Imm;
1484void AMDGPUInstPrinter::printMatrixFMT(
const MCInst *
MI,
unsigned OpNo,
1487 auto Imm =
MI->getOperand(OpNo).getImm() & 0x7;
1491 O <<
" matrix_" << AorB <<
"_fmt:";
1498void AMDGPUInstPrinter::printMatrixAFMT(
const MCInst *
MI,
unsigned OpNo,
1501 printMatrixFMT(
MI, OpNo, STI, O,
'a');
1504void AMDGPUInstPrinter::printMatrixBFMT(
const MCInst *
MI,
unsigned OpNo,
1507 printMatrixFMT(
MI, OpNo, STI, O,
'b');
1510void AMDGPUInstPrinter::printMatrixScale(
const MCInst *
MI,
unsigned OpNo,
1513 auto Imm =
MI->getOperand(OpNo).getImm() & 1;
1517 O <<
" matrix_" << AorB <<
"_scale:";
1524void AMDGPUInstPrinter::printMatrixAScale(
const MCInst *
MI,
unsigned OpNo,
1527 printMatrixScale(
MI, OpNo, STI, O,
'a');
1530void AMDGPUInstPrinter::printMatrixBScale(
const MCInst *
MI,
unsigned OpNo,
1533 printMatrixScale(
MI, OpNo, STI, O,
'b');
1536void AMDGPUInstPrinter::printMatrixScaleFmt(
const MCInst *
MI,
unsigned OpNo,
1539 auto Imm =
MI->getOperand(OpNo).getImm() & 3;
1543 O <<
" matrix_" << AorB <<
"_scale_fmt:";
1550void AMDGPUInstPrinter::printMatrixAScaleFmt(
const MCInst *
MI,
unsigned OpNo,
1553 printMatrixScaleFmt(
MI, OpNo, STI, O,
'a');
1556void AMDGPUInstPrinter::printMatrixBScaleFmt(
const MCInst *
MI,
unsigned OpNo,
1559 printMatrixScaleFmt(
MI, OpNo, STI, O,
'b');
1562void AMDGPUInstPrinter::printInterpSlot(
const MCInst *
MI,
unsigned OpNum,
1565 unsigned Imm =
MI->getOperand(OpNum).getImm();
1577 O <<
"invalid_param_" <<
Imm;
1581void AMDGPUInstPrinter::printInterpAttr(
const MCInst *
MI,
unsigned OpNum,
1584 unsigned Attr =
MI->getOperand(OpNum).getImm();
1585 O <<
"attr" << Attr;
1588void AMDGPUInstPrinter::printInterpAttrChan(
const MCInst *
MI,
unsigned OpNum,
1591 unsigned Chan =
MI->getOperand(OpNum).getImm();
1592 O <<
'.' <<
"xyzw"[Chan & 0x3];
1595void AMDGPUInstPrinter::printGPRIdxMode(
const MCInst *
MI,
unsigned OpNo,
1598 using namespace llvm::AMDGPU::VGPRIndexMode;
1599 unsigned Val =
MI->getOperand(OpNo).getImm();
1601 if ((Val & ~ENABLE_MASK) != 0) {
1602 O << formatHex(static_cast<uint64_t>(Val));
1605 ListSeparator Sep(
",");
1606 for (
unsigned ModeId = ID_MIN; ModeId <=
ID_MAX; ++ModeId) {
1607 if (Val & (1 << ModeId))
1614void AMDGPUInstPrinter::printMemOperand(
const MCInst *
MI,
unsigned OpNo,
1617 printRegularOperand(
MI, OpNo, STI, O);
1619 printRegularOperand(
MI, OpNo + 1, STI, O);
1627 if (
Op.getImm() == 1) {
1638 if (
Op.getImm() == 1)
1645 int Imm =
MI->getOperand(OpNo).getImm();
1659 const unsigned Imm16 =
MI->getOperand(OpNo).getImm();
1670 O <<
"sendmsg(" << MsgName;
1679 O <<
"sendmsg(" << MsgId <<
", " << OpId <<
", " <<
StreamId <<
')';
1692 if (EventName.
empty())
1693 O << formatHex(static_cast<uint64_t>(Imm16));
1704 uint16_t Probe0 = ((0 & AndMask) | OrMask) ^ XorMask;
1709 for (
unsigned Mask = 1 << (
BITMASK_WIDTH - 1); Mask > 0; Mask >>= 1) {
1779 }
else if (AndMask ==
BITMASK_MAX && OrMask == 0 && XorMask > 0 &&
1790 if (GroupSize > 1 &&
1792 OrMask < GroupSize &&
1810 printU16ImmDecOperand(
MI, OpNo, O);
1819 unsigned SImm16 =
MI->getOperand(OpNo).getImm();
1820 unsigned Vmcnt, Expcnt, Lgkmcnt;
1826 bool PrintAll = IsDefaultVmcnt && IsDefaultExpcnt && IsDefaultLgkmcnt;
1830 if (!IsDefaultVmcnt || PrintAll)
1831 O << Sep <<
"vmcnt(" << Vmcnt <<
')';
1833 if (!IsDefaultExpcnt || PrintAll)
1834 O << Sep <<
"expcnt(" << Expcnt <<
')';
1836 if (!IsDefaultLgkmcnt || PrintAll)
1837 O << Sep <<
"lgkmcnt(" << Lgkmcnt <<
')';
1845 uint64_t Imm16 =
MI->getOperand(OpNo).getImm() & 0xffff;
1847 bool HasNonDefaultVal =
false;
1855 if (!IsDefault || !HasNonDefaultVal)
1856 O << Sep << Name <<
'(' << Val <<
')';
1866 const char *BadInstId =
"/* invalid instid value */";
1867 static const std::array<const char *, 12> InstIds = {
1868 "NO_DEP",
"VALU_DEP_1",
"VALU_DEP_2",
1869 "VALU_DEP_3",
"VALU_DEP_4",
"TRANS32_DEP_1",
1870 "TRANS32_DEP_2",
"TRANS32_DEP_3",
"FMA_ACCUM_CYCLE_1",
1871 "SALU_CYCLE_1",
"SALU_CYCLE_2",
"SALU_CYCLE_3"};
1873 const char *BadInstSkip =
"/* invalid instskip value */";
1874 static const std::array<const char *, 6> InstSkips = {
1875 "SAME",
"NEXT",
"SKIP_1",
"SKIP_2",
"SKIP_3",
"SKIP_4"};
1877 unsigned SImm16 =
MI->getOperand(OpNo).getImm();
1878 const char *Prefix =
"";
1880 unsigned Value = SImm16 & 0xF;
1882 const char *Name =
Value < InstIds.size() ? InstIds[
Value] : BadInstId;
1883 O << Prefix <<
"instid0(" << Name <<
')';
1887 Value = (SImm16 >> 4) & 7;
1890 Value < InstSkips.size() ? InstSkips[
Value] : BadInstSkip;
1891 O << Prefix <<
"instskip(" << Name <<
')';
1895 Value = (SImm16 >> 7) & 0xF;
1897 const char *Name =
Value < InstIds.size() ? InstIds[
Value] : BadInstId;
1898 O << Prefix <<
"instid1(" << Name <<
')';
1909 unsigned Val =
MI->getOperand(OpNo).getImm();
1914 if (!HwRegName.
empty()) {
1920 O <<
", " <<
Offset <<
", " << Width;
1935void AMDGPUInstPrinter::printNamedInt(
const MCInst *
MI,
unsigned OpNo,
1938 bool PrintInHex,
bool AlwaysPrint) {
1939 int64_t V =
MI->getOperand(OpNo).getImm();
1940 if (AlwaysPrint || V != 0)
1944void AMDGPUInstPrinter::printBitOp3(
const MCInst *
MI,
unsigned OpNo,
1955 O << formatHex(static_cast<uint64_t>(
Imm));
1958void AMDGPUInstPrinter::printScaleSel(
const MCInst *
MI,
unsigned OpNo,
1961 uint8_t
Imm =
MI->getOperand(OpNo).getImm();
1968#include "AMDGPUGenAsmWriter.inc"
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static void printSwizzleBitmask(const uint16_t AndMask, const uint16_t OrMask, const uint16_t XorMask, raw_ostream &O)
static bool printImmediateBFloat16(uint32_t Imm, const MCSubtargetInfo &STI, raw_ostream &O)
static bool allOpsDefaultValue(const int *Ops, int NumOps, int Mod, bool IsPacked, bool HasDstSel)
static MCRegister getRegForPrinting(MCRegister Reg, const MCRegisterInfo &MRI)
static MCRegister getRegFromMIA(MCRegister Reg, unsigned OpNo, const MCInstrDesc &Desc, const MCRegisterInfo &MRI, const AMDGPUMCInstrAnalysis &MIA)
static bool printImmediateFP16(uint32_t Imm, const MCSubtargetInfo &STI, raw_ostream &O)
Provides AMDGPU specific target descriptions.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
if(auto Err=PB.parsePassPipeline(MPM, Passes)) return wrap(std MPM run * Mod
void printSwizzle(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printWaitEvent(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printEndpgm(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
static const char * getRegisterName(MCRegister Reg)
static void printIfSet(const MCInst *MI, unsigned OpNo, raw_ostream &O, StringRef Asm, StringRef Default="")
void printDepCtr(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printHwreg(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printSendMsg(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
static void printRegOperand(MCRegister Reg, raw_ostream &O, const MCRegisterInfo &MRI)
void printRegName(raw_ostream &OS, MCRegister Reg) override
Print the assembler register name.
void printInst(const MCInst *MI, uint64_t Address, StringRef Annot, const MCSubtargetInfo &STI, raw_ostream &O) override
Print the specified MCInst to the specified raw_ostream.
void printInstruction(const MCInst *MI, uint64_t Address, const MCSubtargetInfo &STI, raw_ostream &O)
void printSWaitCnt(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printOModSI(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printSDelayALU(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
unsigned getVgprMSBs() const
A helper class to return the specified delimiter string after the first invocation of operator String...
void printExpr(raw_ostream &, const MCExpr &) const
format_object< int64_t > formatHex(int64_t Value) const
format_object< int64_t > formatDec(int64_t Value) const
Utility functions to print decimal/hexadecimal values.
const MCRegisterInfo & MRI
void printAnnotation(raw_ostream &OS, StringRef Annot)
Utility function for printing annotations.
const MCInstrAnalysis * MIA
Instances of this class represent a single low-level machine instruction.
Describe properties that are true of each instruction in the target description file.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
bool isLookupRegClassByHwMode() const
Set if this operand is a value that requires the current hwmode to look up its register class.
int16_t RegClass
This specifies the register class enumeration of the operand if the operand is a register.
Instances of this class represent operands of the MCInst class.
MCRegisterClass - Base class of TargetRegisterClass.
unsigned getID() const
getID() - Return the register class ID number.
MCRegister getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
virtual unsigned getHwMode(enum HwModeType type=HwMode_Default) const
HwMode ID corresponding to the 'type' parameter is retrieved from the HwMode bit set of the current s...
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
LLVM Value Representation.
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI)
bool getTgtName(unsigned Id, StringRef &Name, int &Index)
StringRef getHwreg(uint64_t Encoding, const MCSubtargetInfo &STI)
uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId)
bool msgSupportsStream(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI)
void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId, uint16_t &StreamId, const MCSubtargetInfo &STI)
StringRef getMsgName(uint64_t Encoding, const MCSubtargetInfo &STI)
Map from an encoding to the symbolic name for a msg_id immediate.
bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId, const MCSubtargetInfo &STI, bool Strict)
StringRef getMsgOpName(int64_t MsgId, uint64_t Encoding, const MCSubtargetInfo &STI)
Map from an encoding to the symbolic name for a sendmsg operation.
bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI)
bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI, bool Strict)
const char *const IdSymbolic[]
constexpr const char *const ModMatrixFmt[]
constexpr const char *const ModMatrixScaleFmt[]
constexpr const char *const ModMatrixScale[]
StringRef getWaitEventMaskName(uint64_t Encoding, const MCSubtargetInfo &STI)
bool isInlineValue(MCRegister Reg)
void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt)
Decodes Vmcnt, Expcnt and Lgkmcnt from given Waitcnt for given isa Version, and writes decoded values into Vmcnt, Expcnt and Lgkmcnt respectively.
bool isVOPCAsmOnly(unsigned Opc)
unsigned getTemporalHintType(const MCInstrDesc TID)
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST)
For pre-GFX12 FLAT instructions the offset must be positive; MSB is ignored and forced to zero.
bool isGFX12Plus(const MCSubtargetInfo &STI)
const MCRegisterClass * getVGPRPhysRegClass(MCRegister Reg, const MCRegisterInfo &MRI)
bool isGFX940(const MCSubtargetInfo &STI)
LLVM_ABI IsaVersion getIsaVersion(StringRef GPU)
LLVM_READNONE bool isLegalDPALU_DPPControl(const MCSubtargetInfo &ST, unsigned DC)
bool isSI(const MCSubtargetInfo &STI)
LLVM_READONLY bool hasNamedOperand(uint64_t Opcode, OpName NamedIdx)
bool getVOP3IsSingle(unsigned Opc)
bool getVOP1IsSingle(unsigned Opc)
bool isGFX90A(const MCSubtargetInfo &STI)
LLVM_READONLY const MIMGDimInfo * getMIMGDimInfoByEncoding(uint8_t DimEnc)
bool isGFX12(const MCSubtargetInfo &STI)
MCRegister getVGPRWithMSBs(MCRegister Reg, unsigned MSBs, const MCRegisterInfo &MRI)
If Reg is a low VGPR return a corresponding high VGPR with MSBs set.
unsigned getVmcntBitMask(const IsaVersion &Version)
LLVM_READNONE bool isInlinableIntLiteral(int64_t Literal)
Is this literal inlinable, and not one of the values intended for floating point values.
unsigned getLgkmcntBitMask(const IsaVersion &Version)
std::pair< const AMDGPU::OpName *, const AMDGPU::OpName * > getVGPRLoweringOperandTables(const MCInstrDesc &Desc)
unsigned getExpcntBitMask(const IsaVersion &Version)
bool isGFX11Plus(const MCSubtargetInfo &STI)
bool isGFX10Plus(const MCSubtargetInfo &STI)
@ OPERAND_REG_INLINE_C_FP64
@ OPERAND_REG_INLINE_C_BF16
@ OPERAND_REG_INLINE_C_V2BF16
@ OPERAND_REG_IMM_V2INT16
@ OPERAND_REG_IMM_INT32
Operands with register, 32-bit, or 64-bit immediate.
@ OPERAND_REG_IMM_V2FP16_SPLAT
@ OPERAND_REG_INLINE_C_INT64
@ OPERAND_REG_INLINE_C_INT16
Operands with register or inline constant.
@ OPERAND_REG_IMM_NOINLINE_V2FP16
@ OPERAND_REG_INLINE_C_V2FP16
@ OPERAND_REG_INLINE_AC_INT32
Operands with an AccVGPR register or inline constant.
@ OPERAND_REG_INLINE_AC_FP32
@ OPERAND_REG_IMM_V2INT32
@ OPERAND_REG_INLINE_C_FP32
@ OPERAND_REG_INLINE_C_INT32
@ OPERAND_REG_INLINE_C_V2INT16
@ OPERAND_REG_INLINE_AC_FP64
@ OPERAND_REG_INLINE_C_FP16
@ OPERAND_INLINE_SPLIT_BARRIER_INT32
bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
bool isGFX9Plus(const MCSubtargetInfo &STI)
bool isCvt_F32_Fp8_Bf8_e64(unsigned Opc)
MCRegister mc2PseudoReg(MCRegister Reg)
Convert hardware register Reg to a pseudo register.
bool isCI(const MCSubtargetInfo &STI)
bool getVOP2IsSingle(unsigned Opc)
bool isPermlane16(unsigned Opc)
Scope
Defines the scope in which this symbol should be visible: Default – Visible in the public interface o...
This is an optimization pass for GlobalISel generic memory operations.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64 bit value.
@ Mod
The access may modify the value stored in memory.
To bit_cast(const From &from) noexcept
DWARFExpression::Operation Op
constexpr int32_t SignExtend32(uint32_t X)
Sign-extend the number in the bottom B bits of X to a 32-bit integer.
@ Default
The result values are uniform if and only if all operands are uniform.
static constexpr ValueType Default
static std::tuple< typename Fields::ValueType... > decode(uint64_t Encoded)
Instruction set architecture version.