#define DEBUG_TYPE "amdgpu-disassembler"

  (isGFX10Plus() ? AMDGPU::EncValues::SGPR_MAX_GFX10                          \
                 : AMDGPU::EncValues::SGPR_MAX_SI)

      MAI(*Ctx.getAsmInfo()),
      TargetMaxInstBytes(MAI.getMaxInstLength(&STI)),
      CodeObjectVersion(AMDGPU::getDefaultAMDHSACodeObjectVersion()) {
  if (!STI.hasFeature(AMDGPU::FeatureGCN3Encoding) && !isGFX10Plus())

    createConstantSymbolExpr(Symbol, Code);
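  // Well-known microcode version bits, exposed as assembler symbols so that
  // s_version immediates can be printed symbolically (see decodeVersionImm).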
  UCVersionW64Expr = createConstantSymbolExpr("UC_VERSION_W64_BIT", 0x2000);
  UCVersionW32Expr = createConstantSymbolExpr("UC_VERSION_W32_BIT", 0x4000);
  UCVersionMDPExpr = createConstantSymbolExpr("UC_VERSION_MDP_BIT", 0x8000);
                                AMDGPU::OpName Name) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), Name);

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2, 0))

  if (DAsm->isGFX12Plus()) {
  } else if (DAsm->isVI()) {

  return addOperand(Inst, DAsm->decodeBoolReg(Inst, Val));

  return addOperand(Inst, DAsm->decodeSplitBarrier(Inst, Val));

  return addOperand(Inst, DAsm->decodeDpp8FI(Val));
#define DECODE_OPERAND(StaticDecoderName, DecoderName)                        \
  static DecodeStatus StaticDecoderName(MCInst &Inst, unsigned Imm,           \
                                        const MCDisassembler *Decoder) {      \
    auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);             \
    return addOperand(Inst, DAsm->DecoderName(Imm));                          \

#define DECODE_OPERAND_REG_8(RegClass)                                        \
  static DecodeStatus Decode##RegClass##RegisterClass(                        \
      MCInst &Inst, unsigned Imm, uint64_t,                                   \
      const MCDisassembler *Decoder) {                                        \
    assert(Imm < (1 << 8) && "8-bit encoding");                               \
    auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);             \
        Inst, DAsm->createRegOperand(AMDGPU::RegClass##RegClassID, Imm));     \

#define DECODE_SrcOp(Name, EncSize, OpWidth, EncImm)                          \
  static DecodeStatus Name(MCInst &Inst, unsigned Imm, uint64_t,              \
                           const MCDisassembler *Decoder) {                   \
    assert(Imm < (1 << EncSize) && #EncSize "-bit encoding");                 \
    auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);             \
    return addOperand(Inst, DAsm->decodeSrcOp(Inst, OpWidth, EncImm));        \
                                unsigned OpWidth, unsigned Imm, unsigned EncImm,
  assert(Imm < (1U << EncSize) && "Operand doesn't fit encoding!");
  return addOperand(Inst, DAsm->decodeSrcOp(Inst, OpWidth, EncImm));

#define DECODE_OPERAND_SREG_7(RegClass, OpWidth)                              \
  DECODE_SrcOp(Decode##RegClass##RegisterClass, 7, OpWidth, Imm)

#define DECODE_OPERAND_SREG_8(RegClass, OpWidth)                              \
  DECODE_SrcOp(Decode##RegClass##RegisterClass, 8, OpWidth, Imm)
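// The templated decoder callbacks below are instantiated once per operand
// width and forward to decodeSrcOp with the matching encoding size; the
// `| 512` variants select the AGPR half of the combined VGPR/AGPR space.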
template <unsigned OpWidth>

template <unsigned OpWidth>
  return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, Decoder);

template <unsigned OpWidth>
  return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm | 512, Decoder);

template <unsigned OpWidth>
  return decodeSrcOp(Inst, 10, OpWidth, Imm, Imm, Decoder);

template <unsigned OpWidth>
  return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, Decoder);

template <unsigned OpWidth>
  return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm | 512, Decoder);
  assert((Imm & (1 << 8)) == 0 && "Imm{8} should not be used");

  bool IsHi = Imm & (1 << 9);
  unsigned RegIdx = Imm & 0xff;
  return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));

  bool IsHi = Imm & (1 << 7);
  unsigned RegIdx = Imm & 0x7f;
  return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));

template <unsigned OpWidth>
    bool IsHi = Imm & (1 << 7);
    unsigned RegIdx = Imm & 0x7f;
    return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
  return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(Inst, OpWidth, Imm & 0xFF));

template <unsigned OpWidth>
    bool IsHi = Imm & (1 << 9);
    unsigned RegIdx = Imm & 0xff;
    return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
  return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(Inst, OpWidth, Imm & 0xFF));

  bool IsHi = Imm & (1 << 9);
  unsigned RegIdx = Imm & 0xff;
  return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
  return addOperand(Inst, DAsm->decodeMandatoryLiteralConstant(Imm));

  return addOperand(Inst, DAsm->decodeMandatoryLiteral64Constant(Imm));

                                          uint64_t Addr, const void *Decoder) {
  return addOperand(Inst, DAsm->decodeVOPDDstYOp(Inst, Val));

  return addOperand(Inst, DAsm->decodeSrcOp(Inst, Opw, Imm | 256));

template <unsigned Opw>

  assert(Imm < (1 << 9) && "9-bit encoding");
  return addOperand(Inst, DAsm->decodeSrcOp(Inst, 64, Imm));

#define DECODE_SDWA(DecName)                                                   \
  DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)

  return addOperand(Inst, DAsm->decodeVersionImm(Imm));

#include "AMDGPUGenDisassemblerTables.inc"
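// Width in bits of the raw instruction container type handed to the
// TableGen'erated decodeInstruction() routine.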
template <> constexpr uint32_t InsnBitWidth<uint32_t> = 32;
template <> constexpr uint32_t InsnBitWidth<uint64_t> = 64;
template <> constexpr uint32_t InsnBitWidth<std::bitset<96>> = 96;
template <> constexpr uint32_t InsnBitWidth<std::bitset<128>> = 128;
template <typename InsnType>
  const auto SavedBytes = Bytes;
      decodeInstruction(Table, TmpInst, Inst, Address, this, STI);
  Comments << LocalComments;

template <typename InsnType>
  for (const uint8_t *T : {Table1, Table2}) {

  Bytes = Bytes.slice(sizeof(T));
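// eat12Bytes / eat16Bytes pull the next 96 or 128 bits off the byte stream as
// two little-endian halves and advance Bytes past what was consumed.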
  Bytes = Bytes.slice(8);
  Bytes = Bytes.slice(4);
  return (Hi << 64) | Lo;

  Bytes = Bytes.slice(8);
  Bytes = Bytes.slice(8);
  return (Hi << 64) | Lo;
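// decodeImmOperands rewrites raw immediate encodings (inline constants,
// 16-bit splats, and similar) into their final operand values based on the
// operand type recorded in the instruction description.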
void AMDGPUDisassembler::decodeImmOperands(MCInst &MI,
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    if (OpNo >= MI.getNumOperands())

    MCOperand &Op = MI.getOperand(OpNo);
    int64_t Imm = Op.getImm();

    switch (OpDesc.OperandType) {
      Imm = (F16Val << 16) | (F16Val & 0xFFFF);
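// getInstruction: peel candidate encodings off the byte stream, trying the
// generated decoder tables in turn and restoring Bytes between attempts.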
  unsigned MaxInstBytesNum = std::min((size_t)TargetMaxInstBytes, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

    Size = std::min((size_t)4, Bytes_.size());

    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    if (STI.hasFeature(AMDGPU::Feature64BitLiterals)) {
      Bytes = Bytes_.slice(4, MaxInstBytesNum - 4);

    Bytes = Bytes_.slice(0, MaxInstBytesNum);
  } else if (Bytes.size() >= 16 &&
             STI.hasFeature(AMDGPU::FeatureGFX950Insts)) {

    Bytes = Bytes_.slice(0, MaxInstBytesNum);

  if (Bytes.size() >= 8) {
    if (STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding) &&
    if (STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem) &&
    if (STI.hasFeature(AMDGPU::FeatureGFX950Insts) &&
    if (STI.hasFeature(AMDGPU::FeatureFmaMixInsts) &&
    if (STI.hasFeature(AMDGPU::FeatureGFX940Insts) &&
    if (STI.hasFeature(AMDGPU::FeatureGFX90AInsts) &&

  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  if (Bytes.size() >= 4) {
    if (STI.hasFeature(AMDGPU::FeatureGFX950Insts) &&
    if (STI.hasFeature(AMDGPU::FeatureGFX90AInsts) &&
    if (STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding) &&

  decodeImmOperands(MI, *MCII);
  else if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dpp8) !=

                         AMDGPU::OpName::src2_modifiers);

  if (MI.getOpcode() == AMDGPU::V_CVT_SR_BF8_F32_e64_dpp ||
      MI.getOpcode() == AMDGPU::V_CVT_SR_FP8_F32_e64_dpp) {
                           AMDGPU::OpName::src2_modifiers);

  if (MCII->get(MI.getOpcode()).TSFlags &
    int CPolPos = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                             AMDGPU::OpName::cpol);
      if (MI.getNumOperands() <= (unsigned)CPolPos) {
                             AMDGPU::OpName::cpol);
        MI.getOperand(CPolPos).setImm(MI.getOperand(CPolPos).getImm() | CPol);

  if ((MCII->get(MI.getOpcode()).TSFlags &
      (STI.hasFeature(AMDGPU::FeatureGFX90AInsts))) {
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
    if (TFEOpIdx != -1) {
      auto *TFEIter = MI.begin();
      std::advance(TFEIter, TFEOpIdx);
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::offset);
    if (OffsetIdx != -1) {
      uint32_t Imm = MI.getOperand(OffsetIdx).getImm();
      if (SignedOffset < 0)

  if (MCII->get(MI.getOpcode()).TSFlags &
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::swz);
    if (SWZOpIdx != -1) {
      auto *SWZIter = MI.begin();
      std::advance(SWZIter, SWZOpIdx);

        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
    unsigned NSAArgs = RsrcIdx - VAddr0Idx - 1;
    if (VAddr0Idx >= 0 && NSAArgs > 0) {
      unsigned NSAWords = (NSAArgs + 3) / 4;
      if (Bytes.size() < 4 * NSAWords)
      for (unsigned i = 0; i < NSAArgs; ++i) {
        const unsigned VAddrIdx = VAddr0Idx + 1 + i;
            MCII->getOpRegClassID(Desc.operands()[VAddrIdx], HwModeRegClass);
      Bytes = Bytes.slice(4 * NSAWords);
  if (MCII->get(MI.getOpcode()).TSFlags &

  int VDstIn_Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                              AMDGPU::OpName::vdst_in);
  if (VDstIn_Idx != -1) {
    int Tied = MCII->get(MI.getOpcode()).getOperandConstraint(VDstIn_Idx,
    if (Tied != -1 && (MI.getNumOperands() <= (unsigned)VDstIn_Idx ||
                       !MI.getOperand(VDstIn_Idx).isReg() ||
                       MI.getOperand(VDstIn_Idx).getReg() !=
                           MI.getOperand(Tied).getReg())) {
      if (MI.getNumOperands() > (unsigned)VDstIn_Idx)
        MI.erase(&MI.getOperand(VDstIn_Idx));
                           AMDGPU::OpName::vdst_in);

      MCII->get(MI.getOpcode()).getNumDefs() == 0 &&
      MCII->get(MI.getOpcode()).hasImplicitDefOfPhysReg(AMDGPU::EXEC)) {
    auto ExecEncoding = MRI.getEncodingValue(AMDGPU::EXEC_LO);
    if (Bytes_[0] != ExecEncoding)

  Size = MaxInstBytesNum - Bytes.size();
  if (STI.hasFeature(AMDGPU::FeatureGFX11Insts)) {
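  // These inreg VINTERP variants carry an op_sel field that the raw encoding
  // does not materialise as an operand, so a default value is inserted for
  // the opcodes listed below.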
  if (MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_t16_gfx11 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_fake16_gfx11 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_t16_gfx12 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_fake16_gfx12 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_t16_gfx13 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_fake16_gfx13 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_t16_gfx11 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_fake16_gfx11 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_t16_gfx12 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_fake16_gfx12 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_t16_gfx13 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_fake16_gfx13 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_t16_gfx11 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_fake16_gfx11 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_t16_gfx12 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_fake16_gfx12 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_t16_gfx13 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_fake16_gfx13 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_t16_gfx11 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_fake16_gfx11 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_t16_gfx12 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_fake16_gfx12 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_t16_gfx13 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_fake16_gfx13) {
  if (STI.hasFeature(AMDGPU::FeatureGFX9) ||
      STI.hasFeature(AMDGPU::FeatureGFX10)) {
  } else if (STI.hasFeature(AMDGPU::FeatureVolcanicIslands)) {
    int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
                           AMDGPU::OpName::sdst);
    return MO.setReg(MRI.getSubReg(MO.getReg(), AMDGPU::sub0_sub1_sub2_sub3));
        MRI.getSubReg(MO.getReg(), AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5));
            MO.getReg(), AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7)) {
          BaseReg, AMDGPU::sub0, &MRI.getRegClass(AMDGPU::VReg_384RegClassID));
      return MO.setReg(NewReg);
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::blgp);
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::cbsz);

  unsigned CBSZ = MI.getOperand(CbszIdx).getImm();
  unsigned BLGP = MI.getOperand(BlgpIdx).getImm();
  if (!AdjustedRegClassOpcode ||
      AdjustedRegClassOpcode->Opcode == MI.getOpcode())

  MI.setOpcode(AdjustedRegClassOpcode->Opcode);
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);

      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::matrix_a_fmt);
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::matrix_b_fmt);

  unsigned FmtA = MI.getOperand(FmtAIdx).getImm();
  unsigned FmtB = MI.getOperand(FmtBIdx).getImm();
  if (!AdjustedRegClassOpcode ||
      AdjustedRegClassOpcode->Opcode == MI.getOpcode())

  MI.setOpcode(AdjustedRegClassOpcode->Opcode);
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
                                        bool IsVOP3P = false) {
  unsigned Opc = MI.getOpcode();
  const AMDGPU::OpName ModOps[] = {AMDGPU::OpName::src0_modifiers,
                                   AMDGPU::OpName::src1_modifiers,
                                   AMDGPU::OpName::src2_modifiers};
  for (int J = 0; J < 3; ++J) {
    int OpIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);

    unsigned Val = MI.getOperand(OpIdx).getImm();
    } else if (J == 0) {
  const unsigned Opc = MI.getOpcode();
      MRI.getRegClass(AMDGPU::VGPR_16RegClassID);
  constexpr std::array<std::tuple<AMDGPU::OpName, AMDGPU::OpName, unsigned>, 4>
      OpAndOpMods = {{{AMDGPU::OpName::src0, AMDGPU::OpName::src0_modifiers,
                      {AMDGPU::OpName::src1, AMDGPU::OpName::src1_modifiers,
                      {AMDGPU::OpName::src2, AMDGPU::OpName::src2_modifiers,
                      {AMDGPU::OpName::vdst, AMDGPU::OpName::src0_modifiers,
  for (const auto &[OpName, OpModsName, OpSelMask] : OpAndOpMods) {
    int OpModsIdx = AMDGPU::getNamedOperandIdx(Opc, OpModsName);
    if (OpIdx == -1 || OpModsIdx == -1)

    unsigned OpEnc = MRI.getEncodingValue(Op.getReg());
    const MCOperand &OpMods = MI.getOperand(OpModsIdx);
    unsigned ModVal = OpMods.getImm();
    if (ModVal & OpSelMask) {
  constexpr int DST_IDX = 0;
  auto Opcode = MI.getOpcode();
  const auto &Desc = MCII->get(Opcode);
  auto OldIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::old);

  if (OldIdx != -1 && Desc.getOperandConstraint(
        AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2),

  assert(MI.getNumOperands() + 1 < MCII->get(MI.getOpcode()).getNumOperands());
                       AMDGPU::OpName::src2_modifiers);
  unsigned Opc = MI.getOpcode();
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst_in);
  if (VDstInIdx != -1)

  unsigned DescNumOps = MCII->get(Opc).getNumOperands();
  if (MI.getNumOperands() < DescNumOps &&
                         AMDGPU::OpName::op_sel);
  if (MI.getNumOperands() < DescNumOps &&
                         AMDGPU::OpName::src0_modifiers);
  if (MI.getNumOperands() < DescNumOps &&
                         AMDGPU::OpName::src1_modifiers);

      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst_in);
  if (VDstInIdx != -1)

  unsigned Opc = MI.getOpcode();
  unsigned DescNumOps = MCII->get(Opc).getNumOperands();
  if (MI.getNumOperands() < DescNumOps &&
                         AMDGPU::OpName::op_sel);
  if (MRI.getRegClass(AMDGPU::VGPR_32RegClassID).contains(Sub0))
    BaseReg = AMDGPU::VGPR0;
  else if (MRI.getRegClass(AMDGPU::AGPR_32RegClassID).contains(Sub0))
    BaseReg = AMDGPU::AGPR0;

  assert(BaseReg && "Only vector registers expected");
  return (Sub0 - BaseReg + NumRegs <= 256) ? Reg : MCRegister();
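// convertMIMGInst: recompute the real vdata/vaddr widths from dmask, d16, tfe
// and the (partial) NSA layout, then switch to the MIMG opcode variant whose
// register classes match and trim any now-surplus address operands.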
  auto TSFlags = MCII->get(MI.getOpcode()).TSFlags;

  int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::vdst);

  int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vdata);
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
                              ? AMDGPU::OpName::srsrc
                              : AMDGPU::OpName::rsrc;
  int RsrcIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), RsrcOpName);
  int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::dmask);

  int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                          AMDGPU::OpName::tfe);
  int D16Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                          AMDGPU::OpName::d16);

  if (BaseOpcode->BVH) {

  bool IsAtomic = (VDstIdx != -1);

  bool IsPartialNSA = false;
  unsigned AddrSize = Info->VAddrDwords;

        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dim);
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::a16);
    const bool IsA16 = (A16Idx != -1 && MI.getOperand(A16Idx).getImm());

    IsNSA = Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA ||
            Info->MIMGEncoding == AMDGPU::MIMGEncGfx11NSA ||
            Info->MIMGEncoding == AMDGPU::MIMGEncGfx12;
      if (!IsVSample && AddrSize > 12)

    if (AddrSize > Info->VAddrDwords) {
      if (!STI.hasFeature(AMDGPU::FeaturePartialNSAEncoding)) {
      IsPartialNSA = true;

  unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
  unsigned DstSize = IsGather4 ? 4 : std::max(llvm::popcount(DMask), 1);

  bool D16 = D16Idx >= 0 && MI.getOperand(D16Idx).getImm();
    DstSize = (DstSize + 1) / 2;

  if (TFEIdx != -1 && MI.getOperand(TFEIdx).getImm())

  if (DstSize == Info->VDataDwords && AddrSize == Info->VAddrDwords)

  if (NewOpcode == -1)

  if (DstSize != Info->VDataDwords) {
    auto DataRCID = MCII->getOpRegClassID(
        MCII->get(NewOpcode).operands()[VDataIdx], HwModeRegClass);

    MCRegister VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
    Vdata0 = (VdataSub0 != 0) ? VdataSub0 : Vdata0;

    NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0, &NewRC);

  int VAddrSAIdx = IsPartialNSA ? (RsrcIdx - 1) : VAddr0Idx;

  if (STI.hasFeature(AMDGPU::FeatureNSAEncoding) && (!IsNSA || IsPartialNSA) &&
      AddrSize != Info->VAddrDwords) {
    MCRegister VAddrSA = MI.getOperand(VAddrSAIdx).getReg();
    MCRegister VAddrSubSA = MRI.getSubReg(VAddrSA, AMDGPU::sub0);
    VAddrSA = VAddrSubSA ? VAddrSubSA : VAddrSA;

    auto AddrRCID = MCII->getOpRegClassID(
        MCII->get(NewOpcode).operands()[VAddrSAIdx], HwModeRegClass);
    NewVAddrSA = MRI.getMatchingSuperReg(VAddrSA, AMDGPU::sub0, &NewRC);

  MI.setOpcode(NewOpcode);

  if (NewVdata != AMDGPU::NoRegister) {

    assert(AddrSize <= Info->VAddrDwords);
    MI.erase(MI.begin() + VAddr0Idx + AddrSize,
             MI.begin() + VAddr0Idx + Info->VAddrDwords);
  unsigned Opc = MI.getOpcode();
  unsigned DescNumOps = MCII->get(Opc).getNumOperands();

  if (MI.getNumOperands() < DescNumOps &&
  if (MI.getNumOperands() < DescNumOps &&
                         AMDGPU::OpName::op_sel);
  if (MI.getNumOperands() < DescNumOps &&
                         AMDGPU::OpName::op_sel_hi);
  if (MI.getNumOperands() < DescNumOps &&
                         AMDGPU::OpName::neg_lo);
  if (MI.getNumOperands() < DescNumOps &&
                         AMDGPU::OpName::neg_hi);
  unsigned Opc = MI.getOpcode();
  unsigned DescNumOps = MCII->get(Opc).getNumOperands();

  if (MI.getNumOperands() < DescNumOps &&
  if (MI.getNumOperands() < DescNumOps &&
                         AMDGPU::OpName::src0_modifiers);

  if (MI.getNumOperands() < DescNumOps &&
                         AMDGPU::OpName::src1_modifiers);

  unsigned Opc = MI.getOpcode();
  unsigned DescNumOps = MCII->get(Opc).getNumOperands();

  if (MI.getNumOperands() < DescNumOps &&
                         AMDGPU::OpName::op_sel);

  assert(HasLiteral && "Should have decoded a literal");
                                       const Twine &ErrMsg) const {

                                                unsigned Val) const {
  const auto &RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
                      ": unknown register " + Twine(Val));

                                                 unsigned Val) const {
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
  case AMDGPU::SGPR_96RegClassID:
  case AMDGPU::TTMP_96RegClassID:
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::TTMP_256RegClassID:
  case AMDGPU::SGPR_288RegClassID:
  case AMDGPU::TTMP_288RegClassID:
  case AMDGPU::SGPR_320RegClassID:
  case AMDGPU::TTMP_320RegClassID:
  case AMDGPU::SGPR_352RegClassID:
  case AMDGPU::TTMP_352RegClassID:
  case AMDGPU::SGPR_384RegClassID:
  case AMDGPU::TTMP_384RegClassID:
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::TTMP_512RegClassID:

  if (Val % (1 << shift)) {
              << ": scalar reg isn't aligned " << Val;
  unsigned RegIdxInVGPR16 = RegIdx * 2 + (IsHi ? 1 : 0);

         "Should only decode multiple kimm with VOPD, check VSrc operand types");
      return errOperand(Val, "More than one unique literal is illegal");

      return errOperand(Val, "More than one unique literal is illegal");
    bool UseLit64 = Hi_32(Literal) == 0;

    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                               Twine(Bytes.size()));

  bool HasInv2Pi = true;

  int64_t Val = Literal;
  bool UseLit = false;

  assert(STI.hasFeature(AMDGPU::Feature64BitLiterals));

  if (Bytes.size() < 8) {
    return errOperand(0, "cannot read literal64, inst bytes left " +
                             Twine(Bytes.size()));

  bool UseLit64 = Hi_32(Literal) == 0;

  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
          (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
          (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));

  return 0x3fc45f306dc9c882;
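// Map an operand width in bits to the corresponding VGPR/AGPR/SGPR/TTMP
// register class ID.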
    return VGPR_32RegClassID;
    return VReg_64RegClassID;
    return VReg_96RegClassID;
    return VReg_128RegClassID;
    return VReg_160RegClassID;
    return VReg_192RegClassID;
    return VReg_256RegClassID;
    return VReg_288RegClassID;
    return VReg_320RegClassID;
    return VReg_352RegClassID;
    return VReg_384RegClassID;
    return VReg_512RegClassID;
    return VReg_1024RegClassID;

    return AGPR_32RegClassID;
    return AReg_64RegClassID;
    return AReg_96RegClassID;
    return AReg_128RegClassID;
    return AReg_160RegClassID;
    return AReg_256RegClassID;
    return AReg_288RegClassID;
    return AReg_320RegClassID;
    return AReg_352RegClassID;
    return AReg_384RegClassID;
    return AReg_512RegClassID;
    return AReg_1024RegClassID;

    return SGPR_32RegClassID;
    return SGPR_64RegClassID;
    return SGPR_96RegClassID;
    return SGPR_128RegClassID;
    return SGPR_160RegClassID;
    return SGPR_256RegClassID;
    return SGPR_288RegClassID;
    return SGPR_320RegClassID;
    return SGPR_352RegClassID;
    return SGPR_384RegClassID;
    return SGPR_512RegClassID;

    return TTMP_32RegClassID;
    return TTMP_64RegClassID;
    return TTMP_128RegClassID;
    return TTMP_256RegClassID;
    return TTMP_288RegClassID;
    return TTMP_320RegClassID;
    return TTMP_352RegClassID;
    return TTMP_384RegClassID;
    return TTMP_512RegClassID;

  unsigned TTmpMin = isGFX9Plus() ? TTMP_GFX9PLUS_MIN : TTMP_VI_MIN;
  unsigned TTmpMax = isGFX9Plus() ? TTMP_GFX9PLUS_MAX : TTMP_VI_MAX;

  return (TTmpMin <= Val && Val <= TTmpMax) ? Val - TTmpMin : -1;
                                          unsigned Val) const {
  bool IsAGPR = Val & 512;

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {

                                                 unsigned Val) const {
  assert(Val < (1 << 8) && "9-bit Src encoding when Val{8} is 0");
  static_assert(SGPR_MIN == 0);

  if ((INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX) ||
      (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX) ||
      Val == LITERAL_CONST)

  if (Val == LITERAL64_CONST && STI.hasFeature(AMDGPU::Feature64BitLiterals)) {

                                                unsigned Val) const {
      AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::vdstX);
  unsigned XDstReg = MRI.getEncodingValue(Inst.getOperand(VDstXInd).getReg());
  Val |= ~XDstReg & 1;
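// SDWA source operands: on GFX9/GFX10 the 9-bit SDWA src encoding can name a
// VGPR, an SGPR, a TTMP register, or an inline constant.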
                                            const unsigned Val) const {

  if (STI.hasFeature(AMDGPU::FeatureGFX9) ||
      STI.hasFeature(AMDGPU::FeatureGFX10)) {
    if (int(SDWA9EncValues::SRC_VGPR_MIN) <= int(Val) &&
        Val <= SDWA9EncValues::SRC_VGPR_MAX) {
                              Val - SDWA9EncValues::SRC_VGPR_MIN);
    if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
        Val <= (isGFX10Plus() ? SDWA9EncValues::SRC_SGPR_MAX_GFX10
                              : SDWA9EncValues::SRC_SGPR_MAX_SI)) {
                               Val - SDWA9EncValues::SRC_SGPR_MIN);
    if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_TTMP_MAX) {
                               Val - SDWA9EncValues::SRC_TTMP_MIN);

    const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;

    if ((INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX) ||
        (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX))

  if (STI.hasFeature(AMDGPU::FeatureVolcanicIslands))

  assert((STI.hasFeature(AMDGPU::FeatureGFX9) ||
          STI.hasFeature(AMDGPU::FeatureGFX10)) &&
         "SDWAVopcDst should be present only on GFX9+");

  bool IsWave32 = STI.hasFeature(AMDGPU::FeatureWavefrontSize32);

  if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
    Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
                                            unsigned Val) const {
  return STI.hasFeature(AMDGPU::FeatureWavefrontSize32)

                                                unsigned Val) const {

  auto [Version, W64, W32, MDP] = Encoding::decode(Imm);

  if (Encoding::encode(Version, W64, W32, MDP) != Imm)

  if (I == Versions.end())

  return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);

  return STI.hasFeature(AMDGPU::FeatureGFX90AInsts);

  return STI.hasFeature(AMDGPU::FeatureGFX11);

  return STI.hasFeature(AMDGPU::FeatureGFX12);

  return STI.hasFeature(AMDGPU::FeatureArchitectedFlatScratch);

  if (PopCount == 1) {
    S << "bit (" << (TrailingZeros + BaseBytes * CHAR_BIT) << ')';
    S << "bits in range ("
      << (TrailingZeros + PopCount - 1 + BaseBytes * CHAR_BIT) << ':'
      << (TrailingZeros + BaseBytes * CHAR_BIT) << ')';
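// Helpers for dumping a kernel descriptor: print fields as .amdhsa_* assembler
// directives and reject any reserved bits that are set.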
#define GET_FIELD(MASK) (AMDHSA_BITS_GET(FourByteBuffer, MASK))
#define PRINT_DIRECTIVE(DIRECTIVE, MASK)                                       \
    KdStream << Indent << DIRECTIVE " " << GET_FIELD(MASK) << '\n';            \

#define PRINT_PSEUDO_DIRECTIVE_COMMENT(DIRECTIVE, MASK)                        \
    KdStream << Indent << MAI.getCommentString() << ' ' << DIRECTIVE " "       \
             << GET_FIELD(MASK) << '\n';                                       \

#define CHECK_RESERVED_BITS_IMPL(MASK, DESC, MSG)                              \
    if (FourByteBuffer & (MASK)) {                                             \
      return createStringError(std::errc::invalid_argument,                    \
                               "kernel descriptor " DESC                       \
                               " reserved %s set" MSG,                         \
                               getBitRangeFromMask((MASK), 0).c_str());        \

#define CHECK_RESERVED_BITS(MASK) CHECK_RESERVED_BITS_IMPL(MASK, #MASK, "")
#define CHECK_RESERVED_BITS_MSG(MASK, MSG)                                     \
  CHECK_RESERVED_BITS_IMPL(MASK, #MASK, ", " MSG)
#define CHECK_RESERVED_BITS_DESC(MASK, DESC)                                   \
  CHECK_RESERVED_BITS_IMPL(MASK, DESC, "")
#define CHECK_RESERVED_BITS_DESC_MSG(MASK, DESC, MSG)                          \
  CHECK_RESERVED_BITS_IMPL(MASK, DESC, ", " MSG)
  uint32_t GranulatedWorkitemVGPRCount =
      GET_FIELD(COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT);
      (GranulatedWorkitemVGPRCount + 1) *

  KdStream << Indent << ".amdhsa_next_free_vgpr " << NextFreeVGPR << '\n';

  uint32_t GranulatedWavefrontSGPRCount =
      GET_FIELD(COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT);
                          "must be zero on gfx10+");
  uint32_t NextFreeSGPR = (GranulatedWavefrontSGPRCount + 1) *

  KdStream << Indent << ".amdhsa_reserve_vcc " << 0 << '\n';
    KdStream << Indent << ".amdhsa_reserve_flat_scratch " << 0 << '\n';
  bool ReservedXnackMask = STI.hasFeature(AMDGPU::FeatureXNACK);
  assert(!ReservedXnackMask || STI.hasFeature(AMDGPU::FeatureSupportsXNACK));
  KdStream << Indent << ".amdhsa_reserve_xnack_mask " << ReservedXnackMask
  KdStream << Indent << ".amdhsa_next_free_sgpr " << NextFreeSGPR << "\n";

                  COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32);
                  COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64);
                  COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32);
                  COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64);
                    COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_DX10_CLAMP);
                    COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_IEEE_MODE);

    PRINT_DIRECTIVE(".amdhsa_fp16_overflow",
                    COMPUTE_PGM_RSRC1_GFX9_PLUS_FP16_OVFL);
                                 "COMPUTE_PGM_RSRC1", "must be zero pre-gfx9");
                                   COMPUTE_PGM_RSRC1_GFX125_FLAT_SCRATCH_IS_NV);
                              "COMPUTE_PGM_RSRC1");

                    COMPUTE_PGM_RSRC1_GFX10_PLUS_WGP_MODE);
    PRINT_DIRECTIVE(".amdhsa_memory_ordered",
                    COMPUTE_PGM_RSRC1_GFX10_PLUS_MEM_ORDERED);
    PRINT_DIRECTIVE(".amdhsa_forward_progress",
                    COMPUTE_PGM_RSRC1_GFX10_PLUS_FWD_PROGRESS);
                              "COMPUTE_PGM_RSRC1");
                    COMPUTE_PGM_RSRC1_GFX12_PLUS_ENABLE_WG_RR_EN);
                    COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
    PRINT_DIRECTIVE(".amdhsa_system_sgpr_private_segment_wavefront_offset",
                    COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
                  COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X);
                  COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y);
                  COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z);
                  COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO);
                  COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID);

      ".amdhsa_exception_fp_ieee_invalid_op",
      COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION);
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE);
      ".amdhsa_exception_fp_ieee_div_zero",
      COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO);
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW);
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW);
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT);
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO);
    KdStream << Indent << ".amdhsa_accum_offset "
             << (GET_FIELD(COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET) + 1) * 4

    PRINT_DIRECTIVE(".amdhsa_tg_split", COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT);
                                 "COMPUTE_PGM_RSRC3", "must be zero on gfx90a");
                                 "COMPUTE_PGM_RSRC3", "must be zero on gfx90a");

      if (!EnableWavefrontSize32 || !*EnableWavefrontSize32) {
                        COMPUTE_PGM_RSRC3_GFX10_GFX11_SHARED_VGPR_COUNT);
            "SHARED_VGPR_COUNT",
            COMPUTE_PGM_RSRC3_GFX10_GFX11_SHARED_VGPR_COUNT);
                                   "COMPUTE_PGM_RSRC3",
                                   "must be zero on gfx12+");

                                      COMPUTE_PGM_RSRC3_GFX11_INST_PREF_SIZE);
                                      COMPUTE_PGM_RSRC3_GFX11_TRAP_ON_START);
                                      COMPUTE_PGM_RSRC3_GFX11_TRAP_ON_END);
                                      COMPUTE_PGM_RSRC3_GFX12_PLUS_INST_PREF_SIZE);
                                   "COMPUTE_PGM_RSRC3",
                                   "must be zero on gfx10");

                                 "COMPUTE_PGM_RSRC3", "must be zero on gfx10+");
                                      COMPUTE_PGM_RSRC3_GFX12_PLUS_GLG_EN);
                                   "COMPUTE_PGM_RSRC3",
                                   "must be zero on gfx10 or gfx11");

                                      COMPUTE_PGM_RSRC3_GFX125_NAMED_BAR_CNT);
        "ENABLE_DYNAMIC_VGPR", COMPUTE_PGM_RSRC3_GFX125_ENABLE_DYNAMIC_VGPR);
                                      COMPUTE_PGM_RSRC3_GFX125_TCP_SPLIT);
        "ENABLE_DIDT_THROTTLE",
        COMPUTE_PGM_RSRC3_GFX125_ENABLE_DIDT_THROTTLE);
                                   "COMPUTE_PGM_RSRC3",
                                   "must be zero on gfx10+");
                                 "COMPUTE_PGM_RSRC3", "must be zero on gfx10+");
                                      COMPUTE_PGM_RSRC3_GFX11_PLUS_IMAGE_OP);
                                   "COMPUTE_PGM_RSRC3",
                                   "must be zero on gfx10");
  } else if (FourByteBuffer) {
        std::errc::invalid_argument,
        "kernel descriptor COMPUTE_PGM_RSRC3 must be all zero before gfx9");
#undef PRINT_PSEUDO_DIRECTIVE_COMMENT
#undef PRINT_DIRECTIVE

#undef CHECK_RESERVED_BITS_IMPL
#undef CHECK_RESERVED_BITS
#undef CHECK_RESERVED_BITS_MSG
#undef CHECK_RESERVED_BITS_DESC
#undef CHECK_RESERVED_BITS_DESC_MSG

                                        const char *Msg = "") {
      std::errc::invalid_argument, "kernel descriptor reserved %s set%s%s",

                                         unsigned WidthInBytes) {
      std::errc::invalid_argument,
      "kernel descriptor reserved bits in range (%u:%u) set",
      (BaseInBytes + WidthInBytes) * CHAR_BIT - 1, BaseInBytes * CHAR_BIT);

#define PRINT_DIRECTIVE(DIRECTIVE, MASK)                                       \
    KdStream << Indent << DIRECTIVE " "                                        \
             << ((TwoByteBuffer & MASK) >> (MASK##_SHIFT)) << '\n';            \
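// Walk the 64-byte kernel descriptor field by field; Cursor.tell() selects
// the field being decoded and one .amdhsa_* directive is emitted per field.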
  assert(Bytes.size() == 64);

  switch (Cursor.tell()) {
    FourByteBuffer = DE.getU32(Cursor);
    KdStream << Indent << ".amdhsa_group_segment_fixed_size " << FourByteBuffer

    FourByteBuffer = DE.getU32(Cursor);
    KdStream << Indent << ".amdhsa_private_segment_fixed_size "
             << FourByteBuffer << '\n';

    FourByteBuffer = DE.getU32(Cursor);
    KdStream << Indent << ".amdhsa_kernarg_size "
             << FourByteBuffer << '\n';

    ReservedBytes = DE.getBytes(Cursor, 4);
    for (char B : ReservedBytes) {

    ReservedBytes = DE.getBytes(Cursor, 20);
    for (char B : ReservedBytes) {

    FourByteBuffer = DE.getU32(Cursor);

    FourByteBuffer = DE.getU32(Cursor);

    FourByteBuffer = DE.getU32(Cursor);

    TwoByteBuffer = DE.getU16(Cursor);

                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER);
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR);
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR);
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR);
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID);
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT);
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE);

    if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED0)

        (TwoByteBuffer & KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32)) {
          KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
                      KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
                    KERNEL_CODE_PROPERTY_USES_DYNAMIC_STACK);

    if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED1) {

    TwoByteBuffer = DE.getU16(Cursor);
    if (TwoByteBuffer & KERNARG_PRELOAD_SPEC_LENGTH) {
                      KERNARG_PRELOAD_SPEC_LENGTH);
    if (TwoByteBuffer & KERNARG_PRELOAD_SPEC_OFFSET) {
                      KERNARG_PRELOAD_SPEC_OFFSET);

    ReservedBytes = DE.getBytes(Cursor, 4);
    for (char B : ReservedBytes) {

#undef PRINT_DIRECTIVE
  if (Bytes.size() != 64 || KdAddress % 64 != 0)
                          "kernel descriptor must be 64-byte aligned");

  EnableWavefrontSize32 =
      amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);

  KdStream << ".amdhsa_kernel " << KdName << '\n';

  while (C && C.tell() < Bytes.size()) {

  KdStream << ".end_amdhsa_kernel\n";

                               "code object v2 is not supported");

const MCExpr *AMDGPUDisassembler::createConstantSymbolExpr(StringRef Id,
  MCSymbol *Sym = Ctx.getOrCreateSymbol(Id);
  if (!Valid || Res != Val)
    Ctx.reportWarning(SMLoc(), "unsupported redefinition of " + Id);

  const uint64_t TSFlags = MCII->get(MI.getOpcode()).TSFlags;

  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(Result->Name);
    ReferencedAddresses.push_back(static_cast<uint64_t>(Value));

    std::unique_ptr<MCRelocationInfo> &&RelInfo) {