19#include "llvm/IR/IntrinsicsAMDGPU.h"
20#include "llvm/IR/IntrinsicsR600.h"
30#define GET_INSTRINFO_NAMED_OPS
31#define GET_INSTRMAP_INFO
32#include "AMDGPUGenInstrInfo.inc"
/// \returns A bit mask of \p Width bits starting at bit \p Shift.
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1 << Width) - 1) << Shift;
}

/// Packs \p Src into \p Dst at bit \p Shift with bit \p Width. Only the
/// target field of \p Dst is overwritten; all other bits are preserved.
unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  unsigned Mask = getBitMask(Shift, Width);
  return ((Src << Shift) & Mask) | (Dst & ~Mask);
}

/// Unpacks the field of \p Width bits at bit \p Shift from \p Src.
unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}
/// \returns Vmcnt bit shift (lower bits).
unsigned getVmcntBitShiftLo(unsigned VersionMajor) {
  return VersionMajor >= 11 ? 10 : 0;
}

/// \returns Vmcnt bit width (lower bits).
unsigned getVmcntBitWidthLo(unsigned VersionMajor) {
  return VersionMajor >= 11 ? 6 : 4;
}

/// \returns Expcnt bit shift.
unsigned getExpcntBitShift(unsigned VersionMajor) {
  return VersionMajor >= 11 ? 0 : 4;
}

/// \returns Expcnt bit width.
unsigned getExpcntBitWidth(unsigned VersionMajor) { return 3; }

/// \returns Lgkmcnt bit shift.
unsigned getLgkmcntBitShift(unsigned VersionMajor) {
  return VersionMajor >= 11 ? 4 : 8;
}

/// \returns Lgkmcnt bit width.
unsigned getLgkmcntBitWidth(unsigned VersionMajor) {
  return VersionMajor >= 10 ? 6 : 4;
}

/// \returns Vmcnt bit shift (higher bits).
unsigned getVmcntBitShiftHi(unsigned VersionMajor) { return 14; }

/// \returns Vmcnt bit width (higher bits). Only gfx9 and gfx10 split vmcnt
/// across a second field.
unsigned getVmcntBitWidthHi(unsigned VersionMajor) {
  return (VersionMajor == 9 || VersionMajor == 10) ? 2 : 0;
}
/// \returns VmVsrc bit width.
inline unsigned getVmVsrcBitWidth() { return 3; }

/// \returns VmVsrc bit shift.
inline unsigned getVmVsrcBitShift() { return 2; }

/// \returns VaVdst bit width.
inline unsigned getVaVdstBitWidth() { return 4; }

/// \returns VaVdst bit shift.
inline unsigned getVaVdstBitShift() { return 12; }

/// \returns SaSdst bit width.
inline unsigned getSaSdstBitWidth() { return 1; }

/// \returns SaSdst bit shift.
inline unsigned getSaSdstBitShift() { return 0; }
unsigned getCodeObjectVersion(const Module &M) {
  if (auto Ver = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("amdgpu_code_object_version"))) {
    return (unsigned)Ver->getZExtValue() / 100;
  }

  // Default code object version.
  return AMDHSA_COV4;
}
// getMultigridSyncArgImplicitArgPosition, getHostcallImplicitArgPosition,
// getDefaultQueueImplicitArgPosition and
// getCompletionActionImplicitArgPosition all dispatch on the code object
// version the same way (bodies elided in this excerpt):
//
//   switch (CodeObjectVersion) {
//   ...
//   }
#define GET_MIMGBaseOpcodesTable_IMPL
#define GET_MIMGDimInfoTable_IMPL
#define GET_MIMGInfoTable_IMPL
#define GET_MIMGLZMappingTable_IMPL
#define GET_MIMGMIPMappingTable_IMPL
#define GET_MIMGBiasMappingTable_IMPL
#define GET_MIMGOffsetMappingTable_IMPL
#define GET_MIMGG16MappingTable_IMPL
#define GET_MAIInstInfoTable_IMPL
#include "AMDGPUGenSearchableTables.inc"
int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
                  unsigned VDataDwords, unsigned VAddrDwords) {
  const MIMGInfo *Info = getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding,
                                             VDataDwords, VAddrDwords);
  return Info ? Info->Opcode : -1;
}

int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
  // ...
  return NewInfo ? NewInfo->Opcode : -1;
}

unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode,
                           const MIMGDimInfo *Dim, bool IsA16,
                           bool IsG16Supported) {
  unsigned AddrWords = BaseOpcode->NumExtraArgs;
  unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
                            (BaseOpcode->LodOrClampOrMip ? 1 : 0);
  if (IsA16)
    AddrWords += divideCeil(AddrComponents, 2);
  else
    AddrWords += AddrComponents;

  if (BaseOpcode->Gradients) {
    // 16-bit gradients are used when A16 is requested on a target without
    // G16 support, or when the opcode itself is a G16 variant.
    if ((IsA16 && !IsG16Supported) || BaseOpcode->G16)
      AddrWords += alignTo<2>(Dim->NumGradients / 2);
    else
      AddrWords += Dim->NumGradients;
  }
  return AddrWords;
}
#define GET_MTBUFInfoTable_DECL
#define GET_MTBUFInfoTable_IMPL
#define GET_MUBUFInfoTable_DECL
#define GET_MUBUFInfoTable_IMPL
#define GET_SMInfoTable_DECL
#define GET_SMInfoTable_IMPL
#define GET_VOP1InfoTable_DECL
#define GET_VOP1InfoTable_IMPL
#define GET_VOP2InfoTable_DECL
#define GET_VOP2InfoTable_IMPL
#define GET_VOP3InfoTable_DECL
#define GET_VOP3InfoTable_IMPL
#define GET_VOPC64DPPTable_DECL
#define GET_VOPC64DPPTable_IMPL
#define GET_VOPC64DPP8Table_DECL
#define GET_VOPC64DPP8Table_IMPL
#define GET_VOPDComponentTable_DECL
#define GET_VOPDComponentTable_IMPL
#define GET_VOPDPairs_DECL
#define GET_VOPDPairs_IMPL
#define GET_VOPTrue16Table_DECL
#define GET_VOPTrue16Table_IMPL
#define GET_WMMAOpcode2AddrMappingTable_DECL
#define GET_WMMAOpcode2AddrMappingTable_IMPL
#define GET_WMMAOpcode3AddrMappingTable_DECL
#define GET_WMMAOpcode3AddrMappingTable_IMPL
#include "AMDGPUGenSearchableTables.inc"
int getMTBUFBaseOpcode(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc);
  return Info ? Info->BaseOpcode : -1;
}

int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements) {
  const MTBUFInfo *Info =
      getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
  return Info ? Info->Opcode : -1;
}

bool getMTBUFHasVAddr(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc);
  return Info ? Info->has_vaddr : false;
}

bool getMTBUFHasSrsrc(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc);
  return Info ? Info->has_srsrc : false;
}

bool getMTBUFHasSoffset(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc);
  return Info ? Info->has_soffset : false;
}

int getMUBUFBaseOpcode(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
  return Info ? Info->BaseOpcode : -1;
}

int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements) {
  const MUBUFInfo *Info =
      getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
  return Info ? Info->Opcode : -1;
}

bool getMUBUFHasVAddr(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
  return Info ? Info->has_vaddr : false;
}

bool getMUBUFHasSrsrc(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
  return Info ? Info->has_srsrc : false;
}

bool getMUBUFHasSoffset(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
  return Info ? Info->has_soffset : false;
}

bool getMUBUFIsBufferInv(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
  return Info ? Info->IsBufferInv : false;
}

bool getSMEMIsBuffer(unsigned Opc) {
  const SMInfo *Info = getSMEMOpcodeHelper(Opc);
  return Info ? Info->IsBuffer : false;
}

bool getVOP1IsSingle(unsigned Opc) {
  const VOPInfo *Info = getVOP1OpcodeHelper(Opc);
  return Info ? Info->IsSingle : false;
}

bool getVOP2IsSingle(unsigned Opc) {
  const VOPInfo *Info = getVOP2OpcodeHelper(Opc);
  return Info ? Info->IsSingle : false;
}

bool getVOP3IsSingle(unsigned Opc) {
  const VOPInfo *Info = getVOP3OpcodeHelper(Opc);
  return Info ? Info->IsSingle : false;
}

bool isVOPC64DPP(unsigned Opc) {
  return isVOPC64DPPOpcodeHelper(Opc) || isVOPC64DPP8OpcodeHelper(Opc);
}

bool getMAIIsDGEMM(unsigned Opc) {
  const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
  return Info ? Info->is_dgemm : false;
}

bool getMAIIsGFX940XDL(unsigned Opc) {
  const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
  return Info ? Info->is_gfx940_xdl : false;
}
CanBeVOPD getCanBeVOPD(unsigned Opc) {
  const VOPDComponentInfo *Info = getVOPDComponentHelper(Opc);
  if (Info)
    return {Info->CanBeVOPDX, true};
  return {false, false};
}

bool isMAC(unsigned Opc) {
  return Opc == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
         Opc == AMDGPU::V_MAC_F32_e64_gfx10 ||
         Opc == AMDGPU::V_MAC_F32_e64_vi ||
         Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx6_gfx7 ||
         Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx10 ||
         Opc == AMDGPU::V_MAC_F16_e64_vi ||
         Opc == AMDGPU::V_FMAC_F64_e64_gfx90a ||
         Opc == AMDGPU::V_FMAC_F32_e64_gfx10 ||
         Opc == AMDGPU::V_FMAC_F32_e64_gfx11 ||
         Opc == AMDGPU::V_FMAC_F32_e64_vi ||
         Opc == AMDGPU::V_FMAC_LEGACY_F32_e64_gfx10 ||
         Opc == AMDGPU::V_FMAC_DX9_ZERO_F32_e64_gfx11 ||
         Opc == AMDGPU::V_FMAC_F16_e64_gfx10 ||
         Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx11 ||
         Opc == AMDGPU::V_DOT2C_F32_F16_e64_vi ||
         Opc == AMDGPU::V_DOT2C_I32_I16_e64_vi ||
         Opc == AMDGPU::V_DOT4C_I32_I8_e64_vi ||
         Opc == AMDGPU::V_DOT8C_I32_I4_e64_vi;
}

bool isPermlane16(unsigned Opc) {
  return Opc == AMDGPU::V_PERMLANE16_B32_gfx10 ||
         Opc == AMDGPU::V_PERMLANEX16_B32_gfx10 ||
         Opc == AMDGPU::V_PERMLANE16_B32_e64_gfx11 ||
         Opc == AMDGPU::V_PERMLANEX16_B32_e64_gfx11;
}

bool isGenericAtomic(unsigned Opc) {
  return Opc == AMDGPU::G_AMDGPU_ATOMIC_FMIN ||
         Opc == AMDGPU::G_AMDGPU_ATOMIC_FMAX ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SWAP ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_ADD ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMIN ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMIN ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMAX ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMAX ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_AND ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_OR ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_XOR ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_INC ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_DEC ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMIN ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMAX ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_CMPSWAP ||
         Opc == AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG;
}

bool isTrue16Inst(unsigned Opc) {
  const VOPTrue16Info *Info = getTrue16OpcodeHelper(Opc);
  return Info ? Info->IsTrue16 : false;
}

unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc) {
  const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom2AddrOpcode(Opc);
  return Info ? Info->Opcode3Addr : ~0u;
}

unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc) {
  const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom3AddrOpcode(Opc);
  return Info ? Info->Opcode2Addr : ~0u;
}

int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
}

int getVOPDFull(unsigned OpX, unsigned OpY) {
  const VOPDInfo *Info = getVOPDInfoFromComponentOpcodes(OpX, OpY);
  return Info ? Info->Opcode : -1;
}

std::pair<unsigned, unsigned> getVOPDComponents(unsigned VOPDOpcode) {
  const VOPDInfo *Info = getVOPDOpcodeHelper(VOPDOpcode);
  assert(Info);
  auto OpX = getVOPDBaseFromComponent(Info->OpX);
  auto OpY = getVOPDBaseFromComponent(Info->OpY);
  assert(OpX && OpY);
  return {OpX->BaseVOP, OpY->BaseVOP};
}
// VOPD::ComponentInfo construction (fragment): record whether src2 is tied
// to the accumulator and locate a mandatory literal among the parsed source
// operands.
  HasSrc2Acc = TiedIdx != -1;
  // ...
  for (CompOprIdx = Component::SRC1; CompOprIdx < OperandsNum; ++CompOprIdx) {
    // ...
      MandatoryLiteralIdx = CompOprIdx;
  }

std::optional<unsigned> InstInfo::getInvalidCompOperandIndex(
    std::function<unsigned(unsigned, unsigned)> GetRegIdx) const {
  // ...
  // The two components of a VOPD instruction may not read from the same
  // VGPR bank; compare the bank bits of the X and Y operands.
  if (OpXRegs[CompOprIdx] && OpYRegs[CompOprIdx] &&
      ((OpXRegs[CompOprIdx] & BanksMasks) ==
       (OpYRegs[CompOprIdx] & BanksMasks)))
    return CompOprIdx;
  // ...
}

// Return an array of VGPR registers [DST, SRC0, SRC1, SRC2] used by the
// specified component; GetRegIdx(Component, MCOperandIdx) must return the
// VGPR index for an MC operand, or 0 if it is not a VGPR.
InstInfo::RegIndices InstInfo::getRegIndices(
    unsigned CompIdx,
    std::function<unsigned(unsigned, unsigned)> GetRegIdx) const {
  assert(CompIdx < COMPONENTS_NUM);

  const auto &Comp = CompInfo[CompIdx];
  InstInfo::RegIndices RegIndices;

  RegIndices[DST] = GetRegIdx(CompIdx, Comp.getIndexOfDstInMCOperands());

  for (unsigned CompOprIdx : {SRC0, SRC1, SRC2}) {
    unsigned CompSrcIdx = CompOprIdx - DST_NUM;
    RegIndices[CompOprIdx] =
        Comp.hasRegSrcOperand(CompSrcIdx)
            ? GetRegIdx(CompIdx, Comp.getIndexOfSrcInMCOperands(CompSrcIdx))
            : 0;
  }
  return RegIndices;
}

VOPD::InstInfo getVOPDInstInfo(unsigned VOPDOpcode,
                               const MCInstrInfo *InstrInfo) {
  auto [OpX, OpY] = getVOPDComponents(VOPDOpcode);
  const auto &OpXDesc = InstrInfo->get(OpX);
  const auto &OpYDesc = InstrInfo->get(OpY);
  // ...
}
676 std::optional<bool> XnackRequested;
677 std::optional<bool> SramEccRequested;
679 for (
const std::string &Feature : Features.
getFeatures()) {
680 if (Feature ==
"+xnack")
681 XnackRequested =
true;
682 else if (Feature ==
"-xnack")
683 XnackRequested =
false;
684 else if (Feature ==
"+sramecc")
685 SramEccRequested =
true;
686 else if (Feature ==
"-sramecc")
687 SramEccRequested =
false;
693 if (XnackRequested) {
694 if (XnackSupported) {
700 if (*XnackRequested) {
701 errs() <<
"warning: xnack 'On' was requested for a processor that does "
704 errs() <<
"warning: xnack 'Off' was requested for a processor that "
705 "does not support it!\n";
710 if (SramEccRequested) {
711 if (SramEccSupported) {
718 if (*SramEccRequested) {
719 errs() <<
"warning: sramecc 'On' was requested for a processor that "
720 "does not support it!\n";
722 errs() <<
"warning: sramecc 'Off' was requested for a processor that "
723 "does not support it!\n";
void AMDGPUTargetID::setTargetIDFromTargetIDStream(StringRef TargetID) {
  SmallVector<StringRef, 3> TargetIDSplit;
  TargetID.split(TargetIDSplit, ':');

  for (const auto &FeatureString : TargetIDSplit) {
    if (FeatureString.startswith("xnack"))
      XnackSetting = getTargetIDSettingFromFeatureString(FeatureString);
    if (FeatureString.startswith("sramecc"))
      SramEccSetting = getTargetIDSettingFromFeatureString(FeatureString);
  }
}
std::string AMDGPUTargetID::toString() const {
  std::string StringRep;
  raw_string_ostream StreamRep(StringRep);

  auto TargetTriple = STI.getTargetTriple();
  auto Version = getIsaVersion(STI.getCPU());

  StreamRep << TargetTriple.getArchName() << '-'
            << TargetTriple.getVendorName() << '-'
            << TargetTriple.getOSName() << '-'
            << TargetTriple.getEnvironmentName() << '-';

  std::string Processor;
  // Targets up to GFX9 used alias processor names (e.g. 'fiji' for
  // 'gfx803'); spell the canonical gfxNNN name for those.
  if (Version.Major >= 9)
    Processor = STI.getCPU().str();
  else
    Processor = (Twine("gfx") + Twine(Version.Major) + Twine(Version.Minor) +
                 Twine(Version.Stepping))
                    .str();

  std::string Features;
  if (STI.getTargetTriple().getOS() == Triple::AMDHSA) {
    switch (CodeObjectVersion) {
    case AMDHSA_COV2:
      // Code object V2 used "+feature" style names.
      if (isXnackOnOrAny())
        Features += "+xnack";
      if (isSramEccOnOrAny())
        Features += "+sram-ecc";
      break;
    case AMDHSA_COV3:
    case AMDHSA_COV4:
    case AMDHSA_COV5:
      // Code object V3 and later use target-ID ":feature±" suffixes.
      if (getSramEccSetting() == TargetIDSetting::Off)
        Features += ":sramecc-";
      else if (getSramEccSetting() == TargetIDSetting::On)
        Features += ":sramecc+";
      if (getXnackSetting() == TargetIDSetting::Off)
        Features += ":xnack-";
      else if (getXnackSetting() == TargetIDSetting::On)
        Features += ":xnack+";
      break;
    default:
      break;
    }
  }

  StreamRep << Processor << Features;

  StreamRep.flush();
  return StringRep;
}
unsigned getLocalMemorySize(const MCSubtargetInfo *STI) {
  unsigned BytesPerCU = 0;
  // ...
}

unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
                               unsigned FlatWorkGroupSize) {
  assert(FlatWorkGroupSize != 0);
  // ...
  unsigned MaxBarriers = 16;
  // ...
  return std::min(MaxWaves / N, MaxBarriers);
}

unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI,
                                   unsigned FlatWorkGroupSize) {
  return divideCeil(getWavesPerWorkGroup(STI, FlatWorkGroupSize),
                    getEUsPerCU(STI));
}

unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
                              unsigned FlatWorkGroupSize) {
  return divideCeil(FlatWorkGroupSize, getWavefrontSize(STI));
}
908 if (Version.Major >= 10)
910 if (Version.Major >= 8)
921 if (Version.Major >= 8)
931 if (Version.Major >= 10)
933 if (Version.Major >= 8)
942 if (Version.Major >= 10)
961 if (Version.Major >= 10)
962 return Addressable ? AddressableNumSGPRs : 108;
963 if (Version.Major >= 8 && !Addressable)
964 AddressableNumSGPRs = 112;
969 return std::min(MaxNumSGPRs, AddressableNumSGPRs);
unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed, bool XNACKUsed) {
  unsigned ExtraSGPRs = 0;
  if (VCCUsed)
    ExtraSGPRs = 2;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  // On GFX10+, FLAT_SCRATCH and XNACK no longer live in SGPRs.
  if (Version.Major >= 10)
    return ExtraSGPRs;

  if (Version.Major < 8) {
    if (FlatScrUsed)
      ExtraSGPRs = 4;
  } else {
    if (XNACKUsed)
      ExtraSGPRs = 4;
    if (FlatScrUsed ||
        STI->getFeatureBits().test(AMDGPU::FeatureArchitectedFlatScratch))
      ExtraSGPRs = 6;
  }

  return ExtraSGPRs;
}
1010 std::optional<bool> EnableWavefrontSize32) {
1014 bool IsWave32 = EnableWavefrontSize32 ?
1015 *EnableWavefrontSize32 :
1019 return IsWave32 ? 24 : 12;
1022 return IsWave32 ? 16 : 8;
1024 return IsWave32 ? 8 : 4;
1028 std::optional<bool> EnableWavefrontSize32) {
1032 bool IsWave32 = EnableWavefrontSize32 ?
1033 *EnableWavefrontSize32 :
1036 return IsWave32 ? 8 : 4;
1046 return IsWave32 ? 1536 : 768;
1047 return IsWave32 ? 1024 : 512;
1057 unsigned NumVGPRs) {
1060 if (NumVGPRs < Granule)
1062 unsigned RoundedRegs =
alignTo(NumVGPRs, Granule);
1063 return std::min(std::max(
getTotalNumVGPRs(STI) / RoundedRegs, 1u), MaxWaves);
1070 if (WavesPerEU >= MaxWavesPerEU)
1076 unsigned MaxNumVGPRs =
alignDown(TotNumVGPRs / WavesPerEU, Granule);
1078 if (MaxNumVGPRs ==
alignDown(TotNumVGPRs / MaxWavesPerEU, Granule))
1082 if (WavesPerEU < MinWavesPerEU)
1085 unsigned MaxNumVGPRsNext =
alignDown(TotNumVGPRs / (WavesPerEU + 1), Granule);
1086 unsigned MinNumVGPRs = 1 + std::min(MaxNumVGPRs - Granule, MaxNumVGPRsNext);
1087 return std::min(MinNumVGPRs, AddrsableNumVGPRs);
1096 return std::min(MaxNumVGPRs, AddressableNumVGPRs);
1100 std::optional<bool> EnableWavefrontSize32) {
1101 NumVGPRs =
alignTo(std::max(1u, NumVGPRs),
1113 memset(&Header, 0,
sizeof(Header));
1115 Header.amd_kernel_code_version_major = 1;
1116 Header.amd_kernel_code_version_minor = 2;
1117 Header.amd_machine_kind = 1;
1118 Header.amd_machine_version_major = Version.Major;
1119 Header.amd_machine_version_minor = Version.Minor;
1120 Header.amd_machine_version_stepping = Version.Stepping;
1121 Header.kernel_code_entry_byte_offset =
sizeof(Header);
1122 Header.wavefront_size = 6;
1126 Header.call_convention = -1;
1130 Header.kernarg_segment_alignment = 4;
1131 Header.group_segment_alignment = 4;
1132 Header.private_segment_alignment = 4;
1134 if (Version.Major >= 10) {
1136 Header.wavefront_size = 5;
1139 Header.compute_pgm_resource_registers |=
1150 memset(&KD, 0,
sizeof(KD));
1153 amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64,
1156 amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, 1);
1158 amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE, 1);
1160 amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, 1);
1161 if (Version.Major >= 10) {
1163 amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
1166 amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE,
1169 amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED, 1);
1173 amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT,
1197std::pair<unsigned, unsigned>
1199 std::pair<unsigned, unsigned>
Default,
1200 bool OnlyFirstRequired) {
1202 if (!
A.isStringAttribute())
1206 std::pair<unsigned, unsigned> Ints =
Default;
1207 std::pair<StringRef, StringRef> Strs =
A.getValueAsString().split(
',');
1208 if (Strs.first.trim().getAsInteger(0, Ints.first)) {
1209 Ctx.
emitError(
"can't parse first integer attribute " +
Name);
1212 if (Strs.second.trim().getAsInteger(0, Ints.second)) {
1213 if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
1214 Ctx.
emitError(
"can't parse second integer attribute " +
Name);
1223 return (1 << (getVmcntBitWidthLo(Version.Major) +
1224 getVmcntBitWidthHi(Version.Major))) -
1229 return (1 << getExpcntBitWidth(Version.Major)) - 1;
1233 return (1 << getLgkmcntBitWidth(Version.Major)) - 1;
1237 unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(Version.Major),
1238 getVmcntBitWidthLo(Version.Major));
1239 unsigned Expcnt = getBitMask(getExpcntBitShift(Version.Major),
1240 getExpcntBitWidth(Version.Major));
1241 unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(Version.Major),
1242 getLgkmcntBitWidth(Version.Major));
1243 unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(Version.Major),
1244 getVmcntBitWidthHi(Version.Major));
1245 return VmcntLo | Expcnt | Lgkmcnt | VmcntHi;
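// Worked example (hand-computed, not from the original source) for gfx9,
// where vmcnt occupies bits [3:0] and [15:14], expcnt bits [6:4], and
// lgkmcnt bits [11:8]:
//
//   IsaVersion V{9, 0, 0};
//   getVmcntBitMask(V);   // 0x3F   (6 usable vmcnt bits)
//   getExpcntBitMask(V);  // 0x7
//   getLgkmcntBitMask(V); // 0xF
//   getWaitcntBitMask(V); // 0xCF7F (union of all three fields)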
unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  unsigned VmcntLo = unpackBits(Waitcnt, getVmcntBitShiftLo(Version.Major),
                                getVmcntBitWidthLo(Version.Major));
  unsigned VmcntHi = unpackBits(Waitcnt, getVmcntBitShiftHi(Version.Major),
                                getVmcntBitWidthHi(Version.Major));
  return VmcntLo | VmcntHi << getVmcntBitWidthLo(Version.Major);
}

unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getExpcntBitShift(Version.Major),
                    getExpcntBitWidth(Version.Major));
}

unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getLgkmcntBitShift(Version.Major),
                    getLgkmcntBitWidth(Version.Major));
}

void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt,
                   unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
  Vmcnt = decodeVmcnt(Version, Waitcnt);
  Expcnt = decodeExpcnt(Version, Waitcnt);
  Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
}

unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
                     unsigned Vmcnt) {
  Waitcnt = packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(Version.Major),
                     getVmcntBitWidthLo(Version.Major));
  return packBits(Vmcnt >> getVmcntBitWidthLo(Version.Major), Waitcnt,
                  getVmcntBitShiftHi(Version.Major),
                  getVmcntBitWidthHi(Version.Major));
}

unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
                      unsigned Expcnt) {
  return packBits(Expcnt, Waitcnt, getExpcntBitShift(Version.Major),
                  getExpcntBitWidth(Version.Major));
}

unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
                       unsigned Lgkmcnt) {
  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(Version.Major),
                  getLgkmcntBitWidth(Version.Major));
}
unsigned encodeWaitcnt(const IsaVersion &Version, unsigned Vmcnt,
                       unsigned Expcnt, unsigned Lgkmcnt) {
  unsigned Waitcnt = getWaitcntBitMask(Version);
  Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
  Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
  Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
  return Waitcnt;
}
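// Round-trip sketch (illustrative): pack the three counters into an
// s_waitcnt immediate and read them back.
//
//   IsaVersion V = getIsaVersion("gfx900");
//   unsigned W = encodeWaitcnt(V, /*Vmcnt=*/1, /*Expcnt=*/2, /*Lgkmcnt=*/3);
//   decodeVmcnt(V, W);   // 1
//   decodeExpcnt(V, W);  // 2
//   decodeLgkmcnt(V, W); // 3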
template <class T>
static bool isValidOpr(int Idx, const CustomOperand<T> OpInfo[],
                       int OpInfoSize, T Context) {
  return 0 <= Idx && Idx < OpInfoSize && !OpInfo[Idx].Name.empty() &&
         (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context));
}

template <class T>
static int getOprIdx(std::function<bool(const CustomOperand<T> &)> Test,
                     const CustomOperand<T> OpInfo[], int OpInfoSize,
                     T Context) {
  int InvalidIdx = OPR_ID_UNKNOWN;
  for (int Idx = 0; Idx < OpInfoSize; ++Idx) {
    if (Test(OpInfo[Idx])) {
      if (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context))
        return Idx;
      InvalidIdx = OPR_ID_UNSUPPORTED;
    }
  }
  return InvalidIdx;
}

template <class T>
static int getOprIdx(const StringRef Name, const CustomOperand<T> OpInfo[],
                     int OpInfoSize, T Context) {
  auto Test = [=](const CustomOperand<T> &Op) { return Op.Name == Name; };
  return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context);
}

template <class T>
static int getOprIdx(int Id, const CustomOperand<T> OpInfo[], int OpInfoSize,
                     T Context, bool QuickCheck = true) {
  auto Test = [=](const CustomOperand<T> &Op) { return Op.Encoding == Id; };
  // Check the Id-th entry first as a quick path before the linear scan.
  if (QuickCheck && isValidOpr<T>(Id, OpInfo, OpInfoSize, Context) &&
      Id == OpInfo[Id].Encoding)
    return Id;

  return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context);
}
1380 const auto &
Op = Opr[
Idx];
1381 if (
Op.isSupported(STI))
1382 Enc |=
Op.encode(
Op.Default);
1388 int Size,
unsigned Code,
1389 bool &HasNonDefaultVal,
1391 unsigned UsedOprMask = 0;
1392 HasNonDefaultVal =
false;
1394 const auto &
Op = Opr[
Idx];
1395 if (!
Op.isSupported(STI))
1397 UsedOprMask |=
Op.getMask();
1398 unsigned Val =
Op.decode(Code);
1399 if (!
Op.isValid(Val))
1401 HasNonDefaultVal |= (Val !=
Op.Default);
1403 return (Code & ~UsedOprMask) == 0;
1408 unsigned &Val,
bool &IsDefault,
1411 const auto &
Op = Opr[
Idx++];
1412 if (
Op.isSupported(STI)) {
1414 Val =
Op.decode(Code);
1415 IsDefault = (Val ==
Op.Default);
1425 if (InputVal < 0 || InputVal >
Op.Max)
1427 return Op.encode(InputVal);
1432 unsigned &UsedOprMask,
1436 const auto &
Op = Opr[
Idx];
1438 if (!
Op.isSupported(STI)) {
1442 auto OprMask =
Op.getMask();
1443 if (OprMask & UsedOprMask)
1445 UsedOprMask |= OprMask;
1468 HasNonDefaultVal, STI);
unsigned decodeFieldVmVsrc(unsigned Encoded) {
  return unpackBits(Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());
}

unsigned decodeFieldVaVdst(unsigned Encoded) {
  return unpackBits(Encoded, getVaVdstBitShift(), getVaVdstBitWidth());
}

unsigned decodeFieldSaSdst(unsigned Encoded) {
  return unpackBits(Encoded, getSaSdstBitShift(), getSaSdstBitWidth());
}

unsigned encodeFieldVmVsrc(unsigned Encoded, unsigned VmVsrc) {
  return packBits(VmVsrc, Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());
}

unsigned encodeFieldVaVdst(unsigned Encoded, unsigned VaVdst) {
  return packBits(VaVdst, Encoded, getVaVdstBitShift(), getVaVdstBitWidth());
}

unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst) {
  return packBits(SaSdst, Encoded, getSaSdstBitShift(), getSaSdstBitWidth());
}
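// Field-level sketch (illustrative): start from the all-defaults depctr
// encoding for a subtarget STI, override one field, and read it back.
//
//   unsigned Enc = getDefaultDepCtrEncoding(STI);
//   Enc = encodeFieldVaVdst(Enc, 0); // wait on all outstanding VALU writes
//   decodeFieldVaVdst(Enc);          // 0
//   decodeFieldVmVsrc(Enc);          // still the default value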
1533 return 0 <=
Id && isUInt<ID_WIDTH_>(
Id);
1541 return 0 <= (
Width - 1) && isUInt<WIDTH_M1_WIDTH_>(
Width - 1);
1587 if (Val.Tgt <= Id && Id <= Val.Tgt + Val.MaxIndex) {
1588 Index = (Val.MaxIndex == 0) ? -1 : (Id - Val.Tgt);
1599 if (Val.MaxIndex == 0 &&
Name == Val.Name)
1602 if (Val.MaxIndex > 0 &&
Name.startswith(Val.Name)) {
1610 if (Suffix.
size() > 1 && Suffix[0] ==
'0')
1613 return Val.Tgt + Id;
namespace MTBUFFormat {

// Symbolic dfmt/nfmt name lookup (fragment): select the per-generation
// symbol table, then scan it for the requested name.
  // ...
    if (Name == lookupTable[Id])
      return Id;
  // ...

} // namespace MTBUFFormat

StringRef getMsgName(int64_t MsgId, const MCSubtargetInfo &STI) {
  int Idx = getOprIdx<const MCSubtargetInfo &>(MsgId, Msg, MSG_SIZE, STI);
  return (Idx < 0) ? "" : Msg[Idx].Name;
}

int64_t getMsgOpId(int64_t MsgId, const StringRef Name) {
  // ...
  for (int i = F; i < L; ++i) {
    if (Name == S[i])
      return i;
  }
  // ...
}

bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI,
                  bool Strict) {
  // ...
  return 0 <= OpId && isUInt<OP_WIDTH_>(OpId);
}
unsigned getInitialPSInputAddr(const Function &F) {
  return F.getFnAttributeAsParsedInteger("InitialPSInputAddr", 0);
}

bool getHasColorExport(const Function &F) {
  // As a safe default always respond as if PS has color exports.
  return F.getFnAttributeAsParsedInteger(
             "amdgpu-color-export",
             F.getCallingConv() == CallingConv::AMDGPU_PS ? 1 : 0) != 0;
}

bool getHasDepthExport(const Function &F) {
  return F.getFnAttributeAsParsedInteger("amdgpu-depth-export", 0) != 0;
}

bool hasSRAMECC(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureSRAMECC);
}

bool hasPackedD16(const MCSubtargetInfo &STI) {
  return !STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem) && !isCI(STI) &&
         !isSI(STI);
}

unsigned getNSAMaxSize(const MCSubtargetInfo &STI) {
  auto Version = getIsaVersion(STI.getCPU());
  if (Version.Major == 10)
    return Version.Minor >= 3 ? 13 : 5;
  if (Version.Major == 11)
    return 5;
  return 0;
}
bool isSI(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureSouthernIslands);
}

bool isCI(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureSeaIslands);
}

bool isVI(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);
}

bool isGCN3Encoding(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureGCN3Encoding);
}

bool isGFX10_AEncoding(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureGFX10_AEncoding);
}

bool isGFX10_BEncoding(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding);
}

bool hasGFX10_3Insts(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureGFX10_3Insts);
}

bool isGFX90A(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureGFX90AInsts);
}

bool isGFX940(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureGFX940Insts);
}

bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureArchitectedFlatScratch);
}

bool hasMAIInsts(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureMAIInsts);
}

unsigned hasKernargPreload(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureKernargPreload);
}

int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR,
                         int32_t ArgNumVGPR) {
  if (has90AInsts && ArgNumAGPR)
    return alignTo(ArgNumVGPR, 4) + ArgNumAGPR;
  return std::max(ArgNumVGPR, ArgNumAGPR);
}
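// Hand-computed example: on gfx90a-class targets AGPRs and VGPRs share one
// register file, so the VGPR count is rounded up to a multiple of 4 before
// the AGPRs are appended; elsewhere the two allocations simply overlap.
//
//   getTotalNumVGPRs(true,  /*ArgNumAGPR=*/8, /*ArgNumVGPR=*/10); // 12+8 = 20
//   getTotalNumVGPRs(false, /*ArgNumAGPR=*/8, /*ArgNumVGPR=*/10); // max = 10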
bool isSGPR(unsigned Reg, const MCRegisterInfo *TRI) {
  const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
  const unsigned FirstSubReg = TRI->getSubReg(Reg, AMDGPU::sub0);
  return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
         Reg == AMDGPU::SCC;
}
#define MAP_REG2REG \
  using namespace AMDGPU; \
  switch (Reg) { \
  default: return Reg; \
  CASE_CI_VI(FLAT_SCR) \
  CASE_CI_VI(FLAT_SCR_LO) \
  CASE_CI_VI(FLAT_SCR_HI) \
  CASE_VI_GFX9PLUS(TTMP0) \
  CASE_VI_GFX9PLUS(TTMP1) \
  CASE_VI_GFX9PLUS(TTMP2) \
  CASE_VI_GFX9PLUS(TTMP3) \
  CASE_VI_GFX9PLUS(TTMP4) \
  CASE_VI_GFX9PLUS(TTMP5) \
  CASE_VI_GFX9PLUS(TTMP6) \
  CASE_VI_GFX9PLUS(TTMP7) \
  CASE_VI_GFX9PLUS(TTMP8) \
  CASE_VI_GFX9PLUS(TTMP9) \
  CASE_VI_GFX9PLUS(TTMP10) \
  CASE_VI_GFX9PLUS(TTMP11) \
  CASE_VI_GFX9PLUS(TTMP12) \
  CASE_VI_GFX9PLUS(TTMP13) \
  CASE_VI_GFX9PLUS(TTMP14) \
  CASE_VI_GFX9PLUS(TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1) \
  CASE_VI_GFX9PLUS(TTMP2_TTMP3) \
  CASE_VI_GFX9PLUS(TTMP4_TTMP5) \
  CASE_VI_GFX9PLUS(TTMP6_TTMP7) \
  CASE_VI_GFX9PLUS(TTMP8_TTMP9) \
  CASE_VI_GFX9PLUS(TTMP10_TTMP11) \
  CASE_VI_GFX9PLUS(TTMP12_TTMP13) \
  CASE_VI_GFX9PLUS(TTMP14_TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \
  CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_GFXPRE11_GFX11PLUS(M0) \
  CASE_GFXPRE11_GFX11PLUS(SGPR_NULL) \
  CASE_GFXPRE11_GFX11PLUS_TO(SGPR_NULL64, SGPR_NULL) \
  }

#define CASE_CI_VI(node) \
  assert(!isSI(STI)); \
  case node: return isCI(STI) ? node##_ci : node##_vi;

#define CASE_VI_GFX9PLUS(node) \
  case node: return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi;

#define CASE_GFXPRE11_GFX11PLUS(node) \
  case node: return isGFX11Plus(STI) ? node##_gfx11plus : node##_gfxpre11;

#define CASE_GFXPRE11_GFX11PLUS_TO(node, result) \
  case node: return isGFX11Plus(STI) ? result##_gfx11plus : result##_gfxpre11;

unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) { MAP_REG2REG }

#undef CASE_CI_VI
#undef CASE_VI_GFX9PLUS
#undef CASE_GFXPRE11_GFX11PLUS
#undef CASE_GFXPRE11_GFX11PLUS_TO

#define CASE_CI_VI(node) case node##_ci: case node##_vi: return node;
#define CASE_VI_GFX9PLUS(node) case node##_vi: case node##_gfx9plus: return node;
#define CASE_GFXPRE11_GFX11PLUS(node) case node##_gfx11plus: case node##_gfxpre11: return node;
#define CASE_GFXPRE11_GFX11PLUS_TO(node, result)

unsigned mc2PseudoReg(unsigned Reg) { MAP_REG2REG }
bool isInlineValue(unsigned Reg) {
  switch (Reg) {
  case AMDGPU::SRC_SHARED_BASE_LO:
  case AMDGPU::SRC_SHARED_BASE:
  case AMDGPU::SRC_SHARED_LIMIT_LO:
  case AMDGPU::SRC_SHARED_LIMIT:
  case AMDGPU::SRC_PRIVATE_BASE_LO:
  case AMDGPU::SRC_PRIVATE_BASE:
  case AMDGPU::SRC_PRIVATE_LIMIT_LO:
  case AMDGPU::SRC_PRIVATE_LIMIT:
  case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
    return true;
  case AMDGPU::SRC_VCCZ:
  case AMDGPU::SRC_EXECZ:
  case AMDGPU::SRC_SCC:
    return true;
  case AMDGPU::SGPR_NULL:
    return true;
  default:
    return false;
  }
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9PLUS
#undef CASE_GFXPRE11_GFX11PLUS
#undef CASE_GFXPRE11_GFX11PLUS_TO
#undef MAP_REG2REG

bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  unsigned OpType = Desc.operands()[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
         OpType <= AMDGPU::OPERAND_SRC_LAST;
}

bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  unsigned OpType = Desc.operands()[OpNo].OperandType;
  return OpType == AMDGPU::OPERAND_KIMM32 ||
         OpType == AMDGPU::OPERAND_KIMM16;
}

bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  unsigned OpType = Desc.operands()[OpNo].OperandType;
  switch (OpType) {
  // All FP-typed source operand kinds (OPERAND_REG_IMM_FP*,
  // OPERAND_REG_INLINE_C_FP*, OPERAND_REG_INLINE_AC_FP*, and their
  // V2/deferred variants).
  // ...
    return true;
  default:
    return false;
  }
}

bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  unsigned OpType = Desc.operands()[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
         OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
}
unsigned getRegBitWidth(unsigned RCID) {
  switch (RCID) {
  case AMDGPU::VGPR_LO16RegClassID:
  case AMDGPU::VGPR_HI16RegClassID:
  case AMDGPU::SGPR_LO16RegClassID:
  case AMDGPU::AGPR_LO16RegClassID:
    return 16;
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VRegOrLds_32RegClassID:
  case AMDGPU::AGPR_32RegClassID:
  case AMDGPU::VS_32RegClassID:
  case AMDGPU::AV_32RegClassID:
  case AMDGPU::SReg_32RegClassID:
  case AMDGPU::SReg_32_XM0RegClassID:
  case AMDGPU::SRegOrLds_32RegClassID:
    return 32;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::VS_64RegClassID:
  case AMDGPU::SReg_64RegClassID:
  case AMDGPU::VReg_64RegClassID:
  case AMDGPU::AReg_64RegClassID:
  case AMDGPU::SReg_64_XEXECRegClassID:
  case AMDGPU::VReg_64_Align2RegClassID:
  case AMDGPU::AReg_64_Align2RegClassID:
  case AMDGPU::AV_64RegClassID:
  case AMDGPU::AV_64_Align2RegClassID:
    return 64;
  case AMDGPU::SGPR_96RegClassID:
  case AMDGPU::SReg_96RegClassID:
  case AMDGPU::VReg_96RegClassID:
  case AMDGPU::AReg_96RegClassID:
  case AMDGPU::VReg_96_Align2RegClassID:
  case AMDGPU::AReg_96_Align2RegClassID:
  case AMDGPU::AV_96RegClassID:
  case AMDGPU::AV_96_Align2RegClassID:
    return 96;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::SReg_128RegClassID:
  case AMDGPU::VReg_128RegClassID:
  case AMDGPU::AReg_128RegClassID:
  case AMDGPU::VReg_128_Align2RegClassID:
  case AMDGPU::AReg_128_Align2RegClassID:
  case AMDGPU::AV_128RegClassID:
  case AMDGPU::AV_128_Align2RegClassID:
    return 128;
  case AMDGPU::SGPR_160RegClassID:
  case AMDGPU::SReg_160RegClassID:
  case AMDGPU::VReg_160RegClassID:
  case AMDGPU::AReg_160RegClassID:
  case AMDGPU::VReg_160_Align2RegClassID:
  case AMDGPU::AReg_160_Align2RegClassID:
  case AMDGPU::AV_160RegClassID:
  case AMDGPU::AV_160_Align2RegClassID:
    return 160;
  case AMDGPU::SGPR_192RegClassID:
  case AMDGPU::SReg_192RegClassID:
  case AMDGPU::VReg_192RegClassID:
  case AMDGPU::AReg_192RegClassID:
  case AMDGPU::VReg_192_Align2RegClassID:
  case AMDGPU::AReg_192_Align2RegClassID:
  case AMDGPU::AV_192RegClassID:
  case AMDGPU::AV_192_Align2RegClassID:
    return 192;
  case AMDGPU::SGPR_224RegClassID:
  case AMDGPU::SReg_224RegClassID:
  case AMDGPU::VReg_224RegClassID:
  case AMDGPU::AReg_224RegClassID:
  case AMDGPU::VReg_224_Align2RegClassID:
  case AMDGPU::AReg_224_Align2RegClassID:
  case AMDGPU::AV_224RegClassID:
  case AMDGPU::AV_224_Align2RegClassID:
    return 224;
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::SReg_256RegClassID:
  case AMDGPU::VReg_256RegClassID:
  case AMDGPU::AReg_256RegClassID:
  case AMDGPU::VReg_256_Align2RegClassID:
  case AMDGPU::AReg_256_Align2RegClassID:
  case AMDGPU::AV_256RegClassID:
  case AMDGPU::AV_256_Align2RegClassID:
    return 256;
  case AMDGPU::SGPR_288RegClassID:
  case AMDGPU::SReg_288RegClassID:
  case AMDGPU::VReg_288RegClassID:
  case AMDGPU::AReg_288RegClassID:
  case AMDGPU::VReg_288_Align2RegClassID:
  case AMDGPU::AReg_288_Align2RegClassID:
  case AMDGPU::AV_288RegClassID:
  case AMDGPU::AV_288_Align2RegClassID:
    return 288;
  case AMDGPU::SGPR_320RegClassID:
  case AMDGPU::SReg_320RegClassID:
  case AMDGPU::VReg_320RegClassID:
  case AMDGPU::AReg_320RegClassID:
  case AMDGPU::VReg_320_Align2RegClassID:
  case AMDGPU::AReg_320_Align2RegClassID:
  case AMDGPU::AV_320RegClassID:
  case AMDGPU::AV_320_Align2RegClassID:
    return 320;
  case AMDGPU::SGPR_352RegClassID:
  case AMDGPU::SReg_352RegClassID:
  case AMDGPU::VReg_352RegClassID:
  case AMDGPU::AReg_352RegClassID:
  case AMDGPU::VReg_352_Align2RegClassID:
  case AMDGPU::AReg_352_Align2RegClassID:
  case AMDGPU::AV_352RegClassID:
  case AMDGPU::AV_352_Align2RegClassID:
    return 352;
  case AMDGPU::SGPR_384RegClassID:
  case AMDGPU::SReg_384RegClassID:
  case AMDGPU::VReg_384RegClassID:
  case AMDGPU::AReg_384RegClassID:
  case AMDGPU::VReg_384_Align2RegClassID:
  case AMDGPU::AReg_384_Align2RegClassID:
  case AMDGPU::AV_384RegClassID:
  case AMDGPU::AV_384_Align2RegClassID:
    return 384;
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::SReg_512RegClassID:
  case AMDGPU::VReg_512RegClassID:
  case AMDGPU::AReg_512RegClassID:
  case AMDGPU::VReg_512_Align2RegClassID:
  case AMDGPU::AReg_512_Align2RegClassID:
  case AMDGPU::AV_512RegClassID:
  case AMDGPU::AV_512_Align2RegClassID:
    return 512;
  case AMDGPU::SGPR_1024RegClassID:
  case AMDGPU::SReg_1024RegClassID:
  case AMDGPU::VReg_1024RegClassID:
  case AMDGPU::AReg_1024RegClassID:
  case AMDGPU::VReg_1024_Align2RegClassID:
  case AMDGPU::AReg_1024_Align2RegClassID:
  case AMDGPU::AV_1024RegClassID:
  case AMDGPU::AV_1024_Align2RegClassID:
    return 1024;
  default:
    llvm_unreachable("Unexpected register class");
  }
}
unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
                           unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned RCID = Desc.operands()[OpNo].RegClass;
  return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
}

bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
  if (isInlinableIntLiteral(Literal))
    return true;

  uint64_t Val = static_cast<uint64_t>(Literal);
  return (Val == llvm::bit_cast<uint64_t>(0.0)) ||
         (Val == llvm::bit_cast<uint64_t>(1.0)) ||
         (Val == llvm::bit_cast<uint64_t>(-1.0)) ||
         (Val == llvm::bit_cast<uint64_t>(0.5)) ||
         (Val == llvm::bit_cast<uint64_t>(-0.5)) ||
         (Val == llvm::bit_cast<uint64_t>(2.0)) ||
         (Val == llvm::bit_cast<uint64_t>(-2.0)) ||
         (Val == llvm::bit_cast<uint64_t>(4.0)) ||
         (Val == llvm::bit_cast<uint64_t>(-4.0)) ||
         (Val == 0x3fc45f306dc9c882 && HasInv2Pi); // 1.0 / (2.0 * pi)
}

bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
  if (isInlinableIntLiteral(Literal))
    return true;

  uint32_t Val = static_cast<uint32_t>(Literal);
  return (Val == llvm::bit_cast<uint32_t>(0.0f)) ||
         (Val == llvm::bit_cast<uint32_t>(1.0f)) ||
         (Val == llvm::bit_cast<uint32_t>(-1.0f)) ||
         (Val == llvm::bit_cast<uint32_t>(0.5f)) ||
         (Val == llvm::bit_cast<uint32_t>(-0.5f)) ||
         (Val == llvm::bit_cast<uint32_t>(2.0f)) ||
         (Val == llvm::bit_cast<uint32_t>(-2.0f)) ||
         (Val == llvm::bit_cast<uint32_t>(4.0f)) ||
         (Val == llvm::bit_cast<uint32_t>(-4.0f)) ||
         (Val == 0x3e22f983 && HasInv2Pi); // 1.0 / (2.0 * pi)
}
bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
  if (!HasInv2Pi)
    return false;
  if (isInlinableIntLiteral(Literal))
    return true;

  uint16_t Val = static_cast<uint16_t>(Literal);
  return Val == 0x3C00 || // 1.0
         Val == 0xBC00 || // -1.0
         Val == 0x3800 || // 0.5
         Val == 0xB800 || // -0.5
         Val == 0x4000 || // 2.0
         Val == 0xC000 || // -2.0
         Val == 0x4400 || // 4.0
         Val == 0xC400 || // -4.0
         Val == 0x3118;   // 1/2pi
}

bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  if (isInt<16>(Literal) || isUInt<16>(Literal)) {
    int16_t Trunc = static_cast<int16_t>(Literal);
    return AMDGPU::isInlinableLiteral16(Trunc, HasInv2Pi);
  }
  if (!(Literal & 0xffff))
    return AMDGPU::isInlinableLiteral16(Literal >> 16, HasInv2Pi);

  int16_t Lo16 = static_cast<int16_t>(Literal);
  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
}

bool isInlinableIntLiteralV216(int32_t Literal) {
  int16_t Lo16 = static_cast<int16_t>(Literal);
  if (isInt<16>(Literal) || isUInt<16>(Literal))
    return isInlinableIntLiteral(Lo16);

  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  if (!(Literal & 0xffff))
    return isInlinableIntLiteral(Hi16);
  return Lo16 == Hi16 && isInlinableIntLiteral(Lo16);
}

bool isFoldableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  assert(HasInv2Pi);

  int16_t Lo16 = static_cast<int16_t>(Literal);
  if (isInt<16>(Literal) || isUInt<16>(Literal))
    return true;

  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  if (!(Literal & 0xffff))
    return true;
  return Lo16 == Hi16;
}
2542 return A->hasAttribute(Attribute::InReg) ||
2543 A->hasAttribute(Attribute::ByVal);
bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST,
                                      int64_t EncodedOffset) {
  return hasSMEMByteOffset(ST) ? isUInt<20>(EncodedOffset)
                               : isUInt<8>(EncodedOffset);
}

bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST,
                                    int64_t EncodedOffset, bool IsBuffer) {
  return !IsBuffer && hasSMRDSignedImmOffset(ST) && isInt<21>(EncodedOffset);
}

static bool isDwordAligned(uint64_t ByteOffset) {
  return (ByteOffset & 3) == 0;
}

uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST,
                                uint64_t ByteOffset) {
  if (hasSMEMByteOffset(ST))
    return ByteOffset;
  assert(isDwordAligned(ByteOffset));
  return ByteOffset >> 2;
}

std::optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
                                            int64_t ByteOffset, bool IsBuffer) {
  // The signed version is always a byte offset.
  if (!IsBuffer && hasSMRDSignedImmOffset(ST)) {
    assert(hasSMEMByteOffset(ST));
    return isInt<20>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
                                 : std::nullopt;
  }
  if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST))
    return std::nullopt;

  int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
  return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset)
             ? std::optional<int64_t>(EncodedOffset)
             : std::nullopt;
}

std::optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
                                                     int64_t ByteOffset) {
  if (!isCI(ST) || !isDwordAligned(ByteOffset))
    return std::nullopt;

  int64_t EncodedOffset = ByteOffset >> 2;
  return isUInt<32>(EncodedOffset) ? std::optional<int64_t>(EncodedOffset)
                                   : std::nullopt;
}
namespace {

struct SourceOfDivergence {
  unsigned Intr;
};
const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);

struct AlwaysUniform {
  unsigned Intr;
};
const AlwaysUniform *lookupAlwaysUniform(unsigned Intr);

#define GET_SourcesOfDivergence_IMPL
#define GET_UniformIntrinsics_IMPL
#define GET_Gfx9BufferFormat_IMPL
#define GET_Gfx10BufferFormat_IMPL
#define GET_Gfx11PlusBufferFormat_IMPL
#include "AMDGPUGenSearchableTables.inc"

} // end anonymous namespace

bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
  return lookupSourceOfDivergence(IntrID);
}

bool isIntrinsicAlwaysUniform(unsigned IntrID) {
  return lookupAlwaysUniform(IntrID);
}

const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t BitsPerComp,
                                                  uint8_t NumComponents,
                                                  uint8_t NumFormat,
                                                  const MCSubtargetInfo &STI) {
  return isGFX11Plus(STI)
             ? getGfx11PlusBufferFormatInfo(BitsPerComp, NumComponents,
                                            NumFormat)
             : isGFX10(STI) ? getGfx10BufferFormatInfo(BitsPerComp,
                                                       NumComponents, NumFormat)
                            : getGfx9BufferFormatInfo(BitsPerComp,
                                                      NumComponents, NumFormat);
}

const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t Format,
                                                  const MCSubtargetInfo &STI) {
  return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(Format)
         : isGFX10(STI)   ? getGfx10BufferFormatInfo(Format)
                          : getGfx9BufferFormatInfo(Format);
}
bool hasAny64BitVGPROperands(const MCInstrDesc &OpDesc) {
  for (auto OpName : {OpName::vdst, OpName::src0, OpName::src1,
                      OpName::src2}) {
    int Idx = getNamedOperandIdx(OpDesc.getOpcode(), OpName);
    if (Idx == -1)
      continue;

    if (OpDesc.operands()[Idx].RegClass == AMDGPU::VReg_64RegClassID ||
        OpDesc.operands()[Idx].RegClass == AMDGPU::VReg_64_Align2RegClassID)
      return true;
  }

  return false;
}
2722 OS <<
"Unsupported";