#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"

#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"
static llvm::cl::opt<unsigned> DefaultAMDHSACodeObjectVersion(
    "amdhsa-code-object-version", llvm::cl::Hidden,
    llvm::cl::init(llvm::AMDGPU::AMDHSA_COV6),
    llvm::cl::desc("Set default AMDHSA Code Object Version (module flag "
                   "or asm directive still take priority if present)"));
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1 << Width) - 1) << Shift;
}

unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  unsigned Mask = getBitMask(Shift, Width);
  return ((Src << Shift) & Mask) | (Dst & ~Mask);
}

unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}
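// Worked example (illustrative, not part of the original source): pack the
// value 5 into the 4-bit field at bit 8 of an existing word, then read it
// back. getBitMask(8, 4) == 0xF00, so:
//
//   unsigned Enc = packBits(/*Src=*/5, /*Dst=*/0xABCD, /*Shift=*/8,
//                           /*Width=*/4);   // yields 0xA5CD
//   unsigned Field = unpackBits(Enc, 8, 4); // yields 5
//
// packBits() only disturbs the masked field; every other bit of Dst is kept.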
unsigned getVmcntBitShiftLo(unsigned VersionMajor) {

unsigned getVmcntBitWidthLo(unsigned VersionMajor) {

unsigned getExpcntBitShift(unsigned VersionMajor) {

unsigned getExpcntBitWidth(unsigned VersionMajor) { return 3; }

unsigned getLgkmcntBitShift(unsigned VersionMajor) {

unsigned getLgkmcntBitWidth(unsigned VersionMajor) {

unsigned getVmcntBitShiftHi(unsigned VersionMajor) { return 14; }

unsigned getVmcntBitWidthHi(unsigned VersionMajor) {
  return (VersionMajor == 9 || VersionMajor == 10) ? 2 : 0;
}
unsigned getLoadcntBitWidth(unsigned VersionMajor) {

unsigned getSamplecntBitWidth(unsigned VersionMajor) {

unsigned getBvhcntBitWidth(unsigned VersionMajor) {

unsigned getDscntBitWidth(unsigned VersionMajor) {

unsigned getDscntBitShift(unsigned VersionMajor) { return 0; }

unsigned getStorecntBitWidth(unsigned VersionMajor) {

unsigned getKmcntBitWidth(unsigned VersionMajor) {

unsigned getXcntBitWidth(unsigned VersionMajor, unsigned VersionMinor) {

unsigned getLoadcntStorecntBitShift(unsigned VersionMajor) {

inline unsigned getVaSdstBitWidth() { return 3; }

inline unsigned getVaSdstBitShift() { return 9; }
inline unsigned getVmVsrcBitWidth() { return 3; }

inline unsigned getVmVsrcBitShift() { return 2; }

inline unsigned getVaVdstBitWidth() { return 4; }

inline unsigned getVaVdstBitShift() { return 12; }

inline unsigned getVaVccBitWidth() { return 1; }

inline unsigned getVaVccBitShift() { return 1; }

inline unsigned getSaSdstBitWidth() { return 1; }

inline unsigned getSaSdstBitShift() { return 0; }

inline unsigned getVaSsrcBitWidth() { return 1; }

inline unsigned getVaSsrcBitShift() { return 8; }

inline unsigned getHoldCntWidth(unsigned VersionMajor, unsigned VersionMinor) {
  static constexpr const unsigned MinMajor = 10;
  static constexpr const unsigned MinMinor = 3;
  return std::tie(VersionMajor, VersionMinor) >= std::tie(MinMajor, MinMinor)

inline unsigned getHoldCntBitShift() { return 7; }
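// Read together, the getters above imply the layout of the depctr
// immediate: sa_sdst at bit 0 (1 bit), va_vcc at bit 1 (1 bit), vm_vsrc at
// bits [4:2], hold_cnt at bit 7 (width depends on the subtarget), va_ssrc
// at bit 8 (1 bit), va_sdst at bits [11:9], and va_vdst at bits [15:12].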
          M.getModuleFlag("amdhsa_code_object_version"))) {
    return (unsigned)Ver->getZExtValue() / 100;

  switch (ABIVersion) {

  switch (CodeObjectVersion) {

                     Twine(CodeObjectVersion));

  switch (CodeObjectVersion) {

  switch (CodeObjectVersion) {

  switch (CodeObjectVersion) {

  switch (CodeObjectVersion) {
#define GET_MIMGBaseOpcodesTable_IMPL
#define GET_MIMGDimInfoTable_IMPL
#define GET_MIMGInfoTable_IMPL
#define GET_MIMGLZMappingTable_IMPL
#define GET_MIMGMIPMappingTable_IMPL
#define GET_MIMGBiasMappingTable_IMPL
#define GET_MIMGOffsetMappingTable_IMPL
#define GET_MIMGG16MappingTable_IMPL
#define GET_MAIInstInfoTable_IMPL
#define GET_WMMAInstInfoTable_IMPL
#include "AMDGPUGenSearchableTables.inc"
                 unsigned VDataDwords, unsigned VAddrDwords) {
      getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding, VDataDwords, VAddrDwords);
  return Info ? Info->Opcode : -1;

  return NewInfo ? NewInfo->Opcode : -1;

                       bool IsG16Supported) {

    AddrWords += AddrComponents;

  if ((IsA16 && !IsG16Supported) || BaseOpcode->G16)
#define GET_FP4FP8DstByteSelTable_DECL
#define GET_FP4FP8DstByteSelTable_IMPL
#define GET_DPMACCInstructionTable_DECL
#define GET_DPMACCInstructionTable_IMPL
#define GET_MTBUFInfoTable_DECL
#define GET_MTBUFInfoTable_IMPL
#define GET_MUBUFInfoTable_DECL
#define GET_MUBUFInfoTable_IMPL
#define GET_SMInfoTable_DECL
#define GET_SMInfoTable_IMPL
#define GET_VOP1InfoTable_DECL
#define GET_VOP1InfoTable_IMPL
#define GET_VOP2InfoTable_DECL
#define GET_VOP2InfoTable_IMPL
#define GET_VOP3InfoTable_DECL
#define GET_VOP3InfoTable_IMPL
#define GET_VOPC64DPPTable_DECL
#define GET_VOPC64DPPTable_IMPL
#define GET_VOPC64DPP8Table_DECL
#define GET_VOPC64DPP8Table_IMPL
#define GET_VOPCAsmOnlyInfoTable_DECL
#define GET_VOPCAsmOnlyInfoTable_IMPL
#define GET_VOP3CAsmOnlyInfoTable_DECL
#define GET_VOP3CAsmOnlyInfoTable_IMPL
#define GET_VOPDComponentTable_DECL
#define GET_VOPDComponentTable_IMPL
#define GET_VOPDPairs_DECL
#define GET_VOPDPairs_IMPL
#define GET_VOPTrue16Table_DECL
#define GET_VOPTrue16Table_IMPL
#define GET_True16D16Table_IMPL
#define GET_WMMAOpcode2AddrMappingTable_DECL
#define GET_WMMAOpcode2AddrMappingTable_IMPL
#define GET_WMMAOpcode3AddrMappingTable_DECL
#define GET_WMMAOpcode3AddrMappingTable_IMPL
#define GET_getMFMA_F8F6F4_WithSize_DECL
#define GET_getMFMA_F8F6F4_WithSize_IMPL
#define GET_isMFMA_F8F6F4Table_IMPL
#define GET_isCvtScaleF32_F32F16ToF8F4Table_IMPL
#include "AMDGPUGenSearchableTables.inc"
  return Info ? Info->BaseOpcode : -1;

      getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
  return Info ? Info->Opcode : -1;

  return Info ? Info->elements : 0;

  return Info && Info->has_vaddr;

  return Info && Info->has_srsrc;

  return Info && Info->has_soffset;

  return Info ? Info->BaseOpcode : -1;

      getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
  return Info ? Info->Opcode : -1;

  return Info ? Info->elements : 0;

  return Info && Info->has_vaddr;

  return Info && Info->has_srsrc;

  return Info && Info->has_soffset;

  return Info && Info->IsBufferInv;

  return Info && Info->tfe;

  const SMInfo *Info = getSMEMOpcodeHelper(Opc);
  return Info && Info->IsBuffer;

  const VOPInfo *Info = getVOP1OpcodeHelper(Opc);
  return !Info || Info->IsSingle;

  const VOPInfo *Info = getVOP2OpcodeHelper(Opc);
  return !Info || Info->IsSingle;

  const VOPInfo *Info = getVOP3OpcodeHelper(Opc);
  return !Info || Info->IsSingle;

  return isVOPC64DPPOpcodeHelper(Opc) || isVOPC64DPP8OpcodeHelper(Opc);

  return Info && Info->is_dgemm;

  return Info && Info->is_gfx940_xdl;

  return Info ? Info->is_wmma_xdl : false;
  switch (EncodingVal) {

                                     unsigned F8F8Opcode) {
  return getMFMA_F8F6F4_InstWithNumRegs(SrcANumRegs, SrcBNumRegs, F8F8Opcode);

                                     unsigned F8F8Opcode) {
  return getMFMA_F8F6F4_InstWithNumRegs(SrcANumRegs, SrcBNumRegs, F8F8Opcode);

  if (ST.hasFeature(AMDGPU::FeatureGFX13Insts))
  if (ST.hasFeature(AMDGPU::FeatureGFX1250Insts))
  if (ST.hasFeature(AMDGPU::FeatureGFX12Insts))
  if (ST.hasFeature(AMDGPU::FeatureGFX11Insts))

  Opc = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : Opc;

                       EncodingFamily, VOPD3) != -1;
    CanBeVOPDX = Info->CanBeVOPDX;
                       EncodingFamily, VOPD3) != -1;
  return {CanBeVOPDX, CanBeVOPDY};

  return {false, false};

  Opc = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : Opc;
  return Info ? Info->VOPDOp : ~0u;
  return Opc == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
         Opc == AMDGPU::V_MAC_F32_e64_gfx10 ||
         Opc == AMDGPU::V_MAC_F32_e64_vi ||
         Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx6_gfx7 ||
         Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx10 ||
         Opc == AMDGPU::V_MAC_F16_e64_vi ||
         Opc == AMDGPU::V_FMAC_F64_e64_gfx90a ||
         Opc == AMDGPU::V_FMAC_F64_e64_gfx12 ||
         Opc == AMDGPU::V_FMAC_F64_e64_gfx13 ||
         Opc == AMDGPU::V_FMAC_F32_e64_gfx10 ||
         Opc == AMDGPU::V_FMAC_F32_e64_gfx11 ||
         Opc == AMDGPU::V_FMAC_F32_e64_gfx12 ||
         Opc == AMDGPU::V_FMAC_F32_e64_gfx13 ||
         Opc == AMDGPU::V_FMAC_F32_e64_vi ||
         Opc == AMDGPU::V_FMAC_LEGACY_F32_e64_gfx10 ||
         Opc == AMDGPU::V_FMAC_DX9_ZERO_F32_e64_gfx11 ||
         Opc == AMDGPU::V_FMAC_F16_e64_gfx10 ||
         Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx11 ||
         Opc == AMDGPU::V_FMAC_F16_fake16_e64_gfx11 ||
         Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx12 ||
         Opc == AMDGPU::V_FMAC_F16_fake16_e64_gfx12 ||
         Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx13 ||
         Opc == AMDGPU::V_FMAC_F16_fake16_e64_gfx13 ||
         Opc == AMDGPU::V_DOT2C_F32_F16_e64_vi ||
         Opc == AMDGPU::V_DOT2C_F32_BF16_e64_vi ||
         Opc == AMDGPU::V_DOT2C_I32_I16_e64_vi ||
         Opc == AMDGPU::V_DOT4C_I32_I8_e64_vi ||
         Opc == AMDGPU::V_DOT8C_I32_I4_e64_vi;
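// This list enumerates the per-subtarget encodings of the V_MAC/V_FMAC and
// V_DOT*C multiply-accumulate opcodes; the _gfx6_gfx7, _vi, _gfx10 ...
// _gfx13 suffixes are generation-specific encodings of the same pseudo
// instructions.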
  return Opc == AMDGPU::V_PERMLANE16_B32_gfx10 ||
         Opc == AMDGPU::V_PERMLANEX16_B32_gfx10 ||
         Opc == AMDGPU::V_PERMLANE16_B32_e64_gfx11 ||
         Opc == AMDGPU::V_PERMLANEX16_B32_e64_gfx11 ||
         Opc == AMDGPU::V_PERMLANE16_B32_e64_gfx12 ||
         Opc == AMDGPU::V_PERMLANEX16_B32_e64_gfx12 ||
         Opc == AMDGPU::V_PERMLANE16_VAR_B32_e64_gfx12 ||
         Opc == AMDGPU::V_PERMLANEX16_VAR_B32_e64_gfx12;

  return Opc == AMDGPU::V_CVT_F32_BF8_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_FP8_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_BF8_e64_dpp_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_FP8_e64_dpp_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_BF8_e64_dpp8_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_FP8_e64_dpp8_gfx12 ||
         Opc == AMDGPU::V_CVT_PK_F32_BF8_fake16_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_PK_F32_FP8_fake16_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_PK_F32_BF8_t16_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_PK_F32_FP8_t16_e64_gfx12;

  return Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SWAP ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_ADD ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMIN ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMIN ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMAX ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMAX ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_AND ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_OR ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_XOR ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_INC ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_DEC ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMIN ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMAX ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_CMPSWAP ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB_CLAMP_U32 ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_COND_SUB_U32 ||
         Opc == AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG;

  return Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B8_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B32_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B64_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B128_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B8_SADDR_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B32_SADDR_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B64_SADDR_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B128_SADDR_gfx1250;

  return Opc == TENSOR_STORE_FROM_LDS_gfx1250 ||
         Opc == TENSOR_STORE_FROM_LDS_D2_gfx1250;
  return Info && Info->IsTrue16;

  if (Info->HasFP8DstByteSel)
  if (Info->HasFP4DstByteSel)

  return Info && Info->IsDPMACCInstruction;

  return Info ? Info->Opcode3Addr : ~0u;

  return Info ? Info->Opcode2Addr : ~0u;

  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));

  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::V_XNOR_B32_e32:
int getVOPDFull(unsigned OpX, unsigned OpY, unsigned EncodingFamily,
  bool IsConvertibleToBitOp = VOPD3 ? getBitOp2(OpY) : 0;
  OpY = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : OpY;
      getVOPDInfoFromComponentOpcodes(OpX, OpY, EncodingFamily, VOPD3);
  return Info ? Info->Opcode : -1;

  const VOPDInfo *Info = getVOPDOpcodeHelper(VOPDOpcode);
  const auto *OpX = getVOPDBaseFromComponent(Info->OpX);
  const auto *OpY = getVOPDBaseFromComponent(Info->OpY);
  return {OpX->BaseVOP, OpY->BaseVOP};
  HasSrc2Acc = TiedIdx != -1;

  if (Opcode == AMDGPU::V_CNDMASK_B32_e32 ||
      Opcode == AMDGPU::V_CNDMASK_B32_e64) {

          getNamedOperandIdx(Opcode, OpName::src0))) {
    NumVOPD3Mods = SrcOperandsNum;

  for (CompOprIdx = Component::SRC1; CompOprIdx < OperandsNum; ++CompOprIdx) {
      MandatoryLiteralIdx = CompOprIdx;

  return getNamedOperandIdx(Opcode, OpName::bitop3);

    std::function<MCRegister(unsigned, unsigned)> GetRegIdx,

                          unsigned BanksMask) -> bool {
    if ((BaseX.id() & BanksMask) == (BaseY.id() & BanksMask))
        ((BaseX.id() + 1) & BanksMask) == (BaseY.id() & BanksMask))
        (BaseX.id() & BanksMask) == ((BaseY.id() + 1) & BanksMask))
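// The two shifted (+1) comparisons appear to cover the second register of a
// 64-bit operand landing in the adjacent VGPR bank (interpretive note, not
// in the original source; see the VOPD_VGPR_BANK_MASKS /
// VOPD3_VGPR_BANK_MASKS tables for the mask values).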
    if (!OpXRegs[CompOprIdx] || !OpYRegs[CompOprIdx])

    if (MRI.regsOverlap(OpXRegs[CompOprIdx], OpYRegs[CompOprIdx]))

    if (banksOverlap(OpXRegs[CompOprIdx], OpYRegs[CompOprIdx], BanksMasks) &&
        OpXRegs[CompOprIdx] != OpYRegs[CompOprIdx]))

InstInfo::getRegIndices(unsigned CompIdx,
                        std::function<MCRegister(unsigned, unsigned)> GetRegIdx,

  const auto &Comp = CompInfo[CompIdx];

  RegIndices[DST] = GetRegIdx(CompIdx, Comp.getIndexOfDstInMCOperands());

    unsigned CompSrcIdx = CompOprIdx - DST_NUM;
        Comp.hasRegSrcOperand(CompSrcIdx)
            ? GetRegIdx(CompIdx,
                        Comp.getIndexOfSrcInMCOperands(CompSrcIdx, VOPD3))
  const auto &OpXDesc = InstrInfo->get(OpX);
  const auto &OpYDesc = InstrInfo->get(OpY);

  if (!STI.getFeatureBits().test(FeatureSupportsXNACK))
  if (!STI.getFeatureBits().test(FeatureSupportsSRAMECC))

  std::optional<bool> XnackRequested;
  std::optional<bool> SramEccRequested;

  for (const std::string &Feature : Features.getFeatures()) {
    if (Feature == "+xnack")
      XnackRequested = true;
    else if (Feature == "-xnack")
      XnackRequested = false;
    else if (Feature == "+sramecc")
      SramEccRequested = true;
    else if (Feature == "-sramecc")
      SramEccRequested = false;
  if (XnackRequested) {
    if (XnackSupported) {

      if (*XnackRequested) {
        errs() << "warning: xnack 'On' was requested for a processor that "
                  "does not support it!\n";
      } else {
        errs() << "warning: xnack 'Off' was requested for a processor that "
                  "does not support it!\n";

  if (SramEccRequested) {
    if (SramEccSupported) {

      if (*SramEccRequested) {
        errs() << "warning: sramecc 'On' was requested for a processor that "
                  "does not support it!\n";
      } else {
        errs() << "warning: sramecc 'Off' was requested for a processor that "
                  "does not support it!\n";
  TargetID.split(TargetIDSplit, ':');

  for (const auto &FeatureString : TargetIDSplit) {
    if (FeatureString.starts_with("xnack"))
    if (FeatureString.starts_with("sramecc"))

  std::string StringRep;

  auto TargetTriple = STI.getTargetTriple();
  StreamRep << TargetTriple.getArchName() << '-' << TargetTriple.getVendorName()
            << '-' << TargetTriple.getOSName() << '-'
            << TargetTriple.getEnvironmentName() << '-';

  std::string Processor;
    Processor = STI.getCPU().str();

  std::string Features;
      Features += ":sramecc-";
      Features += ":sramecc+";
      Features += ":xnack-";
      Features += ":xnack+";

  StreamRep << Processor << Features;
                                   unsigned FlatWorkGroupSize) {
  assert(FlatWorkGroupSize != 0);

  unsigned MaxBarriers = 16;

  return std::min(MaxWaves / N, MaxBarriers);

                                    unsigned FlatWorkGroupSize) {

                                  unsigned FlatWorkGroupSize) {

    return Addressable ? AddressableNumSGPRs : 108;
  if (Version.Major >= 8 && !Addressable)
    AddressableNumSGPRs = 112;

  return std::min(MaxNumSGPRs, AddressableNumSGPRs);

                             bool FlatScrUsed, bool XNACKUsed) {
  unsigned ExtraSGPRs = 0;

  return divideCeil(std::max(1u, NumRegs), Granule);
                               unsigned DynamicVGPRBlockSize,
                               std::optional<bool> EnableWavefrontSize32) {

  if (DynamicVGPRBlockSize != 0)
    return DynamicVGPRBlockSize;

  bool IsWave32 = EnableWavefrontSize32
                      ? *EnableWavefrontSize32

    return IsWave32 ? 24 : 12;

    return IsWave32 ? 16 : 8;

  return IsWave32 ? 8 : 4;

    std::optional<bool> EnableWavefrontSize32) {

  bool IsWave32 = EnableWavefrontSize32
                      ? *EnableWavefrontSize32

    return IsWave32 ? 16 : 8;

  return IsWave32 ? 8 : 4;

    return IsWave32 ? 1536 : 768;
  return IsWave32 ? 1024 : 512;

  if (Features.test(Feature1024AddressableVGPRs))
    return Features.test(FeatureWavefrontSize32) ? 1024 : 512;

                                     unsigned DynamicVGPRBlockSize) {
  if (Features.test(FeatureGFX90AInsts))

  if (DynamicVGPRBlockSize != 0)

                                  unsigned DynamicVGPRBlockSize) {

                                  unsigned TotalNumVGPRs) {
  if (NumVGPRs < Granule)
  unsigned RoundedRegs = alignTo(NumVGPRs, Granule);
  return std::min(std::max(TotalNumVGPRs / RoundedRegs, 1u), MaxWaves);
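// Worked example (illustrative): with Granule = 4, TotalNumVGPRs = 256,
// MaxWaves = 10 and NumVGPRs = 37, RoundedRegs = alignTo(37, 4) = 40, so
// the function returns min(max(256 / 40, 1), 10) = 6 waves per EU.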
                           unsigned DynamicVGPRBlockSize) {

  if (WavesPerEU >= MaxWavesPerEU)

  unsigned AddrsableNumVGPRs =

  unsigned MaxNumVGPRs = alignDown(TotNumVGPRs / WavesPerEU, Granule);
  if (MaxNumVGPRs == alignDown(TotNumVGPRs / MaxWavesPerEU, Granule))

                                    DynamicVGPRBlockSize);
  if (WavesPerEU < MinWavesPerEU)

  unsigned MaxNumVGPRsNext = alignDown(TotNumVGPRs / (WavesPerEU + 1), Granule);
  unsigned MinNumVGPRs = 1 + std::min(MaxNumVGPRs - Granule, MaxNumVGPRsNext);
  return std::min(MinNumVGPRs, AddrsableNumVGPRs);

                           unsigned DynamicVGPRBlockSize) {

  unsigned MaxNumVGPRs =

  unsigned AddressableNumVGPRs =

  return std::min(MaxNumVGPRs, AddressableNumVGPRs);

    std::optional<bool> EnableWavefrontSize32) {

                                   unsigned DynamicVGPRBlockSize,
                                   std::optional<bool> EnableWavefrontSize32) {
  return C == 'v' || C == 's' || C == 'a';

  if (RegName.consume_front("[")) {
    unsigned NumRegs = End - Idx + 1;
    return {Kind, Idx, NumRegs};

  return {Kind, Idx, 1};

std::tuple<char, unsigned, unsigned>

std::pair<unsigned, unsigned>
                                       std::pair<unsigned, unsigned> Default,
                                       bool OnlyFirstRequired) {
  return {Attr->first, Attr->second.value_or(Default.second)};
std::optional<std::pair<unsigned, std::optional<unsigned>>>
    bool OnlyFirstRequired) {

  if (!A.isStringAttribute())
    return std::nullopt;

  std::pair<unsigned, std::optional<unsigned>> Ints;
  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
    Ctx.emitError("can't parse first integer attribute " + Name);
    return std::nullopt;

  unsigned Second = 0;
  if (Strs.second.trim().getAsInteger(0, Second)) {
    if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
      Ctx.emitError("can't parse second integer attribute " + Name);
      return std::nullopt;

  Ints.second = Second;
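// Usage sketch (hypothetical attribute values): a string attribute of
// "128,256" parses to the pair {128, 256}; with OnlyFirstRequired set, a
// bare "128" yields {128, std::nullopt} instead of being diagnosed.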
  std::optional<SmallVector<unsigned>> R =

std::optional<SmallVector<unsigned>>
    return std::nullopt;
  if (!A.isStringAttribute()) {
    Ctx.emitError(Name + " is not a string attribute");
    return std::nullopt;

    std::pair<StringRef, StringRef> Strs = S.split(',');
    if (Strs.first.trim().getAsInteger(0, IntVal)) {
      Ctx.emitError("can't parse integer attribute " + Strs.first + " in " +
      return std::nullopt;

    Ctx.emitError("attribute " + Name +
                  " has incorrect number of integers; expected " +
    return std::nullopt;

  if (Low.ule(Val) && High.ugt(Val))

  if (Low.uge(Val) && High.ult(Val))
  if (Wait.LoadCnt != ~0u)
    OS << LS << "LoadCnt: " << Wait.LoadCnt;
  if (Wait.ExpCnt != ~0u)
    OS << LS << "ExpCnt: " << Wait.ExpCnt;
  if (Wait.DsCnt != ~0u)
    OS << LS << "DsCnt: " << Wait.DsCnt;
  if (Wait.StoreCnt != ~0u)
    OS << LS << "StoreCnt: " << Wait.StoreCnt;
  if (Wait.SampleCnt != ~0u)
    OS << LS << "SampleCnt: " << Wait.SampleCnt;
  if (Wait.BvhCnt != ~0u)
    OS << LS << "BvhCnt: " << Wait.BvhCnt;
  if (Wait.KmCnt != ~0u)
    OS << LS << "KmCnt: " << Wait.KmCnt;
  if (Wait.XCnt != ~0u)
    OS << LS << "XCnt: " << Wait.XCnt;
  return (1 << (getVmcntBitWidthLo(Version.Major) +
                getVmcntBitWidthHi(Version.Major))) -
         1;

  return (1 << getLoadcntBitWidth(Version.Major)) - 1;

  return (1 << getSamplecntBitWidth(Version.Major)) - 1;

  return (1 << getBvhcntBitWidth(Version.Major)) - 1;

  return (1 << getExpcntBitWidth(Version.Major)) - 1;

  return (1 << getLgkmcntBitWidth(Version.Major)) - 1;

  return (1 << getDscntBitWidth(Version.Major)) - 1;

  return (1 << getKmcntBitWidth(Version.Major)) - 1;

  return (1 << getStorecntBitWidth(Version.Major)) - 1;
  bool HasExtendedWaitCounts = IV.Major >= 12;
  if (HasExtendedWaitCounts) {

  unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(Version.Major),
                                getVmcntBitWidthLo(Version.Major));
  unsigned Expcnt = getBitMask(getExpcntBitShift(Version.Major),
                               getExpcntBitWidth(Version.Major));
  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(Version.Major),
                                getLgkmcntBitWidth(Version.Major));
  unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(Version.Major),
                                getVmcntBitWidthHi(Version.Major));
  return VmcntLo | Expcnt | Lgkmcnt | VmcntHi;
  unsigned VmcntLo = unpackBits(Waitcnt, getVmcntBitShiftLo(Version.Major),
                                getVmcntBitWidthLo(Version.Major));
  unsigned VmcntHi = unpackBits(Waitcnt, getVmcntBitShiftHi(Version.Major),
                                getVmcntBitWidthHi(Version.Major));
  return VmcntLo | VmcntHi << getVmcntBitWidthLo(Version.Major);
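// Note: where getVmcntBitWidthHi() is nonzero (gfx9/gfx10 per the helper at
// the top of the file), vmcnt is physically split across two fields of the
// waitcnt encoding, and the decode above reassembles the logical value as
// Lo | (Hi << LoWidth).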
                      getExpcntBitWidth(Version.Major));

                      getLgkmcntBitWidth(Version.Major));

                    unsigned &Expcnt, unsigned &Lgkmcnt) {

                  getVmcntBitWidthLo(Version.Major));
  return packBits(Vmcnt >> getVmcntBitWidthLo(Version.Major), Waitcnt,
                  getVmcntBitShiftHi(Version.Major),
                  getVmcntBitWidthHi(Version.Major));

  return packBits(Expcnt, Waitcnt, getExpcntBitShift(Version.Major),
                  getExpcntBitWidth(Version.Major));

  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(Version.Major),
                  getLgkmcntBitWidth(Version.Major));

                    unsigned Expcnt, unsigned Lgkmcnt) {
  unsigned Dscnt = getBitMask(getDscntBitShift(Version.Major),
                              getDscntBitWidth(Version.Major));

    unsigned Storecnt = getBitMask(getLoadcntStorecntBitShift(Version.Major),
                                   getStorecntBitWidth(Version.Major));
    return Dscnt | Storecnt;

  unsigned Loadcnt = getBitMask(getLoadcntStorecntBitShift(Version.Major),
                                getLoadcntBitWidth(Version.Major));
  return Dscnt | Loadcnt;

                                 getLoadcntStorecntBitShift(Version.Major),
                                 getLoadcntBitWidth(Version.Major)));
  Decoded.set(DS_CNT, unpackBits(LoadcntDscnt, getDscntBitShift(Version.Major),
                                 getDscntBitWidth(Version.Major)));

                                  getLoadcntStorecntBitShift(Version.Major),
                                  getStorecntBitWidth(Version.Major)));
  Decoded.set(DS_CNT,
              unpackBits(StorecntDscnt, getDscntBitShift(Version.Major),
                         getDscntBitWidth(Version.Major)));
  return packBits(Loadcnt, Waitcnt, getLoadcntStorecntBitShift(Version.Major),
                  getLoadcntBitWidth(Version.Major));

                                unsigned Storecnt) {
  return packBits(Storecnt, Waitcnt, getLoadcntStorecntBitShift(Version.Major),
                  getStorecntBitWidth(Version.Major));

                  getDscntBitWidth(Version.Major));

                                 unsigned Storecnt, unsigned Dscnt) {
  for (int Idx = 0; Idx < Size; ++Idx) {
    const auto &Op = Opr[Idx];
    if (Op.isSupported(STI))
      Enc |= Op.encode(Op.Default);

                                 int Size, unsigned Code,
                                 bool &HasNonDefaultVal,
  unsigned UsedOprMask = 0;
  HasNonDefaultVal = false;
  for (int Idx = 0; Idx < Size; ++Idx) {
    const auto &Op = Opr[Idx];
    if (!Op.isSupported(STI))
    UsedOprMask |= Op.getMask();
    unsigned Val = Op.decode(Code);
    if (!Op.isValid(Val))
    HasNonDefaultVal |= (Val != Op.Default);
  return (Code & ~UsedOprMask) == 0;
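// An encoding is accepted as symbolic only if every set bit of Code falls
// inside the mask of some field supported on this subtarget; the final
// (Code & ~UsedOprMask) == 0 check rejects values with leftover bits.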
                          unsigned Code, int &Idx, StringRef &Name,
                          unsigned &Val, bool &IsDefault,
  while (Idx < Size) {
    const auto &Op = Opr[Idx++];
    if (Op.isSupported(STI)) {
      Val = Op.decode(Code);
      IsDefault = (Val == Op.Default);

  if (InputVal < 0 || InputVal > Op.Max)
  return Op.encode(InputVal);

                         unsigned &UsedOprMask,
  for (int Idx = 0; Idx < Size; ++Idx) {
    const auto &Op = Opr[Idx];
    if (Op.Name == Name) {
      if (!Op.isSupported(STI)) {
      auto OprMask = Op.getMask();
      if (OprMask & UsedOprMask)
      UsedOprMask |= OprMask;

                                       HasNonDefaultVal, STI);
  return unpackBits(Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());

  return unpackBits(Encoded, getVaVdstBitShift(), getVaVdstBitWidth());

  return unpackBits(Encoded, getSaSdstBitShift(), getSaSdstBitWidth());

  return unpackBits(Encoded, getVaSdstBitShift(), getVaSdstBitWidth());

  return unpackBits(Encoded, getVaVccBitShift(), getVaVccBitWidth());

  return unpackBits(Encoded, getVaSsrcBitShift(), getVaSsrcBitWidth());

  return unpackBits(Encoded, getHoldCntBitShift(),

  return packBits(VmVsrc, Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());

  return packBits(VaVdst, Encoded, getVaVdstBitShift(), getVaVdstBitWidth());

  return packBits(SaSdst, Encoded, getSaSdstBitShift(), getSaSdstBitWidth());

  return packBits(VaSdst, Encoded, getVaSdstBitShift(), getVaSdstBitWidth());

  return packBits(VaVcc, Encoded, getVaVccBitShift(), getVaVccBitWidth());

  return packBits(VaSsrc, Encoded, getVaSsrcBitShift(), getVaSsrcBitWidth());

  return packBits(HoldCnt, Encoded, getHoldCntBitShift(),
    if (Val.Tgt <= Id && Id <= Val.Tgt + Val.MaxIndex) {
      Index = (Val.MaxIndex == 0) ? -1 : (Id - Val.Tgt);

    if (Val.MaxIndex == 0 && Name == Val.Name)

    if (Val.MaxIndex > 0 && Name.starts_with(Val.Name)) {
      StringRef Suffix = Name.drop_front(Val.Name.size());

      if (Suffix.size() > 1 && Suffix[0] == '0')

      return Val.Tgt + Id;
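// Example (illustrative): for an indexed target such as "pos", a name like
// "pos3" matches the table entry by prefix, the "3" suffix parses as the
// index (a leading zero, e.g. "pos03", is rejected above), and the result
// is the entry's base target id plus 3.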
namespace MTBUFFormat {

    if (Name == lookupTable[Id])

  return F.getFnAttributeAsParsedInteger("InitialPSInputAddr", 0);

  return F.getFnAttributeAsParsedInteger(
      "amdgpu-color-export",

  return F.getFnAttributeAsParsedInteger("amdgpu-depth-export", 0) != 0;

      F.getFnAttributeAsParsedInteger("amdgpu-dynamic-vgpr-block-size", 0);
  return STI.hasFeature(AMDGPU::FeatureSRAMECC);

  return STI.hasFeature(AMDGPU::FeatureMIMG_R128) &&

  return !STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem) && !isCI(STI) &&

  return Version.Minor >= 3 ? 13 : 5;

  return HasSampler ? 4 : 5;

  return STI.hasFeature(AMDGPU::FeatureSouthernIslands);

  return STI.hasFeature(AMDGPU::FeatureSeaIslands);

  return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);

  return STI.hasFeature(AMDGPU::FeatureGCN3Encoding);

  return STI.hasFeature(AMDGPU::FeatureGFX10_AEncoding);

  return STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding);

  return STI.hasFeature(AMDGPU::FeatureGFX10_3Insts);

  return STI.hasFeature(AMDGPU::FeatureGFX90AInsts);

  return STI.hasFeature(AMDGPU::FeatureGFX940Insts);

  return STI.hasFeature(AMDGPU::FeatureArchitectedFlatScratch);

  return STI.hasFeature(AMDGPU::FeatureMAIInsts);

  return STI.hasFeature(AMDGPU::FeatureVOPDInsts);

  return STI.hasFeature(AMDGPU::FeatureDPPSrc1SGPR);

  return STI.hasFeature(AMDGPU::FeatureKernargPreload);
                             int32_t ArgNumVGPR) {
  if (has90AInsts && ArgNumAGPR)
    return alignTo(ArgNumVGPR, 4) + ArgNumAGPR;
  return std::max(ArgNumVGPR, ArgNumAGPR);
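// E.g. (illustrative) with gfx90a-style unified register files, 10 VGPRs
// plus 6 AGPRs cost alignTo(10, 4) + 6 = 18 registers, since the VGPR count
// is rounded up to a 4-register boundary before the AGPRs are appended;
// otherwise the cost is simply max(VGPRs, AGPRs).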
  return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
#define MAP_REG2REG                                                            \
  using namespace AMDGPU;                                                      \
  switch (Reg.id()) {                                                          \
  CASE_CI_VI(FLAT_SCR)                                                         \
  CASE_CI_VI(FLAT_SCR_LO)                                                      \
  CASE_CI_VI(FLAT_SCR_HI)                                                      \
  CASE_VI_GFX9PLUS(TTMP0)                                                      \
  CASE_VI_GFX9PLUS(TTMP1)                                                      \
  CASE_VI_GFX9PLUS(TTMP2)                                                      \
  CASE_VI_GFX9PLUS(TTMP3)                                                      \
  CASE_VI_GFX9PLUS(TTMP4)                                                      \
  CASE_VI_GFX9PLUS(TTMP5)                                                      \
  CASE_VI_GFX9PLUS(TTMP6)                                                      \
  CASE_VI_GFX9PLUS(TTMP7)                                                      \
  CASE_VI_GFX9PLUS(TTMP8)                                                      \
  CASE_VI_GFX9PLUS(TTMP9)                                                      \
  CASE_VI_GFX9PLUS(TTMP10)                                                     \
  CASE_VI_GFX9PLUS(TTMP11)                                                     \
  CASE_VI_GFX9PLUS(TTMP12)                                                     \
  CASE_VI_GFX9PLUS(TTMP13)                                                     \
  CASE_VI_GFX9PLUS(TTMP14)                                                     \
  CASE_VI_GFX9PLUS(TTMP15)                                                     \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1)                                                \
  CASE_VI_GFX9PLUS(TTMP2_TTMP3)                                                \
  CASE_VI_GFX9PLUS(TTMP4_TTMP5)                                                \
  CASE_VI_GFX9PLUS(TTMP6_TTMP7)                                                \
  CASE_VI_GFX9PLUS(TTMP8_TTMP9)                                                \
  CASE_VI_GFX9PLUS(TTMP10_TTMP11)                                              \
  CASE_VI_GFX9PLUS(TTMP12_TTMP13)                                              \
  CASE_VI_GFX9PLUS(TTMP14_TTMP15)                                              \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3)                                    \
  CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7)                                    \
  CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11)                                  \
  CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15)                                \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7)            \
  CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11)          \
  CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15)      \
  CASE_VI_GFX9PLUS(                                                            \
      TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_GFXPRE11_GFX11PLUS(M0)                                                  \
  CASE_GFXPRE11_GFX11PLUS(SGPR_NULL)                                           \
  CASE_GFXPRE11_GFX11PLUS_TO(SGPR_NULL64, SGPR_NULL)                           \

#define CASE_CI_VI(node)                                                       \
  assert(!isSI(STI));                                                          \
  return isCI(STI) ? node##_ci : node##_vi;

#define CASE_VI_GFX9PLUS(node)                                                 \
  return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi;

#define CASE_GFXPRE11_GFX11PLUS(node)                                          \
  return isGFX11Plus(STI) ? node##_gfx11plus : node##_gfxpre11;

#define CASE_GFXPRE11_GFX11PLUS_TO(node, result)                               \
  return isGFX11Plus(STI) ? result##_gfx11plus : result##_gfxpre11;

#undef CASE_VI_GFX9PLUS
#undef CASE_GFXPRE11_GFX11PLUS
#undef CASE_GFXPRE11_GFX11PLUS_TO

#define CASE_CI_VI(node)                                                       \

#define CASE_VI_GFX9PLUS(node)                                                 \
  case node##_gfx9plus:                                                        \

#define CASE_GFXPRE11_GFX11PLUS(node)                                          \
  case node##_gfx11plus:                                                       \
  case node##_gfxpre11:                                                        \

#define CASE_GFXPRE11_GFX11PLUS_TO(node, result)
  case AMDGPU::SRC_SHARED_BASE_LO:
  case AMDGPU::SRC_SHARED_BASE:
  case AMDGPU::SRC_SHARED_LIMIT_LO:
  case AMDGPU::SRC_SHARED_LIMIT:
  case AMDGPU::SRC_PRIVATE_BASE_LO:
  case AMDGPU::SRC_PRIVATE_BASE:
  case AMDGPU::SRC_PRIVATE_LIMIT_LO:
  case AMDGPU::SRC_PRIVATE_LIMIT:
  case AMDGPU::SRC_FLAT_SCRATCH_BASE_LO:
  case AMDGPU::SRC_FLAT_SCRATCH_BASE_HI:
  case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
  case AMDGPU::SRC_VCCZ:
  case AMDGPU::SRC_EXECZ:
  case AMDGPU::SRC_SCC:
  case AMDGPU::SGPR_NULL:

#undef CASE_VI_GFX9PLUS
#undef CASE_GFXPRE11_GFX11PLUS
#undef CASE_GFXPRE11_GFX11PLUS_TO
  unsigned OpType = Desc.operands()[OpNo].OperandType;

  unsigned OpType = Desc.operands()[OpNo].OperandType;

  unsigned OpType = Desc.operands()[OpNo].OperandType;
  case AMDGPU::VGPR_16RegClassID:
  case AMDGPU::VGPR_16_Lo128RegClassID:
  case AMDGPU::SGPR_LO16RegClassID:
  case AMDGPU::AGPR_LO16RegClassID:

  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VGPR_32_Lo256RegClassID:
  case AMDGPU::VRegOrLds_32RegClassID:
  case AMDGPU::AGPR_32RegClassID:
  case AMDGPU::VS_32RegClassID:
  case AMDGPU::AV_32RegClassID:
  case AMDGPU::SReg_32RegClassID:
  case AMDGPU::SReg_32_XM0RegClassID:
  case AMDGPU::SRegOrLds_32RegClassID:

  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::VS_64RegClassID:
  case AMDGPU::SReg_64RegClassID:
  case AMDGPU::VReg_64RegClassID:
  case AMDGPU::AReg_64RegClassID:
  case AMDGPU::SReg_64_XEXECRegClassID:
  case AMDGPU::VReg_64_Align2RegClassID:
  case AMDGPU::AReg_64_Align2RegClassID:
  case AMDGPU::AV_64RegClassID:
  case AMDGPU::AV_64_Align2RegClassID:
  case AMDGPU::VReg_64_Lo256_Align2RegClassID:
  case AMDGPU::VS_64_Lo256RegClassID:

  case AMDGPU::SGPR_96RegClassID:
  case AMDGPU::SReg_96RegClassID:
  case AMDGPU::VReg_96RegClassID:
  case AMDGPU::AReg_96RegClassID:
  case AMDGPU::VReg_96_Align2RegClassID:
  case AMDGPU::AReg_96_Align2RegClassID:
  case AMDGPU::AV_96RegClassID:
  case AMDGPU::AV_96_Align2RegClassID:
  case AMDGPU::VReg_96_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::SReg_128RegClassID:
  case AMDGPU::VReg_128RegClassID:
  case AMDGPU::AReg_128RegClassID:
  case AMDGPU::VReg_128_Align2RegClassID:
  case AMDGPU::AReg_128_Align2RegClassID:
  case AMDGPU::AV_128RegClassID:
  case AMDGPU::AV_128_Align2RegClassID:
  case AMDGPU::SReg_128_XNULLRegClassID:
  case AMDGPU::VReg_128_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_160RegClassID:
  case AMDGPU::SReg_160RegClassID:
  case AMDGPU::VReg_160RegClassID:
  case AMDGPU::AReg_160RegClassID:
  case AMDGPU::VReg_160_Align2RegClassID:
  case AMDGPU::AReg_160_Align2RegClassID:
  case AMDGPU::AV_160RegClassID:
  case AMDGPU::AV_160_Align2RegClassID:
  case AMDGPU::VReg_160_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_192RegClassID:
  case AMDGPU::SReg_192RegClassID:
  case AMDGPU::VReg_192RegClassID:
  case AMDGPU::AReg_192RegClassID:
  case AMDGPU::VReg_192_Align2RegClassID:
  case AMDGPU::AReg_192_Align2RegClassID:
  case AMDGPU::AV_192RegClassID:
  case AMDGPU::AV_192_Align2RegClassID:
  case AMDGPU::VReg_192_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_224RegClassID:
  case AMDGPU::SReg_224RegClassID:
  case AMDGPU::VReg_224RegClassID:
  case AMDGPU::AReg_224RegClassID:
  case AMDGPU::VReg_224_Align2RegClassID:
  case AMDGPU::AReg_224_Align2RegClassID:
  case AMDGPU::AV_224RegClassID:
  case AMDGPU::AV_224_Align2RegClassID:
  case AMDGPU::VReg_224_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::SReg_256RegClassID:
  case AMDGPU::VReg_256RegClassID:
  case AMDGPU::AReg_256RegClassID:
  case AMDGPU::VReg_256_Align2RegClassID:
  case AMDGPU::AReg_256_Align2RegClassID:
  case AMDGPU::AV_256RegClassID:
  case AMDGPU::AV_256_Align2RegClassID:
  case AMDGPU::SReg_256_XNULLRegClassID:
  case AMDGPU::VReg_256_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_288RegClassID:
  case AMDGPU::SReg_288RegClassID:
  case AMDGPU::VReg_288RegClassID:
  case AMDGPU::AReg_288RegClassID:
  case AMDGPU::VReg_288_Align2RegClassID:
  case AMDGPU::AReg_288_Align2RegClassID:
  case AMDGPU::AV_288RegClassID:
  case AMDGPU::AV_288_Align2RegClassID:
  case AMDGPU::VReg_288_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_320RegClassID:
  case AMDGPU::SReg_320RegClassID:
  case AMDGPU::VReg_320RegClassID:
  case AMDGPU::AReg_320RegClassID:
  case AMDGPU::VReg_320_Align2RegClassID:
  case AMDGPU::AReg_320_Align2RegClassID:
  case AMDGPU::AV_320RegClassID:
  case AMDGPU::AV_320_Align2RegClassID:
  case AMDGPU::VReg_320_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_352RegClassID:
  case AMDGPU::SReg_352RegClassID:
  case AMDGPU::VReg_352RegClassID:
  case AMDGPU::AReg_352RegClassID:
  case AMDGPU::VReg_352_Align2RegClassID:
  case AMDGPU::AReg_352_Align2RegClassID:
  case AMDGPU::AV_352RegClassID:
  case AMDGPU::AV_352_Align2RegClassID:
  case AMDGPU::VReg_352_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_384RegClassID:
  case AMDGPU::SReg_384RegClassID:
  case AMDGPU::VReg_384RegClassID:
  case AMDGPU::AReg_384RegClassID:
  case AMDGPU::VReg_384_Align2RegClassID:
  case AMDGPU::AReg_384_Align2RegClassID:
  case AMDGPU::AV_384RegClassID:
  case AMDGPU::AV_384_Align2RegClassID:
  case AMDGPU::VReg_384_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::SReg_512RegClassID:
  case AMDGPU::VReg_512RegClassID:
  case AMDGPU::AReg_512RegClassID:
  case AMDGPU::VReg_512_Align2RegClassID:
  case AMDGPU::AReg_512_Align2RegClassID:
  case AMDGPU::AV_512RegClassID:
  case AMDGPU::AV_512_Align2RegClassID:
  case AMDGPU::VReg_512_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_1024RegClassID:
  case AMDGPU::SReg_1024RegClassID:
  case AMDGPU::VReg_1024RegClassID:
  case AMDGPU::AReg_1024RegClassID:
  case AMDGPU::VReg_1024_Align2RegClassID:
  case AMDGPU::AReg_1024_Align2RegClassID:
  case AMDGPU::AV_1024RegClassID:
  case AMDGPU::AV_1024_Align2RegClassID:
  case AMDGPU::VReg_1024_Lo256_Align2RegClassID:
         (Val == 0x3fc45f306dc9c882 && HasInv2Pi);

         (Val == 0x3e22f983 && HasInv2Pi);
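// 0x3e22f983 is the IEEE single-precision bit pattern of 1/(2*pi), and
// 0x3fc45f306dc9c882 above is the double-precision one; both are inline
// constants only on subtargets that provide the inv-2pi constant
// (HasInv2Pi).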
  return Val == 0x3F00 ||

  return Val == 0x3C00 ||

  return 192 + std::abs(Signed);

  case 0x3800: return 240;
  case 0xB800: return 241;
  case 0x3C00: return 242;
  case 0xBC00: return 243;
  case 0x4000: return 244;
  case 0xC000: return 245;
  case 0x4400: return 246;
  case 0xC400: return 247;
  case 0x3118: return 248;

  case 0x3F000000: return 240;
  case 0xBF000000: return 241;
  case 0x3F800000: return 242;
  case 0xBF800000: return 243;
  case 0x40000000: return 244;
  case 0xC0000000: return 245;
  case 0x40800000: return 246;
  case 0xC0800000: return 247;
  case 0x3E22F983: return 248;

  return 192 + std::abs(Signed);
  case 0x3F00: return 240;
  case 0xBF00: return 241;
  case 0x3F80: return 242;
  case 0xBF80: return 243;
  case 0x4000: return 244;
  case 0xC000: return 245;
  case 0x4080: return 246;
  case 0xC080: return 247;
  case 0x3E22: return 248;
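// Each of the three switches above maps the type-specific bit pattern of
// the hardware inline constants (+/-0.5, +/-1.0, +/-2.0, +/-4.0 and
// 1/(2*pi)) to the operand encodings 240-248, in IEEE half, single, and
// bfloat16 form respectively.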
  return std::nullopt;

  return 192 + std::abs(Signed);

  return std::nullopt;

  return Imm & 0xffff;

  return A->hasAttribute(Attribute::InReg) ||
         A->hasAttribute(Attribute::ByVal);

  return A->hasAttribute(Attribute::InReg);

                                int64_t EncodedOffset) {

                                      int64_t EncodedOffset, bool IsBuffer) {

  if (IsBuffer && EncodedOffset < 0)

  return (ByteOffset & 3) == 0;

  return ByteOffset >> 2;

                                        int64_t ByteOffset, bool IsBuffer,

    return std::nullopt;

  return isInt<24>(ByteOffset) ? std::optional<int64_t>(ByteOffset)

  return isInt<20>(ByteOffset) ? std::optional<int64_t>(ByteOffset)

    return std::nullopt;

             ? std::optional<int64_t>(EncodedOffset)

                                                     int64_t ByteOffset) {
    return std::nullopt;

  return isUInt<32>(EncodedOffset) ? std::optional<int64_t>(EncodedOffset)
struct SourceOfDivergence {

const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);

const AlwaysUniform *lookupAlwaysUniform(unsigned Intr);

#define GET_SourcesOfDivergence_IMPL
#define GET_UniformIntrinsics_IMPL
#define GET_Gfx9BufferFormat_IMPL
#define GET_Gfx10BufferFormat_IMPL
#define GET_Gfx11PlusBufferFormat_IMPL
#include "AMDGPUGenSearchableTables.inc"

  return lookupSourceOfDivergence(IntrID);

  return lookupAlwaysUniform(IntrID);
  return isGFX11Plus(STI)
             ? getGfx11PlusBufferFormatInfo(BitsPerComp, NumComponents,
                                            NumFormat)
         : isGFX10(STI)
             ? getGfx10BufferFormatInfo(BitsPerComp, NumComponents, NumFormat)
             : getGfx9BufferFormatInfo(BitsPerComp, NumComponents, NumFormat);

             : getGfx9BufferFormatInfo(Format);
  const unsigned VGPRClasses[] = {
      AMDGPU::VGPR_16RegClassID,  AMDGPU::VGPR_32RegClassID,
      AMDGPU::VReg_64RegClassID,  AMDGPU::VReg_96RegClassID,
      AMDGPU::VReg_128RegClassID, AMDGPU::VReg_160RegClassID,
      AMDGPU::VReg_192RegClassID, AMDGPU::VReg_224RegClassID,
      AMDGPU::VReg_256RegClassID, AMDGPU::VReg_288RegClassID,
      AMDGPU::VReg_320RegClassID, AMDGPU::VReg_352RegClassID,
      AMDGPU::VReg_384RegClassID, AMDGPU::VReg_512RegClassID,
      AMDGPU::VReg_1024RegClassID};

  for (unsigned RCID : VGPRClasses) {

  unsigned Enc = MRI.getEncodingValue(Reg);

  unsigned Enc = MRI.getEncodingValue(Reg);

  if (RC->getID() == AMDGPU::VGPR_16RegClassID) {
std::pair<const AMDGPU::OpName *, const AMDGPU::OpName *>
  static const AMDGPU::OpName VOPOps[4] = {
      AMDGPU::OpName::src0, AMDGPU::OpName::src1, AMDGPU::OpName::src2,
      AMDGPU::OpName::vdst};
  static const AMDGPU::OpName VDSOps[4] = {
      AMDGPU::OpName::addr, AMDGPU::OpName::data0, AMDGPU::OpName::data1,
      AMDGPU::OpName::vdst};
  static const AMDGPU::OpName FLATOps[4] = {
      AMDGPU::OpName::vaddr, AMDGPU::OpName::vdata,
      AMDGPU::OpName::NUM_OPERAND_NAMES, AMDGPU::OpName::vdst};
  static const AMDGPU::OpName BUFOps[4] = {
      AMDGPU::OpName::vaddr, AMDGPU::OpName::NUM_OPERAND_NAMES,
      AMDGPU::OpName::NUM_OPERAND_NAMES, AMDGPU::OpName::vdata};
  static const AMDGPU::OpName VIMGOps[4] = {
      AMDGPU::OpName::vaddr0, AMDGPU::OpName::vaddr1, AMDGPU::OpName::vaddr2,
      AMDGPU::OpName::vdata};

  static const AMDGPU::OpName VOPDOpsX[4] = {
      AMDGPU::OpName::src0X, AMDGPU::OpName::vsrc1X, AMDGPU::OpName::vsrc2X,
      AMDGPU::OpName::vdstX};
  static const AMDGPU::OpName VOPDOpsY[4] = {
      AMDGPU::OpName::src0Y, AMDGPU::OpName::vsrc1Y, AMDGPU::OpName::vsrc2Y,
      AMDGPU::OpName::vdstY};

  static const AMDGPU::OpName VOP2MADMKOps[4] = {
      AMDGPU::OpName::src0, AMDGPU::OpName::NUM_OPERAND_NAMES,
      AMDGPU::OpName::src1, AMDGPU::OpName::vdst};
  static const AMDGPU::OpName VOPDFMAMKOpsX[4] = {
      AMDGPU::OpName::src0X, AMDGPU::OpName::NUM_OPERAND_NAMES,
      AMDGPU::OpName::vsrc1X, AMDGPU::OpName::vdstX};
  static const AMDGPU::OpName VOPDFMAMKOpsY[4] = {
      AMDGPU::OpName::src0Y, AMDGPU::OpName::NUM_OPERAND_NAMES,
      AMDGPU::OpName::vsrc1Y, AMDGPU::OpName::vdstY};
  unsigned TSFlags = Desc.TSFlags;

  switch (Desc.getOpcode()) {
  case AMDGPU::V_WMMA_LD_SCALE_PAIRED_B32:
  case AMDGPU::V_WMMA_LD_SCALE_PAIRED_B32_gfx1250:
  case AMDGPU::V_WMMA_LD_SCALE16_PAIRED_B64:
  case AMDGPU::V_WMMA_LD_SCALE16_PAIRED_B64_gfx1250:

  case AMDGPU::V_FMAMK_F16:
  case AMDGPU::V_FMAMK_F16_t16:
  case AMDGPU::V_FMAMK_F16_t16_gfx12:
  case AMDGPU::V_FMAMK_F16_fake16:
  case AMDGPU::V_FMAMK_F16_fake16_gfx12:
  case AMDGPU::V_FMAMK_F32:
  case AMDGPU::V_FMAMK_F32_gfx12:
  case AMDGPU::V_FMAMK_F64:
  case AMDGPU::V_FMAMK_F64_gfx1250:
    return {VOP2MADMKOps, nullptr};

    return {VOPOps, nullptr};

    return {VDSOps, nullptr};

    return {FLATOps, nullptr};

    return {BUFOps, nullptr};

    return {VIMGOps, nullptr};

    return {(OpX == AMDGPU::V_FMAMK_F32) ? VOPDFMAMKOpsX : VOPDOpsX,
            (OpY == AMDGPU::V_FMAMK_F32) ? VOPDFMAMKOpsY : VOPDOpsY};

                   " these instructions are not expected on gfx1250");
  for (auto OpName : {OpName::vdst, OpName::src0, OpName::src1, OpName::src2}) {

    if (RegClass == AMDGPU::VReg_64RegClassID ||
        RegClass == AMDGPU::VReg_64_Align2RegClassID)

  case AMDGPU::V_MUL_LO_U32_e64:
  case AMDGPU::V_MUL_LO_U32_e64_dpp:
  case AMDGPU::V_MUL_LO_U32_e64_dpp_gfx1250:
  case AMDGPU::V_MUL_HI_U32_e64:
  case AMDGPU::V_MUL_HI_U32_e64_dpp:
  case AMDGPU::V_MUL_HI_U32_e64_dpp_gfx1250:
  case AMDGPU::V_MUL_HI_I32_e64:
  case AMDGPU::V_MUL_HI_I32_e64_dpp:
  case AMDGPU::V_MUL_HI_I32_e64_dpp_gfx1250:
  case AMDGPU::V_MAD_U32_e64:
  case AMDGPU::V_MAD_U32_e64_dpp:
  case AMDGPU::V_MAD_U32_e64_dpp_gfx1250:

  if (!ST.hasFeature(AMDGPU::FeatureDPALU_DPP))

  return ST.hasFeature(AMDGPU::FeatureGFX1250Insts);

  if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize32768))
  if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize65536))
  if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize163840))
  if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize327680))

  case AMDGPU::V_PK_ADD_F32:
  case AMDGPU::V_PK_ADD_F32_gfx12:
  case AMDGPU::V_PK_MUL_F32:
  case AMDGPU::V_PK_MUL_F32_gfx12:
  case AMDGPU::V_PK_FMA_F32:
  case AMDGPU::V_PK_FMA_F32_gfx12:
    OS << EncoNoCluster << ',' << EncoNoCluster << ',' << EncoNoCluster;
    return Buffer.c_str();

    OS << EncoVariableDims << ',' << EncoVariableDims << ','
       << EncoVariableDims;
    return Buffer.c_str();

  OS << Dims[0] << ',' << Dims[1] << ',' << Dims[2];
  return Buffer.c_str();

  std::optional<SmallVector<unsigned>> Attr =

  if (!Attr.has_value())

  A.Dims = {(*Attr)[0], (*Attr)[1], (*Attr)[2]};

    OS << "Unsupported";
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static llvm::cl::opt< unsigned > DefaultAMDHSACodeObjectVersion("amdhsa-code-object-version", llvm::cl::Hidden, llvm::cl::init(llvm::AMDGPU::AMDHSA_COV6), llvm::cl::desc("Set default AMDHSA Code Object Version (module flag " "or asm directive still take priority if present)"))
Provides AMDGPU specific target descriptions.
MC layer struct for AMDGPUMCKernelCodeT, provides MCExpr functionality where required.
@ AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Register const TargetRegisterInfo * TRI
#define S_00B848_MEM_ORDERED(x)
#define S_00B848_WGP_MODE(x)
#define S_00B848_FWD_PROGRESS(x)
unsigned unsigned DefaultVal
static const int BlockSize
static const uint32_t IV[8]
static ClusterDimsAttr get(const Function &F)
ClusterDimsAttr()=default
std::string to_string() const
const std::array< unsigned, 3 > & getDims() const
bool isSramEccSupported() const
void setTargetIDFromFeaturesString(StringRef FS)
TargetIDSetting getXnackSetting() const
AMDGPUTargetID(const MCSubtargetInfo &STI)
bool isXnackSupported() const
void setTargetIDFromTargetIDStream(StringRef TargetID)
std::string toString() const
TargetIDSetting getSramEccSetting() const
unsigned getIndexInParsedOperands(unsigned CompOprIdx) const
unsigned getIndexOfDstInParsedOperands() const
unsigned getIndexOfSrcInParsedOperands(unsigned CompSrcIdx) const
int getBitOp3OperandIdx() const
unsigned getCompParsedSrcOperandsNum() const
std::optional< unsigned > getInvalidCompOperandIndex(std::function< MCRegister(unsigned, unsigned)> GetRegIdx, const MCRegisterInfo &MRI, bool SkipSrc=false, bool AllowSameVGPR=false, bool VOPD3=false) const
std::array< MCRegister, Component::MAX_OPR_NUM > RegIndices
Represents the counter values to wait for in an s_waitcnt instruction.
unsigned get(InstCounterType T) const
void set(InstCounterType T, unsigned Val)
This class represents an incoming formal argument to a Function.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
constexpr bool test(unsigned I) const
unsigned getAddressSpace() const
This is an important class for using LLVM in a threaded context.
A helper class to return the specified delimiter string after the first invocation of operator String...
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool mayStore() const
Return true if this instruction could possibly modify memory.
bool mayLoad() const
Return true if this instruction could possibly read memory.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
unsigned getOpcode() const
Return the opcode number for this descriptor.
Interface to description of machine instruction set.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
int16_t getOpRegClassID(const MCOperandInfo &OpInfo, unsigned HwModeId) const
Return the ID of the register class to use for OpInfo, for the active HwMode HwModeId.
This holds information about one operand of a machine instruction, indicating the register class for ...
MCRegisterClass - Base class of TargetRegisterClass.
unsigned getID() const
getID() - Return the register class ID number.
MCRegister getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
constexpr unsigned id() const
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const Triple & getTargetTriple() const
const FeatureBitset & getFeatureBits() const
const MDOperand & getOperand(unsigned I) const
unsigned getNumOperands() const
Return number of MDNode operands.
A Module instance is used to store all the information related to an LLVM module.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
A wrapper around a string literal that serves as a proxy for constructing global tables of StringRefs...
StringRef - Represent a constant reference to a string, i.e.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr size_t size() const
size - Get the string size.
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Manages the enabling and disabling of subtarget specific features.
const std::vector< std::string > & getFeatures() const
Returns the vector of individual subtarget features.
Triple - Helper class for working with autoconf configuration names.
OSType getOS() const
Get the parsed operating system type of this triple.
ArchType getArch() const
Get the parsed architecture type of this triple.
bool isAMDGCN() const
Tests whether the target is AMDGCN.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
This class implements an extremely fast bulk output stream that can only output to a stream.
A raw_ostream that writes to an std::string.
std::string & str()
Returns the string's reference.
A raw_ostream that writes to an SmallVector or SmallString.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ CONSTANT_ADDRESS_32BIT
Address space for 32-bit constant memory.
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
unsigned decodeFieldVaVcc(unsigned Encoded)
unsigned encodeFieldVaVcc(unsigned Encoded, unsigned VaVcc)
unsigned decodeFieldHoldCnt(unsigned Encoded, const IsaVersion &Version)
unsigned getVaVccBitMask()
bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
unsigned encodeFieldHoldCnt(unsigned Encoded, unsigned HoldCnt, const IsaVersion &Version)
unsigned getVmVsrcBitMask()
unsigned encodeFieldVaSsrc(unsigned Encoded, unsigned VaSsrc)
unsigned encodeFieldVaVdst(unsigned Encoded, unsigned VaVdst)
unsigned decodeFieldSaSdst(unsigned Encoded)
unsigned getHoldCntBitMask(const IsaVersion &Version)
unsigned decodeFieldVaSdst(unsigned Encoded)
unsigned getVaVdstBitMask()
unsigned getVaSsrcBitMask()
unsigned encodeFieldVmVsrc(unsigned Encoded, unsigned VmVsrc)
unsigned getVaSdstBitMask()
unsigned decodeFieldVaSsrc(unsigned Encoded)
int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst)
const CustomOperandVal DepCtrInfo[]
bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
unsigned decodeFieldVaVdst(unsigned Encoded)
unsigned getSaSdstBitMask()
int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI)
unsigned decodeFieldVmVsrc(unsigned Encoded)
unsigned encodeFieldVaSdst(unsigned Encoded, unsigned VaSdst)
bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI)
static constexpr ExpTgt ExpTgtInfo[]
bool getTgtName(unsigned Id, StringRef &Name, int &Index)
unsigned getTgtId(const StringRef Name)
@ ET_DUAL_SRC_BLEND_MAX_IDX
constexpr uint32_t VersionMinor
HSA metadata minor version.
constexpr uint32_t VersionMajor
HSA metadata major version.
@ COMPLETION_ACTION_OFFSET
@ MULTIGRID_SYNC_ARG_OFFSET
unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI, std::optional< bool > EnableWavefrontSize32)
@ FIXED_NUM_SGPRS_FOR_INIT_BUG
unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI)
unsigned getArchVGPRAllocGranule()
For subtargets with a unified VGPR file and mixed ArchVGPR/AGPR usage, returns the allocation granule...
unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getWavefrontSize(const MCSubtargetInfo *STI)
unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI, unsigned NumVGPRs, unsigned DynamicVGPRBlockSize)
unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI)
unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI)
unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed, bool FlatScrUsed, bool XNACKUsed)
unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI)
unsigned getLocalMemorySize(const MCSubtargetInfo *STI)
unsigned getAddressableLocalMemorySize(const MCSubtargetInfo *STI)
unsigned getEUsPerCU(const MCSubtargetInfo *STI)
unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI)
unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU)
static TargetIDSetting getTargetIDSettingFromFeatureString(StringRef FeatureString)
unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI)
unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI, unsigned DynamicVGPRBlockSize, std::optional< bool > EnableWavefrontSize32)
unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, bool Addressable)
unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs)
unsigned getMinWavesPerEU(const MCSubtargetInfo *STI)
unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, unsigned DynamicVGPRBlockSize)
unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI)
unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, unsigned DynamicVGPRBlockSize)
unsigned getAllocatedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, unsigned DynamicVGPRBlockSize, std::optional< bool > EnableWavefrontSize32)
unsigned getEncodedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, std::optional< bool > EnableWavefrontSize32)
unsigned getOccupancyWithNumSGPRs(unsigned SGPRs, unsigned MaxWaves, AMDGPUSubtarget::Generation Gen)
static unsigned getGranulatedNumRegisterBlocks(unsigned NumRegs, unsigned Granule)
unsigned getAddressableNumArchVGPRs(const MCSubtargetInfo *STI)
unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI)
unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI, unsigned DynamicVGPRBlockSize)
uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId)
bool msgSupportsStream(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI)
void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId, uint16_t &StreamId, const MCSubtargetInfo &STI)
bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI)
bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId, const MCSubtargetInfo &STI, bool Strict)
StringRef getMsgOpName(int64_t MsgId, uint64_t Encoding, const MCSubtargetInfo &STI)
Map from an encoding to the symbolic name for a sendmsg operation.
static uint64_t getMsgIdMask(const MCSubtargetInfo &STI)
bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI)
bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI, bool Strict)
constexpr unsigned VOPD_VGPR_BANK_MASKS[]
constexpr unsigned COMPONENTS_NUM
constexpr unsigned VOPD3_VGPR_BANK_MASKS[]
bool isPackedFP32Inst(unsigned Opc)
bool isGCN3Encoding(const MCSubtargetInfo &STI)
bool isInlinableLiteralBF16(int16_t Literal, bool HasInv2Pi)
bool isGFX10_BEncoding(const MCSubtargetInfo &STI)
bool isInlineValue(MCRegister Reg)
bool isGFX10_GFX11(const MCSubtargetInfo &STI)
bool isInlinableLiteralV216(uint32_t Literal, uint8_t OpType)
bool isPKFMACF16InlineConstant(uint32_t Literal, bool IsGFX11Plus)
LLVM_READONLY const MIMGInfo * getMIMGInfo(unsigned Opc)
void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt)
Decodes Vmcnt, Expcnt and Lgkmcnt from the given Waitcnt for the given isa Version, and writes the decoded values into Vmcnt, Expcnt and Lgkmcnt respectively.
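A minimal usage sketch (STI is an assumed MCSubtargetInfo for the target, and Encoded is a placeholder for the raw immediate; both names are illustrative):

// Decode a raw waitcnt immediate into its three counter fields
// (declared in AMDGPUBaseInfo.h, namespace llvm::AMDGPU).
AMDGPU::IsaVersion Version = AMDGPU::getIsaVersion(STI.getCPU());
unsigned Vmcnt, Expcnt, Lgkmcnt;
AMDGPU::decodeWaitcnt(Version, Encoded, Vmcnt, Expcnt, Lgkmcnt);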
bool isInlinableLiteralFP16(int16_t Literal, bool HasInv2Pi)
bool isSGPR(MCRegister Reg, const MCRegisterInfo *TRI)
Is Reg a scalar register?
uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST, uint64_t ByteOffset)
Convert ByteOffset to dwords if the subtarget uses dword SMRD immediate offsets.
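A sketch of the intended use (ST is an assumed MCSubtargetInfo for the subtarget):

// 16 bytes encodes as 4 on subtargets with dword SMRD immediate
// offsets, and stays 16 where byte offsets are used.
uint64_t EncodedOffset = AMDGPU::convertSMRDOffsetUnits(ST, /*ByteOffset=*/16);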
static unsigned encodeStorecnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Storecnt)
MCRegister getMCReg(MCRegister Reg, const MCSubtargetInfo &STI)
If Reg is a pseudo reg, return the correct hardware register given STI otherwise return Reg.
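For example (a sketch; STI is an assumed MCSubtargetInfo), the FLAT_SCR pseudo register resolves to a different hardware register per generation:

// Map the generation-independent pseudo reg to the concrete one.
MCRegister HwReg = AMDGPU::getMCReg(AMDGPU::FLAT_SCR, STI);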
static bool hasSMEMByteOffset(const MCSubtargetInfo &ST)
bool isVOPCAsmOnly(unsigned Opc)
int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding, unsigned VDataDwords, unsigned VAddrDwords)
bool getMTBUFHasSrsrc(unsigned Opc)
std::optional< int64_t > getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST, int64_t ByteOffset)
bool getWMMAIsXDL(unsigned Opc)
uint8_t wmmaScaleF8F6F4FormatToNumRegs(unsigned Fmt)
static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr, int Size, unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
bool isGFX10Before1030(const MCSubtargetInfo &STI)
bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo)
Does this operand support only inlinable literals?
unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc)
const int OPR_ID_UNSUPPORTED
bool shouldEmitConstantsToTextSection(const Triple &TT)
bool isInlinableLiteralV2I16(uint32_t Literal)
bool isDPMACCInstruction(unsigned Opc)
int getMTBUFElements(unsigned Opc)
bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI)
static int encodeCustomOperandVal(const CustomOperandVal &Op, int64_t InputVal)
unsigned getTemporalHintType(const MCInstrDesc TID)
int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR, int32_t ArgNumVGPR)
iota_range< InstCounterType > inst_counter_types(InstCounterType MaxCounter)
bool isGFX10(const MCSubtargetInfo &STI)
bool isInlinableLiteralV2BF16(uint32_t Literal)
unsigned getMaxNumUserSGPRs(const MCSubtargetInfo &STI)
std::optional< unsigned > getInlineEncodingV216(bool IsFloat, uint32_t Literal)
FPType getFPDstSelType(unsigned Opc)
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST)
For pre-GFX12 FLAT instructions the offset must be positive; MSB is ignored and forced to zero.
bool hasA16(const MCSubtargetInfo &STI)
bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset, bool IsBuffer)
bool isGFX12Plus(const MCSubtargetInfo &STI)
unsigned getNSAMaxSize(const MCSubtargetInfo &STI, bool HasSampler)
const MCRegisterClass * getVGPRPhysRegClass(MCRegister Reg, const MCRegisterInfo &MRI)
bool hasPackedD16(const MCSubtargetInfo &STI)
unsigned getStorecntBitMask(const IsaVersion &Version)
unsigned getLdsDwGranularity(const MCSubtargetInfo &ST)
bool isGFX940(const MCSubtargetInfo &STI)
bool isInlinableLiteralV2F16(uint32_t Literal)
bool isHsaAbi(const MCSubtargetInfo &STI)
bool isGFX11(const MCSubtargetInfo &STI)
const int OPR_VAL_INVALID
bool getSMEMIsBuffer(unsigned Opc)
bool isGFX10_3_GFX11(const MCSubtargetInfo &STI)
bool isGFX13(const MCSubtargetInfo &STI)
bool hasValueInRangeLikeMetadata(const MDNode &MD, int64_t Val)
Checks if Val is inside MD, a !range-like metadata.
uint8_t mfmaScaleF8F6F4FormatToNumRegs(unsigned EncodingVal)
unsigned getVOPDOpcode(unsigned Opc, bool VOPD3)
bool isGroupSegment(const GlobalValue *GV)
LLVM_ABI IsaVersion getIsaVersion(StringRef GPU)
bool getMTBUFHasSoffset(unsigned Opc)
bool hasXNACK(const MCSubtargetInfo &STI)
bool isValid32BitLiteral(uint64_t Val, bool IsFP64)
static unsigned getCombinedCountBitMask(const IsaVersion &Version, bool IsStore)
CanBeVOPD getCanBeVOPD(unsigned Opc, unsigned EncodingFamily, bool VOPD3)
unsigned encodeWaitcnt(const IsaVersion &Version, unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt)
Encodes Vmcnt, Expcnt and Lgkmcnt into Waitcnt for given isa Version.
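A hedged sketch of composing an immediate, with Version obtained from getIsaVersion as above: start fully relaxed (every counter at its bit-mask maximum, i.e. "don't wait"), then tighten one field:

unsigned Waitcnt = AMDGPU::encodeWaitcnt(Version,
                                         AMDGPU::getVmcntBitMask(Version),
                                         AMDGPU::getExpcntBitMask(Version),
                                         AMDGPU::getLgkmcntBitMask(Version));
// Now require all outstanding vector memory operations to complete.
Waitcnt = AMDGPU::encodeVmcnt(Version, Waitcnt, 0);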
bool isVOPC64DPP(unsigned Opc)
int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements)
bool getMAIIsGFX940XDL(unsigned Opc)
bool isSI(const MCSubtargetInfo &STI)
unsigned getDefaultAMDHSACodeObjectVersion()
bool isReadOnlySegment(const GlobalValue *GV)
bool isArgPassedInSGPR(const Argument *A)
bool isIntrinsicAlwaysUniform(unsigned IntrID)
int getMUBUFBaseOpcode(unsigned Opc)
unsigned getAMDHSACodeObjectVersion(const Module &M)
unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt)
unsigned getWaitcntBitMask(const IsaVersion &Version)
LLVM_READONLY bool hasNamedOperand(uint64_t Opcode, OpName NamedIdx)
bool getVOP3IsSingle(unsigned Opc)
bool isGFX9(const MCSubtargetInfo &STI)
bool isDPALU_DPP32BitOpc(unsigned Opc)
bool getVOP1IsSingle(unsigned Opc)
static bool isDwordAligned(uint64_t ByteOffset)
unsigned getVOPDEncodingFamily(const MCSubtargetInfo &ST)
bool isGFX10_AEncoding(const MCSubtargetInfo &STI)
bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this a KImm operand?
bool getHasColorExport(const Function &F)
int getMTBUFBaseOpcode(unsigned Opc)
bool isGFX90A(const MCSubtargetInfo &STI)
unsigned getSamplecntBitMask(const IsaVersion &Version)
unsigned getDefaultQueueImplicitArgPosition(unsigned CodeObjectVersion)
std::tuple< char, unsigned, unsigned > parseAsmPhysRegName(StringRef RegName)
Returns a valid charcode or 0 in the first entry if this is a valid physical register name.
bool hasSRAMECC(const MCSubtargetInfo &STI)
bool getHasDepthExport(const Function &F)
bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI)
bool getMUBUFHasVAddr(unsigned Opc)
bool isTrue16Inst(unsigned Opc)
unsigned getVGPREncodingMSBs(MCRegister Reg, const MCRegisterInfo &MRI)
std::pair< unsigned, unsigned > getVOPDComponents(unsigned VOPDOpcode)
bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi)
bool isGFX12(const MCSubtargetInfo &STI)
unsigned getInitialPSInputAddr(const Function &F)
unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Expcnt)
bool isAsyncStore(unsigned Opc)
unsigned getDynamicVGPRBlockSize(const Function &F)
unsigned getKmcntBitMask(const IsaVersion &Version)
MCRegister getVGPRWithMSBs(MCRegister Reg, unsigned MSBs, const MCRegisterInfo &MRI)
If Reg is a low VGPR, return the corresponding high VGPR with MSBs set.
unsigned getVmcntBitMask(const IsaVersion &Version)
bool isNotGFX10Plus(const MCSubtargetInfo &STI)
bool hasMAIInsts(const MCSubtargetInfo &STI)
unsigned getBitOp2(unsigned Opc)
bool isIntrinsicSourceOfDivergence(unsigned IntrID)
unsigned getXcntBitMask(const IsaVersion &Version)
bool isGenericAtomic(unsigned Opc)
const MFMA_F8F6F4_Info * getWMMA_F8F6F4_WithFormatArgs(unsigned FmtA, unsigned FmtB, unsigned F8F8Opcode)
Waitcnt decodeStorecntDscnt(const IsaVersion &Version, unsigned StorecntDscnt)
bool isGFX8Plus(const MCSubtargetInfo &STI)
LLVM_READNONE bool isInlinableIntLiteral(int64_t Literal)
Is this literal inlinable as an integer, i.e. not one of the values intended for floating-point operands?
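Illustration, relying on the hardware's inline integer range of -16..64:

bool A = AMDGPU::isInlinableIntLiteral(64);  // true: upper end of the range
bool B = AMDGPU::isInlinableIntLiteral(-16); // true: lower end of the range
bool C = AMDGPU::isInlinableIntLiteral(65);  // false: requires a literal constant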
unsigned getLgkmcntBitMask(const IsaVersion &Version)
bool getMUBUFTfe(unsigned Opc)
unsigned getBvhcntBitMask(const IsaVersion &Version)
bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST)
bool hasMIMG_R128(const MCSubtargetInfo &STI)
bool hasGFX10_3Insts(const MCSubtargetInfo &STI)
std::pair< const AMDGPU::OpName *, const AMDGPU::OpName * > getVGPRLoweringOperandTables(const MCInstrDesc &Desc)
bool hasG16(const MCSubtargetInfo &STI)
unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode, const MIMGDimInfo *Dim, bool IsA16, bool IsG16Supported)
int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements)
bool isGFX13Plus(const MCSubtargetInfo &STI)
unsigned getExpcntBitMask(const IsaVersion &Version)
bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI)
int32_t getMCOpcode(uint32_t Opcode, unsigned Gen)
bool getMUBUFHasSoffset(unsigned Opc)
bool isNotGFX11Plus(const MCSubtargetInfo &STI)
bool isGFX11Plus(const MCSubtargetInfo &STI)
std::optional< unsigned > getInlineEncodingV2F16(uint32_t Literal)
bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this a floating-point operand?
std::tuple< char, unsigned, unsigned > parseAsmConstraintPhysReg(StringRef Constraint)
Returns a valid charcode or 0 in the first entry if this is a valid physical register constraint.
unsigned getHostcallImplicitArgPosition(unsigned CodeObjectVersion)
static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr, int Size, const MCSubtargetInfo &STI)
static unsigned encodeLoadcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Loadcnt)
bool isGFX10Plus(const MCSubtargetInfo &STI)
static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size, unsigned Code, int &Idx, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
static bool isValidRegPrefix(char C)
std::optional< int64_t > getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset, bool IsBuffer, bool HasSOffset)
bool isGlobalSegment(const GlobalValue *GV)
int64_t encode32BitLiteral(int64_t Imm, OperandType Type, bool IsLit)
@ OPERAND_KIMM32
Operand with 32-bit immediate that uses the constant bus.
@ OPERAND_REG_INLINE_C_LAST
@ OPERAND_REG_INLINE_C_FP64
@ OPERAND_REG_INLINE_C_BF16
@ OPERAND_REG_INLINE_C_V2BF16
@ OPERAND_REG_IMM_V2INT16
@ OPERAND_REG_IMM_INT32
Operands with register, 32-bit, or 64-bit immediate.
@ OPERAND_REG_INLINE_AC_FIRST
@ OPERAND_REG_IMM_V2FP16_SPLAT
@ OPERAND_REG_IMM_NOINLINE_V2FP16
@ OPERAND_REG_INLINE_C_V2FP16
@ OPERAND_REG_INLINE_AC_INT32
Operands with an AccVGPR register or inline constant.
@ OPERAND_REG_INLINE_AC_FP32
@ OPERAND_REG_IMM_V2INT32
@ OPERAND_REG_INLINE_C_FIRST
@ OPERAND_REG_INLINE_C_FP32
@ OPERAND_REG_INLINE_AC_LAST
@ OPERAND_REG_INLINE_C_INT32
@ OPERAND_REG_INLINE_C_V2INT16
@ OPERAND_REG_INLINE_AC_FP64
@ OPERAND_REG_INLINE_C_FP16
@ OPERAND_INLINE_SPLIT_BARRIER_INT32
std::optional< unsigned > getPKFMACF16InlineEncoding(uint32_t Literal, bool IsGFX11Plus)
raw_ostream & operator<<(raw_ostream &OS, const AMDGPU::Waitcnt &Wait)
void initDefaultAMDKernelCodeT(AMDGPUMCKernelCodeT &KernelCode, const MCSubtargetInfo *STI)
bool isNotGFX9Plus(const MCSubtargetInfo &STI)
bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
bool hasGDS(const MCSubtargetInfo &STI)
bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset)
bool isGFX9Plus(const MCSubtargetInfo &STI)
bool hasDPPSrc1SGPR(const MCSubtargetInfo &STI)
const int OPR_ID_DUPLICATE
bool isVOPD(unsigned Opc)
VOPD::InstInfo getVOPDInstInfo(const MCInstrDesc &OpX, const MCInstrDesc &OpY)
unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Vmcnt)
unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt)
bool isCvt_F32_Fp8_Bf8_e64(unsigned Opc)
Waitcnt decodeLoadcntDscnt(const IsaVersion &Version, unsigned LoadcntDscnt)
std::optional< unsigned > getInlineEncodingV2I16(uint32_t Literal)
unsigned getRegBitWidth(const TargetRegisterClass &RC)
Get the size in bits of a register from the register class RC.
bool isGFX1170(const MCSubtargetInfo &STI)
static unsigned encodeStorecntDscnt(const IsaVersion &Version, unsigned Storecnt, unsigned Dscnt)
bool isGFX1250(const MCSubtargetInfo &STI)
const MIMGBaseOpcodeInfo * getMIMGBaseOpcode(unsigned Opc)
bool isVI(const MCSubtargetInfo &STI)
bool isTensorStore(unsigned Opc)
bool getMUBUFIsBufferInv(unsigned Opc)
bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode)
MCRegister mc2PseudoReg(MCRegister Reg)
Convert hardware register Reg to a pseudo register.
std::optional< unsigned > getInlineEncodingV2BF16(uint32_t Literal)
static int encodeCustomOperand(const CustomOperandVal *Opr, int Size, const StringRef Name, int64_t InputVal, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
unsigned hasKernargPreload(const MCSubtargetInfo &STI)
bool supportsWGP(const MCSubtargetInfo &STI)
bool isCI(const MCSubtargetInfo &STI)
unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Lgkmcnt)
bool getVOP2IsSingle(unsigned Opc)
bool getMAIIsDGEMM(unsigned Opc)
Returns true if the MAI operation is a double-precision GEMM.
LLVM_READONLY const MIMGBaseOpcodeInfo * getMIMGBaseOpcodeInfo(unsigned BaseOpcode)
unsigned getCompletionActionImplicitArgPosition(unsigned CodeObjectVersion)
SmallVector< unsigned > getIntegerVecAttribute(const Function &F, StringRef Name, unsigned Size, unsigned DefaultVal)
bool isGFX1250Plus(const MCSubtargetInfo &STI)
int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels)
bool isNotGFX12Plus(const MCSubtargetInfo &STI)
bool getMTBUFHasVAddr(unsigned Opc)
unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt)
uint8_t getELFABIVersion(const Triple &T, unsigned CodeObjectVersion)
std::pair< unsigned, unsigned > getIntegerPairAttribute(const Function &F, StringRef Name, std::pair< unsigned, unsigned > Default, bool OnlyFirstRequired)
unsigned getLoadcntBitMask(const IsaVersion &Version)
bool isInlinableLiteralI16(int32_t Literal, bool HasInv2Pi)
bool hasVOPD(const MCSubtargetInfo &STI)
int getVOPDFull(unsigned OpX, unsigned OpY, unsigned EncodingFamily, bool VOPD3)
static unsigned encodeDscnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Dscnt)
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi)
Is this literal inlinable.
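A sketch: 64-bit inline constants cover the small integers plus a few FP bit patterns such as 1.0:

bool I = AMDGPU::isInlinableLiteral64(42, /*HasInv2Pi=*/true);                 // true: small integer
bool F = AMDGPU::isInlinableLiteral64(0x3FF0000000000000, /*HasInv2Pi=*/true); // true: double 1.0
bool N = AMDGPU::isInlinableLiteral64(0x123456789ABCDEF0, /*HasInv2Pi=*/true); // false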
const MFMA_F8F6F4_Info * getMFMA_F8F6F4_WithFormatArgs(unsigned CBSZ, unsigned BLGP, unsigned F8F8Opcode)
unsigned getMultigridSyncArgImplicitArgPosition(unsigned CodeObjectVersion)
bool isGFX9_GFX10_GFX11(const MCSubtargetInfo &STI)
bool isGFX9_GFX10(const MCSubtargetInfo &STI)
int getMUBUFElements(unsigned Opc)
static unsigned encodeLoadcntDscnt(const IsaVersion &Version, unsigned Loadcnt, unsigned Dscnt)
const GcnBufferFormatInfo * getGcnBufferFormatInfo(uint8_t BitsPerComp, uint8_t NumComponents, uint8_t NumFormat, const MCSubtargetInfo &STI)
unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc)
bool isPermlane16(unsigned Opc)
bool getMUBUFHasSrsrc(unsigned Opc)
unsigned getDscntBitMask(const IsaVersion &Version)
bool hasAny64BitVGPROperands(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
@ AMDGPU_VS
Used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (vertex shader if tess...
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
@ AMDGPU_Gfx
Used for AMD graphics targets.
@ AMDGPU_CS_ChainPreserve
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ AMDGPU_ES
Used for AMDPAL shader stage before geometry shader if geometry is in use.
@ AMDGPU_LS
Used for AMDPAL vertex shader if tessellation is in use.
@ C
The default llvm calling convention, compatible with C.
@ ELFABIVERSION_AMDGPU_HSA_V4
@ ELFABIVERSION_AMDGPU_HSA_V5
@ ELFABIVERSION_AMDGPU_HSA_V6
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
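A small usage sketch (the wrapper lives in llvm/ADT/STLExtras.h; SmallVector in llvm/ADT/SmallVector.h):

SmallVector<unsigned, 4> Widths = {32, 64, 96, 128};
bool AllDwords = llvm::all_of(Widths, [](unsigned W) { return W % 32 == 0; }); // true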
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
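For instance (from llvm/Support/MathExtras.h):

bool FitsS16 = llvm::isInt<16>(32767); // true
bool TooBig  = llvm::isInt<16>(32768); // false: needs 17 bits signed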
auto enum_seq(EnumT Begin, EnumT End)
Iterate over an enum type from Begin up to, but not including, End.
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
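For instance:

unsigned A = llvm::alignDown(37, 8);    // 32
unsigned B = llvm::alignDown(37, 8, 3); // 35: largest value <= 37 that is 3 mod 8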
std::string utostr(uint64_t X, bool isNeg=false)
constexpr auto equal_to(T &&Arg)
Functor variant of std::equal_to that can be used as a UnaryPredicate in functional algorithms like a...
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64 bit value.
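Together with Hi_32 above, this splits a 64-bit value:

uint64_t V = 0x1122334455667788ULL;
uint32_t Hi = llvm::Hi_32(V); // 0x11223344
uint32_t Lo = llvm::Lo_32(V); // 0x55667788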
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
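For instance, when computing register-block counts at some granule:

unsigned Blocks = llvm::divideCeil(100, 64); // 2: 100 registers occupy two 64-register blocks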
To bit_cast(const From &from) noexcept
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
@ AlwaysUniform
The result values are always uniform.
@ Default
The result values are uniform if and only if all operands are uniform.
AMD Kernel Code Object (amd_kernel_code_t).
uint16_t amd_machine_version_major
uint16_t amd_machine_kind
uint16_t amd_machine_version_stepping
uint8_t private_segment_alignment
int64_t kernel_code_entry_byte_offset
uint32_t amd_kernel_code_version_major
uint16_t amd_machine_version_minor
uint8_t group_segment_alignment
uint8_t kernarg_segment_alignment
uint32_t amd_kernel_code_version_minor
uint64_t compute_pgm_resource_registers
Instruction set architecture version.