AMDGPUBaseInfo.cpp
1//===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AMDGPUBaseInfo.h"
10#include "AMDGPU.h"
11#include "AMDGPUAsmUtils.h"
12#include "AMDKernelCodeT.h"
17#include "llvm/IR/Attributes.h"
18#include "llvm/IR/Constants.h"
19#include "llvm/IR/Function.h"
20#include "llvm/IR/GlobalValue.h"
21#include "llvm/IR/IntrinsicsAMDGPU.h"
22#include "llvm/IR/IntrinsicsR600.h"
23#include "llvm/IR/LLVMContext.h"
24#include "llvm/IR/Metadata.h"
25#include "llvm/MC/MCInstrInfo.h"
30#include <optional>
31
32#define GET_INSTRINFO_NAMED_OPS
33#define GET_INSTRMAP_INFO
34#include "AMDGPUGenInstrInfo.inc"
35
36static llvm::cl::opt<unsigned> DefaultAMDHSACodeObjectVersion(
37 "amdhsa-code-object-version", llvm::cl::Hidden,
39 llvm::cl::desc("Set default AMDHSA Code Object Version (module flag "
40 "or asm directive still take priority if present)"));
41
42namespace {
43
44/// \returns Bit mask for given bit \p Shift and bit \p Width.
45unsigned getBitMask(unsigned Shift, unsigned Width) {
46 return ((1 << Width) - 1) << Shift;
47}
48
49/// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
50///
51/// \returns Packed \p Dst.
52unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
53 unsigned Mask = getBitMask(Shift, Width);
54 return ((Src << Shift) & Mask) | (Dst & ~Mask);
55}
56
57/// Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
58///
59/// \returns Unpacked bits.
60unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
61 return (Src & getBitMask(Shift, Width)) >> Shift;
62}
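// Worked example (added for exposition; not part of the upstream source):
// with Shift = 4 and Width = 3, getBitMask(4, 3) == 0x70. Packing the value 5
// into a destination of 0xFFFF gives packBits(5, 0xFFFF, 4, 3) == 0xFFDF, and
// unpackBits(0xFFDF, 4, 3) == 5 recovers it.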
63
64/// \returns Vmcnt bit shift (lower bits).
65unsigned getVmcntBitShiftLo(unsigned VersionMajor) {
66 return VersionMajor >= 11 ? 10 : 0;
67}
68
69/// \returns Vmcnt bit width (lower bits).
70unsigned getVmcntBitWidthLo(unsigned VersionMajor) {
71 return VersionMajor >= 11 ? 6 : 4;
72}
73
74/// \returns Expcnt bit shift.
75unsigned getExpcntBitShift(unsigned VersionMajor) {
76 return VersionMajor >= 11 ? 0 : 4;
77}
78
79/// \returns Expcnt bit width.
80unsigned getExpcntBitWidth(unsigned VersionMajor) { return 3; }
81
82/// \returns Lgkmcnt bit shift.
83unsigned getLgkmcntBitShift(unsigned VersionMajor) {
84 return VersionMajor >= 11 ? 4 : 8;
85}
86
87/// \returns Lgkmcnt bit width.
88unsigned getLgkmcntBitWidth(unsigned VersionMajor) {
89 return VersionMajor >= 10 ? 6 : 4;
90}
91
92/// \returns Vmcnt bit shift (higher bits).
93unsigned getVmcntBitShiftHi(unsigned VersionMajor) { return 14; }
94
95/// \returns Vmcnt bit width (higher bits).
96unsigned getVmcntBitWidthHi(unsigned VersionMajor) {
97 return (VersionMajor == 9 || VersionMajor == 10) ? 2 : 0;
98}
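// Taken together, the shift/width helpers above give the S_WAITCNT layout
// (summary added for exposition): on GFX9/GFX10 the immediate holds
// vmcnt[3:0] in bits [3:0], expcnt in bits [6:4], lgkmcnt starting at bit 8
// (4 bits on GFX9, 6 bits on GFX10) and vmcnt[5:4] in bits [15:14]; on GFX11
// expcnt moves to bits [2:0], lgkmcnt to bits [9:4] and vmcnt to bits [15:10].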
99
100/// \returns Loadcnt bit width
101unsigned getLoadcntBitWidth(unsigned VersionMajor) {
102 return VersionMajor >= 12 ? 6 : 0;
103}
104
105/// \returns Samplecnt bit width.
106unsigned getSamplecntBitWidth(unsigned VersionMajor) {
107 return VersionMajor >= 12 ? 6 : 0;
108}
109
110/// \returns Bvhcnt bit width.
111unsigned getBvhcntBitWidth(unsigned VersionMajor) {
112 return VersionMajor >= 12 ? 3 : 0;
113}
114
115/// \returns Dscnt bit width.
116unsigned getDscntBitWidth(unsigned VersionMajor) {
117 return VersionMajor >= 12 ? 6 : 0;
118}
119
120/// \returns Dscnt bit shift in combined S_WAIT instructions.
121unsigned getDscntBitShift(unsigned VersionMajor) { return 0; }
122
123/// \returns Storecnt or Vscnt bit width, depending on VersionMajor.
124unsigned getStorecntBitWidth(unsigned VersionMajor) {
125 return VersionMajor >= 10 ? 6 : 0;
126}
127
128/// \returns Kmcnt bit width.
129unsigned getKmcntBitWidth(unsigned VersionMajor) {
130 return VersionMajor >= 12 ? 5 : 0;
131}
132
133/// \returns Xcnt bit width.
134unsigned getXcntBitWidth(unsigned VersionMajor, unsigned VersionMinor) {
135 return VersionMajor == 12 && VersionMinor == 5 ? 6 : 0;
136}
137
138/// \returns shift for Loadcnt/Storecnt in combined S_WAIT instructions.
139unsigned getLoadcntStorecntBitShift(unsigned VersionMajor) {
140 return VersionMajor >= 12 ? 8 : 0;
141}
142
143/// \returns VaSdst bit width
144inline unsigned getVaSdstBitWidth() { return 3; }
145
146/// \returns VaSdst bit shift
147inline unsigned getVaSdstBitShift() { return 9; }
148
149/// \returns VmVsrc bit width
150inline unsigned getVmVsrcBitWidth() { return 3; }
151
152/// \returns VmVsrc bit shift
153inline unsigned getVmVsrcBitShift() { return 2; }
154
155/// \returns VaVdst bit width
156inline unsigned getVaVdstBitWidth() { return 4; }
157
158/// \returns VaVdst bit shift
159inline unsigned getVaVdstBitShift() { return 12; }
160
161/// \returns VaVcc bit width
162inline unsigned getVaVccBitWidth() { return 1; }
163
164/// \returns VaVcc bit shift
165inline unsigned getVaVccBitShift() { return 1; }
166
167/// \returns SaSdst bit width
168inline unsigned getSaSdstBitWidth() { return 1; }
169
170/// \returns SaSdst bit shift
171inline unsigned getSaSdstBitShift() { return 0; }
172
173/// \returns VaSsrc width
174inline unsigned getVaSsrcBitWidth() { return 1; }
175
176/// \returns VaSsrc bit shift
177inline unsigned getVaSsrcBitShift() { return 8; }
178
179/// \returns HoldCnt bit width
180inline unsigned getHoldCntWidth(unsigned VersionMajor, unsigned VersionMinor) {
181 static constexpr const unsigned MinMajor = 10;
182 static constexpr const unsigned MinMinor = 3;
183 return std::tie(VersionMajor, VersionMinor) >= std::tie(MinMajor, MinMinor)
184 ? 1
185 : 0;
186}
187
188/// \returns HoldCnt bit shift
189inline unsigned getHoldCntBitShift() { return 7; }
190
191} // end anonymous namespace
192
193namespace llvm {
194
195namespace AMDGPU {
196
197/// \returns true if the target supports signed immediate offset for SMRD
198/// instructions.
199bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST) {
200 return isGFX9Plus(ST);
201}
202
203/// \returns True if \p STI is AMDHSA.
204bool isHsaAbi(const MCSubtargetInfo &STI) {
205 return STI.getTargetTriple().getOS() == Triple::AMDHSA;
206}
207
208unsigned getAMDHSACodeObjectVersion(const Module &M) {
209 if (auto *Ver = mdconst::extract_or_null<ConstantInt>(
210 M.getModuleFlag("amdhsa_code_object_version"))) {
211 return (unsigned)Ver->getZExtValue() / 100;
212 }
213
214 return getDefaultAMDHSACodeObjectVersion();
215}
216
216
217unsigned getDefaultAMDHSACodeObjectVersion() {
218 return DefaultAMDHSACodeObjectVersion;
219}
220
221unsigned getAMDHSACodeObjectVersion(unsigned ABIVersion) {
222 switch (ABIVersion) {
223 case ELF::ELFABIVERSION_AMDGPU_HSA_V4:
224 return 4;
225 case ELF::ELFABIVERSION_AMDGPU_HSA_V5:
226 return 5;
227 case ELF::ELFABIVERSION_AMDGPU_HSA_V6:
228 return 6;
229 default:
230 return getDefaultAMDHSACodeObjectVersion();
231 }
232}
233
234uint8_t getELFABIVersion(const Triple &T, unsigned CodeObjectVersion) {
235 if (T.getOS() != Triple::AMDHSA)
236 return 0;
237
238 switch (CodeObjectVersion) {
239 case 4:
240 return ELF::ELFABIVERSION_AMDGPU_HSA_V4;
241 case 5:
242 return ELF::ELFABIVERSION_AMDGPU_HSA_V5;
243 case 6:
244 return ELF::ELFABIVERSION_AMDGPU_HSA_V6;
245 default:
246 report_fatal_error("Unsupported AMDHSA Code Object Version " +
247 Twine(CodeObjectVersion));
248 }
249}
250
251unsigned getMultigridSyncArgImplicitArgPosition(unsigned CodeObjectVersion) {
252 switch (CodeObjectVersion) {
253 case AMDHSA_COV4:
254 return 48;
255 case AMDHSA_COV5:
256 case AMDHSA_COV6:
257 default:
258 return AMDGPU::ImplicitArg::MULTIGRID_SYNC_ARG_OFFSET;
259 }
260}
261
262// FIXME: All such magic numbers about the ABI should be in a
263// central TD file.
264unsigned getHostcallImplicitArgPosition(unsigned CodeObjectVersion) {
265 switch (CodeObjectVersion) {
266 case AMDHSA_COV4:
267 return 24;
268 case AMDHSA_COV5:
269 case AMDHSA_COV6:
270 default:
271 return AMDGPU::ImplicitArg::HOSTCALL_PTR_OFFSET;
272 }
273}
274
275unsigned getDefaultQueueImplicitArgPosition(unsigned CodeObjectVersion) {
276 switch (CodeObjectVersion) {
277 case AMDHSA_COV4:
278 return 32;
279 case AMDHSA_COV5:
280 case AMDHSA_COV6:
281 default:
282 return AMDGPU::ImplicitArg::DEFAULT_QUEUE_OFFSET;
283 }
284}
285
286unsigned getCompletionActionImplicitArgPosition(unsigned CodeObjectVersion) {
287 switch (CodeObjectVersion) {
288 case AMDHSA_COV4:
289 return 40;
290 case AMDHSA_COV5:
291 case AMDHSA_COV6:
292 default:
293 return AMDGPU::ImplicitArg::COMPLETION_ACTION_OFFSET;
294 }
295}
296
297#define GET_MIMGBaseOpcodesTable_IMPL
298#define GET_MIMGDimInfoTable_IMPL
299#define GET_MIMGInfoTable_IMPL
300#define GET_MIMGLZMappingTable_IMPL
301#define GET_MIMGMIPMappingTable_IMPL
302#define GET_MIMGBiasMappingTable_IMPL
303#define GET_MIMGOffsetMappingTable_IMPL
304#define GET_MIMGG16MappingTable_IMPL
305#define GET_MAIInstInfoTable_IMPL
306#define GET_WMMAInstInfoTable_IMPL
307#include "AMDGPUGenSearchableTables.inc"
308
309int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
310 unsigned VDataDwords, unsigned VAddrDwords) {
311 const MIMGInfo *Info =
312 getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding, VDataDwords, VAddrDwords);
313 return Info ? Info->Opcode : -1;
314}
315
316const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) {
317 const MIMGInfo *Info = getMIMGInfo(Opc);
318 return Info ? getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr;
319}
320
321int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
322 const MIMGInfo *OrigInfo = getMIMGInfo(Opc);
323 const MIMGInfo *NewInfo =
324 getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding,
325 NewChannels, OrigInfo->VAddrDwords);
326 return NewInfo ? NewInfo->Opcode : -1;
327}
328
329unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode,
330 const MIMGDimInfo *Dim, bool IsA16,
331 bool IsG16Supported) {
332 unsigned AddrWords = BaseOpcode->NumExtraArgs;
333 unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
334 (BaseOpcode->LodOrClampOrMip ? 1 : 0);
335 if (IsA16)
336 AddrWords += divideCeil(AddrComponents, 2);
337 else
338 AddrWords += AddrComponents;
339
340 // Note: For subtargets that support A16 but not G16, enabling A16 also
341 // enables 16 bit gradients.
342 // For subtargets that support A16 (operand) and G16 (done with a different
343 // instruction encoding), they are independent.
344
345 if (BaseOpcode->Gradients) {
346 if ((IsA16 && !IsG16Supported) || BaseOpcode->G16)
347 // There are two gradients per coordinate, we pack them separately.
348 // For the 3d case,
349 // we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv)
350 AddrWords += alignTo<2>(Dim->NumGradients / 2);
351 else
352 AddrWords += Dim->NumGradients;
353 }
354 return AddrWords;
355}
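// Worked example (hypothetical field values, for exposition only): a 2D
// sample with gradients where NumExtraArgs = 0, NumCoords = 2,
// NumGradients = 4 and no LOD/clamp/mip needs 2 + 4 = 6 address dwords by
// default. With IsA16 on a subtarget without G16, the coordinates pack into
// divideCeil(2, 2) = 1 dword and the gradients into alignTo<2>(4 / 2) = 2
// dwords, for 3 address dwords in total.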
356
367
376
381
386
390
394
398
405
413
418
419#define GET_FP4FP8DstByteSelTable_DECL
420#define GET_FP4FP8DstByteSelTable_IMPL
421
426
432
433#define GET_DPMACCInstructionTable_DECL
434#define GET_DPMACCInstructionTable_IMPL
435#define GET_MTBUFInfoTable_DECL
436#define GET_MTBUFInfoTable_IMPL
437#define GET_MUBUFInfoTable_DECL
438#define GET_MUBUFInfoTable_IMPL
439#define GET_SMInfoTable_DECL
440#define GET_SMInfoTable_IMPL
441#define GET_VOP1InfoTable_DECL
442#define GET_VOP1InfoTable_IMPL
443#define GET_VOP2InfoTable_DECL
444#define GET_VOP2InfoTable_IMPL
445#define GET_VOP3InfoTable_DECL
446#define GET_VOP3InfoTable_IMPL
447#define GET_VOPC64DPPTable_DECL
448#define GET_VOPC64DPPTable_IMPL
449#define GET_VOPC64DPP8Table_DECL
450#define GET_VOPC64DPP8Table_IMPL
451#define GET_VOPCAsmOnlyInfoTable_DECL
452#define GET_VOPCAsmOnlyInfoTable_IMPL
453#define GET_VOP3CAsmOnlyInfoTable_DECL
454#define GET_VOP3CAsmOnlyInfoTable_IMPL
455#define GET_VOPDComponentTable_DECL
456#define GET_VOPDComponentTable_IMPL
457#define GET_VOPDPairs_DECL
458#define GET_VOPDPairs_IMPL
459#define GET_VOPTrue16Table_DECL
460#define GET_VOPTrue16Table_IMPL
461#define GET_True16D16Table_IMPL
462#define GET_WMMAOpcode2AddrMappingTable_DECL
463#define GET_WMMAOpcode2AddrMappingTable_IMPL
464#define GET_WMMAOpcode3AddrMappingTable_DECL
465#define GET_WMMAOpcode3AddrMappingTable_IMPL
466#define GET_getMFMA_F8F6F4_WithSize_DECL
467#define GET_getMFMA_F8F6F4_WithSize_IMPL
468#define GET_isMFMA_F8F6F4Table_IMPL
469#define GET_isCvtScaleF32_F32F16ToF8F4Table_IMPL
470
471#include "AMDGPUGenSearchableTables.inc"
472
473int getMTBUFBaseOpcode(unsigned Opc) {
474 const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc);
475 return Info ? Info->BaseOpcode : -1;
476}
477
478int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements) {
479 const MTBUFInfo *Info =
480 getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
481 return Info ? Info->Opcode : -1;
482}
483
484int getMTBUFElements(unsigned Opc) {
485 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
486 return Info ? Info->elements : 0;
487}
488
489bool getMTBUFHasVAddr(unsigned Opc) {
490 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
491 return Info && Info->has_vaddr;
492}
493
494bool getMTBUFHasSrsrc(unsigned Opc) {
495 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
496 return Info && Info->has_srsrc;
497}
498
499bool getMTBUFHasSoffset(unsigned Opc) {
500 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
501 return Info && Info->has_soffset;
502}
503
504int getMUBUFBaseOpcode(unsigned Opc) {
505 const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
506 return Info ? Info->BaseOpcode : -1;
507}
508
509int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements) {
510 const MUBUFInfo *Info =
511 getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
512 return Info ? Info->Opcode : -1;
513}
514
515int getMUBUFElements(unsigned Opc) {
516 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
517 return Info ? Info->elements : 0;
518}
519
520bool getMUBUFHasVAddr(unsigned Opc) {
521 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
522 return Info && Info->has_vaddr;
523}
524
525bool getMUBUFHasSrsrc(unsigned Opc) {
526 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
527 return Info && Info->has_srsrc;
528}
529
530bool getMUBUFHasSoffset(unsigned Opc) {
531 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
532 return Info && Info->has_soffset;
533}
534
535bool getMUBUFIsBufferInv(unsigned Opc) {
536 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
537 return Info && Info->IsBufferInv;
538}
539
540bool getMUBUFTfe(unsigned Opc) {
541 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
542 return Info && Info->tfe;
543}
544
545bool getSMEMIsBuffer(unsigned Opc) {
546 const SMInfo *Info = getSMEMOpcodeHelper(Opc);
547 return Info && Info->IsBuffer;
548}
549
550bool getVOP1IsSingle(unsigned Opc) {
551 const VOPInfo *Info = getVOP1OpcodeHelper(Opc);
552 return !Info || Info->IsSingle;
553}
554
555bool getVOP2IsSingle(unsigned Opc) {
556 const VOPInfo *Info = getVOP2OpcodeHelper(Opc);
557 return !Info || Info->IsSingle;
558}
559
560bool getVOP3IsSingle(unsigned Opc) {
561 const VOPInfo *Info = getVOP3OpcodeHelper(Opc);
562 return !Info || Info->IsSingle;
563}
564
565bool isVOPC64DPP(unsigned Opc) {
566 return isVOPC64DPPOpcodeHelper(Opc) || isVOPC64DPP8OpcodeHelper(Opc);
567}
568
569bool isVOPCAsmOnly(unsigned Opc) { return isVOPCAsmOnlyOpcodeHelper(Opc); }
570
571bool getMAIIsDGEMM(unsigned Opc) {
572 const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
573 return Info && Info->is_dgemm;
574}
575
576bool getMAIIsGFX940XDL(unsigned Opc) {
577 const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
578 return Info && Info->is_gfx940_xdl;
579}
580
581bool getWMMAIsXDL(unsigned Opc) {
582 const WMMAInstInfo *Info = getWMMAInstInfoHelper(Opc);
583 return Info ? Info->is_wmma_xdl : false;
584}
585
586unsigned mfmaScaleF8F6F4FormatToNumRegs(unsigned EncodingVal) {
587 switch (EncodingVal) {
590 return 6;
592 return 4;
595 default:
596 return 8;
597 }
598
599 llvm_unreachable("covered switch over mfma scale formats");
600}
601
603 unsigned BLGP,
604 unsigned F8F8Opcode) {
605 uint8_t SrcANumRegs = mfmaScaleF8F6F4FormatToNumRegs(CBSZ);
606 uint8_t SrcBNumRegs = mfmaScaleF8F6F4FormatToNumRegs(BLGP);
607 return getMFMA_F8F6F4_InstWithNumRegs(SrcANumRegs, SrcBNumRegs, F8F8Opcode);
608}
609
610unsigned wmmaScaleF8F6F4FormatToNumRegs(unsigned Fmt) {
611 switch (Fmt) {
614 return 16;
617 return 12;
619 return 8;
620 }
621
622 llvm_unreachable("covered switch over wmma scale formats");
623}
624
626 unsigned FmtB,
627 unsigned F8F8Opcode) {
628 uint8_t SrcANumRegs = wmmaScaleF8F6F4FormatToNumRegs(FmtA);
629 uint8_t SrcBNumRegs = wmmaScaleF8F6F4FormatToNumRegs(FmtB);
630 return getMFMA_F8F6F4_InstWithNumRegs(SrcANumRegs, SrcBNumRegs, F8F8Opcode);
631}
632
633unsigned getVOPDEncodingFamily(const MCSubtargetInfo &ST) {
634 if (ST.hasFeature(AMDGPU::FeatureGFX1250Insts))
635 return SIEncodingFamily::GFX1250;
636 if (ST.hasFeature(AMDGPU::FeatureGFX12Insts))
637 return SIEncodingFamily::GFX12;
638 if (ST.hasFeature(AMDGPU::FeatureGFX11Insts))
639 return SIEncodingFamily::GFX11;
640 llvm_unreachable("Subtarget generation does not support VOPD!");
641}
642
643CanBeVOPD getCanBeVOPD(unsigned Opc, unsigned EncodingFamily, bool VOPD3) {
644 bool IsConvertibleToBitOp = VOPD3 ? getBitOp2(Opc) : 0;
645 Opc = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : Opc;
646 const VOPDComponentInfo *Info = getVOPDComponentHelper(Opc);
647 if (Info) {
648 // Check that Opc can be used as VOPDY for this encoding. V_MOV_B32 as a
649 // VOPDX is just a placeholder here, it is supported on all encodings.
650 // TODO: This can be optimized by creating tables of supported VOPDY
651 // opcodes per encoding.
652 unsigned VOPDMov = AMDGPU::getVOPDOpcode(AMDGPU::V_MOV_B32_e32, VOPD3);
653 bool CanBeVOPDY = getVOPDFull(VOPDMov, AMDGPU::getVOPDOpcode(Opc, VOPD3),
654 EncodingFamily, VOPD3) != -1;
655 return {VOPD3 ? Info->CanBeVOPD3X : Info->CanBeVOPDX, CanBeVOPDY};
656 }
657
658 return {false, false};
659}
660
661unsigned getVOPDOpcode(unsigned Opc, bool VOPD3) {
662 bool IsConvertibleToBitOp = VOPD3 ? getBitOp2(Opc) : 0;
663 Opc = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : Opc;
664 const VOPDComponentInfo *Info = getVOPDComponentHelper(Opc);
665 return Info ? Info->VOPDOp : ~0u;
666}
667
668bool isVOPD(unsigned Opc) {
669 return AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0X);
670}
671
672bool isMAC(unsigned Opc) {
673 return Opc == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
674 Opc == AMDGPU::V_MAC_F32_e64_gfx10 ||
675 Opc == AMDGPU::V_MAC_F32_e64_vi ||
676 Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx6_gfx7 ||
677 Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx10 ||
678 Opc == AMDGPU::V_MAC_F16_e64_vi ||
679 Opc == AMDGPU::V_FMAC_F64_e64_gfx90a ||
680 Opc == AMDGPU::V_FMAC_F64_e64_gfx12 ||
681 Opc == AMDGPU::V_FMAC_F32_e64_gfx10 ||
682 Opc == AMDGPU::V_FMAC_F32_e64_gfx11 ||
683 Opc == AMDGPU::V_FMAC_F32_e64_gfx12 ||
684 Opc == AMDGPU::V_FMAC_F32_e64_vi ||
685 Opc == AMDGPU::V_FMAC_LEGACY_F32_e64_gfx10 ||
686 Opc == AMDGPU::V_FMAC_DX9_ZERO_F32_e64_gfx11 ||
687 Opc == AMDGPU::V_FMAC_F16_e64_gfx10 ||
688 Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx11 ||
689 Opc == AMDGPU::V_FMAC_F16_fake16_e64_gfx11 ||
690 Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx12 ||
691 Opc == AMDGPU::V_FMAC_F16_fake16_e64_gfx12 ||
692 Opc == AMDGPU::V_DOT2C_F32_F16_e64_vi ||
693 Opc == AMDGPU::V_DOT2C_F32_BF16_e64_vi ||
694 Opc == AMDGPU::V_DOT2C_I32_I16_e64_vi ||
695 Opc == AMDGPU::V_DOT4C_I32_I8_e64_vi ||
696 Opc == AMDGPU::V_DOT8C_I32_I4_e64_vi;
697}
698
699bool isPermlane16(unsigned Opc) {
700 return Opc == AMDGPU::V_PERMLANE16_B32_gfx10 ||
701 Opc == AMDGPU::V_PERMLANEX16_B32_gfx10 ||
702 Opc == AMDGPU::V_PERMLANE16_B32_e64_gfx11 ||
703 Opc == AMDGPU::V_PERMLANEX16_B32_e64_gfx11 ||
704 Opc == AMDGPU::V_PERMLANE16_B32_e64_gfx12 ||
705 Opc == AMDGPU::V_PERMLANEX16_B32_e64_gfx12 ||
706 Opc == AMDGPU::V_PERMLANE16_VAR_B32_e64_gfx12 ||
707 Opc == AMDGPU::V_PERMLANEX16_VAR_B32_e64_gfx12;
708}
709
710bool isCvt_F32_Fp8_Bf8_e64(unsigned Opc) {
711 return Opc == AMDGPU::V_CVT_F32_BF8_e64_gfx12 ||
712 Opc == AMDGPU::V_CVT_F32_FP8_e64_gfx12 ||
713 Opc == AMDGPU::V_CVT_F32_BF8_e64_dpp_gfx12 ||
714 Opc == AMDGPU::V_CVT_F32_FP8_e64_dpp_gfx12 ||
715 Opc == AMDGPU::V_CVT_F32_BF8_e64_dpp8_gfx12 ||
716 Opc == AMDGPU::V_CVT_F32_FP8_e64_dpp8_gfx12 ||
717 Opc == AMDGPU::V_CVT_PK_F32_BF8_fake16_e64_gfx12 ||
718 Opc == AMDGPU::V_CVT_PK_F32_FP8_fake16_e64_gfx12 ||
719 Opc == AMDGPU::V_CVT_PK_F32_BF8_t16_e64_gfx12 ||
720 Opc == AMDGPU::V_CVT_PK_F32_FP8_t16_e64_gfx12;
721}
722
723bool isGenericAtomic(unsigned Opc) {
724 return Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SWAP ||
725 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_ADD ||
726 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB ||
727 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMIN ||
728 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMIN ||
729 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMAX ||
730 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMAX ||
731 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_AND ||
732 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_OR ||
733 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_XOR ||
734 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_INC ||
735 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_DEC ||
736 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD ||
737 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMIN ||
738 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMAX ||
739 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_CMPSWAP ||
740 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB_CLAMP_U32 ||
741 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_COND_SUB_U32 ||
742 Opc == AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG;
743}
744
745bool isAsyncStore(unsigned Opc) {
746 return Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B8_gfx1250 ||
747 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B32_gfx1250 ||
748 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B64_gfx1250 ||
749 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B128_gfx1250 ||
750 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B8_SADDR_gfx1250 ||
751 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B32_SADDR_gfx1250 ||
752 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B64_SADDR_gfx1250 ||
753 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B128_SADDR_gfx1250;
754}
755
756bool isTensorStore(unsigned Opc) {
757 return Opc == TENSOR_STORE_FROM_LDS_gfx1250 ||
758 Opc == TENSOR_STORE_FROM_LDS_D2_gfx1250;
759}
760
761unsigned getTemporalHintType(const MCInstrDesc TID) {
764 unsigned Opc = TID.getOpcode();
765 // Async and Tensor store should have the temporal hint type of TH_TYPE_STORE
766 if (TID.mayStore() &&
767 (isAsyncStore(Opc) || isTensorStore(Opc) || !TID.mayLoad()))
768 return CPol::TH_TYPE_STORE;
769
770 // This will default to returning TH_TYPE_LOAD when neither MayStore nor
771 // MayLoad flag is present which is the case with instructions like
772 // image_get_resinfo.
773 return CPol::TH_TYPE_LOAD;
774}
775
776bool isTrue16Inst(unsigned Opc) {
777 const VOPTrue16Info *Info = getTrue16OpcodeHelper(Opc);
778 return Info && Info->IsTrue16;
779}
780
781FPType getFPDstSelType(unsigned Opc) {
782 const FP4FP8DstByteSelInfo *Info = getFP4FP8DstByteSelHelper(Opc);
783 if (!Info)
784 return FPType::None;
785 if (Info->HasFP8DstByteSel)
786 return FPType::FP8;
787 if (Info->HasFP4DstByteSel)
788 return FPType::FP4;
789
790 return FPType::None;
791}
792
793bool isDPMACCInstruction(unsigned Opc) {
794 const DPMACCInstructionInfo *Info = getDPMACCInstructionHelper(Opc);
795 return Info && Info->IsDPMACCInstruction;
796}
797
798unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc) {
799 const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom2AddrOpcode(Opc);
800 return Info ? Info->Opcode3Addr : ~0u;
801}
802
803unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc) {
804 const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom3AddrOpcode(Opc);
805 return Info ? Info->Opcode2Addr : ~0u;
806}
807
808// Wrapper for Tablegen'd function. enum Subtarget is not defined in any
809// header files, so we need to wrap it in a function that takes unsigned
810// instead.
811int getMCOpcode(uint16_t Opcode, unsigned Gen) {
812 return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
813}
814
815unsigned getBitOp2(unsigned Opc) {
816 switch (Opc) {
817 default:
818 return 0;
819 case AMDGPU::V_AND_B32_e32:
820 return 0x40;
821 case AMDGPU::V_OR_B32_e32:
822 return 0x54;
823 case AMDGPU::V_XOR_B32_e32:
824 return 0x14;
825 case AMDGPU::V_XNOR_B32_e32:
826 return 0x41;
827 }
828}
829
830int getVOPDFull(unsigned OpX, unsigned OpY, unsigned EncodingFamily,
831 bool VOPD3) {
832 bool IsConvertibleToBitOp = VOPD3 ? getBitOp2(OpY) : 0;
833 OpY = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : OpY;
834 const VOPDInfo *Info =
835 getVOPDInfoFromComponentOpcodes(OpX, OpY, EncodingFamily, VOPD3);
836 return Info ? Info->Opcode : -1;
837}
838
839std::pair<unsigned, unsigned> getVOPDComponents(unsigned VOPDOpcode) {
840 const VOPDInfo *Info = getVOPDOpcodeHelper(VOPDOpcode);
841 assert(Info);
842 const auto *OpX = getVOPDBaseFromComponent(Info->OpX);
843 const auto *OpY = getVOPDBaseFromComponent(Info->OpY);
844 assert(OpX && OpY);
845 return {OpX->BaseVOP, OpY->BaseVOP};
846}
847
848namespace VOPD {
849
850ComponentProps::ComponentProps(const MCInstrDesc &OpDesc, bool VOP3Layout) {
852
855 auto TiedIdx = OpDesc.getOperandConstraint(Component::SRC2, MCOI::TIED_TO);
856 assert(TiedIdx == -1 || TiedIdx == Component::DST);
857 HasSrc2Acc = TiedIdx != -1;
858 Opcode = OpDesc.getOpcode();
859
860 IsVOP3 = VOP3Layout || (OpDesc.TSFlags & SIInstrFlags::VOP3);
861 SrcOperandsNum = AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src2) ? 3
862 : AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::imm) ? 3
863 : AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src1) ? 2
864 : 1;
865 assert(SrcOperandsNum <= Component::MAX_SRC_NUM);
866
867 if (Opcode == AMDGPU::V_CNDMASK_B32_e32 ||
868 Opcode == AMDGPU::V_CNDMASK_B32_e64) {
869 // CNDMASK is an awkward exception, it has FP modifiers, but not FP
870 // operands.
871 NumVOPD3Mods = 2;
872 if (IsVOP3)
873 SrcOperandsNum = 3;
874 } else if (isSISrcFPOperand(OpDesc,
875 getNamedOperandIdx(Opcode, OpName::src0))) {
876 // All FP VOPD instructions have Neg modifiers for all operands except
877 // for tied src2.
878 NumVOPD3Mods = SrcOperandsNum;
879 if (HasSrc2Acc)
880 --NumVOPD3Mods;
881 }
882
883 if (OpDesc.TSFlags & SIInstrFlags::VOP3)
884 return;
885
886 auto OperandsNum = OpDesc.getNumOperands();
887 unsigned CompOprIdx;
888 for (CompOprIdx = Component::SRC1; CompOprIdx < OperandsNum; ++CompOprIdx) {
889 if (OpDesc.operands()[CompOprIdx].OperandType == AMDGPU::OPERAND_KIMM32) {
890 MandatoryLiteralIdx = CompOprIdx;
891 break;
892 }
893 }
894}
895
897 return getNamedOperandIdx(Opcode, OpName::bitop3);
898}
899
900unsigned ComponentInfo::getIndexInParsedOperands(unsigned CompOprIdx) const {
901 assert(CompOprIdx < Component::MAX_OPR_NUM);
902
903 if (CompOprIdx == Component::DST)
904 return getIndexOfDstInParsedOperands();
905
906 auto CompSrcIdx = CompOprIdx - Component::DST_NUM;
907 if (CompSrcIdx < getCompParsedSrcOperandsNum())
908 return getIndexOfSrcInParsedOperands(CompSrcIdx);
909
910 // The specified operand does not exist.
911 return 0;
912}
913
914std::optional<unsigned> InstInfo::getInvalidCompOperandIndex(
915 std::function<MCRegister(unsigned, unsigned)> GetRegIdx,
916 const MCRegisterInfo &MRI, bool SkipSrc, bool AllowSameVGPR,
917 bool VOPD3) const {
918
919 auto OpXRegs = getRegIndices(ComponentIndex::X, GetRegIdx,
920 CompInfo[ComponentIndex::X].isVOP3());
921 auto OpYRegs = getRegIndices(ComponentIndex::Y, GetRegIdx,
922 CompInfo[ComponentIndex::Y].isVOP3());
923
924 const auto banksOverlap = [&MRI](MCRegister X, MCRegister Y,
925 unsigned BanksMask) -> bool {
926 MCRegister BaseX = MRI.getSubReg(X, AMDGPU::sub0);
927 MCRegister BaseY = MRI.getSubReg(Y, AMDGPU::sub0);
928 if (!BaseX)
929 BaseX = X;
930 if (!BaseY)
931 BaseY = Y;
932 if ((BaseX.id() & BanksMask) == (BaseY.id() & BanksMask))
933 return true;
934 if (BaseX != X /* This is 64-bit register */ &&
935 ((BaseX.id() + 1) & BanksMask) == (BaseY.id() & BanksMask))
936 return true;
937 if (BaseY != Y &&
938 (BaseX.id() & BanksMask) == ((BaseY.id() + 1) & BanksMask))
939 return true;
940
941 // If both operands are 64-bit registers, a bank conflict will already have
942 // been detected while checking the first subregister.
943 return false;
944 };
945
946 unsigned CompOprIdx;
947 for (CompOprIdx = 0; CompOprIdx < Component::MAX_OPR_NUM; ++CompOprIdx) {
948 unsigned BanksMasks = VOPD3 ? VOPD3_VGPR_BANK_MASKS[CompOprIdx]
949 : VOPD_VGPR_BANK_MASKS[CompOprIdx];
950 if (!OpXRegs[CompOprIdx] || !OpYRegs[CompOprIdx])
951 continue;
952
953 if (getVGPREncodingMSBs(OpXRegs[CompOprIdx], MRI) !=
954 getVGPREncodingMSBs(OpYRegs[CompOprIdx], MRI))
955 return CompOprIdx;
956
957 if (SkipSrc && CompOprIdx >= Component::DST_NUM)
958 continue;
959
960 if (CompOprIdx < Component::DST_NUM) {
961 // Even if we do not check vdst parity, vdst operands still shall not
962 // overlap.
963 if (MRI.regsOverlap(OpXRegs[CompOprIdx], OpYRegs[CompOprIdx]))
964 return CompOprIdx;
965 if (VOPD3) // No need to check dst parity.
966 continue;
967 }
968
969 if (banksOverlap(OpXRegs[CompOprIdx], OpYRegs[CompOprIdx], BanksMasks) &&
970 (!AllowSameVGPR || CompOprIdx < Component::DST_NUM ||
971 OpXRegs[CompOprIdx] != OpYRegs[CompOprIdx]))
972 return CompOprIdx;
973 }
974
975 return {};
976}
977
978// Return an array of VGPR registers [DST,SRC0,SRC1,SRC2] used
979// by the specified component. If an operand is unused
980// or is not a VGPR, the corresponding value is 0.
981//
982// GetRegIdx(Component, MCOperandIdx) must return a VGPR register index
983// for the specified component and MC operand. The callback must return 0
984// if the operand is not a register or not a VGPR.
986InstInfo::getRegIndices(unsigned CompIdx,
987 std::function<MCRegister(unsigned, unsigned)> GetRegIdx,
988 bool VOPD3) const {
989 assert(CompIdx < COMPONENTS_NUM);
990
991 const auto &Comp = CompInfo[CompIdx];
993
994 RegIndices[DST] = GetRegIdx(CompIdx, Comp.getIndexOfDstInMCOperands());
995
996 for (unsigned CompOprIdx : {SRC0, SRC1, SRC2}) {
997 unsigned CompSrcIdx = CompOprIdx - DST_NUM;
998 RegIndices[CompOprIdx] =
999 Comp.hasRegSrcOperand(CompSrcIdx)
1000 ? GetRegIdx(CompIdx,
1001 Comp.getIndexOfSrcInMCOperands(CompSrcIdx, VOPD3))
1002 : MCRegister();
1003 }
1004 return RegIndices;
1005}
1006
1007} // namespace VOPD
1008
1009VOPD::InstInfo getVOPDInstInfo(const MCInstrDesc &OpX, const MCInstrDesc &OpY) {
1010 return VOPD::InstInfo(OpX, OpY);
1011}
1012
1013VOPD::InstInfo getVOPDInstInfo(unsigned VOPDOpcode,
1014 const MCInstrInfo *InstrInfo) {
1015 auto [OpX, OpY] = getVOPDComponents(VOPDOpcode);
1016 const auto &OpXDesc = InstrInfo->get(OpX);
1017 const auto &OpYDesc = InstrInfo->get(OpY);
1018 bool VOPD3 = InstrInfo->get(VOPDOpcode).TSFlags & SIInstrFlags::VOPD3;
1019 VOPD::ComponentInfo OpXInfo(OpXDesc, VOPD3);
1020 VOPD::ComponentInfo OpYInfo(OpYDesc, OpXInfo, VOPD3);
1021 return VOPD::InstInfo(OpXInfo, OpYInfo);
1022}
1023
1024namespace IsaInfo {
1025
1026AMDGPUTargetID::AMDGPUTargetID(const MCSubtargetInfo &STI)
1027 : STI(STI), XnackSetting(TargetIDSetting::Any),
1028 SramEccSetting(TargetIDSetting::Any) {
1029 if (!STI.getFeatureBits().test(FeatureSupportsXNACK))
1030 XnackSetting = TargetIDSetting::Unsupported;
1031 if (!STI.getFeatureBits().test(FeatureSupportsSRAMECC))
1032 SramEccSetting = TargetIDSetting::Unsupported;
1033}
1034
1035void AMDGPUTargetID::setTargetIDFromFeaturesString(StringRef FS) {
1036 // Check if xnack or sramecc is explicitly enabled or disabled. In the
1037 // absence of the target features we assume we must generate code that can run
1038 // in any environment.
1039 SubtargetFeatures Features(FS);
1040 std::optional<bool> XnackRequested;
1041 std::optional<bool> SramEccRequested;
1042
1043 for (const std::string &Feature : Features.getFeatures()) {
1044 if (Feature == "+xnack")
1045 XnackRequested = true;
1046 else if (Feature == "-xnack")
1047 XnackRequested = false;
1048 else if (Feature == "+sramecc")
1049 SramEccRequested = true;
1050 else if (Feature == "-sramecc")
1051 SramEccRequested = false;
1052 }
1053
1054 bool XnackSupported = isXnackSupported();
1055 bool SramEccSupported = isSramEccSupported();
1056
1057 if (XnackRequested) {
1058 if (XnackSupported) {
1059 XnackSetting =
1060 *XnackRequested ? TargetIDSetting::On : TargetIDSetting::Off;
1061 } else {
1062 // If a specific xnack setting was requested and this GPU does not support
1063 // xnack emit a warning. Setting will remain set to "Unsupported".
1064 if (*XnackRequested) {
1065 errs() << "warning: xnack 'On' was requested for a processor that does "
1066 "not support it!\n";
1067 } else {
1068 errs() << "warning: xnack 'Off' was requested for a processor that "
1069 "does not support it!\n";
1070 }
1071 }
1072 }
1073
1074 if (SramEccRequested) {
1075 if (SramEccSupported) {
1076 SramEccSetting =
1077 *SramEccRequested ? TargetIDSetting::On : TargetIDSetting::Off;
1078 } else {
1079 // If a specific sramecc setting was requested and this GPU does not
1080 // support sramecc emit a warning. Setting will remain set to
1081 // "Unsupported".
1082 if (*SramEccRequested) {
1083 errs() << "warning: sramecc 'On' was requested for a processor that "
1084 "does not support it!\n";
1085 } else {
1086 errs() << "warning: sramecc 'Off' was requested for a processor that "
1087 "does not support it!\n";
1088 }
1089 }
1090 }
1091}
1092
1093static TargetIDSetting
1094getTargetIDSettingFromFeatureString(StringRef FeatureString) {
1095 if (FeatureString.ends_with("-"))
1096 return TargetIDSetting::Off;
1097 if (FeatureString.ends_with("+"))
1098 return TargetIDSetting::On;
1099
1100 llvm_unreachable("Malformed feature string");
1101}
1102
1103void AMDGPUTargetID::setTargetIDFromTargetIDStream(StringRef TargetID) {
1104 SmallVector<StringRef, 3> TargetIDSplit;
1105 TargetID.split(TargetIDSplit, ':');
1106
1107 for (const auto &FeatureString : TargetIDSplit) {
1108 if (FeatureString.starts_with("xnack"))
1109 XnackSetting = getTargetIDSettingFromFeatureString(FeatureString);
1110 if (FeatureString.starts_with("sramecc"))
1111 SramEccSetting = getTargetIDSettingFromFeatureString(FeatureString);
1112 }
1113}
1114
1115std::string AMDGPUTargetID::toString() const {
1116 std::string StringRep;
1117 raw_string_ostream StreamRep(StringRep);
1118
1119 auto TargetTriple = STI.getTargetTriple();
1120 auto Version = getIsaVersion(STI.getCPU());
1121
1122 StreamRep << TargetTriple.getArchName() << '-' << TargetTriple.getVendorName()
1123 << '-' << TargetTriple.getOSName() << '-'
1124 << TargetTriple.getEnvironmentName() << '-';
1125
1126 std::string Processor;
1127 // TODO: The else branch below exists because we used various alias names
1128 // for GPUs up until GFX9 (e.g. 'fiji' is the same as 'gfx803').
1129 // Remove it once all aliases are removed from GCNProcessors.td.
1130 if (Version.Major >= 9)
1131 Processor = STI.getCPU().str();
1132 else
1133 Processor = (Twine("gfx") + Twine(Version.Major) + Twine(Version.Minor) +
1134 Twine(Version.Stepping))
1135 .str();
1136
1137 std::string Features;
1138 if (STI.getTargetTriple().getOS() == Triple::AMDHSA) {
1139 // sramecc.
1140 if (getSramEccSetting() == TargetIDSetting::Off)
1141 Features += ":sramecc-";
1142 else if (getSramEccSetting() == TargetIDSetting::On)
1143 Features += ":sramecc+";
1144 // xnack.
1145 if (getXnackSetting() == TargetIDSetting::Off)
1146 Features += ":xnack-";
1147 else if (getXnackSetting() == TargetIDSetting::On)
1148 Features += ":xnack+";
1149 }
1150
1151 StreamRep << Processor << Features;
1152
1153 return StringRep;
1154}
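// Example of the resulting target-id string (assuming a gfx90a HSA target
// with sramecc enabled and xnack disabled):
//   amdgcn-amd-amdhsa--gfx90a:sramecc+:xnack-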
1155
1156unsigned getWavefrontSize(const MCSubtargetInfo *STI) {
1157 if (STI->getFeatureBits().test(FeatureWavefrontSize16))
1158 return 16;
1159 if (STI->getFeatureBits().test(FeatureWavefrontSize32))
1160 return 32;
1161
1162 return 64;
1163}
1164
1165unsigned getLocalMemorySize(const MCSubtargetInfo *STI) {
1166 unsigned BytesPerCU = getAddressableLocalMemorySize(STI);
1167
1168 // "Per CU" really means "per whatever functional block the waves of a
1169 // workgroup must share". So the effective local memory size is doubled in
1170 // WGP mode on gfx10.
1171 if (isGFX10Plus(*STI) && !STI->getFeatureBits().test(FeatureCuMode))
1172 BytesPerCU *= 2;
1173
1174 return BytesPerCU;
1175}
1176
1177unsigned getAddressableLocalMemorySize(const MCSubtargetInfo *STI) {
1178 if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize32768))
1179 return 32768;
1180 if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize65536))
1181 return 65536;
1182 if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize163840))
1183 return 163840;
1184 if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize327680))
1185 return 327680;
1186 return 32768;
1187}
1188
1189unsigned getEUsPerCU(const MCSubtargetInfo *STI) {
1190 // "Per CU" really means "per whatever functional block the waves of a
1191 // workgroup must share".
1192
1193 // GFX12.5 only supports CU mode, which contains four SIMDs.
1194 if (isGFX1250(*STI)) {
1195 assert(STI->getFeatureBits().test(FeatureCuMode));
1196 return 4;
1197 }
1198
1199 // For gfx10 in CU mode the functional block is the CU, which contains
1200 // two SIMDs.
1201 if (isGFX10Plus(*STI) && STI->getFeatureBits().test(FeatureCuMode))
1202 return 2;
1203
1204 // Pre-gfx10 a CU contains four SIMDs. For gfx10 in WGP mode the WGP
1205 // contains two CUs, so a total of four SIMDs.
1206 return 4;
1207}
1208
1209unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
1210 unsigned FlatWorkGroupSize) {
1211 assert(FlatWorkGroupSize != 0);
1212 if (!STI->getTargetTriple().isAMDGCN())
1213 return 8;
1214 unsigned MaxWaves = getMaxWavesPerEU(STI) * getEUsPerCU(STI);
1215 unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize);
1216 if (N == 1) {
1217 // Single-wave workgroups don't consume barrier resources.
1218 return MaxWaves;
1219 }
1220
1221 unsigned MaxBarriers = 16;
1222 if (isGFX10Plus(*STI) && !STI->getFeatureBits().test(FeatureCuMode))
1223 MaxBarriers = 32;
1224
1225 return std::min(MaxWaves / N, MaxBarriers);
1226}
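// Worked example (pre-GFX10 numbers, for exposition): with wave64 and
// FlatWorkGroupSize = 256, N = 4 waves per group; getMaxWavesPerEU() = 10 and
// getEUsPerCU() = 4 give MaxWaves = 40, so the result is
// std::min(40 / 4, 16u) = 10 workgroups per CU.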
1227
1228unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) { return 1; }
1229
1230unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI) {
1231 // FIXME: Need to take scratch memory into account.
1232 if (isGFX90A(*STI))
1233 return 8;
1234 if (!isGFX10Plus(*STI))
1235 return 10;
1236 return hasGFX10_3Insts(*STI) ? 16 : 20;
1237}
1238
1239unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI,
1240 unsigned FlatWorkGroupSize) {
1241 return divideCeil(getWavesPerWorkGroup(STI, FlatWorkGroupSize),
1242 getEUsPerCU(STI));
1243}
1244
1245unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) { return 1; }
1246
1247unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) {
1248 // Some subtargets allow encoding 2048, but this isn't tested or supported.
1249 return 1024;
1250}
1251
1252unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
1253 unsigned FlatWorkGroupSize) {
1254 return divideCeil(FlatWorkGroupSize, getWavefrontSize(STI));
1255}
1256
1257unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) {
1258 IsaVersion Version = getIsaVersion(STI->getCPU());
1259 if (Version.Major >= 10)
1260 return getAddressableNumSGPRs(STI);
1261 if (Version.Major >= 8)
1262 return 16;
1263 return 8;
1264}
1265
1266unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) { return 8; }
1267
1268unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) {
1269 IsaVersion Version = getIsaVersion(STI->getCPU());
1270 if (Version.Major >= 8)
1271 return 800;
1272 return 512;
1273}
1274
1275unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) {
1276 if (STI->getFeatureBits().test(FeatureSGPRInitBug))
1277 return FIXED_NUM_SGPRS_FOR_INIT_BUG;
1278
1279 IsaVersion Version = getIsaVersion(STI->getCPU());
1280 if (Version.Major >= 10)
1281 return 106;
1282 if (Version.Major >= 8)
1283 return 102;
1284 return 104;
1285}
1286
1287unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
1288 assert(WavesPerEU != 0);
1289
1290 IsaVersion Version = getIsaVersion(STI->getCPU());
1291 if (Version.Major >= 10)
1292 return 0;
1293
1294 if (WavesPerEU >= getMaxWavesPerEU(STI))
1295 return 0;
1296
1297 unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1);
1298 if (STI->getFeatureBits().test(FeatureTrapHandler))
1299 MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
1300 MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1;
1301 return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI));
1302}
1303
1304unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
1305 bool Addressable) {
1306 assert(WavesPerEU != 0);
1307
1308 unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI);
1309 IsaVersion Version = getIsaVersion(STI->getCPU());
1310 if (Version.Major >= 10)
1311 return Addressable ? AddressableNumSGPRs : 108;
1312 if (Version.Major >= 8 && !Addressable)
1313 AddressableNumSGPRs = 112;
1314 unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU;
1315 if (STI->getFeatureBits().test(FeatureTrapHandler))
1316 MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
1317 MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI));
1318 return std::min(MaxNumSGPRs, AddressableNumSGPRs);
1319}
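// Worked example (GFX9 numbers, ignoring the trap-handler reservation): with
// 800 total SGPRs, a 16-SGPR allocation granule and 102 addressable SGPRs,
// WavesPerEU = 8 gives alignDown(800 / 8, 16) = 96 and the result is
// std::min(96, 102) = 96.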
1320
1321unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
1322 bool FlatScrUsed, bool XNACKUsed) {
1323 unsigned ExtraSGPRs = 0;
1324 if (VCCUsed)
1325 ExtraSGPRs = 2;
1326
1327 IsaVersion Version = getIsaVersion(STI->getCPU());
1328 if (Version.Major >= 10)
1329 return ExtraSGPRs;
1330
1331 if (Version.Major < 8) {
1332 if (FlatScrUsed)
1333 ExtraSGPRs = 4;
1334 } else {
1335 if (XNACKUsed)
1336 ExtraSGPRs = 4;
1337
1338 if (FlatScrUsed ||
1339 STI->getFeatureBits().test(AMDGPU::FeatureArchitectedFlatScratch))
1340 ExtraSGPRs = 6;
1341 }
1342
1343 return ExtraSGPRs;
1344}
1345
1346unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
1347 bool FlatScrUsed) {
1348 return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed,
1349 STI->getFeatureBits().test(AMDGPU::FeatureXNACK));
1350}
1351
1352static unsigned getGranulatedNumRegisterBlocks(unsigned NumRegs,
1353 unsigned Granule) {
1354 return divideCeil(std::max(1u, NumRegs), Granule);
1355}
1356
1357unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
1358 // SGPRBlocks is actual number of SGPR blocks minus 1.
1359 return getGranulatedNumRegisterBlocks(NumSGPRs, getSGPREncodingGranule(STI)) -
1360 1;
1361}
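// Example: with NumSGPRs = 70 and the 8-SGPR encoding granule, the block
// count is divideCeil(70, 8) = 9, so the encoded SGPRBlocks field is 8.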
1362
1363unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
1364 unsigned DynamicVGPRBlockSize,
1365 std::optional<bool> EnableWavefrontSize32) {
1366 if (STI->getFeatureBits().test(FeatureGFX90AInsts))
1367 return 8;
1368
1369 if (DynamicVGPRBlockSize != 0)
1370 return DynamicVGPRBlockSize;
1371
1372 bool IsWave32 = EnableWavefrontSize32
1373 ? *EnableWavefrontSize32
1374 : STI->getFeatureBits().test(FeatureWavefrontSize32);
1375
1376 if (STI->getFeatureBits().test(Feature1_5xVGPRs))
1377 return IsWave32 ? 24 : 12;
1378
1379 if (hasGFX10_3Insts(*STI))
1380 return IsWave32 ? 16 : 8;
1381
1382 return IsWave32 ? 8 : 4;
1383}
1384
1385unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
1386 std::optional<bool> EnableWavefrontSize32) {
1387 if (STI->getFeatureBits().test(FeatureGFX90AInsts))
1388 return 8;
1389
1390 bool IsWave32 = EnableWavefrontSize32
1391 ? *EnableWavefrontSize32
1392 : STI->getFeatureBits().test(FeatureWavefrontSize32);
1393
1394 if (STI->getFeatureBits().test(Feature1024AddressableVGPRs))
1395 return IsWave32 ? 16 : 8;
1396
1397 return IsWave32 ? 8 : 4;
1398}
1399
1400unsigned getArchVGPRAllocGranule() { return 4; }
1401
1402unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) {
1403 if (STI->getFeatureBits().test(FeatureGFX90AInsts))
1404 return 512;
1405 if (!isGFX10Plus(*STI))
1406 return 256;
1407 bool IsWave32 = STI->getFeatureBits().test(FeatureWavefrontSize32);
1408 if (STI->getFeatureBits().test(Feature1_5xVGPRs))
1409 return IsWave32 ? 1536 : 768;
1410 return IsWave32 ? 1024 : 512;
1411}
1412
1413unsigned getAddressableNumArchVGPRs(const MCSubtargetInfo *STI) {
1414 const auto &Features = STI->getFeatureBits();
1415 if (Features.test(Feature1024AddressableVGPRs))
1416 return Features.test(FeatureWavefrontSize32) ? 1024 : 512;
1417 return 256;
1418}
1419
1420unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI,
1421 unsigned DynamicVGPRBlockSize) {
1422 const auto &Features = STI->getFeatureBits();
1423 if (Features.test(FeatureGFX90AInsts))
1424 return 512;
1425
1426 if (DynamicVGPRBlockSize != 0)
1427 // On GFX12 we can allocate at most 8 blocks of VGPRs.
1428 return 8 * getVGPRAllocGranule(STI, DynamicVGPRBlockSize);
1429 return getAddressableNumArchVGPRs(STI);
1430}
1431
1432unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI,
1433 unsigned NumVGPRs,
1434 unsigned DynamicVGPRBlockSize) {
1435 return getNumWavesPerEUWithNumVGPRs(
1436 NumVGPRs, getVGPRAllocGranule(STI, DynamicVGPRBlockSize),
1437 getMaxWavesPerEU(STI), getTotalNumVGPRs(STI));
1438}
1439
1440unsigned getNumWavesPerEUWithNumVGPRs(unsigned NumVGPRs, unsigned Granule,
1441 unsigned MaxWaves,
1442 unsigned TotalNumVGPRs) {
1443 if (NumVGPRs < Granule)
1444 return MaxWaves;
1445 unsigned RoundedRegs = alignTo(NumVGPRs, Granule);
1446 return std::min(std::max(TotalNumVGPRs / RoundedRegs, 1u), MaxWaves);
1447}
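// Worked example (GFX9-style numbers, for exposition): NumVGPRs = 84,
// Granule = 4, MaxWaves = 10 and TotalNumVGPRs = 256 round the request up to
// 84 registers; 256 / 84 = 3, so 3 waves per EU fit.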
1448
1449unsigned getOccupancyWithNumSGPRs(unsigned SGPRs, unsigned MaxWaves,
1450 AMDGPUSubtarget::Generation Gen) {
1451 if (Gen >= AMDGPUSubtarget::GFX10)
1452 return MaxWaves;
1453
1454 if (Gen >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
1455 if (SGPRs <= 80)
1456 return 10;
1457 if (SGPRs <= 88)
1458 return 9;
1459 if (SGPRs <= 100)
1460 return 8;
1461 return 7;
1462 }
1463 if (SGPRs <= 48)
1464 return 10;
1465 if (SGPRs <= 56)
1466 return 9;
1467 if (SGPRs <= 64)
1468 return 8;
1469 if (SGPRs <= 72)
1470 return 7;
1471 if (SGPRs <= 80)
1472 return 6;
1473 return 5;
1474}
1475
1476unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
1477 unsigned DynamicVGPRBlockSize) {
1478 assert(WavesPerEU != 0);
1479
1480 unsigned MaxWavesPerEU = getMaxWavesPerEU(STI);
1481 if (WavesPerEU >= MaxWavesPerEU)
1482 return 0;
1483
1484 unsigned TotNumVGPRs = getTotalNumVGPRs(STI);
1485 unsigned AddrsableNumVGPRs =
1486 getAddressableNumVGPRs(STI, DynamicVGPRBlockSize);
1487 unsigned Granule = getVGPRAllocGranule(STI, DynamicVGPRBlockSize);
1488 unsigned MaxNumVGPRs = alignDown(TotNumVGPRs / WavesPerEU, Granule);
1489
1490 if (MaxNumVGPRs == alignDown(TotNumVGPRs / MaxWavesPerEU, Granule))
1491 return 0;
1492
1493 unsigned MinWavesPerEU = getNumWavesPerEUWithNumVGPRs(STI, AddrsableNumVGPRs,
1494 DynamicVGPRBlockSize);
1495 if (WavesPerEU < MinWavesPerEU)
1496 return getMinNumVGPRs(STI, MinWavesPerEU, DynamicVGPRBlockSize);
1497
1498 unsigned MaxNumVGPRsNext = alignDown(TotNumVGPRs / (WavesPerEU + 1), Granule);
1499 unsigned MinNumVGPRs = 1 + std::min(MaxNumVGPRs - Granule, MaxNumVGPRsNext);
1500 return std::min(MinNumVGPRs, AddrsableNumVGPRs);
1501}
1502
1503unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
1504 unsigned DynamicVGPRBlockSize) {
1505 assert(WavesPerEU != 0);
1506
1507 unsigned MaxNumVGPRs =
1508 alignDown(getTotalNumVGPRs(STI) / WavesPerEU,
1509 getVGPRAllocGranule(STI, DynamicVGPRBlockSize));
1510 unsigned AddressableNumVGPRs =
1511 getAddressableNumVGPRs(STI, DynamicVGPRBlockSize);
1512 return std::min(MaxNumVGPRs, AddressableNumVGPRs);
1513}
1514
1515unsigned getEncodedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
1516 std::optional<bool> EnableWavefrontSize32) {
1517 return getGranulatedNumRegisterBlocks(
1518 NumVGPRs, getVGPREncodingGranule(STI, EnableWavefrontSize32)) -
1519 1;
1520}
1521
1522unsigned getAllocatedNumVGPRBlocks(const MCSubtargetInfo *STI,
1523 unsigned NumVGPRs,
1524 unsigned DynamicVGPRBlockSize,
1525 std::optional<bool> EnableWavefrontSize32) {
1526 return getGranulatedNumRegisterBlocks(
1527 NumVGPRs,
1528 getVGPRAllocGranule(STI, DynamicVGPRBlockSize, EnableWavefrontSize32));
1529}
1530} // end namespace IsaInfo
1531
1533 const MCSubtargetInfo *STI) {
1535 KernelCode.amd_kernel_code_version_major = 1;
1536 KernelCode.amd_kernel_code_version_minor = 2;
1537 KernelCode.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
1538 KernelCode.amd_machine_version_major = Version.Major;
1539 KernelCode.amd_machine_version_minor = Version.Minor;
1540 KernelCode.amd_machine_version_stepping = Version.Stepping;
1542 if (STI->getFeatureBits().test(FeatureWavefrontSize32)) {
1543 KernelCode.wavefront_size = 5;
1545 } else {
1546 KernelCode.wavefront_size = 6;
1547 }
1548
1549 // If the code object does not support indirect functions, then the value must
1550 // be 0xffffffff.
1551 KernelCode.call_convention = -1;
1552
1553 // These alignment values are specified in powers of two, so alignment =
1554 // 2^n. The minimum alignment is 2^4 = 16.
1555 KernelCode.kernarg_segment_alignment = 4;
1556 KernelCode.group_segment_alignment = 4;
1557 KernelCode.private_segment_alignment = 4;
1558
1559 if (Version.Major >= 10) {
1560 KernelCode.compute_pgm_resource_registers |=
1561 S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) |
1563 }
1564}
1565
1566bool isGroupSegment(const GlobalValue *GV) {
1567 return GV->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
1568}
1569
1570bool isGlobalSegment(const GlobalValue *GV) {
1571 return GV->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
1572}
1573
1574bool isReadOnlySegment(const GlobalValue *GV) {
1575 unsigned AS = GV->getAddressSpace();
1576 return AS == AMDGPUAS::CONSTANT_ADDRESS ||
1577 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
1578}
1579
1580bool shouldEmitConstantsToTextSection(const Triple &TT) {
1581 return TT.getArch() == Triple::r600;
1582}
1583
1584static bool isValidRegPrefix(char C) {
1585 return C == 'v' || C == 's' || C == 'a';
1586}
1587
1588std::tuple<char, unsigned, unsigned> parseAsmPhysRegName(StringRef RegName) {
1589 char Kind = RegName.front();
1590 if (!isValidRegPrefix(Kind))
1591 return {};
1592
1593 RegName = RegName.drop_front();
1594 if (RegName.consume_front("[")) {
1595 unsigned Idx, End;
1596 bool Failed = RegName.consumeInteger(10, Idx);
1597 Failed |= !RegName.consume_front(":");
1598 Failed |= RegName.consumeInteger(10, End);
1599 Failed |= !RegName.consume_back("]");
1600 if (!Failed) {
1601 unsigned NumRegs = End - Idx + 1;
1602 if (NumRegs > 1)
1603 return {Kind, Idx, NumRegs};
1604 }
1605 } else {
1606 unsigned Idx;
1607 bool Failed = RegName.getAsInteger(10, Idx);
1608 if (!Failed)
1609 return {Kind, Idx, 1};
1610 }
1611
1612 return {};
1613}
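// Usage examples (illustrative): parseAsmPhysRegName("v[8:11]") yields
// {'v', 8, 4}, parseAsmPhysRegName("s17") yields {'s', 17, 1}, and an unknown
// prefix such as "x0" yields a default-constructed (empty) tuple.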
1614
1615std::tuple<char, unsigned, unsigned>
1617 StringRef RegName = Constraint;
1618 if (!RegName.consume_front("{") || !RegName.consume_back("}"))
1619 return {};
1620 return parseAsmPhysRegName(RegName);
1621}
1622
1623std::pair<unsigned, unsigned>
1624getIntegerPairAttribute(const Function &F, StringRef Name,
1625 std::pair<unsigned, unsigned> Default,
1626 bool OnlyFirstRequired) {
1627 if (auto Attr = getIntegerPairAttribute(F, Name, OnlyFirstRequired))
1628 return {Attr->first, Attr->second.value_or(Default.second)};
1629 return Default;
1630}
1631
1632std::optional<std::pair<unsigned, std::optional<unsigned>>>
1633getIntegerPairAttribute(const Function &F, StringRef Name,
1634 bool OnlyFirstRequired) {
1635 Attribute A = F.getFnAttribute(Name);
1636 if (!A.isStringAttribute())
1637 return std::nullopt;
1638
1639 LLVMContext &Ctx = F.getContext();
1640 std::pair<unsigned, std::optional<unsigned>> Ints;
1641 std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
1642 if (Strs.first.trim().getAsInteger(0, Ints.first)) {
1643 Ctx.emitError("can't parse first integer attribute " + Name);
1644 return std::nullopt;
1645 }
1646 unsigned Second = 0;
1647 if (Strs.second.trim().getAsInteger(0, Second)) {
1648 if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
1649 Ctx.emitError("can't parse second integer attribute " + Name);
1650 return std::nullopt;
1651 }
1652 } else {
1653 Ints.second = Second;
1654 }
1655
1656 return Ints;
1657}
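// Example (using an existing AMDGPU attribute for illustration): a function
// attribute "amdgpu-waves-per-eu"="4,8" parses to {4, 8}; with the value "4"
// and OnlyFirstRequired = true, only the first integer is returned and the
// pair-returning overload above substitutes Default.second for the second.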
1658
1659SmallVector<unsigned> getIntegerVecAttribute(const Function &F, StringRef Name,
1660 unsigned Size,
1661 unsigned DefaultVal) {
1662 std::optional<SmallVector<unsigned>> R =
1663 getIntegerVecAttribute(F, Name, Size);
1664 return R.has_value() ? *R : SmallVector<unsigned>(Size, DefaultVal);
1665}
1666
1667std::optional<SmallVector<unsigned>>
1668getIntegerVecAttribute(const Function &F, StringRef Name, unsigned Size) {
1669 assert(Size > 2);
1670 LLVMContext &Ctx = F.getContext();
1671
1672 Attribute A = F.getFnAttribute(Name);
1673 if (!A.isValid())
1674 return std::nullopt;
1675 if (!A.isStringAttribute()) {
1676 Ctx.emitError(Name + " is not a string attribute");
1677 return std::nullopt;
1678 }
1679
1681
1682 StringRef S = A.getValueAsString();
1683 unsigned i = 0;
1684 for (; !S.empty() && i < Size; i++) {
1685 std::pair<StringRef, StringRef> Strs = S.split(',');
1686 unsigned IntVal;
1687 if (Strs.first.trim().getAsInteger(0, IntVal)) {
1688 Ctx.emitError("can't parse integer attribute " + Strs.first + " in " +
1689 Name);
1690 return std::nullopt;
1691 }
1692 Vals[i] = IntVal;
1693 S = Strs.second;
1694 }
1695
1696 if (!S.empty() || i < Size) {
1697 Ctx.emitError("attribute " + Name +
1698 " has incorrect number of integers; expected " +
1699 Twine(Size));
1700 return std::nullopt;
1701 }
1702 return Vals;
1703}
1704
1705bool hasValueInRangeLikeMetadata(const MDNode &MD, int64_t Val) {
1706 assert((MD.getNumOperands() % 2 == 0) && "invalid number of operands!");
1707 for (unsigned I = 0, E = MD.getNumOperands() / 2; I != E; ++I) {
1708 auto Low =
1709 mdconst::extract<ConstantInt>(MD.getOperand(2 * I + 0))->getValue();
1710 auto High =
1711 mdconst::extract<ConstantInt>(MD.getOperand(2 * I + 1))->getValue();
1712 // There are two types of [A; B) ranges:
1713 // A < B, e.g. [4; 5) which is a range that only includes 4.
1714 // A > B, e.g. [5; 4) which is a range that wraps around and includes
1715 // everything except 4.
1716 if (Low.ult(High)) {
1717 if (Low.ule(Val) && High.ugt(Val))
1718 return true;
1719 } else {
1720 if (Low.uge(Val) && High.ult(Val))
1721 return true;
1722 }
1723 }
1724
1725 return false;
1726}
1727
1728raw_ostream &operator<<(raw_ostream &OS, const Waitcnt &Wait) {
1729 ListSeparator LS;
1730 if (Wait.LoadCnt != ~0u)
1731 OS << LS << "LoadCnt: " << Wait.LoadCnt;
1732 if (Wait.ExpCnt != ~0u)
1733 OS << LS << "ExpCnt: " << Wait.ExpCnt;
1734 if (Wait.DsCnt != ~0u)
1735 OS << LS << "DsCnt: " << Wait.DsCnt;
1736 if (Wait.StoreCnt != ~0u)
1737 OS << LS << "StoreCnt: " << Wait.StoreCnt;
1738 if (Wait.SampleCnt != ~0u)
1739 OS << LS << "SampleCnt: " << Wait.SampleCnt;
1740 if (Wait.BvhCnt != ~0u)
1741 OS << LS << "BvhCnt: " << Wait.BvhCnt;
1742 if (Wait.KmCnt != ~0u)
1743 OS << LS << "KmCnt: " << Wait.KmCnt;
1744 if (Wait.XCnt != ~0u)
1745 OS << LS << "XCnt: " << Wait.XCnt;
1746 if (LS.unused())
1747 OS << "none";
1748 OS << '\n';
1749 return OS;
1750}
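// Example output (illustrative): a Waitcnt with LoadCnt = 0 and DsCnt = 3
// prints "LoadCnt: 0, DsCnt: 3"; a Waitcnt with no counters set prints "none".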
1751
1752unsigned getVmcntBitMask(const IsaVersion &Version) {
1753 return (1 << (getVmcntBitWidthLo(Version.Major) +
1754 getVmcntBitWidthHi(Version.Major))) -
1755 1;
1756}
1757
1758unsigned getLoadcntBitMask(const IsaVersion &Version) {
1759 return (1 << getLoadcntBitWidth(Version.Major)) - 1;
1760}
1761
1762unsigned getSamplecntBitMask(const IsaVersion &Version) {
1763 return (1 << getSamplecntBitWidth(Version.Major)) - 1;
1764}
1765
1766unsigned getBvhcntBitMask(const IsaVersion &Version) {
1767 return (1 << getBvhcntBitWidth(Version.Major)) - 1;
1768}
1769
1770unsigned getExpcntBitMask(const IsaVersion &Version) {
1771 return (1 << getExpcntBitWidth(Version.Major)) - 1;
1772}
1773
1774unsigned getLgkmcntBitMask(const IsaVersion &Version) {
1775 return (1 << getLgkmcntBitWidth(Version.Major)) - 1;
1776}
1777
1778unsigned getDscntBitMask(const IsaVersion &Version) {
1779 return (1 << getDscntBitWidth(Version.Major)) - 1;
1780}
1781
1782unsigned getKmcntBitMask(const IsaVersion &Version) {
1783 return (1 << getKmcntBitWidth(Version.Major)) - 1;
1784}
1785
1786unsigned getXcntBitMask(const IsaVersion &Version) {
1787 return (1 << getXcntBitWidth(Version.Major, Version.Minor)) - 1;
1788}
1789
1790unsigned getStorecntBitMask(const IsaVersion &Version) {
1791 return (1 << getStorecntBitWidth(Version.Major)) - 1;
1792}
1793
1795 bool HasExtendedWaitCounts = IV.Major >= 12;
1796 if (HasExtendedWaitCounts) {
1799 } else {
1802 }
1811}
1812
1813unsigned getWaitcntBitMask(const IsaVersion &Version) {
1814 unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(Version.Major),
1815 getVmcntBitWidthLo(Version.Major));
1816 unsigned Expcnt = getBitMask(getExpcntBitShift(Version.Major),
1817 getExpcntBitWidth(Version.Major));
1818 unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(Version.Major),
1819 getLgkmcntBitWidth(Version.Major));
1820 unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(Version.Major),
1821 getVmcntBitWidthHi(Version.Major));
1822 return VmcntLo | Expcnt | Lgkmcnt | VmcntHi;
1823}
1824
1825unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) {
1826 unsigned VmcntLo = unpackBits(Waitcnt, getVmcntBitShiftLo(Version.Major),
1827 getVmcntBitWidthLo(Version.Major));
1828 unsigned VmcntHi = unpackBits(Waitcnt, getVmcntBitShiftHi(Version.Major),
1829 getVmcntBitWidthHi(Version.Major));
1830 return VmcntLo | VmcntHi << getVmcntBitWidthLo(Version.Major);
1831}
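// Worked example (GFX9 layout, for exposition): vmcnt is split across bits
// [3:0] and [15:14], so for Waitcnt = 0x4005 the low part is 5, the high part
// is 1, and the decoded vmcnt is 5 | (1 << 4) = 21.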
1832
1833unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) {
1834 return unpackBits(Waitcnt, getExpcntBitShift(Version.Major),
1835 getExpcntBitWidth(Version.Major));
1836}
1837
1838unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
1839 return unpackBits(Waitcnt, getLgkmcntBitShift(Version.Major),
1840 getLgkmcntBitWidth(Version.Major));
1841}
1842
1843void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt,
1844 unsigned &Expcnt, unsigned &Lgkmcnt) {
1845 Vmcnt = decodeVmcnt(Version, Waitcnt);
1846 Expcnt = decodeExpcnt(Version, Waitcnt);
1847 Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
1848}
1849
1850Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) {
1851 Waitcnt Decoded;
1852 Decoded.LoadCnt = decodeVmcnt(Version, Encoded);
1853 Decoded.ExpCnt = decodeExpcnt(Version, Encoded);
1854 Decoded.DsCnt = decodeLgkmcnt(Version, Encoded);
1855 return Decoded;
1856}
1857
1858unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
1859 unsigned Vmcnt) {
1860 Waitcnt = packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(Version.Major),
1861 getVmcntBitWidthLo(Version.Major));
1862 return packBits(Vmcnt >> getVmcntBitWidthLo(Version.Major), Waitcnt,
1863 getVmcntBitShiftHi(Version.Major),
1864 getVmcntBitWidthHi(Version.Major));
1865}
1866
1867unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
1868 unsigned Expcnt) {
1869 return packBits(Expcnt, Waitcnt, getExpcntBitShift(Version.Major),
1870 getExpcntBitWidth(Version.Major));
1871}
1872
1873unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
1874 unsigned Lgkmcnt) {
1875 return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(Version.Major),
1876 getLgkmcntBitWidth(Version.Major));
1877}
1878
1879unsigned encodeWaitcnt(const IsaVersion &Version, unsigned Vmcnt,
1880 unsigned Expcnt, unsigned Lgkmcnt) {
1881 unsigned Waitcnt = getWaitcntBitMask(Version);
1882 Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
1883 Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
1884 Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
1885 return Waitcnt;
1886}
1887
1888unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) {
1889 return encodeWaitcnt(Version, Decoded.LoadCnt, Decoded.ExpCnt, Decoded.DsCnt);
1890}
1891
1892static unsigned getCombinedCountBitMask(const IsaVersion &Version,
1893 bool IsStore) {
1894 unsigned Dscnt = getBitMask(getDscntBitShift(Version.Major),
1895 getDscntBitWidth(Version.Major));
1896 if (IsStore) {
1897 unsigned Storecnt = getBitMask(getLoadcntStorecntBitShift(Version.Major),
1898 getStorecntBitWidth(Version.Major));
1899 return Dscnt | Storecnt;
1900 }
1901 unsigned Loadcnt = getBitMask(getLoadcntStorecntBitShift(Version.Major),
1902 getLoadcntBitWidth(Version.Major));
1903 return Dscnt | Loadcnt;
1904}
1905
1906Waitcnt decodeLoadcntDscnt(const IsaVersion &Version, unsigned LoadcntDscnt) {
1907 Waitcnt Decoded;
1908 Decoded.LoadCnt =
1909 unpackBits(LoadcntDscnt, getLoadcntStorecntBitShift(Version.Major),
1910 getLoadcntBitWidth(Version.Major));
1911 Decoded.DsCnt = unpackBits(LoadcntDscnt, getDscntBitShift(Version.Major),
1912 getDscntBitWidth(Version.Major));
1913 return Decoded;
1914}
1915
1916Waitcnt decodeStorecntDscnt(const IsaVersion &Version, unsigned StorecntDscnt) {
1917 Waitcnt Decoded;
1918 Decoded.StoreCnt =
1919 unpackBits(StorecntDscnt, getLoadcntStorecntBitShift(Version.Major),
1920 getStorecntBitWidth(Version.Major));
1921 Decoded.DsCnt = unpackBits(StorecntDscnt, getDscntBitShift(Version.Major),
1922 getDscntBitWidth(Version.Major));
1923 return Decoded;
1924}
1925
1926static unsigned encodeLoadcnt(const IsaVersion &Version, unsigned Waitcnt,
1927 unsigned Loadcnt) {
1928 return packBits(Loadcnt, Waitcnt, getLoadcntStorecntBitShift(Version.Major),
1929 getLoadcntBitWidth(Version.Major));
1930}
1931
1932static unsigned encodeStorecnt(const IsaVersion &Version, unsigned Waitcnt,
1933 unsigned Storecnt) {
1934 return packBits(Storecnt, Waitcnt, getLoadcntStorecntBitShift(Version.Major),
1935 getStorecntBitWidth(Version.Major));
1936}
1937
1938static unsigned encodeDscnt(const IsaVersion &Version, unsigned Waitcnt,
1939 unsigned Dscnt) {
1940 return packBits(Dscnt, Waitcnt, getDscntBitShift(Version.Major),
1941 getDscntBitWidth(Version.Major));
1942}
1943
1944static unsigned encodeLoadcntDscnt(const IsaVersion &Version, unsigned Loadcnt,
1945 unsigned Dscnt) {
1946 unsigned Waitcnt = getCombinedCountBitMask(Version, false);
1947 Waitcnt = encodeLoadcnt(Version, Waitcnt, Loadcnt);
1948 Waitcnt = encodeDscnt(Version, Waitcnt, Dscnt);
1949 return Waitcnt;
1950}
1951
1952unsigned encodeLoadcntDscnt(const IsaVersion &Version, const Waitcnt &Decoded) {
1953 return encodeLoadcntDscnt(Version, Decoded.LoadCnt, Decoded.DsCnt);
1954}
1955
1957 unsigned Storecnt, unsigned Dscnt) {
1958 unsigned Waitcnt = getCombinedCountBitMask(Version, true);
1959 Waitcnt = encodeStorecnt(Version, Waitcnt, Storecnt);
1960 Waitcnt = encodeDscnt(Version, Waitcnt, Dscnt);
1961 return Waitcnt;
1962}
1963
1964unsigned encodeStorecntDscnt(const IsaVersion &Version,
1965 const Waitcnt &Decoded) {
1966 return encodeStorecntDscnt(Version, Decoded.StoreCnt, Decoded.DsCnt);
1967}
1968
1969//===----------------------------------------------------------------------===//
1970// Custom Operand Values
1971//===----------------------------------------------------------------------===//
1972
1973static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr,
1974 int Size,
1975 const MCSubtargetInfo &STI) {
1976 unsigned Enc = 0;
1977 for (int Idx = 0; Idx < Size; ++Idx) {
1978 const auto &Op = Opr[Idx];
1979 if (Op.isSupported(STI))
1980 Enc |= Op.encode(Op.Default);
1981 }
1982 return Enc;
1983}
1984
1985static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr,
1986 int Size, unsigned Code,
1987 bool &HasNonDefaultVal,
1988 const MCSubtargetInfo &STI) {
1989 unsigned UsedOprMask = 0;
1990 HasNonDefaultVal = false;
1991 for (int Idx = 0; Idx < Size; ++Idx) {
1992 const auto &Op = Opr[Idx];
1993 if (!Op.isSupported(STI))
1994 continue;
1995 UsedOprMask |= Op.getMask();
1996 unsigned Val = Op.decode(Code);
1997 if (!Op.isValid(Val))
1998 return false;
1999 HasNonDefaultVal |= (Val != Op.Default);
2000 }
2001 return (Code & ~UsedOprMask) == 0;
2002}
2003
2004static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size,
2005 unsigned Code, int &Idx, StringRef &Name,
2006 unsigned &Val, bool &IsDefault,
2007 const MCSubtargetInfo &STI) {
2008 while (Idx < Size) {
2009 const auto &Op = Opr[Idx++];
2010 if (Op.isSupported(STI)) {
2011 Name = Op.Name;
2012 Val = Op.decode(Code);
2013 IsDefault = (Val == Op.Default);
2014 return true;
2015 }
2016 }
2017
2018 return false;
2019}
2020
2021static int encodeCustomOperandVal(const CustomOperandVal &Op,
2022 int64_t InputVal) {
2023 if (InputVal < 0 || InputVal > Op.Max)
2024 return OPR_VAL_INVALID;
2025 return Op.encode(InputVal);
2026}
2027
2028static int encodeCustomOperand(const CustomOperandVal *Opr, int Size,
2029 const StringRef Name, int64_t InputVal,
2030 unsigned &UsedOprMask,
2031 const MCSubtargetInfo &STI) {
2032 int InvalidId = OPR_ID_UNKNOWN;
2033 for (int Idx = 0; Idx < Size; ++Idx) {
2034 const auto &Op = Opr[Idx];
2035 if (Op.Name == Name) {
2036 if (!Op.isSupported(STI)) {
2037 InvalidId = OPR_ID_UNSUPPORTED;
2038 continue;
2039 }
2040 auto OprMask = Op.getMask();
2041 if (OprMask & UsedOprMask)
2042 return OPR_ID_DUPLICATE;
2043 UsedOprMask |= OprMask;
2044 return encodeCustomOperandVal(Op, InputVal);
2045 }
2046 }
2047 return InvalidId;
2048}
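// Usage sketch (illustrative): if an operand list names the same field twice,
// e.g. two depctr_va_vdst(...) specifiers in one s_waitcnt_depctr operand, the
// second encodeCustomOperand call sees the field's mask already present in
// UsedOprMask and returns OPR_ID_DUPLICATE rather than silently overwriting
// the first value; an unknown field name falls through to OPR_ID_UNKNOWN and
// an unsupported one to OPR_ID_UNSUPPORTED.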
2049
2050//===----------------------------------------------------------------------===//
2051// DepCtr
2052//===----------------------------------------------------------------------===//
2053
2054namespace DepCtr {
2055
2056int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI) {
2057 static int Default = -1;
2058 if (Default == -1)
2059 Default = getDefaultCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, STI);
2060 return Default;
2061}
2062
2063bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal,
2064 const MCSubtargetInfo &STI) {
2065 return isSymbolicCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, Code,
2066 HasNonDefaultVal, STI);
2067}
2068
2069bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val,
2070 bool &IsDefault, const MCSubtargetInfo &STI) {
2071 return decodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Code, Id, Name, Val,
2072 IsDefault, STI);
2073}
2074
2075int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask,
2076 const MCSubtargetInfo &STI) {
2077 return encodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Name, Val, UsedOprMask,
2078 STI);
2079}
2080
2081unsigned getVaVdstBitMask() { return (1 << getVaVdstBitWidth()) - 1; }
2082
2083unsigned getVaSdstBitMask() { return (1 << getVaSdstBitWidth()) - 1; }
2084
2085unsigned getVaSsrcBitMask() { return (1 << getVaSsrcBitWidth()) - 1; }
2086
2087unsigned getHoldCntBitMask(const IsaVersion &Version) {
2088 return (1 << getHoldCntWidth(Version.Major, Version.Minor)) - 1;
2089}
2090
2091unsigned getVmVsrcBitMask() { return (1 << getVmVsrcBitWidth()) - 1; }
2092
2093unsigned getVaVccBitMask() { return (1 << getVaVccBitWidth()) - 1; }
2094
2095unsigned getSaSdstBitMask() { return (1 << getSaSdstBitWidth()) - 1; }
2096
2097unsigned decodeFieldVmVsrc(unsigned Encoded) {
2098 return unpackBits(Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());
2099}
2100
2101unsigned decodeFieldVaVdst(unsigned Encoded) {
2102 return unpackBits(Encoded, getVaVdstBitShift(), getVaVdstBitWidth());
2103}
2104
2105unsigned decodeFieldSaSdst(unsigned Encoded) {
2106 return unpackBits(Encoded, getSaSdstBitShift(), getSaSdstBitWidth());
2107}
2108
2109unsigned decodeFieldVaSdst(unsigned Encoded) {
2110 return unpackBits(Encoded, getVaSdstBitShift(), getVaSdstBitWidth());
2111}
2112
2113unsigned decodeFieldVaVcc(unsigned Encoded) {
2114 return unpackBits(Encoded, getVaVccBitShift(), getVaVccBitWidth());
2115}
2116
2117unsigned decodeFieldVaSsrc(unsigned Encoded) {
2118 return unpackBits(Encoded, getVaSsrcBitShift(), getVaSsrcBitWidth());
2119}
2120
2121unsigned decodeFieldHoldCnt(unsigned Encoded, const IsaVersion &Version) {
2122 return unpackBits(Encoded, getHoldCntBitShift(),
2123 getHoldCntWidth(Version.Major, Version.Minor));
2124}
2125
2126unsigned encodeFieldVmVsrc(unsigned Encoded, unsigned VmVsrc) {
2127 return packBits(VmVsrc, Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());
2128}
2129
2130unsigned encodeFieldVmVsrc(unsigned VmVsrc, const MCSubtargetInfo &STI) {
2131 unsigned Encoded = getDefaultDepCtrEncoding(STI);
2132 return encodeFieldVmVsrc(Encoded, VmVsrc);
2133}
2134
2135unsigned encodeFieldVaVdst(unsigned Encoded, unsigned VaVdst) {
2136 return packBits(VaVdst, Encoded, getVaVdstBitShift(), getVaVdstBitWidth());
2137}
2138
2139unsigned encodeFieldVaVdst(unsigned VaVdst, const MCSubtargetInfo &STI) {
2140 unsigned Encoded = getDefaultDepCtrEncoding(STI);
2141 return encodeFieldVaVdst(Encoded, VaVdst);
2142}
2143
2144unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst) {
2145 return packBits(SaSdst, Encoded, getSaSdstBitShift(), getSaSdstBitWidth());
2146}
2147
2148unsigned encodeFieldSaSdst(unsigned SaSdst, const MCSubtargetInfo &STI) {
2149 unsigned Encoded = getDefaultDepCtrEncoding(STI);
2150 return encodeFieldSaSdst(Encoded, SaSdst);
2151}
2152
2153unsigned encodeFieldVaSdst(unsigned Encoded, unsigned VaSdst) {
2154 return packBits(VaSdst, Encoded, getVaSdstBitShift(), getVaSdstBitWidth());
2155}
2156
2157unsigned encodeFieldVaSdst(unsigned VaSdst, const MCSubtargetInfo &STI) {
2158 unsigned Encoded = getDefaultDepCtrEncoding(STI);
2159 return encodeFieldVaSdst(Encoded, VaSdst);
2160}
2161
2162unsigned encodeFieldVaVcc(unsigned Encoded, unsigned VaVcc) {
2163 return packBits(VaVcc, Encoded, getVaVccBitShift(), getVaVccBitWidth());
2164}
2165
2166unsigned encodeFieldVaVcc(unsigned VaVcc, const MCSubtargetInfo &STI) {
2167 unsigned Encoded = getDefaultDepCtrEncoding(STI);
2168 return encodeFieldVaVcc(Encoded, VaVcc);
2169}
2170
2171unsigned encodeFieldVaSsrc(unsigned Encoded, unsigned VaSsrc) {
2172 return packBits(VaSsrc, Encoded, getVaSsrcBitShift(), getVaSsrcBitWidth());
2173}
2174
2175unsigned encodeFieldVaSsrc(unsigned VaSsrc, const MCSubtargetInfo &STI) {
2176 unsigned Encoded = getDefaultDepCtrEncoding(STI);
2177 return encodeFieldVaSsrc(Encoded, VaSsrc);
2178}
2179
2180unsigned encodeFieldHoldCnt(unsigned Encoded, unsigned HoldCnt,
2181 const IsaVersion &Version) {
2182 return packBits(HoldCnt, Encoded, getHoldCntBitShift(),
2183 getHoldCntWidth(Version.Major, Version.Minor));
2184}
2185
2186unsigned encodeFieldHoldCnt(unsigned HoldCnt, const MCSubtargetInfo &STI) {
2187 unsigned Encoded = getDefaultDepCtrEncoding(STI);
2188 return encodeFieldHoldCnt(Encoded, HoldCnt, getIsaVersion(STI.getCPU()));
2189}
2190
2191} // namespace DepCtr
2192
2193//===----------------------------------------------------------------------===//
2194// exp tgt
2195//===----------------------------------------------------------------------===//
2196
2197namespace Exp {
2198
2199struct ExpTgt {
2200 StringLiteral Name;
2201 unsigned Tgt;
2202 unsigned MaxIndex;
2203};
2204
2205// clang-format off
2206static constexpr ExpTgt ExpTgtInfo[] = {
2207 {{"null"}, ET_NULL, ET_NULL_MAX_IDX},
2208 {{"mrtz"}, ET_MRTZ, ET_MRTZ_MAX_IDX},
2209 {{"prim"}, ET_PRIM, ET_PRIM_MAX_IDX},
2210 {{"mrt"}, ET_MRT0, ET_MRT_MAX_IDX},
2211 {{"pos"}, ET_POS0, ET_POS_MAX_IDX},
2212 {{"dual_src_blend"},ET_DUAL_SRC_BLEND0, ET_DUAL_SRC_BLEND_MAX_IDX},
2213 {{"param"}, ET_PARAM0, ET_PARAM_MAX_IDX},
2214};
2215// clang-format on
2216
2217bool getTgtName(unsigned Id, StringRef &Name, int &Index) {
2218 for (const ExpTgt &Val : ExpTgtInfo) {
2219 if (Val.Tgt <= Id && Id <= Val.Tgt + Val.MaxIndex) {
2220 Index = (Val.MaxIndex == 0) ? -1 : (Id - Val.Tgt);
2221 Name = Val.Name;
2222 return true;
2223 }
2224 }
2225 return false;
2226}
2227
2228unsigned getTgtId(const StringRef Name) {
2229
2230 for (const ExpTgt &Val : ExpTgtInfo) {
2231 if (Val.MaxIndex == 0 && Name == Val.Name)
2232 return Val.Tgt;
2233
2234 if (Val.MaxIndex > 0 && Name.starts_with(Val.Name)) {
2235 StringRef Suffix = Name.drop_front(Val.Name.size());
2236
2237 unsigned Id;
2238 if (Suffix.getAsInteger(10, Id) || Id > Val.MaxIndex)
2239 return ET_INVALID;
2240
2241 // Disallow leading zeroes in the index suffix.
2242 if (Suffix.size() > 1 && Suffix[0] == '0')
2243 return ET_INVALID;
2244
2245 return Val.Tgt + Id;
2246 }
2247 }
2248 return ET_INVALID;
2249}
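// Illustrative examples derived from the table above (not from the original
// source): "pos3" resolves to ET_POS0 + 3, "mrt07" is rejected as ET_INVALID
// because of the leading zero in the index suffix, and an indexed name whose
// suffix exceeds MaxIndex is likewise rejected.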
2250
2251bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI) {
2252 switch (Id) {
2253 case ET_NULL:
2254 return !isGFX11Plus(STI);
2255 case ET_POS4:
2256 case ET_PRIM:
2257 return isGFX10Plus(STI);
2258 case ET_DUAL_SRC_BLEND0:
2259 case ET_DUAL_SRC_BLEND1:
2260 return isGFX11Plus(STI);
2261 default:
2262 if (Id >= ET_PARAM0 && Id <= ET_PARAM31)
2263 return !isGFX11Plus(STI);
2264 return true;
2265 }
2266}
2267
2268} // namespace Exp
2269
2270//===----------------------------------------------------------------------===//
2271// MTBUF Format
2272//===----------------------------------------------------------------------===//
2273
2274namespace MTBUFFormat {
2275
2276int64_t getDfmt(const StringRef Name) {
2277 for (int Id = DFMT_MIN; Id <= DFMT_MAX; ++Id) {
2278 if (Name == DfmtSymbolic[Id])
2279 return Id;
2280 }
2281 return DFMT_UNDEF;
2282}
2283
2284StringRef getDfmtName(unsigned Id) {
2285 assert(Id <= DFMT_MAX);
2286 return DfmtSymbolic[Id];
2287}
2288
2289static StringLiteral const *getNfmtLookupTable(const MCSubtargetInfo &STI) {
2290 if (isSI(STI) || isCI(STI))
2291 return NfmtSymbolicSICI;
2292 if (isVI(STI) || isGFX9(STI))
2293 return NfmtSymbolicVI;
2294 return NfmtSymbolicGFX10;
2295}
2296
2297int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI) {
2298 const auto *lookupTable = getNfmtLookupTable(STI);
2299 for (int Id = NFMT_MIN; Id <= NFMT_MAX; ++Id) {
2300 if (Name == lookupTable[Id])
2301 return Id;
2302 }
2303 return NFMT_UNDEF;
2304}
2305
2306StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI) {
2307 assert(Id <= NFMT_MAX);
2308 return getNfmtLookupTable(STI)[Id];
2309}
2310
2311bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI) {
2312 unsigned Dfmt;
2313 unsigned Nfmt;
2314 decodeDfmtNfmt(Id, Dfmt, Nfmt);
2315 return isValidNfmt(Nfmt, STI);
2316}
2317
2318bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI) {
2319 return !getNfmtName(Id, STI).empty();
2320}
2321
2322int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt) {
2323 return (Dfmt << DFMT_SHIFT) | (Nfmt << NFMT_SHIFT);
2324}
2325
2326void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt) {
2327 Dfmt = (Format >> DFMT_SHIFT) & DFMT_MASK;
2328 Nfmt = (Format >> NFMT_SHIFT) & NFMT_MASK;
2329}
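// Example (illustrative): encodeDfmtNfmt simply ORs the two fields at their
// respective shifts, so for any in-range pair decodeDfmtNfmt(encodeDfmtNfmt(D,
// N), D2, N2) recovers D2 == D and N2 == N.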
2330
2331int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI) {
2332 if (isGFX11Plus(STI)) {
2333 for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
2334 if (Name == UfmtSymbolicGFX11[Id])
2335 return Id;
2336 }
2337 } else {
2338 for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
2339 if (Name == UfmtSymbolicGFX10[Id])
2340 return Id;
2341 }
2342 }
2343 return UFMT_UNDEF;
2344}
2345
2346StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI) {
2347 if (isValidUnifiedFormat(Id, STI))
2348 return isGFX10(STI) ? UfmtSymbolicGFX10[Id] : UfmtSymbolicGFX11[Id];
2349 return "";
2350}
2351
2352bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI) {
2353 return isGFX10(STI) ? Id <= UfmtGFX10::UFMT_LAST : Id <= UfmtGFX11::UFMT_LAST;
2354}
2355
2356int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt,
2357 const MCSubtargetInfo &STI) {
2358 int64_t Fmt = encodeDfmtNfmt(Dfmt, Nfmt);
2359 if (isGFX11Plus(STI)) {
2360 for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
2361 if (Fmt == DfmtNfmt2UFmtGFX11[Id])
2362 return Id;
2363 }
2364 } else {
2365 for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
2366 if (Fmt == DfmtNfmt2UFmtGFX10[Id])
2367 return Id;
2368 }
2369 }
2370 return UFMT_UNDEF;
2371}
2372
2373bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI) {
2374 return isGFX10Plus(STI) ? (Val <= UFMT_MAX) : (Val <= DFMT_NFMT_MAX);
2375}
2376
2377unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI) {
2378 if (isGFX10Plus(STI))
2379 return UFMT_DEFAULT;
2380 return DFMT_NFMT_DEFAULT;
2381}
2382
2383} // namespace MTBUFFormat
2384
2385//===----------------------------------------------------------------------===//
2386// SendMsg
2387//===----------------------------------------------------------------------===//
2388
2389namespace SendMsg {
2390
2391static uint64_t getMsgIdMask(const MCSubtargetInfo &STI) {
2392 return isGFX11Plus(STI) ? ID_MASK_GFX11Plus_ : ID_MASK_PreGFX11_;
2393}
2394
2395bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI) {
2396 return (MsgId & ~(getMsgIdMask(STI))) == 0;
2397}
2398
2399bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI,
2400 bool Strict) {
2401 assert(isValidMsgId(MsgId, STI));
2402
2403 if (!Strict)
2404 return 0 <= OpId && isUInt<OP_WIDTH_>(OpId);
2405
2406 if (msgRequiresOp(MsgId, STI)) {
2407 if (MsgId == ID_GS_PreGFX11 && OpId == OP_GS_NOP)
2408 return false;
2409
2410 return !getMsgOpName(MsgId, OpId, STI).empty();
2411 }
2412
2413 return OpId == OP_NONE_;
2414}
2415
2416bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId,
2417 const MCSubtargetInfo &STI, bool Strict) {
2418 assert(isValidMsgOp(MsgId, OpId, STI, Strict));
2419
2420 if (!Strict)
2421 return 0 <= StreamId && isUInt<STREAM_ID_WIDTH_>(StreamId);
2422
2423 if (!isGFX11Plus(STI)) {
2424 switch (MsgId) {
2425 case ID_GS_PreGFX11:
2426 return STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_;
2427 case ID_GS_DONE_PreGFX11:
2428 return (OpId == OP_GS_NOP)
2429 ? (StreamId == STREAM_ID_NONE_)
2430 : (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_);
2431 }
2432 }
2433 return StreamId == STREAM_ID_NONE_;
2434}
2435
2436bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI) {
2437 return MsgId == ID_SYSMSG ||
2438 (!isGFX11Plus(STI) &&
2439 (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11));
2440}
2441
2442bool msgSupportsStream(int64_t MsgId, int64_t OpId,
2443 const MCSubtargetInfo &STI) {
2444 return !isGFX11Plus(STI) &&
2445 (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11) &&
2446 OpId != OP_GS_NOP;
2447}
2448
2449void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId,
2450 uint16_t &StreamId, const MCSubtargetInfo &STI) {
2451 MsgId = Val & getMsgIdMask(STI);
2452 if (isGFX11Plus(STI)) {
2453 OpId = 0;
2454 StreamId = 0;
2455 } else {
2456 OpId = (Val & OP_MASK_) >> OP_SHIFT_;
2457 StreamId = (Val & STREAM_ID_MASK_) >> STREAM_ID_SHIFT_;
2458 }
2459}
2460
2461uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId) {
2462 return MsgId | (OpId << OP_SHIFT_) | (StreamId << STREAM_ID_SHIFT_);
2463}
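// Example (illustrative): for in-range values on a pre-GFX11 target,
// decodeMsg(encodeMsg(MsgId, OpId, StreamId), ...) recovers the original
// MsgId/OpId/StreamId triple, whereas on GFX11+ only MsgId is carried in the
// encoding and decodeMsg reports OpId and StreamId as 0.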
2464
2465} // namespace SendMsg
2466
2467//===----------------------------------------------------------------------===//
2468//
2469//===----------------------------------------------------------------------===//
2470
2471unsigned getInitialPSInputAddr(const Function &F) {
2472 return F.getFnAttributeAsParsedInteger("InitialPSInputAddr", 0);
2473}
2474
2475bool getHasColorExport(const Function &F) {
2476 // As a safe default always respond as if PS has color exports.
2477 return F.getFnAttributeAsParsedInteger(
2478 "amdgpu-color-export",
2479 F.getCallingConv() == CallingConv::AMDGPU_PS ? 1 : 0) != 0;
2480}
2481
2482bool getHasDepthExport(const Function &F) {
2483 return F.getFnAttributeAsParsedInteger("amdgpu-depth-export", 0) != 0;
2484}
2485
2486unsigned getDynamicVGPRBlockSize(const Function &F) {
2487 unsigned BlockSize =
2488 F.getFnAttributeAsParsedInteger("amdgpu-dynamic-vgpr-block-size", 0);
2489
2490 if (BlockSize == 16 || BlockSize == 32)
2491 return BlockSize;
2492
2493 return 0;
2494}
2495
2496bool hasXNACK(const MCSubtargetInfo &STI) {
2497 return STI.hasFeature(AMDGPU::FeatureXNACK);
2498}
2499
2500bool hasSRAMECC(const MCSubtargetInfo &STI) {
2501 return STI.hasFeature(AMDGPU::FeatureSRAMECC);
2502}
2503
2504bool hasMIMG_R128(const MCSubtargetInfo &STI) {
2505 return STI.hasFeature(AMDGPU::FeatureMIMG_R128) &&
2506 !STI.hasFeature(AMDGPU::FeatureR128A16);
2507}
2508
2509bool hasA16(const MCSubtargetInfo &STI) {
2510 return STI.hasFeature(AMDGPU::FeatureA16);
2511}
2512
2513bool hasG16(const MCSubtargetInfo &STI) {
2514 return STI.hasFeature(AMDGPU::FeatureG16);
2515}
2516
2517bool hasPackedD16(const MCSubtargetInfo &STI) {
2518 return !STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem) && !isCI(STI) &&
2519 !isSI(STI);
2520}
2521
2522bool hasGDS(const MCSubtargetInfo &STI) {
2523 return STI.hasFeature(AMDGPU::FeatureGDS);
2524}
2525
2526unsigned getNSAMaxSize(const MCSubtargetInfo &STI, bool HasSampler) {
2527 auto Version = getIsaVersion(STI.getCPU());
2528 if (Version.Major == 10)
2529 return Version.Minor >= 3 ? 13 : 5;
2530 if (Version.Major == 11)
2531 return 5;
2532 if (Version.Major >= 12)
2533 return HasSampler ? 4 : 5;
2534 return 0;
2535}
2536
2538 if (isGFX1250Plus(STI))
2539 return 32;
2540 return 16;
2541}
2542
2543bool isSI(const MCSubtargetInfo &STI) {
2544 return STI.hasFeature(AMDGPU::FeatureSouthernIslands);
2545}
2546
2547bool isCI(const MCSubtargetInfo &STI) {
2548 return STI.hasFeature(AMDGPU::FeatureSeaIslands);
2549}
2550
2551bool isVI(const MCSubtargetInfo &STI) {
2552 return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);
2553}
2554
2555bool isGFX9(const MCSubtargetInfo &STI) {
2556 return STI.hasFeature(AMDGPU::FeatureGFX9);
2557}
2558
2559bool isGFX9_GFX10(const MCSubtargetInfo &STI) {
2560 return isGFX9(STI) || isGFX10(STI);
2561}
2562
2563bool isGFX9_GFX10_GFX11(const MCSubtargetInfo &STI) {
2564 return isGFX9(STI) || isGFX10(STI) || isGFX11(STI);
2565}
2566
2567bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI) {
2568 return isVI(STI) || isGFX9(STI) || isGFX10(STI);
2569}
2570
2571bool isGFX8Plus(const MCSubtargetInfo &STI) {
2572 return isVI(STI) || isGFX9Plus(STI);
2573}
2574
2575bool isGFX9Plus(const MCSubtargetInfo &STI) {
2576 return isGFX9(STI) || isGFX10Plus(STI);
2577}
2578
2579bool isNotGFX9Plus(const MCSubtargetInfo &STI) { return !isGFX9Plus(STI); }
2580
2581bool isGFX10(const MCSubtargetInfo &STI) {
2582 return STI.hasFeature(AMDGPU::FeatureGFX10);
2583}
2584
2585bool isGFX10_GFX11(const MCSubtargetInfo &STI) {
2586 return isGFX10(STI) || isGFX11(STI);
2587}
2588
2589bool isGFX10Plus(const MCSubtargetInfo &STI) {
2590 return isGFX10(STI) || isGFX11Plus(STI);
2591}
2592
2593bool isGFX11(const MCSubtargetInfo &STI) {
2594 return STI.hasFeature(AMDGPU::FeatureGFX11);
2595}
2596
2597bool isGFX11Plus(const MCSubtargetInfo &STI) {
2598 return isGFX11(STI) || isGFX12Plus(STI);
2599}
2600
2601bool isGFX12(const MCSubtargetInfo &STI) {
2602 return STI.getFeatureBits()[AMDGPU::FeatureGFX12];
2603}
2604
2605bool isGFX12Plus(const MCSubtargetInfo &STI) {
2606 return isGFX12(STI) || isGFX13Plus(STI);
2607}
2608
2609bool isNotGFX12Plus(const MCSubtargetInfo &STI) { return !isGFX12Plus(STI); }
2610
2611bool isGFX1250(const MCSubtargetInfo &STI) {
2612 return STI.getFeatureBits()[AMDGPU::FeatureGFX1250Insts] && !isGFX13(STI);
2613}
2614
2615bool isGFX1250Plus(const MCSubtargetInfo &STI) {
2616 return STI.getFeatureBits()[AMDGPU::FeatureGFX1250Insts];
2617}
2618
2619bool isGFX13(const MCSubtargetInfo &STI) {
2620 return STI.getFeatureBits()[AMDGPU::FeatureGFX13];
2621}
2622
2623bool isGFX13Plus(const MCSubtargetInfo &STI) { return isGFX13(STI); }
2624
2626 if (isGFX1250(STI))
2627 return false;
2628 return isGFX10Plus(STI);
2629}
2630
2631bool isNotGFX11Plus(const MCSubtargetInfo &STI) { return !isGFX11Plus(STI); }
2632
2633bool isNotGFX10Plus(const MCSubtargetInfo &STI) {
2634 return isSI(STI) || isCI(STI) || isVI(STI) || isGFX9(STI);
2635}
2636
2637bool isGFX10Before1030(const MCSubtargetInfo &STI) {
2638 return isGFX10(STI) && !AMDGPU::isGFX10_BEncoding(STI);
2639}
2640
2641bool isGCN3Encoding(const MCSubtargetInfo &STI) {
2642 return STI.hasFeature(AMDGPU::FeatureGCN3Encoding);
2643}
2644
2645bool isGFX10_AEncoding(const MCSubtargetInfo &STI) {
2646 return STI.hasFeature(AMDGPU::FeatureGFX10_AEncoding);
2647}
2648
2649bool isGFX10_BEncoding(const MCSubtargetInfo &STI) {
2650 return STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding);
2651}
2652
2653bool hasGFX10_3Insts(const MCSubtargetInfo &STI) {
2654 return STI.hasFeature(AMDGPU::FeatureGFX10_3Insts);
2655}
2656
2657bool isGFX10_3_GFX11(const MCSubtargetInfo &STI) {
2658 return isGFX10_BEncoding(STI) && !isGFX12Plus(STI);
2659}
2660
2661bool isGFX90A(const MCSubtargetInfo &STI) {
2662 return STI.hasFeature(AMDGPU::FeatureGFX90AInsts);
2663}
2664
2665bool isGFX940(const MCSubtargetInfo &STI) {
2666 return STI.hasFeature(AMDGPU::FeatureGFX940Insts);
2667}
2668
2669bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI) {
2670 return STI.hasFeature(AMDGPU::FeatureArchitectedFlatScratch);
2671}
2672
2673bool hasMAIInsts(const MCSubtargetInfo &STI) {
2674 return STI.hasFeature(AMDGPU::FeatureMAIInsts);
2675}
2676
2677bool hasVOPD(const MCSubtargetInfo &STI) {
2678 return STI.hasFeature(AMDGPU::FeatureVOPDInsts);
2679}
2680
2681bool hasDPPSrc1SGPR(const MCSubtargetInfo &STI) {
2682 return STI.hasFeature(AMDGPU::FeatureDPPSrc1SGPR);
2683}
2684
2685bool hasKernargPreload(const MCSubtargetInfo &STI) {
2686 return STI.hasFeature(AMDGPU::FeatureKernargPreload);
2687}
2688
2689int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR,
2690 int32_t ArgNumVGPR) {
2691 if (has90AInsts && ArgNumAGPR)
2692 return alignTo(ArgNumVGPR, 4) + ArgNumAGPR;
2693 return std::max(ArgNumVGPR, ArgNumAGPR);
2694}
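// Example (illustrative): with gfx90a-style AGPR/VGPR co-allocation
// (has90AInsts == true), getTotalNumVGPRs(true, /*AGPR=*/8, /*VGPR=*/10)
// aligns the 10 ArchVGPRs up to 12 and adds the 8 AGPRs for a total of 20;
// without AGPR usage the result is simply max(ArgNumVGPR, ArgNumAGPR).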
2695
2696bool isSGPR(MCRegister Reg, const MCRegisterInfo *TRI) {
2697 const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
2698 const MCRegister FirstSubReg = TRI->getSubReg(Reg, AMDGPU::sub0);
2699 return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
2700 Reg == AMDGPU::SCC;
2701}
2702
2703bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI) {
2704 return MRI.getEncodingValue(Reg) & AMDGPU::HWEncoding::IS_HI16;
2705}
2706
2707#define MAP_REG2REG \
2708 using namespace AMDGPU; \
2709 switch (Reg.id()) { \
2710 default: \
2711 return Reg; \
2712 CASE_CI_VI(FLAT_SCR) \
2713 CASE_CI_VI(FLAT_SCR_LO) \
2714 CASE_CI_VI(FLAT_SCR_HI) \
2715 CASE_VI_GFX9PLUS(TTMP0) \
2716 CASE_VI_GFX9PLUS(TTMP1) \
2717 CASE_VI_GFX9PLUS(TTMP2) \
2718 CASE_VI_GFX9PLUS(TTMP3) \
2719 CASE_VI_GFX9PLUS(TTMP4) \
2720 CASE_VI_GFX9PLUS(TTMP5) \
2721 CASE_VI_GFX9PLUS(TTMP6) \
2722 CASE_VI_GFX9PLUS(TTMP7) \
2723 CASE_VI_GFX9PLUS(TTMP8) \
2724 CASE_VI_GFX9PLUS(TTMP9) \
2725 CASE_VI_GFX9PLUS(TTMP10) \
2726 CASE_VI_GFX9PLUS(TTMP11) \
2727 CASE_VI_GFX9PLUS(TTMP12) \
2728 CASE_VI_GFX9PLUS(TTMP13) \
2729 CASE_VI_GFX9PLUS(TTMP14) \
2730 CASE_VI_GFX9PLUS(TTMP15) \
2731 CASE_VI_GFX9PLUS(TTMP0_TTMP1) \
2732 CASE_VI_GFX9PLUS(TTMP2_TTMP3) \
2733 CASE_VI_GFX9PLUS(TTMP4_TTMP5) \
2734 CASE_VI_GFX9PLUS(TTMP6_TTMP7) \
2735 CASE_VI_GFX9PLUS(TTMP8_TTMP9) \
2736 CASE_VI_GFX9PLUS(TTMP10_TTMP11) \
2737 CASE_VI_GFX9PLUS(TTMP12_TTMP13) \
2738 CASE_VI_GFX9PLUS(TTMP14_TTMP15) \
2739 CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \
2740 CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \
2741 CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \
2742 CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \
2743 CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
2744 CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
2745 CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
2746 CASE_VI_GFX9PLUS( \
2747 TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
2748 CASE_GFXPRE11_GFX11PLUS(M0) \
2749 CASE_GFXPRE11_GFX11PLUS(SGPR_NULL) \
2750 CASE_GFXPRE11_GFX11PLUS_TO(SGPR_NULL64, SGPR_NULL) \
2751 }
2752
2753#define CASE_CI_VI(node) \
2754 assert(!isSI(STI)); \
2755 case node: \
2756 return isCI(STI) ? node##_ci : node##_vi;
2757
2758#define CASE_VI_GFX9PLUS(node) \
2759 case node: \
2760 return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi;
2761
2762#define CASE_GFXPRE11_GFX11PLUS(node) \
2763 case node: \
2764 return isGFX11Plus(STI) ? node##_gfx11plus : node##_gfxpre11;
2765
2766#define CASE_GFXPRE11_GFX11PLUS_TO(node, result) \
2767 case node: \
2768 return isGFX11Plus(STI) ? result##_gfx11plus : result##_gfxpre11;
2769
2770MCRegister getMCReg(MCRegister Reg, const MCSubtargetInfo &STI) {
2771 if (STI.getTargetTriple().getArch() == Triple::r600)
2772 return Reg;
2773 MAP_REG2REG
2774}
2775
2776#undef CASE_CI_VI
2777#undef CASE_VI_GFX9PLUS
2778#undef CASE_GFXPRE11_GFX11PLUS
2779#undef CASE_GFXPRE11_GFX11PLUS_TO
2780
2781#define CASE_CI_VI(node) \
2782 case node##_ci: \
2783 case node##_vi: \
2784 return node;
2785#define CASE_VI_GFX9PLUS(node) \
2786 case node##_vi: \
2787 case node##_gfx9plus: \
2788 return node;
2789#define CASE_GFXPRE11_GFX11PLUS(node) \
2790 case node##_gfx11plus: \
2791 case node##_gfxpre11: \
2792 return node;
2793#define CASE_GFXPRE11_GFX11PLUS_TO(node, result)
2794
2795MCRegister mc2PseudoReg(MCRegister Reg) { MAP_REG2REG }
2796
2797bool isInlineValue(MCRegister Reg) {
2798 switch (Reg.id()) {
2799 case AMDGPU::SRC_SHARED_BASE_LO:
2800 case AMDGPU::SRC_SHARED_BASE:
2801 case AMDGPU::SRC_SHARED_LIMIT_LO:
2802 case AMDGPU::SRC_SHARED_LIMIT:
2803 case AMDGPU::SRC_PRIVATE_BASE_LO:
2804 case AMDGPU::SRC_PRIVATE_BASE:
2805 case AMDGPU::SRC_PRIVATE_LIMIT_LO:
2806 case AMDGPU::SRC_PRIVATE_LIMIT:
2807 case AMDGPU::SRC_FLAT_SCRATCH_BASE_LO:
2808 case AMDGPU::SRC_FLAT_SCRATCH_BASE_HI:
2809 case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
2810 return true;
2811 case AMDGPU::SRC_VCCZ:
2812 case AMDGPU::SRC_EXECZ:
2813 case AMDGPU::SRC_SCC:
2814 return true;
2815 case AMDGPU::SGPR_NULL:
2816 return true;
2817 default:
2818 return false;
2819 }
2820}
2821
2822#undef CASE_CI_VI
2823#undef CASE_VI_GFX9PLUS
2824#undef CASE_GFXPRE11_GFX11PLUS
2825#undef CASE_GFXPRE11_GFX11PLUS_TO
2826#undef MAP_REG2REG
2827
2828bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2829 assert(OpNo < Desc.NumOperands);
2830 unsigned OpType = Desc.operands()[OpNo].OperandType;
2831 return OpType >= AMDGPU::OPERAND_KIMM_FIRST &&
2832 OpType <= AMDGPU::OPERAND_KIMM_LAST;
2833}
2834
2835bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2836 assert(OpNo < Desc.NumOperands);
2837 unsigned OpType = Desc.operands()[OpNo].OperandType;
2838 switch (OpType) {
2852 return true;
2853 default:
2854 return false;
2855 }
2856}
2857
2858bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2859 assert(OpNo < Desc.NumOperands);
2860 unsigned OpType = Desc.operands()[OpNo].OperandType;
2861 return (OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
2862 OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST) ||
2863 (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST &&
2864 OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST);
2865}
2866
2867// Avoid using MCRegisterClass::getSize, since that function will go away
2868// (move from MC* level to Target* level). Return size in bits.
2869unsigned getRegBitWidth(unsigned RCID) {
2870 switch (RCID) {
2871 case AMDGPU::VGPR_16RegClassID:
2872 case AMDGPU::VGPR_16_Lo128RegClassID:
2873 case AMDGPU::SGPR_LO16RegClassID:
2874 case AMDGPU::AGPR_LO16RegClassID:
2875 return 16;
2876 case AMDGPU::SGPR_32RegClassID:
2877 case AMDGPU::VGPR_32RegClassID:
2878 case AMDGPU::VGPR_32_Lo256RegClassID:
2879 case AMDGPU::VRegOrLds_32RegClassID:
2880 case AMDGPU::AGPR_32RegClassID:
2881 case AMDGPU::VS_32RegClassID:
2882 case AMDGPU::AV_32RegClassID:
2883 case AMDGPU::SReg_32RegClassID:
2884 case AMDGPU::SReg_32_XM0RegClassID:
2885 case AMDGPU::SRegOrLds_32RegClassID:
2886 return 32;
2887 case AMDGPU::SGPR_64RegClassID:
2888 case AMDGPU::VS_64RegClassID:
2889 case AMDGPU::SReg_64RegClassID:
2890 case AMDGPU::VReg_64RegClassID:
2891 case AMDGPU::AReg_64RegClassID:
2892 case AMDGPU::SReg_64_XEXECRegClassID:
2893 case AMDGPU::VReg_64_Align2RegClassID:
2894 case AMDGPU::AReg_64_Align2RegClassID:
2895 case AMDGPU::AV_64RegClassID:
2896 case AMDGPU::AV_64_Align2RegClassID:
2897 case AMDGPU::VReg_64_Lo256_Align2RegClassID:
2898 case AMDGPU::VS_64_Lo256RegClassID:
2899 return 64;
2900 case AMDGPU::SGPR_96RegClassID:
2901 case AMDGPU::SReg_96RegClassID:
2902 case AMDGPU::VReg_96RegClassID:
2903 case AMDGPU::AReg_96RegClassID:
2904 case AMDGPU::VReg_96_Align2RegClassID:
2905 case AMDGPU::AReg_96_Align2RegClassID:
2906 case AMDGPU::AV_96RegClassID:
2907 case AMDGPU::AV_96_Align2RegClassID:
2908 case AMDGPU::VReg_96_Lo256_Align2RegClassID:
2909 return 96;
2910 case AMDGPU::SGPR_128RegClassID:
2911 case AMDGPU::SReg_128RegClassID:
2912 case AMDGPU::VReg_128RegClassID:
2913 case AMDGPU::AReg_128RegClassID:
2914 case AMDGPU::VReg_128_Align2RegClassID:
2915 case AMDGPU::AReg_128_Align2RegClassID:
2916 case AMDGPU::AV_128RegClassID:
2917 case AMDGPU::AV_128_Align2RegClassID:
2918 case AMDGPU::SReg_128_XNULLRegClassID:
2919 case AMDGPU::VReg_128_Lo256_Align2RegClassID:
2920 return 128;
2921 case AMDGPU::SGPR_160RegClassID:
2922 case AMDGPU::SReg_160RegClassID:
2923 case AMDGPU::VReg_160RegClassID:
2924 case AMDGPU::AReg_160RegClassID:
2925 case AMDGPU::VReg_160_Align2RegClassID:
2926 case AMDGPU::AReg_160_Align2RegClassID:
2927 case AMDGPU::AV_160RegClassID:
2928 case AMDGPU::AV_160_Align2RegClassID:
2929 case AMDGPU::VReg_160_Lo256_Align2RegClassID:
2930 return 160;
2931 case AMDGPU::SGPR_192RegClassID:
2932 case AMDGPU::SReg_192RegClassID:
2933 case AMDGPU::VReg_192RegClassID:
2934 case AMDGPU::AReg_192RegClassID:
2935 case AMDGPU::VReg_192_Align2RegClassID:
2936 case AMDGPU::AReg_192_Align2RegClassID:
2937 case AMDGPU::AV_192RegClassID:
2938 case AMDGPU::AV_192_Align2RegClassID:
2939 case AMDGPU::VReg_192_Lo256_Align2RegClassID:
2940 return 192;
2941 case AMDGPU::SGPR_224RegClassID:
2942 case AMDGPU::SReg_224RegClassID:
2943 case AMDGPU::VReg_224RegClassID:
2944 case AMDGPU::AReg_224RegClassID:
2945 case AMDGPU::VReg_224_Align2RegClassID:
2946 case AMDGPU::AReg_224_Align2RegClassID:
2947 case AMDGPU::AV_224RegClassID:
2948 case AMDGPU::AV_224_Align2RegClassID:
2949 case AMDGPU::VReg_224_Lo256_Align2RegClassID:
2950 return 224;
2951 case AMDGPU::SGPR_256RegClassID:
2952 case AMDGPU::SReg_256RegClassID:
2953 case AMDGPU::VReg_256RegClassID:
2954 case AMDGPU::AReg_256RegClassID:
2955 case AMDGPU::VReg_256_Align2RegClassID:
2956 case AMDGPU::AReg_256_Align2RegClassID:
2957 case AMDGPU::AV_256RegClassID:
2958 case AMDGPU::AV_256_Align2RegClassID:
2959 case AMDGPU::SReg_256_XNULLRegClassID:
2960 case AMDGPU::VReg_256_Lo256_Align2RegClassID:
2961 return 256;
2962 case AMDGPU::SGPR_288RegClassID:
2963 case AMDGPU::SReg_288RegClassID:
2964 case AMDGPU::VReg_288RegClassID:
2965 case AMDGPU::AReg_288RegClassID:
2966 case AMDGPU::VReg_288_Align2RegClassID:
2967 case AMDGPU::AReg_288_Align2RegClassID:
2968 case AMDGPU::AV_288RegClassID:
2969 case AMDGPU::AV_288_Align2RegClassID:
2970 case AMDGPU::VReg_288_Lo256_Align2RegClassID:
2971 return 288;
2972 case AMDGPU::SGPR_320RegClassID:
2973 case AMDGPU::SReg_320RegClassID:
2974 case AMDGPU::VReg_320RegClassID:
2975 case AMDGPU::AReg_320RegClassID:
2976 case AMDGPU::VReg_320_Align2RegClassID:
2977 case AMDGPU::AReg_320_Align2RegClassID:
2978 case AMDGPU::AV_320RegClassID:
2979 case AMDGPU::AV_320_Align2RegClassID:
2980 case AMDGPU::VReg_320_Lo256_Align2RegClassID:
2981 return 320;
2982 case AMDGPU::SGPR_352RegClassID:
2983 case AMDGPU::SReg_352RegClassID:
2984 case AMDGPU::VReg_352RegClassID:
2985 case AMDGPU::AReg_352RegClassID:
2986 case AMDGPU::VReg_352_Align2RegClassID:
2987 case AMDGPU::AReg_352_Align2RegClassID:
2988 case AMDGPU::AV_352RegClassID:
2989 case AMDGPU::AV_352_Align2RegClassID:
2990 case AMDGPU::VReg_352_Lo256_Align2RegClassID:
2991 return 352;
2992 case AMDGPU::SGPR_384RegClassID:
2993 case AMDGPU::SReg_384RegClassID:
2994 case AMDGPU::VReg_384RegClassID:
2995 case AMDGPU::AReg_384RegClassID:
2996 case AMDGPU::VReg_384_Align2RegClassID:
2997 case AMDGPU::AReg_384_Align2RegClassID:
2998 case AMDGPU::AV_384RegClassID:
2999 case AMDGPU::AV_384_Align2RegClassID:
3000 case AMDGPU::VReg_384_Lo256_Align2RegClassID:
3001 return 384;
3002 case AMDGPU::SGPR_512RegClassID:
3003 case AMDGPU::SReg_512RegClassID:
3004 case AMDGPU::VReg_512RegClassID:
3005 case AMDGPU::AReg_512RegClassID:
3006 case AMDGPU::VReg_512_Align2RegClassID:
3007 case AMDGPU::AReg_512_Align2RegClassID:
3008 case AMDGPU::AV_512RegClassID:
3009 case AMDGPU::AV_512_Align2RegClassID:
3010 case AMDGPU::VReg_512_Lo256_Align2RegClassID:
3011 return 512;
3012 case AMDGPU::SGPR_1024RegClassID:
3013 case AMDGPU::SReg_1024RegClassID:
3014 case AMDGPU::VReg_1024RegClassID:
3015 case AMDGPU::AReg_1024RegClassID:
3016 case AMDGPU::VReg_1024_Align2RegClassID:
3017 case AMDGPU::AReg_1024_Align2RegClassID:
3018 case AMDGPU::AV_1024RegClassID:
3019 case AMDGPU::AV_1024_Align2RegClassID:
3020 case AMDGPU::VReg_1024_Lo256_Align2RegClassID:
3021 return 1024;
3022 default:
3023 llvm_unreachable("Unexpected register class");
3024 }
3025}
3026
3027unsigned getRegBitWidth(const MCRegisterClass &RC) {
3028 return getRegBitWidth(RC.getID());
3029}
3030
3031bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
3032 if (isInlinableIntLiteral(Literal))
3033 return true;
3034
3035 uint64_t Val = static_cast<uint64_t>(Literal);
3036 return (Val == llvm::bit_cast<uint64_t>(0.0)) ||
3037 (Val == llvm::bit_cast<uint64_t>(1.0)) ||
3038 (Val == llvm::bit_cast<uint64_t>(-1.0)) ||
3039 (Val == llvm::bit_cast<uint64_t>(0.5)) ||
3040 (Val == llvm::bit_cast<uint64_t>(-0.5)) ||
3041 (Val == llvm::bit_cast<uint64_t>(2.0)) ||
3042 (Val == llvm::bit_cast<uint64_t>(-2.0)) ||
3043 (Val == llvm::bit_cast<uint64_t>(4.0)) ||
3044 (Val == llvm::bit_cast<uint64_t>(-4.0)) ||
3045 (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
3046}
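// Illustrative examples (assuming the usual -16..64 inline-integer range
// checked by isInlinableIntLiteral): 7 and -12 are inlinable, the bit pattern
// of 1.0 (0x3FF0000000000000) is inlinable, and an arbitrary pattern such as
// 0x0123456789ABCDEF is not.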
3047
3048bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
3049 if (isInlinableIntLiteral(Literal))
3050 return true;
3051
3052 // The actual type of the operand does not seem to matter as long
3053 // as the bits match one of the inline immediate values. For example:
3054 //
3055 // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
3056 // so it is a legal inline immediate.
3057 //
3058 // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
3059 // floating-point, so it is a legal inline immediate.
3060
3061 uint32_t Val = static_cast<uint32_t>(Literal);
3062 return (Val == llvm::bit_cast<uint32_t>(0.0f)) ||
3063 (Val == llvm::bit_cast<uint32_t>(1.0f)) ||
3064 (Val == llvm::bit_cast<uint32_t>(-1.0f)) ||
3065 (Val == llvm::bit_cast<uint32_t>(0.5f)) ||
3066 (Val == llvm::bit_cast<uint32_t>(-0.5f)) ||
3067 (Val == llvm::bit_cast<uint32_t>(2.0f)) ||
3068 (Val == llvm::bit_cast<uint32_t>(-2.0f)) ||
3069 (Val == llvm::bit_cast<uint32_t>(4.0f)) ||
3070 (Val == llvm::bit_cast<uint32_t>(-4.0f)) ||
3071 (Val == 0x3e22f983 && HasInv2Pi);
3072}
3073
3074bool isInlinableLiteralBF16(int16_t Literal, bool HasInv2Pi) {
3075 if (!HasInv2Pi)
3076 return false;
3077 if (isInlinableIntLiteral(Literal))
3078 return true;
3079 uint16_t Val = static_cast<uint16_t>(Literal);
3080 return Val == 0x3F00 || // 0.5
3081 Val == 0xBF00 || // -0.5
3082 Val == 0x3F80 || // 1.0
3083 Val == 0xBF80 || // -1.0
3084 Val == 0x4000 || // 2.0
3085 Val == 0xC000 || // -2.0
3086 Val == 0x4080 || // 4.0
3087 Val == 0xC080 || // -4.0
3088 Val == 0x3E22; // 1.0 / (2.0 * pi)
3089}
3090
3091bool isInlinableLiteralI16(int32_t Literal, bool HasInv2Pi) {
3092 return isInlinableLiteral32(Literal, HasInv2Pi);
3093}
3094
3095bool isInlinableLiteralFP16(int16_t Literal, bool HasInv2Pi) {
3096 if (!HasInv2Pi)
3097 return false;
3098 if (isInlinableIntLiteral(Literal))
3099 return true;
3100 uint16_t Val = static_cast<uint16_t>(Literal);
3101 return Val == 0x3C00 || // 1.0
3102 Val == 0xBC00 || // -1.0
3103 Val == 0x3800 || // 0.5
3104 Val == 0xB800 || // -0.5
3105 Val == 0x4000 || // 2.0
3106 Val == 0xC000 || // -2.0
3107 Val == 0x4400 || // 4.0
3108 Val == 0xC400 || // -4.0
3109 Val == 0x3118; // 1/2pi
3110}
3111
3112std::optional<unsigned> getInlineEncodingV216(bool IsFloat, uint32_t Literal) {
3113 // Unfortunately, the Instruction Set Architecture Reference Guide is
3114 // misleading about how the inline operands work for (packed) 16-bit
3115 // instructions. In a nutshell, the actual HW behavior is:
3116 //
3117 // - integer encodings (-16 .. 64) are always produced as sign-extended
3118 // 32-bit values
3119 // - float encodings are produced as:
3120 // - for F16 instructions: corresponding half-precision float values in
3121 // the LSBs, 0 in the MSBs
3122 // - for UI16 instructions: corresponding single-precision float value
3123 int32_t Signed = static_cast<int32_t>(Literal);
3124 if (Signed >= 0 && Signed <= 64)
3125 return 128 + Signed;
3126
3127 if (Signed >= -16 && Signed <= -1)
3128 return 192 + std::abs(Signed);
3129
3130 if (IsFloat) {
3131 // clang-format off
3132 switch (Literal) {
3133 case 0x3800: return 240; // 0.5
3134 case 0xB800: return 241; // -0.5
3135 case 0x3C00: return 242; // 1.0
3136 case 0xBC00: return 243; // -1.0
3137 case 0x4000: return 244; // 2.0
3138 case 0xC000: return 245; // -2.0
3139 case 0x4400: return 246; // 4.0
3140 case 0xC400: return 247; // -4.0
3141 case 0x3118: return 248; // 1.0 / (2.0 * pi)
3142 default: break;
3143 }
3144 // clang-format on
3145 } else {
3146 // clang-format off
3147 switch (Literal) {
3148 case 0x3F000000: return 240; // 0.5
3149 case 0xBF000000: return 241; // -0.5
3150 case 0x3F800000: return 242; // 1.0
3151 case 0xBF800000: return 243; // -1.0
3152 case 0x40000000: return 244; // 2.0
3153 case 0xC0000000: return 245; // -2.0
3154 case 0x40800000: return 246; // 4.0
3155 case 0xC0800000: return 247; // -4.0
3156 case 0x3E22F983: return 248; // 1.0 / (2.0 * pi)
3157 default: break;
3158 }
3159 // clang-format on
3160 }
3161
3162 return {};
3163}
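// Worked examples (illustrative, not part of the upstream source):
//   getInlineEncodingV216(false, 5)          == 133  (integer 5 -> 128 + 5)
//   getInlineEncodingV216(false, 0xFFFFFFF0) == 208  (integer -16 -> 192 + 16)
//   getInlineEncodingV216(true,  0x3C00)     == 242  (half-precision 1.0)
//   getInlineEncodingV216(false, 0x3F800000) == 242  (single-precision 1.0)
//   getInlineEncodingV216(true,  0x12345678) == std::nullopt (not inlinable)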
3164
3165// Encoding of the literal as an inline constant for a V_PK_*_IU16 instruction
3166// or nullopt.
3167std::optional<unsigned> getInlineEncodingV2I16(uint32_t Literal) {
3168 return getInlineEncodingV216(false, Literal);
3169}
3170
3171// Encoding of the literal as an inline constant for a V_PK_*_BF16 instruction
3172// or nullopt.
3173std::optional<unsigned> getInlineEncodingV2BF16(uint32_t Literal) {
3174 int32_t Signed = static_cast<int32_t>(Literal);
3175 if (Signed >= 0 && Signed <= 64)
3176 return 128 + Signed;
3177
3178 if (Signed >= -16 && Signed <= -1)
3179 return 192 + std::abs(Signed);
3180
3181 // clang-format off
3182 switch (Literal) {
3183 case 0x3F00: return 240; // 0.5
3184 case 0xBF00: return 241; // -0.5
3185 case 0x3F80: return 242; // 1.0
3186 case 0xBF80: return 243; // -1.0
3187 case 0x4000: return 244; // 2.0
3188 case 0xC000: return 245; // -2.0
3189 case 0x4080: return 246; // 4.0
3190 case 0xC080: return 247; // -4.0
3191 case 0x3E22: return 248; // 1.0 / (2.0 * pi)
3192 default: break;
3193 }
3194 // clang-format on
3195
3196 return std::nullopt;
3197}
3198
3199// Encoding of the literal as an inline constant for a V_PK_*_F16 instruction
3200// or nullopt.
3201std::optional<unsigned> getInlineEncodingV2F16(uint32_t Literal) {
3202 return getInlineEncodingV216(true, Literal);
3203}
3204
3205// Encoding of the literal as an inline constant for V_PK_FMAC_F16 instruction
3206// or nullopt. This accounts for different inline constant behavior:
3207// - Pre-GFX11: fp16 inline constants have the value in low 16 bits, 0 in high
3208// - GFX11+: fp16 inline constants are duplicated into both halves
3209std::optional<unsigned> getPKFMACF16InlineEncoding(uint32_t Literal,
3210 bool IsGFX11Plus) {
3211 // Pre-GFX11 behavior: f16 in low bits, 0 in high bits
3212 if (!IsGFX11Plus)
3213 return getInlineEncodingV216(/*IsFloat=*/true, Literal);
3214
3215 // GFX11+ behavior: f16 duplicated in both halves
3216 // First, check for sign-extended integer inline constants (-16 to 64)
3217 // These work the same across all generations
3218 int32_t Signed = static_cast<int32_t>(Literal);
3219 if (Signed >= 0 && Signed <= 64)
3220 return 128 + Signed;
3221
3222 if (Signed >= -16 && Signed <= -1)
3223 return 192 + std::abs(Signed);
3224
3225 // For float inline constants on GFX11+, both halves must be equal
3226 uint16_t Lo = static_cast<uint16_t>(Literal);
3227 uint16_t Hi = static_cast<uint16_t>(Literal >> 16);
3228 if (Lo != Hi)
3229 return std::nullopt;
3230 return getInlineEncodingV216(/*IsFloat=*/true, Lo);
3231}
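// Illustrative behavior (assuming the generation split described above): the
// packed literal 0x3C003C00 (1.0 in both halves) maps to inline constant 242
// on GFX11+, the same constant is expressed pre-GFX11 as 0x00003C00 (1.0 in
// the low half, 0 in the high half), and a literal with mismatched halves
// such as 0x3C004000 is not inlinable on GFX11+ and yields std::nullopt.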
3232
3233// Whether the given literal can be inlined for a V_PK_* instruction.
3234bool isInlinableLiteralV216(uint32_t Literal, uint8_t OpType) {
3235 switch (OpType) {
3236 case AMDGPU::OPERAND_REG_IMM_V2INT16:
3237 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
3238 return getInlineEncodingV216(false, Literal).has_value();
3239 case AMDGPU::OPERAND_REG_IMM_V2FP16:
3240 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
3241 return getInlineEncodingV216(true, Literal).has_value();
3242 case AMDGPU::OPERAND_REG_IMM_V2FP16_SPLAT:
3243 llvm_unreachable("OPERAND_REG_IMM_V2FP16_SPLAT is not supported");
3244 case AMDGPU::OPERAND_REG_IMM_V2FP32:
3245 case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
3246 case AMDGPU::OPERAND_REG_IMM_V2INT32:
3247 case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
3248 return false;
3249 default:
3250 llvm_unreachable("bad packed operand type");
3251 }
3252}
3253
3254// Whether the given literal can be inlined for a V_PK_*_IU16 instruction.
3255bool isInlinableLiteralV2I16(uint32_t Literal) {
3256 return getInlineEncodingV2I16(Literal).has_value();
3257}
3258
3259// Whether the given literal can be inlined for a V_PK_*_BF16 instruction.
3260bool isInlinableLiteralV2BF16(uint32_t Literal) {
3261 return getInlineEncodingV2BF16(Literal).has_value();
3262}
3263
3264// Whether the given literal can be inlined for a V_PK_*_F16 instruction.
3265bool isInlinableLiteralV2F16(uint32_t Literal) {
3266 return getInlineEncodingV2F16(Literal).has_value();
3267}
3268
3269// Whether the given literal can be inlined for V_PK_FMAC_F16 instruction.
3271 return getPKFMACF16InlineEncoding(Literal, IsGFX11Plus).has_value();
3272}
3273
3274bool isValid32BitLiteral(uint64_t Val, bool IsFP64) {
3275 if (IsFP64)
3276 return !Lo_32(Val);
3277
3278 return isUInt<32>(Val) || isInt<32>(Val);
3279}
3280
3281int64_t encode32BitLiteral(int64_t Imm, OperandType Type, bool IsLit) {
3282 switch (Type) {
3283 default:
3284 break;
3289 return Imm & 0xffff;
3303 return Lo_32(Imm);
3305 return IsLit ? Imm : Hi_32(Imm);
3306 }
3307 return Imm;
3308}
3309
3310bool isArgPassedInSGPR(const Argument *A) {
3311 const Function *F = A->getParent();
3312
3313 // Arguments to compute shaders are never a source of divergence.
3314 CallingConv::ID CC = F->getCallingConv();
3315 switch (CC) {
3316 case CallingConv::AMDGPU_KERNEL:
3317 case CallingConv::SPIR_KERNEL:
3318 return true;
3319 case CallingConv::AMDGPU_VS:
3320 case CallingConv::AMDGPU_LS:
3321 case CallingConv::AMDGPU_HS:
3322 case CallingConv::AMDGPU_ES:
3323 case CallingConv::AMDGPU_GS:
3324 case CallingConv::AMDGPU_PS:
3325 case CallingConv::AMDGPU_CS:
3326 case CallingConv::AMDGPU_Gfx:
3327 case CallingConv::AMDGPU_CS_Chain:
3328 case CallingConv::AMDGPU_CS_ChainPreserve:
3329 // For non-compute shaders, SGPR inputs are marked with either inreg or
3330 // byval. Everything else is in VGPRs.
3331 return A->hasAttribute(Attribute::InReg) ||
3332 A->hasAttribute(Attribute::ByVal);
3333 default:
3334 // TODO: treat i1 as divergent?
3335 return A->hasAttribute(Attribute::InReg);
3336 }
3337}
3338
3339bool isArgPassedInSGPR(const CallBase *CB, unsigned ArgNo) {
3340 // Arguments to compute shaders are never a source of divergence.
3341 CallingConv::ID CC = CB->getCallingConv();
3342 switch (CC) {
3343 case CallingConv::AMDGPU_KERNEL:
3344 case CallingConv::SPIR_KERNEL:
3345 return true;
3346 case CallingConv::AMDGPU_VS:
3347 case CallingConv::AMDGPU_LS:
3348 case CallingConv::AMDGPU_HS:
3349 case CallingConv::AMDGPU_ES:
3350 case CallingConv::AMDGPU_GS:
3351 case CallingConv::AMDGPU_PS:
3352 case CallingConv::AMDGPU_CS:
3353 case CallingConv::AMDGPU_Gfx:
3354 case CallingConv::AMDGPU_CS_Chain:
3355 case CallingConv::AMDGPU_CS_ChainPreserve:
3356 // For non-compute shaders, SGPR inputs are marked with either inreg or
3357 // byval. Everything else is in VGPRs.
3358 return CB->paramHasAttr(ArgNo, Attribute::InReg) ||
3359 CB->paramHasAttr(ArgNo, Attribute::ByVal);
3360 default:
3361 return CB->paramHasAttr(ArgNo, Attribute::InReg);
3362 }
3363}
3364
3365static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) {
3366 return isGCN3Encoding(ST) || isGFX10Plus(ST);
3367}
3368
3369bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST,
3370 int64_t EncodedOffset) {
3371 if (isGFX12Plus(ST))
3372 return isUInt<23>(EncodedOffset);
3373
3374 return hasSMEMByteOffset(ST) ? isUInt<20>(EncodedOffset)
3375 : isUInt<8>(EncodedOffset);
3376}
3377
3378bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST,
3379 int64_t EncodedOffset, bool IsBuffer) {
3380 if (isGFX12Plus(ST)) {
3381 if (IsBuffer && EncodedOffset < 0)
3382 return false;
3383 return isInt<24>(EncodedOffset);
3384 }
3385
3386 return !IsBuffer && hasSMRDSignedImmOffset(ST) && isInt<21>(EncodedOffset);
3387}
3388
3389static bool isDwordAligned(uint64_t ByteOffset) {
3390 return (ByteOffset & 3) == 0;
3391}
3392
3393uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST,
3394 uint64_t ByteOffset) {
3395 if (hasSMEMByteOffset(ST))
3396 return ByteOffset;
3397
3398 assert(isDwordAligned(ByteOffset));
3399 return ByteOffset >> 2;
3400}
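// Example (illustrative): on a target without SMEM byte offsets (e.g. SI) a
// byte offset of 16 is converted to 4 dword units, while targets with byte
// offsets (GCN3 encoding or GFX10+) pass the byte offset through unchanged.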
3401
3402std::optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
3403 int64_t ByteOffset, bool IsBuffer,
3404 bool HasSOffset) {
3405 // For unbuffered smem loads, it is illegal for the Immediate Offset to be
3406 // negative if the resulting (Offset + (M0 or SOffset or zero)) is negative.
3407 // Handle case where SOffset is not present.
3408 if (!IsBuffer && !HasSOffset && ByteOffset < 0 && hasSMRDSignedImmOffset(ST))
3409 return std::nullopt;
3410
3411 if (isGFX12Plus(ST)) // 24 bit signed offsets
3412 return isInt<24>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
3413 : std::nullopt;
3414
3415 // The signed version is always a byte offset.
3416 if (!IsBuffer && hasSMRDSignedImmOffset(ST)) {
3418 return isInt<20>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
3419 : std::nullopt;
3420 }
3421
3422 if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST))
3423 return std::nullopt;
3424
3425 int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
3426 return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset)
3427 ? std::optional<int64_t>(EncodedOffset)
3428 : std::nullopt;
3429}
3430
3431std::optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
3432 int64_t ByteOffset) {
3433 if (!isCI(ST) || !isDwordAligned(ByteOffset))
3434 return std::nullopt;
3435
3436 int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
3437 return isUInt<32>(EncodedOffset) ? std::optional<int64_t>(EncodedOffset)
3438 : std::nullopt;
3439}
3440
3441unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST) {
3442 if (AMDGPU::isGFX10(ST))
3443 return 12;
3444
3445 if (AMDGPU::isGFX12(ST))
3446 return 24;
3447 return 13;
3448}
3449
3450namespace {
3451
3452struct SourceOfDivergence {
3453 unsigned Intr;
3454};
3455const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);
3456
3457struct AlwaysUniform {
3458 unsigned Intr;
3459};
3460const AlwaysUniform *lookupAlwaysUniform(unsigned Intr);
3461
3462#define GET_SourcesOfDivergence_IMPL
3463#define GET_UniformIntrinsics_IMPL
3464#define GET_Gfx9BufferFormat_IMPL
3465#define GET_Gfx10BufferFormat_IMPL
3466#define GET_Gfx11PlusBufferFormat_IMPL
3467
3468#include "AMDGPUGenSearchableTables.inc"
3469
3470} // end anonymous namespace
3471
3472bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
3473 return lookupSourceOfDivergence(IntrID);
3474}
3475
3476bool isIntrinsicAlwaysUniform(unsigned IntrID) {
3477 return lookupAlwaysUniform(IntrID);
3478}
3479
3480const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t BitsPerComp,
3481 uint8_t NumComponents,
3482 uint8_t NumFormat,
3483 const MCSubtargetInfo &STI) {
3484 return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(
3485 BitsPerComp, NumComponents, NumFormat)
3486 : isGFX10(STI)
3487 ? getGfx10BufferFormatInfo(BitsPerComp, NumComponents, NumFormat)
3488 : getGfx9BufferFormatInfo(BitsPerComp, NumComponents, NumFormat);
3489}
3490
3491const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t Format,
3492 const MCSubtargetInfo &STI) {
3493 return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(Format)
3494 : isGFX10(STI) ? getGfx10BufferFormatInfo(Format)
3495 : getGfx9BufferFormatInfo(Format);
3496}
3497
3499 const MCRegisterInfo &MRI) {
3500 const unsigned VGPRClasses[] = {
3501 AMDGPU::VGPR_16RegClassID, AMDGPU::VGPR_32RegClassID,
3502 AMDGPU::VReg_64RegClassID, AMDGPU::VReg_96RegClassID,
3503 AMDGPU::VReg_128RegClassID, AMDGPU::VReg_160RegClassID,
3504 AMDGPU::VReg_192RegClassID, AMDGPU::VReg_224RegClassID,
3505 AMDGPU::VReg_256RegClassID, AMDGPU::VReg_288RegClassID,
3506 AMDGPU::VReg_320RegClassID, AMDGPU::VReg_352RegClassID,
3507 AMDGPU::VReg_384RegClassID, AMDGPU::VReg_512RegClassID,
3508 AMDGPU::VReg_1024RegClassID};
3509
3510 for (unsigned RCID : VGPRClasses) {
3511 const MCRegisterClass &RC = MRI.getRegClass(RCID);
3512 if (RC.contains(Reg))
3513 return &RC;
3514 }
3515
3516 return nullptr;
3517}
3518
3520 unsigned Enc = MRI.getEncodingValue(Reg);
3521 unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
3522 return Idx >> 8;
3523}
3524
3526 const MCRegisterInfo &MRI) {
3527 unsigned Enc = MRI.getEncodingValue(Reg);
3528 unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
3529 if (Idx >= 0x100)
3530 return MCRegister();
3531
3533 if (!RC)
3534 return MCRegister();
3535
3536 Idx |= MSBs << 8;
3537 if (RC->getID() == AMDGPU::VGPR_16RegClassID) {
3538 // This class has 2048 registers with interleaved lo16 and hi16.
3539 Idx *= 2;
3541 ++Idx;
3542 }
3543
3544 return RC->getRegister(Idx);
3545}
3546
3547std::pair<const AMDGPU::OpName *, const AMDGPU::OpName *>
3549 static const AMDGPU::OpName VOPOps[4] = {
3550 AMDGPU::OpName::src0, AMDGPU::OpName::src1, AMDGPU::OpName::src2,
3551 AMDGPU::OpName::vdst};
3552 static const AMDGPU::OpName VDSOps[4] = {
3553 AMDGPU::OpName::addr, AMDGPU::OpName::data0, AMDGPU::OpName::data1,
3554 AMDGPU::OpName::vdst};
3555 static const AMDGPU::OpName FLATOps[4] = {
3556 AMDGPU::OpName::vaddr, AMDGPU::OpName::vdata,
3557 AMDGPU::OpName::NUM_OPERAND_NAMES, AMDGPU::OpName::vdst};
3558 static const AMDGPU::OpName BUFOps[4] = {
3559 AMDGPU::OpName::vaddr, AMDGPU::OpName::NUM_OPERAND_NAMES,
3560 AMDGPU::OpName::NUM_OPERAND_NAMES, AMDGPU::OpName::vdata};
3561 static const AMDGPU::OpName VIMGOps[4] = {
3562 AMDGPU::OpName::vaddr0, AMDGPU::OpName::vaddr1, AMDGPU::OpName::vaddr2,
3563 AMDGPU::OpName::vdata};
3564
3565 // For VOPD instructions, the MSB of a Y-component operand's VGPR address
3566 // is supposed to match the corresponding X operand; otherwise the pair
3567 // shall not be combined into a VOPD instruction.
3568 static const AMDGPU::OpName VOPDOpsX[4] = {
3569 AMDGPU::OpName::src0X, AMDGPU::OpName::vsrc1X, AMDGPU::OpName::vsrc2X,
3570 AMDGPU::OpName::vdstX};
3571 static const AMDGPU::OpName VOPDOpsY[4] = {
3572 AMDGPU::OpName::src0Y, AMDGPU::OpName::vsrc1Y, AMDGPU::OpName::vsrc2Y,
3573 AMDGPU::OpName::vdstY};
3574
3575 // VOP2 MADMK instructions use src0, imm, src1 scheme.
3576 static const AMDGPU::OpName VOP2MADMKOps[4] = {
3577 AMDGPU::OpName::src0, AMDGPU::OpName::NUM_OPERAND_NAMES,
3578 AMDGPU::OpName::src1, AMDGPU::OpName::vdst};
3579 static const AMDGPU::OpName VOPDFMAMKOpsX[4] = {
3580 AMDGPU::OpName::src0X, AMDGPU::OpName::NUM_OPERAND_NAMES,
3581 AMDGPU::OpName::vsrc1X, AMDGPU::OpName::vdstX};
3582 static const AMDGPU::OpName VOPDFMAMKOpsY[4] = {
3583 AMDGPU::OpName::src0Y, AMDGPU::OpName::NUM_OPERAND_NAMES,
3584 AMDGPU::OpName::vsrc1Y, AMDGPU::OpName::vdstY};
3585
3586 unsigned TSFlags = Desc.TSFlags;
3587
3588 if (TSFlags &
3591 switch (Desc.getOpcode()) {
3592 // LD_SCALE operands ignore MSB.
3593 case AMDGPU::V_WMMA_LD_SCALE_PAIRED_B32:
3594 case AMDGPU::V_WMMA_LD_SCALE_PAIRED_B32_gfx1250:
3595 case AMDGPU::V_WMMA_LD_SCALE16_PAIRED_B64:
3596 case AMDGPU::V_WMMA_LD_SCALE16_PAIRED_B64_gfx1250:
3597 return {};
3598 case AMDGPU::V_FMAMK_F16:
3599 case AMDGPU::V_FMAMK_F16_t16:
3600 case AMDGPU::V_FMAMK_F16_t16_gfx12:
3601 case AMDGPU::V_FMAMK_F16_fake16:
3602 case AMDGPU::V_FMAMK_F16_fake16_gfx12:
3603 case AMDGPU::V_FMAMK_F32:
3604 case AMDGPU::V_FMAMK_F32_gfx12:
3605 case AMDGPU::V_FMAMK_F64:
3606 case AMDGPU::V_FMAMK_F64_gfx1250:
3607 return {VOP2MADMKOps, nullptr};
3608 default:
3609 break;
3610 }
3611 return {VOPOps, nullptr};
3612 }
3613
3614 if (TSFlags & SIInstrFlags::DS)
3615 return {VDSOps, nullptr};
3616
3617 if (TSFlags & SIInstrFlags::FLAT)
3618 return {FLATOps, nullptr};
3619
3620 if (TSFlags & (SIInstrFlags::MUBUF | SIInstrFlags::MTBUF))
3621 return {BUFOps, nullptr};
3622
3623 if (TSFlags & SIInstrFlags::VIMAGE)
3624 return {VIMGOps, nullptr};
3625
3626 if (AMDGPU::isVOPD(Desc.getOpcode())) {
3627 auto [OpX, OpY] = getVOPDComponents(Desc.getOpcode());
3628 return {(OpX == AMDGPU::V_FMAMK_F32) ? VOPDFMAMKOpsX : VOPDOpsX,
3629 (OpY == AMDGPU::V_FMAMK_F32) ? VOPDFMAMKOpsY : VOPDOpsY};
3630 }
3631
3632 assert(!(TSFlags & SIInstrFlags::MIMG));
3633
3634 if (TSFlags & (SIInstrFlags::VSAMPLE | SIInstrFlags::EXP))
3635 llvm_unreachable("Sample and export VGPR lowering is not implemented and"
3636 " these instructions are not expected on gfx1250");
3637
3638 return {};
3639}
3640
3641bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode) {
3642 uint64_t TSFlags = MII.get(Opcode).TSFlags;
3643
3644 if (TSFlags & SIInstrFlags::SMRD)
3645 return !getSMEMIsBuffer(Opcode);
3646 if (!(TSFlags & SIInstrFlags::FLAT))
3647 return false;
3648
3649 // Only SV and SVS modes are supported.
3650 if (TSFlags & SIInstrFlags::FlatScratch)
3651 return hasNamedOperand(Opcode, OpName::vaddr);
3652
3653 // Only GVS mode is supported.
3654 return hasNamedOperand(Opcode, OpName::vaddr) &&
3655 hasNamedOperand(Opcode, OpName::saddr);
3656
3657 return false;
3658}
3659
3660bool hasAny64BitVGPROperands(const MCInstrDesc &OpDesc, const MCInstrInfo &MII,
3661 const MCSubtargetInfo &ST) {
3662 for (auto OpName : {OpName::vdst, OpName::src0, OpName::src1, OpName::src2}) {
3663 int Idx = getNamedOperandIdx(OpDesc.getOpcode(), OpName);
3664 if (Idx == -1)
3665 continue;
3666
3667 const MCOperandInfo &OpInfo = OpDesc.operands()[Idx];
3668 int16_t RegClass = MII.getOpRegClassID(
3669 OpInfo, ST.getHwMode(MCSubtargetInfo::HwMode_RegInfo));
3670 if (RegClass == AMDGPU::VReg_64RegClassID ||
3671 RegClass == AMDGPU::VReg_64_Align2RegClassID)
3672 return true;
3673 }
3674
3675 return false;
3676}
3677
3678bool isDPALU_DPP32BitOpc(unsigned Opc) {
3679 switch (Opc) {
3680 case AMDGPU::V_MUL_LO_U32_e64:
3681 case AMDGPU::V_MUL_LO_U32_e64_dpp:
3682 case AMDGPU::V_MUL_LO_U32_e64_dpp_gfx1250:
3683 case AMDGPU::V_MUL_HI_U32_e64:
3684 case AMDGPU::V_MUL_HI_U32_e64_dpp:
3685 case AMDGPU::V_MUL_HI_U32_e64_dpp_gfx1250:
3686 case AMDGPU::V_MUL_HI_I32_e64:
3687 case AMDGPU::V_MUL_HI_I32_e64_dpp:
3688 case AMDGPU::V_MUL_HI_I32_e64_dpp_gfx1250:
3689 case AMDGPU::V_MAD_U32_e64:
3690 case AMDGPU::V_MAD_U32_e64_dpp:
3691 case AMDGPU::V_MAD_U32_e64_dpp_gfx1250:
3692 return true;
3693 default:
3694 return false;
3695 }
3696}
3697
3698bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCInstrInfo &MII,
3699 const MCSubtargetInfo &ST) {
3700 if (!ST.hasFeature(AMDGPU::FeatureDPALU_DPP))
3701 return false;
3702
3703 if (isDPALU_DPP32BitOpc(OpDesc.getOpcode()))
3704 return ST.hasFeature(AMDGPU::FeatureGFX1250Insts);
3705
3706 return hasAny64BitVGPROperands(OpDesc, MII, ST);
3707}
3708
3710 if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize32768))
3711 return 64;
3712 if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize65536))
3713 return 128;
3714 if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize163840))
3715 return 320;
3716 if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize327680))
3717 return 512;
3718 return 64; // In sync with getAddressableLocalMemorySize
3719}
3720
3721bool isPackedFP32Inst(unsigned Opc) {
3722 switch (Opc) {
3723 case AMDGPU::V_PK_ADD_F32:
3724 case AMDGPU::V_PK_ADD_F32_gfx12:
3725 case AMDGPU::V_PK_MUL_F32:
3726 case AMDGPU::V_PK_MUL_F32_gfx12:
3727 case AMDGPU::V_PK_FMA_F32:
3728 case AMDGPU::V_PK_FMA_F32_gfx12:
3729 return true;
3730 default:
3731 return false;
3732 }
3733}
3734
3735const std::array<unsigned, 3> &ClusterDimsAttr::getDims() const {
3736 assert(isFixedDims() && "expect kind to be FixedDims");
3737 return Dims;
3738}
3739
3740std::string ClusterDimsAttr::to_string() const {
3741 SmallString<10> Buffer;
3742 raw_svector_ostream OS(Buffer);
3743
3744 switch (getKind()) {
3745 case Kind::Unknown:
3746 return "";
3747 case Kind::NoCluster: {
3748 OS << EncoNoCluster << ',' << EncoNoCluster << ',' << EncoNoCluster;
3749 return Buffer.c_str();
3750 }
3751 case Kind::VariableDims: {
3752 OS << EncoVariableDims << ',' << EncoVariableDims << ','
3753 << EncoVariableDims;
3754 return Buffer.c_str();
3755 }
3756 case Kind::FixedDims: {
3757 OS << Dims[0] << ',' << Dims[1] << ',' << Dims[2];
3758 return Buffer.c_str();
3759 }
3760 }
3761 llvm_unreachable("Unknown ClusterDimsAttr kind");
3762}
3763
3764ClusterDimsAttr ClusterDimsAttr::get(const Function &F) {
3765 std::optional<SmallVector<unsigned>> Attr =
3766 getIntegerVecAttribute(F, "amdgpu-cluster-dims", /*Size=*/3);
3767 Kind AttrKind = Kind::FixedDims;
3768
3769 if (!Attr.has_value())
3770 AttrKind = Kind::Unknown;
3771 else if (all_of(*Attr, equal_to(EncoNoCluster)))
3772 AttrKind = Kind::NoCluster;
3773 else if (all_of(*Attr, equal_to(EncoVariableDims)))
3774 AttrKind = Kind::VariableDims;
3775
3776 ClusterDimsAttr A(AttrKind);
3777 if (AttrKind == Kind::FixedDims)
3778 A.Dims = {(*Attr)[0], (*Attr)[1], (*Attr)[2]};
3779
3780 return A;
3781}
3782
3783} // namespace AMDGPU
3784
3785raw_ostream &operator<<(raw_ostream &OS,
3786 const AMDGPU::IsaInfo::TargetIDSetting S) {
3787 switch (S) {
3788 case AMDGPU::IsaInfo::TargetIDSetting::Unsupported:
3789 OS << "Unsupported";
3790 break;
3791 case AMDGPU::IsaInfo::TargetIDSetting::Any:
3792 OS << "Any";
3793 break;
3794 case AMDGPU::IsaInfo::TargetIDSetting::Off:
3795 OS << "Off";
3796 break;
3797 case AMDGPU::IsaInfo::TargetIDSetting::On:
3798 OS << "On";
3799 break;
3800 }
3801 return OS;
3802}
3803
3804} // namespace llvm
Definition StringRef.h:273
Manages the enabling and disabling of subtarget specific features.
const std::vector< std::string > & getFeatures() const
Returns the vector of individual subtarget features.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
OSType getOS() const
Get the parsed operating system type of this triple.
Definition Triple.h:427
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition Triple.h:418
bool isAMDGCN() const
Tests whether the target is AMDGCN.
Definition Triple.h:933
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
std::string & str()
Returns the string's reference.
A raw_ostream that writes to an SmallVector or SmallString.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ CONSTANT_ADDRESS_32BIT
Address space for 32-bit constant memory.
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
unsigned decodeFieldVaVcc(unsigned Encoded)
unsigned encodeFieldVaVcc(unsigned Encoded, unsigned VaVcc)
unsigned decodeFieldHoldCnt(unsigned Encoded, const IsaVersion &Version)
bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
unsigned encodeFieldHoldCnt(unsigned Encoded, unsigned HoldCnt, const IsaVersion &Version)
unsigned encodeFieldVaSsrc(unsigned Encoded, unsigned VaSsrc)
unsigned encodeFieldVaVdst(unsigned Encoded, unsigned VaVdst)
unsigned decodeFieldSaSdst(unsigned Encoded)
unsigned getHoldCntBitMask(const IsaVersion &Version)
unsigned decodeFieldVaSdst(unsigned Encoded)
unsigned encodeFieldVmVsrc(unsigned Encoded, unsigned VmVsrc)
unsigned decodeFieldVaSsrc(unsigned Encoded)
int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst)
const CustomOperandVal DepCtrInfo[]
bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
unsigned decodeFieldVaVdst(unsigned Encoded)
int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI)
unsigned decodeFieldVmVsrc(unsigned Encoded)
unsigned encodeFieldVaSdst(unsigned Encoded, unsigned VaSdst)
bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI)
static constexpr ExpTgt ExpTgtInfo[]
bool getTgtName(unsigned Id, StringRef &Name, int &Index)
unsigned getTgtId(const StringRef Name)
constexpr uint32_t VersionMinor
HSA metadata minor version.
constexpr uint32_t VersionMajor
HSA metadata major version.
unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI, std::optional< bool > EnableWavefrontSize32)
unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI)
unsigned getArchVGPRAllocGranule()
For subtargets with a unified VGPR file and mixed ArchVGPR/AGPR usage, returns the allocation granule...
unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getWavefrontSize(const MCSubtargetInfo *STI)
unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI, unsigned NumVGPRs, unsigned DynamicVGPRBlockSize)
unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI)
unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI)
unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed, bool FlatScrUsed, bool XNACKUsed)
unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI)
unsigned getLocalMemorySize(const MCSubtargetInfo *STI)
unsigned getAddressableLocalMemorySize(const MCSubtargetInfo *STI)
unsigned getEUsPerCU(const MCSubtargetInfo *STI)
unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI)
unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU)
static TargetIDSetting getTargetIDSettingFromFeatureString(StringRef FeatureString)
unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI)
unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI, unsigned DynamicVGPRBlockSize, std::optional< bool > EnableWavefrontSize32)
unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, bool Addressable)
unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs)
unsigned getMinWavesPerEU(const MCSubtargetInfo *STI)
unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, unsigned DynamicVGPRBlockSize)
unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI)
unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, unsigned DynamicVGPRBlockSize)
unsigned getAllocatedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, unsigned DynamicVGPRBlockSize, std::optional< bool > EnableWavefrontSize32)
unsigned getEncodedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, std::optional< bool > EnableWavefrontSize32)
unsigned getOccupancyWithNumSGPRs(unsigned SGPRs, unsigned MaxWaves, AMDGPUSubtarget::Generation Gen)
static unsigned getGranulatedNumRegisterBlocks(unsigned NumRegs, unsigned Granule)
unsigned getAddressableNumArchVGPRs(const MCSubtargetInfo *STI)
unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI)
unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI, unsigned DynamicVGPRBlockSize)
StringLiteral const UfmtSymbolicGFX11[]
bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI)
unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI)
StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI)
unsigned const DfmtNfmt2UFmtGFX10[]
StringLiteral const DfmtSymbolic[]
static StringLiteral const * getNfmtLookupTable(const MCSubtargetInfo &STI)
bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI)
StringLiteral const NfmtSymbolicGFX10[]
bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI)
int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt, const MCSubtargetInfo &STI)
StringRef getDfmtName(unsigned Id)
int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt)
int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI)
bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI)
StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI)
unsigned const DfmtNfmt2UFmtGFX11[]
StringLiteral const NfmtSymbolicVI[]
StringLiteral const NfmtSymbolicSICI[]
int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI)
int64_t getDfmt(const StringRef Name)
StringLiteral const UfmtSymbolicGFX10[]
void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt)
uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId)
bool msgSupportsStream(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI)
void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId, uint16_t &StreamId, const MCSubtargetInfo &STI)
bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI)
bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId, const MCSubtargetInfo &STI, bool Strict)
StringRef getMsgOpName(int64_t MsgId, uint64_t Encoding, const MCSubtargetInfo &STI)
Map from an encoding to the symbolic name for a sendmsg operation.
static uint64_t getMsgIdMask(const MCSubtargetInfo &STI)
bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI)
bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI, bool Strict)
constexpr unsigned VOPD_VGPR_BANK_MASKS[]
constexpr unsigned COMPONENTS_NUM
constexpr unsigned VOPD3_VGPR_BANK_MASKS[]
bool isPackedFP32Inst(unsigned Opc)
bool isGCN3Encoding(const MCSubtargetInfo &STI)
bool isInlinableLiteralBF16(int16_t Literal, bool HasInv2Pi)
bool isGFX10_BEncoding(const MCSubtargetInfo &STI)
bool isInlineValue(MCRegister Reg)
bool isGFX10_GFX11(const MCSubtargetInfo &STI)
bool isInlinableLiteralV216(uint32_t Literal, uint8_t OpType)
bool isPKFMACF16InlineConstant(uint32_t Literal, bool IsGFX11Plus)
LLVM_READONLY const MIMGInfo * getMIMGInfo(unsigned Opc)
void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt)
Decodes Vmcnt, Expcnt and Lgkmcnt from given Waitcnt for given isa Version, and writes decoded values...
bool isInlinableLiteralFP16(int16_t Literal, bool HasInv2Pi)
bool isSGPR(MCRegister Reg, const MCRegisterInfo *TRI)
Is Reg - scalar register.
uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST, uint64_t ByteOffset)
Convert ByteOffset to dwords if the subtarget uses dword SMRD immediate offsets.
static unsigned encodeStorecnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Storecnt)
MCRegister getMCReg(MCRegister Reg, const MCSubtargetInfo &STI)
If Reg is a pseudo reg, return the correct hardware register given STI otherwise return Reg.
static bool hasSMEMByteOffset(const MCSubtargetInfo &ST)
bool isVOPCAsmOnly(unsigned Opc)
int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding, unsigned VDataDwords, unsigned VAddrDwords)
bool getMTBUFHasSrsrc(unsigned Opc)
std::optional< int64_t > getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST, int64_t ByteOffset)
bool getWMMAIsXDL(unsigned Opc)
uint8_t wmmaScaleF8F6F4FormatToNumRegs(unsigned Fmt)
static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr, int Size, unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
bool isGFX10Before1030(const MCSubtargetInfo &STI)
bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo)
Does this operand support only inlinable literals?
unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc)
const int OPR_ID_UNSUPPORTED
bool shouldEmitConstantsToTextSection(const Triple &TT)
bool isInlinableLiteralV2I16(uint32_t Literal)
bool isDPMACCInstruction(unsigned Opc)
int getMTBUFElements(unsigned Opc)
bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI)
static int encodeCustomOperandVal(const CustomOperandVal &Op, int64_t InputVal)
unsigned getTemporalHintType(const MCInstrDesc TID)
int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR, int32_t ArgNumVGPR)
bool isGFX10(const MCSubtargetInfo &STI)
bool isInlinableLiteralV2BF16(uint32_t Literal)
unsigned getMaxNumUserSGPRs(const MCSubtargetInfo &STI)
std::optional< unsigned > getInlineEncodingV216(bool IsFloat, uint32_t Literal)
FPType getFPDstSelType(unsigned Opc)
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST)
For pre-GFX12 FLAT instructions the offset must be positive; MSB is ignored and forced to zero.
bool hasA16(const MCSubtargetInfo &STI)
bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset, bool IsBuffer)
bool isGFX12Plus(const MCSubtargetInfo &STI)
unsigned getNSAMaxSize(const MCSubtargetInfo &STI, bool HasSampler)
const MCRegisterClass * getVGPRPhysRegClass(MCRegister Reg, const MCRegisterInfo &MRI)
bool hasPackedD16(const MCSubtargetInfo &STI)
unsigned getStorecntBitMask(const IsaVersion &Version)
unsigned getLdsDwGranularity(const MCSubtargetInfo &ST)
bool isGFX940(const MCSubtargetInfo &STI)
bool isInlinableLiteralV2F16(uint32_t Literal)
bool isHsaAbi(const MCSubtargetInfo &STI)
bool isGFX11(const MCSubtargetInfo &STI)
const int OPR_VAL_INVALID
bool getSMEMIsBuffer(unsigned Opc)
bool isGFX10_3_GFX11(const MCSubtargetInfo &STI)
bool isGFX13(const MCSubtargetInfo &STI)
bool hasValueInRangeLikeMetadata(const MDNode &MD, int64_t Val)
Checks if Val is inside MD, a !range-like metadata.
uint8_t mfmaScaleF8F6F4FormatToNumRegs(unsigned EncodingVal)
unsigned getVOPDOpcode(unsigned Opc, bool VOPD3)
bool isGroupSegment(const GlobalValue *GV)
LLVM_ABI IsaVersion getIsaVersion(StringRef GPU)
bool getMTBUFHasSoffset(unsigned Opc)
bool hasXNACK(const MCSubtargetInfo &STI)
bool isValid32BitLiteral(uint64_t Val, bool IsFP64)
static unsigned getCombinedCountBitMask(const IsaVersion &Version, bool IsStore)
CanBeVOPD getCanBeVOPD(unsigned Opc, unsigned EncodingFamily, bool VOPD3)
unsigned encodeWaitcnt(const IsaVersion &Version, unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt)
Encodes Vmcnt, Expcnt and Lgkmcnt into Waitcnt for given isa Version.
bool isVOPC64DPP(unsigned Opc)
int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements)
bool getMAIIsGFX940XDL(unsigned Opc)
bool isSI(const MCSubtargetInfo &STI)
unsigned getDefaultAMDHSACodeObjectVersion()
bool isReadOnlySegment(const GlobalValue *GV)
bool isArgPassedInSGPR(const Argument *A)
bool isIntrinsicAlwaysUniform(unsigned IntrID)
int getMUBUFBaseOpcode(unsigned Opc)
unsigned getAMDHSACodeObjectVersion(const Module &M)
unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt)
unsigned getWaitcntBitMask(const IsaVersion &Version)
LLVM_READONLY bool hasNamedOperand(uint64_t Opcode, OpName NamedIdx)
bool getVOP3IsSingle(unsigned Opc)
bool isGFX9(const MCSubtargetInfo &STI)
bool isDPALU_DPP32BitOpc(unsigned Opc)
bool getVOP1IsSingle(unsigned Opc)
static bool isDwordAligned(uint64_t ByteOffset)
unsigned getVOPDEncodingFamily(const MCSubtargetInfo &ST)
bool isGFX10_AEncoding(const MCSubtargetInfo &STI)
bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this a KImm operand?
bool getHasColorExport(const Function &F)
int getMTBUFBaseOpcode(unsigned Opc)
bool isGFX90A(const MCSubtargetInfo &STI)
unsigned getSamplecntBitMask(const IsaVersion &Version)
unsigned getDefaultQueueImplicitArgPosition(unsigned CodeObjectVersion)
std::tuple< char, unsigned, unsigned > parseAsmPhysRegName(StringRef RegName)
Returns a valid charcode or 0 in the first entry if this is a valid physical register name.
bool hasSRAMECC(const MCSubtargetInfo &STI)
bool getHasDepthExport(const Function &F)
bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI)
bool getMUBUFHasVAddr(unsigned Opc)
bool isTrue16Inst(unsigned Opc)
unsigned getVGPREncodingMSBs(MCRegister Reg, const MCRegisterInfo &MRI)
std::pair< unsigned, unsigned > getVOPDComponents(unsigned VOPDOpcode)
bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi)
bool isGFX12(const MCSubtargetInfo &STI)
unsigned getInitialPSInputAddr(const Function &F)
unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Expcnt)
bool isAsyncStore(unsigned Opc)
unsigned getDynamicVGPRBlockSize(const Function &F)
unsigned getKmcntBitMask(const IsaVersion &Version)
MCRegister getVGPRWithMSBs(MCRegister Reg, unsigned MSBs, const MCRegisterInfo &MRI)
If Reg is a low VGPR return a corresponding high VGPR with MSBs set.
unsigned getVmcntBitMask(const IsaVersion &Version)
bool isNotGFX10Plus(const MCSubtargetInfo &STI)
bool hasMAIInsts(const MCSubtargetInfo &STI)
unsigned getBitOp2(unsigned Opc)
bool isIntrinsicSourceOfDivergence(unsigned IntrID)
unsigned getXcntBitMask(const IsaVersion &Version)
bool isGenericAtomic(unsigned Opc)
const MFMA_F8F6F4_Info * getWMMA_F8F6F4_WithFormatArgs(unsigned FmtA, unsigned FmtB, unsigned F8F8Opcode)
Waitcnt decodeStorecntDscnt(const IsaVersion &Version, unsigned StorecntDscnt)
bool isGFX8Plus(const MCSubtargetInfo &STI)
LLVM_READNONE bool isInlinableIntLiteral(int64_t Literal)
Is this literal inlinable, and not one of the values intended for floating point values.
unsigned getLgkmcntBitMask(const IsaVersion &Version)
bool getMUBUFTfe(unsigned Opc)
unsigned getBvhcntBitMask(const IsaVersion &Version)
bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST)
bool hasMIMG_R128(const MCSubtargetInfo &STI)
bool hasGFX10_3Insts(const MCSubtargetInfo &STI)
std::pair< const AMDGPU::OpName *, const AMDGPU::OpName * > getVGPRLoweringOperandTables(const MCInstrDesc &Desc)
bool hasG16(const MCSubtargetInfo &STI)
unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode, const MIMGDimInfo *Dim, bool IsA16, bool IsG16Supported)
int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements)
bool isGFX13Plus(const MCSubtargetInfo &STI)
unsigned getExpcntBitMask(const IsaVersion &Version)
bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI)
bool getMUBUFHasSoffset(unsigned Opc)
bool isNotGFX11Plus(const MCSubtargetInfo &STI)
bool isGFX11Plus(const MCSubtargetInfo &STI)
std::optional< unsigned > getInlineEncodingV2F16(uint32_t Literal)
bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this floating-point operand?
std::tuple< char, unsigned, unsigned > parseAsmConstraintPhysReg(StringRef Constraint)
Returns a valid charcode or 0 in the first entry if this is a valid physical register constraint.
unsigned getHostcallImplicitArgPosition(unsigned CodeObjectVersion)
static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr, int Size, const MCSubtargetInfo &STI)
static unsigned encodeLoadcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Loadcnt)
bool isGFX10Plus(const MCSubtargetInfo &STI)
static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size, unsigned Code, int &Idx, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
static bool isValidRegPrefix(char C)
std::optional< int64_t > getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset, bool IsBuffer, bool HasSOffset)
bool isGlobalSegment(const GlobalValue *GV)
int64_t encode32BitLiteral(int64_t Imm, OperandType Type, bool IsLit)
@ OPERAND_KIMM32
Operand with 32-bit immediate that uses the constant bus.
Definition SIDefines.h:233
@ OPERAND_REG_INLINE_C_LAST
Definition SIDefines.h:256
@ OPERAND_REG_IMM_V2FP16
Definition SIDefines.h:210
@ OPERAND_REG_INLINE_C_FP64
Definition SIDefines.h:224
@ OPERAND_REG_INLINE_C_BF16
Definition SIDefines.h:221
@ OPERAND_REG_INLINE_C_V2BF16
Definition SIDefines.h:226
@ OPERAND_REG_IMM_V2INT16
Definition SIDefines.h:212
@ OPERAND_REG_IMM_BF16
Definition SIDefines.h:207
@ OPERAND_REG_IMM_INT32
Operands with register, 32-bit, or 64-bit immediate.
Definition SIDefines.h:202
@ OPERAND_REG_IMM_V2BF16
Definition SIDefines.h:209
@ OPERAND_REG_INLINE_AC_FIRST
Definition SIDefines.h:258
@ OPERAND_REG_IMM_FP16
Definition SIDefines.h:208
@ OPERAND_REG_IMM_V2FP16_SPLAT
Definition SIDefines.h:211
@ OPERAND_REG_IMM_NOINLINE_V2FP16
Definition SIDefines.h:213
@ OPERAND_REG_IMM_FP64
Definition SIDefines.h:206
@ OPERAND_REG_INLINE_C_V2FP16
Definition SIDefines.h:227
@ OPERAND_REG_INLINE_AC_INT32
Operands with an AccVGPR register or inline constant.
Definition SIDefines.h:238
@ OPERAND_REG_INLINE_AC_FP32
Definition SIDefines.h:239
@ OPERAND_REG_IMM_V2INT32
Definition SIDefines.h:214
@ OPERAND_REG_IMM_FP32
Definition SIDefines.h:205
@ OPERAND_REG_INLINE_C_FIRST
Definition SIDefines.h:255
@ OPERAND_REG_INLINE_C_FP32
Definition SIDefines.h:223
@ OPERAND_REG_INLINE_AC_LAST
Definition SIDefines.h:259
@ OPERAND_REG_INLINE_C_INT32
Definition SIDefines.h:219
@ OPERAND_REG_INLINE_C_V2INT16
Definition SIDefines.h:225
@ OPERAND_REG_IMM_V2FP32
Definition SIDefines.h:215
@ OPERAND_REG_INLINE_AC_FP64
Definition SIDefines.h:240
@ OPERAND_REG_INLINE_C_FP16
Definition SIDefines.h:222
@ OPERAND_INLINE_SPLIT_BARRIER_INT32
Definition SIDefines.h:230
std::optional< unsigned > getPKFMACF16InlineEncoding(uint32_t Literal, bool IsGFX11Plus)
raw_ostream & operator<<(raw_ostream &OS, const AMDGPU::Waitcnt &Wait)
void initDefaultAMDKernelCodeT(AMDGPUMCKernelCodeT &KernelCode, const MCSubtargetInfo *STI)
bool isNotGFX9Plus(const MCSubtargetInfo &STI)
bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
bool hasGDS(const MCSubtargetInfo &STI)
bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset)
bool isGFX9Plus(const MCSubtargetInfo &STI)
bool hasDPPSrc1SGPR(const MCSubtargetInfo &STI)
const int OPR_ID_DUPLICATE
bool isVOPD(unsigned Opc)
VOPD::InstInfo getVOPDInstInfo(const MCInstrDesc &OpX, const MCInstrDesc &OpY)
unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Vmcnt)
unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt)
bool isCvt_F32_Fp8_Bf8_e64(unsigned Opc)
Waitcnt decodeLoadcntDscnt(const IsaVersion &Version, unsigned LoadcntDscnt)
std::optional< unsigned > getInlineEncodingV2I16(uint32_t Literal)
unsigned getRegBitWidth(const TargetRegisterClass &RC)
Get the size in bits of a register from the register class RC.
static unsigned encodeStorecntDscnt(const IsaVersion &Version, unsigned Storecnt, unsigned Dscnt)
bool isGFX1250(const MCSubtargetInfo &STI)
int getMCOpcode(uint16_t Opcode, unsigned Gen)
const MIMGBaseOpcodeInfo * getMIMGBaseOpcode(unsigned Opc)
bool isVI(const MCSubtargetInfo &STI)
bool isTensorStore(unsigned Opc)
bool getMUBUFIsBufferInv(unsigned Opc)
bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode)
MCRegister mc2PseudoReg(MCRegister Reg)
Convert hardware register Reg to a pseudo register.
std::optional< unsigned > getInlineEncodingV2BF16(uint32_t Literal)
static int encodeCustomOperand(const CustomOperandVal *Opr, int Size, const StringRef Name, int64_t InputVal, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
unsigned hasKernargPreload(const MCSubtargetInfo &STI)
bool supportsWGP(const MCSubtargetInfo &STI)
bool isMAC(unsigned Opc)
bool isCI(const MCSubtargetInfo &STI)
unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Lgkmcnt)
bool getVOP2IsSingle(unsigned Opc)
bool getMAIIsDGEMM(unsigned Opc)
Returns true if MAI operation is a double precision GEMM.
LLVM_READONLY const MIMGBaseOpcodeInfo * getMIMGBaseOpcodeInfo(unsigned BaseOpcode)
const int OPR_ID_UNKNOWN
unsigned getCompletionActionImplicitArgPosition(unsigned CodeObjectVersion)
SmallVector< unsigned > getIntegerVecAttribute(const Function &F, StringRef Name, unsigned Size, unsigned DefaultVal)
bool isGFX1250Plus(const MCSubtargetInfo &STI)
int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels)
bool isNotGFX12Plus(const MCSubtargetInfo &STI)
bool getMTBUFHasVAddr(unsigned Opc)
unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt)
uint8_t getELFABIVersion(const Triple &T, unsigned CodeObjectVersion)
std::pair< unsigned, unsigned > getIntegerPairAttribute(const Function &F, StringRef Name, std::pair< unsigned, unsigned > Default, bool OnlyFirstRequired)
unsigned getLoadcntBitMask(const IsaVersion &Version)
bool isInlinableLiteralI16(int32_t Literal, bool HasInv2Pi)
bool hasVOPD(const MCSubtargetInfo &STI)
int getVOPDFull(unsigned OpX, unsigned OpY, unsigned EncodingFamily, bool VOPD3)
static unsigned encodeDscnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Dscnt)
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi)
Is this literal inlinable.
const MFMA_F8F6F4_Info * getMFMA_F8F6F4_WithFormatArgs(unsigned CBSZ, unsigned BLGP, unsigned F8F8Opcode)
unsigned getMultigridSyncArgImplicitArgPosition(unsigned CodeObjectVersion)
bool isGFX9_GFX10_GFX11(const MCSubtargetInfo &STI)
bool isGFX9_GFX10(const MCSubtargetInfo &STI)
int getMUBUFElements(unsigned Opc)
static unsigned encodeLoadcntDscnt(const IsaVersion &Version, unsigned Loadcnt, unsigned Dscnt)
const GcnBufferFormatInfo * getGcnBufferFormatInfo(uint8_t BitsPerComp, uint8_t NumComponents, uint8_t NumFormat, const MCSubtargetInfo &STI)
unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc)
bool isPermlane16(unsigned Opc)
bool getMUBUFHasSrsrc(unsigned Opc)
unsigned getDscntBitMask(const IsaVersion &Version)
bool hasAny64BitVGPROperands(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
@ AMDGPU_VS
Used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (vertex shader if tess...
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
@ AMDGPU_Gfx
Used for AMD graphics targets.
@ AMDGPU_CS_ChainPreserve
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ AMDGPU_ES
Used for AMDPAL shader stage before geometry shader if geometry is in use.
@ AMDGPU_LS
Used for AMDPAL vertex shader if tessellation is in use.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ ELFABIVERSION_AMDGPU_HSA_V4
Definition ELF.h:384
@ ELFABIVERSION_AMDGPU_HSA_V5
Definition ELF.h:385
@ ELFABIVERSION_AMDGPU_HSA_V6
Definition ELF.h:386
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
Definition Metadata.h:683
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
@ Wait
Definition Threading.h:60
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
Definition MathExtras.h:546
std::string utostr(uint64_t X, bool isNeg=false)
constexpr auto equal_to(T &&Arg)
Functor variant of std::equal_to that can be used as a UnaryPredicate in functional algorithms like a...
Definition STLExtras.h:2163
Op::Description Desc
FunctionAddr VTableAddr uintptr_t uintptr_t Version
Definition InstrProf.h:302
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
Definition MathExtras.h:150
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64 bit value.
Definition MathExtras.h:155
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:394
To bit_cast(const From &from) noexcept
Definition bit.h:90
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
DWARFExpression::Operation Op
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
@ AlwaysUniform
The result values are always uniform.
Definition Uniformity.h:23
@ Default
The result values are uniform if and only if all operands are uniform.
Definition Uniformity.h:20
#define N
AMD Kernel Code Object (amd_kernel_code_t).
Instruction set architecture version.
Represents the counter values to wait for in an s_waitcnt instruction.