AMDGPUBaseInfo.cpp
1//===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AMDGPUBaseInfo.h"
10#include "AMDGPU.h"
11#include "AMDGPUAsmUtils.h"
12#include "AMDKernelCodeT.h"
17#include "llvm/IR/Attributes.h"
18#include "llvm/IR/Constants.h"
19#include "llvm/IR/Function.h"
20#include "llvm/IR/GlobalValue.h"
21#include "llvm/IR/IntrinsicsAMDGPU.h"
22#include "llvm/IR/IntrinsicsR600.h"
23#include "llvm/IR/LLVMContext.h"
24#include "llvm/IR/Metadata.h"
25#include "llvm/MC/MCInstrInfo.h"
30#include <optional>
31
32#define GET_INSTRINFO_NAMED_OPS
33#define GET_INSTRMAP_INFO
34#include "AMDGPUGenInstrInfo.inc"
35
37 "amdhsa-code-object-version", llvm::cl::Hidden,
39 llvm::cl::desc("Set default AMDHSA Code Object Version (module flag "
40 "or asm directive still take priority if present)"));
41
42namespace {
43
44/// \returns Bit mask for given bit \p Shift and bit \p Width.
45unsigned getBitMask(unsigned Shift, unsigned Width) {
46 return ((1 << Width) - 1) << Shift;
47}
48
49/// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
50///
51/// \returns Packed \p Dst.
52unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
53 unsigned Mask = getBitMask(Shift, Width);
54 return ((Src << Shift) & Mask) | (Dst & ~Mask);
55}
56
57/// Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
58///
59/// \returns Unpacked bits.
60unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
61 return (Src & getBitMask(Shift, Width)) >> Shift;
62}
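// Illustrative sketch, not part of the upstream file: how the two helpers
// above compose for a 3-bit field placed at bit 4.
//   unsigned Enc = packBits(/*Src=*/5, /*Dst=*/0, /*Shift=*/4, /*Width=*/3);
//   // Enc == 0x50, since getBitMask(4, 3) == 0x70
//   unsigned Val = unpackBits(Enc, /*Shift=*/4, /*Width=*/3); // Val == 5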
63
64/// \returns Vmcnt bit shift (lower bits).
65unsigned getVmcntBitShiftLo(unsigned VersionMajor) {
66 return VersionMajor >= 11 ? 10 : 0;
67}
68
69/// \returns Vmcnt bit width (lower bits).
70unsigned getVmcntBitWidthLo(unsigned VersionMajor) {
71 return VersionMajor >= 11 ? 6 : 4;
72}
73
74/// \returns Expcnt bit shift.
75unsigned getExpcntBitShift(unsigned VersionMajor) {
76 return VersionMajor >= 11 ? 0 : 4;
77}
78
79/// \returns Expcnt bit width.
80unsigned getExpcntBitWidth(unsigned VersionMajor) { return 3; }
81
82/// \returns Lgkmcnt bit shift.
83unsigned getLgkmcntBitShift(unsigned VersionMajor) {
84 return VersionMajor >= 11 ? 4 : 8;
85}
86
87/// \returns Lgkmcnt bit width.
88unsigned getLgkmcntBitWidth(unsigned VersionMajor) {
89 return VersionMajor >= 10 ? 6 : 4;
90}
91
92/// \returns Vmcnt bit shift (higher bits).
93unsigned getVmcntBitShiftHi(unsigned VersionMajor) { return 14; }
94
95/// \returns Vmcnt bit width (higher bits).
96unsigned getVmcntBitWidthHi(unsigned VersionMajor) {
97 return (VersionMajor == 9 || VersionMajor == 10) ? 2 : 0;
98}
99
100/// \returns Loadcnt bit width
101unsigned getLoadcntBitWidth(unsigned VersionMajor) {
102 return VersionMajor >= 12 ? 6 : 0;
103}
104
105/// \returns Samplecnt bit width.
106unsigned getSamplecntBitWidth(unsigned VersionMajor) {
107 return VersionMajor >= 12 ? 6 : 0;
108}
109
110/// \returns Bvhcnt bit width.
111unsigned getBvhcntBitWidth(unsigned VersionMajor) {
112 return VersionMajor >= 12 ? 3 : 0;
113}
114
115/// \returns Dscnt bit width.
116unsigned getDscntBitWidth(unsigned VersionMajor) {
117 return VersionMajor >= 12 ? 6 : 0;
118}
119
120/// \returns Dscnt bit shift in combined S_WAIT instructions.
121unsigned getDscntBitShift(unsigned VersionMajor) { return 0; }
122
123/// \returns Storecnt or Vscnt bit width, depending on VersionMajor.
124unsigned getStorecntBitWidth(unsigned VersionMajor) {
125 return VersionMajor >= 10 ? 6 : 0;
126}
127
128/// \returns Kmcnt bit width.
129unsigned getKmcntBitWidth(unsigned VersionMajor) {
130 return VersionMajor >= 12 ? 5 : 0;
131}
132
133/// \returns Xcnt bit width.
134unsigned getXcntBitWidth(unsigned VersionMajor, unsigned VersionMinor) {
135 return VersionMajor == 12 && VersionMinor == 5 ? 6 : 0;
136}
137
138/// \returns shift for Loadcnt/Storecnt in combined S_WAIT instructions.
139unsigned getLoadcntStorecntBitShift(unsigned VersionMajor) {
140 return VersionMajor >= 12 ? 8 : 0;
141}
142
143/// \returns VaSdst bit width
144inline unsigned getVaSdstBitWidth() { return 3; }
145
146/// \returns VaSdst bit shift
147inline unsigned getVaSdstBitShift() { return 9; }
148
149/// \returns VmVsrc bit width
150inline unsigned getVmVsrcBitWidth() { return 3; }
151
152/// \returns VmVsrc bit shift
153inline unsigned getVmVsrcBitShift() { return 2; }
154
155/// \returns VaVdst bit width
156inline unsigned getVaVdstBitWidth() { return 4; }
157
158/// \returns VaVdst bit shift
159inline unsigned getVaVdstBitShift() { return 12; }
160
161/// \returns VaVcc bit width
162inline unsigned getVaVccBitWidth() { return 1; }
163
164/// \returns VaVcc bit shift
165inline unsigned getVaVccBitShift() { return 1; }
166
167/// \returns SaSdst bit width
168inline unsigned getSaSdstBitWidth() { return 1; }
169
170/// \returns SaSdst bit shift
171inline unsigned getSaSdstBitShift() { return 0; }
172
173/// \returns VaSsrc bit width
174inline unsigned getVaSsrcBitWidth() { return 1; }
175
176/// \returns VaSsrc bit shift
177inline unsigned getVaSsrcBitShift() { return 8; }
178
179/// \returns HoldCnt bit width
180inline unsigned getHoldCntWidth() { return 1; }
181
182/// \returns HoldCnt bit shift
183inline unsigned getHoldCntBitShift() { return 7; }
184
185} // end anonymous namespace
186
187namespace llvm {
188
189namespace AMDGPU {
190
191/// \returns true if the target supports signed immediate offset for SMRD
192/// instructions.
193bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST) {
194 return isGFX9Plus(ST);
195}
196
197/// \returns True if \p STI is AMDHSA.
198bool isHsaAbi(const MCSubtargetInfo &STI) {
199 return STI.getTargetTriple().getOS() == Triple::AMDHSA;
200}
201
202unsigned getAMDHSACodeObjectVersion(const Module &M) {
203 if (auto *Ver = mdconst::extract_or_null<ConstantInt>(
204 M.getModuleFlag("amdhsa_code_object_version"))) {
205 return (unsigned)Ver->getZExtValue() / 100;
206 }
207
208 return getDefaultAMDHSACodeObjectVersion();
209}
210
214
215unsigned getAMDHSACodeObjectVersion(unsigned ABIVersion) {
216 switch (ABIVersion) {
217 case ELF::ELFABIVERSION_AMDGPU_HSA_V4:
218 return 4;
219 case ELF::ELFABIVERSION_AMDGPU_HSA_V5:
220 return 5;
221 case ELF::ELFABIVERSION_AMDGPU_HSA_V6:
222 return 6;
223 default:
224 return getDefaultAMDHSACodeObjectVersion();
225 }
226}
227
228uint8_t getELFABIVersion(const Triple &T, unsigned CodeObjectVersion) {
229 if (T.getOS() != Triple::AMDHSA)
230 return 0;
231
232 switch (CodeObjectVersion) {
233 case 4:
234 return ELF::ELFABIVERSION_AMDGPU_HSA_V4;
235 case 5:
236 return ELF::ELFABIVERSION_AMDGPU_HSA_V5;
237 case 6:
238 return ELF::ELFABIVERSION_AMDGPU_HSA_V6;
239 default:
240 report_fatal_error("Unsupported AMDHSA Code Object Version " +
241 Twine(CodeObjectVersion));
242 }
243}
244
245unsigned getMultigridSyncArgImplicitArgPosition(unsigned CodeObjectVersion) {
246 switch (CodeObjectVersion) {
247 case AMDHSA_COV4:
248 return 48;
249 case AMDHSA_COV5:
250 case AMDHSA_COV6:
251 default:
252 return AMDGPU::ImplicitArg::MULTIGRID_SYNC_ARG_OFFSET;
253 }
254}
255
256// FIXME: All such magic numbers about the ABI should be in a
257// central TD file.
258unsigned getHostcallImplicitArgPosition(unsigned CodeObjectVersion) {
259 switch (CodeObjectVersion) {
260 case AMDHSA_COV4:
261 return 24;
262 case AMDHSA_COV5:
263 case AMDHSA_COV6:
264 default:
265 return AMDGPU::ImplicitArg::HOSTCALL_PTR_OFFSET;
266 }
267}
268
269unsigned getDefaultQueueImplicitArgPosition(unsigned CodeObjectVersion) {
270 switch (CodeObjectVersion) {
271 case AMDHSA_COV4:
272 return 32;
273 case AMDHSA_COV5:
274 case AMDHSA_COV6:
275 default:
276 return AMDGPU::ImplicitArg::DEFAULT_QUEUE_OFFSET;
277 }
278}
279
280unsigned getCompletionActionImplicitArgPosition(unsigned CodeObjectVersion) {
281 switch (CodeObjectVersion) {
282 case AMDHSA_COV4:
283 return 40;
284 case AMDHSA_COV5:
285 case AMDHSA_COV6:
286 default:
287 return AMDGPU::ImplicitArg::COMPLETION_ACTION_OFFSET;
288 }
289}
290
291#define GET_MIMGBaseOpcodesTable_IMPL
292#define GET_MIMGDimInfoTable_IMPL
293#define GET_MIMGInfoTable_IMPL
294#define GET_MIMGLZMappingTable_IMPL
295#define GET_MIMGMIPMappingTable_IMPL
296#define GET_MIMGBiasMappingTable_IMPL
297#define GET_MIMGOffsetMappingTable_IMPL
298#define GET_MIMGG16MappingTable_IMPL
299#define GET_MAIInstInfoTable_IMPL
300#define GET_WMMAInstInfoTable_IMPL
301#include "AMDGPUGenSearchableTables.inc"
302
303int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
304 unsigned VDataDwords, unsigned VAddrDwords) {
305 const MIMGInfo *Info =
306 getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding, VDataDwords, VAddrDwords);
307 return Info ? Info->Opcode : -1;
308}
309
310const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) {
311 const MIMGInfo *Info = getMIMGInfo(Opc);
312 return Info ? getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr;
313}
314
315int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
316 const MIMGInfo *OrigInfo = getMIMGInfo(Opc);
317 const MIMGInfo *NewInfo =
318 getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding,
319 NewChannels, OrigInfo->VAddrDwords);
320 return NewInfo ? NewInfo->Opcode : -1;
321}
322
323unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode,
324 const MIMGDimInfo *Dim, bool IsA16,
325 bool IsG16Supported) {
326 unsigned AddrWords = BaseOpcode->NumExtraArgs;
327 unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
328 (BaseOpcode->LodOrClampOrMip ? 1 : 0);
329 if (IsA16)
330 AddrWords += divideCeil(AddrComponents, 2);
331 else
332 AddrWords += AddrComponents;
333
334 // Note: For subtargets that support A16 but not G16, enabling A16 also
335 // enables 16 bit gradients.
336 // For subtargets that support A16 (operand) and G16 (done with a different
337 // instruction encoding), they are independent.
338
339 if (BaseOpcode->Gradients) {
340 if ((IsA16 && !IsG16Supported) || BaseOpcode->G16)
341 // There are two gradients per coordinate, we pack them separately.
342 // For the 3d case,
343 // we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv)
344 AddrWords += alignTo<2>(Dim->NumGradients / 2);
345 else
346 AddrWords += Dim->NumGradients;
347 }
348 return AddrWords;
349}
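// Illustrative sketch, not part of the upstream file: for a hypothetical 3-D
// sample with gradients (NumCoords == 3, NumGradients == 6, no extra args, no
// LOD/clamp/mip) on a target where A16 is enabled but G16 is unsupported, the
// count above works out to divideCeil(3, 2) + alignTo<2>(6 / 2) == 2 + 4 == 6
// address dwords.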
350
361
370
375
380
384
388
392
399
407
412
413#define GET_FP4FP8DstByteSelTable_DECL
414#define GET_FP4FP8DstByteSelTable_IMPL
415
420
426
427#define GET_MTBUFInfoTable_DECL
428#define GET_MTBUFInfoTable_IMPL
429#define GET_MUBUFInfoTable_DECL
430#define GET_MUBUFInfoTable_IMPL
431#define GET_SMInfoTable_DECL
432#define GET_SMInfoTable_IMPL
433#define GET_VOP1InfoTable_DECL
434#define GET_VOP1InfoTable_IMPL
435#define GET_VOP2InfoTable_DECL
436#define GET_VOP2InfoTable_IMPL
437#define GET_VOP3InfoTable_DECL
438#define GET_VOP3InfoTable_IMPL
439#define GET_VOPC64DPPTable_DECL
440#define GET_VOPC64DPPTable_IMPL
441#define GET_VOPC64DPP8Table_DECL
442#define GET_VOPC64DPP8Table_IMPL
443#define GET_VOPCAsmOnlyInfoTable_DECL
444#define GET_VOPCAsmOnlyInfoTable_IMPL
445#define GET_VOP3CAsmOnlyInfoTable_DECL
446#define GET_VOP3CAsmOnlyInfoTable_IMPL
447#define GET_VOPDComponentTable_DECL
448#define GET_VOPDComponentTable_IMPL
449#define GET_VOPDPairs_DECL
450#define GET_VOPDPairs_IMPL
451#define GET_VOPTrue16Table_DECL
452#define GET_VOPTrue16Table_IMPL
453#define GET_True16D16Table_IMPL
454#define GET_WMMAOpcode2AddrMappingTable_DECL
455#define GET_WMMAOpcode2AddrMappingTable_IMPL
456#define GET_WMMAOpcode3AddrMappingTable_DECL
457#define GET_WMMAOpcode3AddrMappingTable_IMPL
458#define GET_getMFMA_F8F6F4_WithSize_DECL
459#define GET_getMFMA_F8F6F4_WithSize_IMPL
460#define GET_isMFMA_F8F6F4Table_IMPL
461#define GET_isCvtScaleF32_F32F16ToF8F4Table_IMPL
462
463#include "AMDGPUGenSearchableTables.inc"
464
465int getMTBUFBaseOpcode(unsigned Opc) {
466 const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc);
467 return Info ? Info->BaseOpcode : -1;
468}
469
470int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements) {
471 const MTBUFInfo *Info =
472 getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
473 return Info ? Info->Opcode : -1;
474}
475
476int getMTBUFElements(unsigned Opc) {
477 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
478 return Info ? Info->elements : 0;
479}
480
481bool getMTBUFHasVAddr(unsigned Opc) {
482 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
483 return Info && Info->has_vaddr;
484}
485
486bool getMTBUFHasSrsrc(unsigned Opc) {
487 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
488 return Info && Info->has_srsrc;
489}
490
491bool getMTBUFHasSoffset(unsigned Opc) {
492 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
493 return Info && Info->has_soffset;
494}
495
496int getMUBUFBaseOpcode(unsigned Opc) {
497 const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
498 return Info ? Info->BaseOpcode : -1;
499}
500
501int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements) {
502 const MUBUFInfo *Info =
503 getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
504 return Info ? Info->Opcode : -1;
505}
506
507int getMUBUFElements(unsigned Opc) {
508 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
509 return Info ? Info->elements : 0;
510}
511
512bool getMUBUFHasVAddr(unsigned Opc) {
513 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
514 return Info && Info->has_vaddr;
515}
516
517bool getMUBUFHasSrsrc(unsigned Opc) {
518 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
519 return Info && Info->has_srsrc;
520}
521
522bool getMUBUFHasSoffset(unsigned Opc) {
523 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
524 return Info && Info->has_soffset;
525}
526
527bool getMUBUFIsBufferInv(unsigned Opc) {
528 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
529 return Info && Info->IsBufferInv;
530}
531
532bool getMUBUFTfe(unsigned Opc) {
533 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
534 return Info && Info->tfe;
535}
536
537bool getSMEMIsBuffer(unsigned Opc) {
538 const SMInfo *Info = getSMEMOpcodeHelper(Opc);
539 return Info && Info->IsBuffer;
540}
541
542bool getVOP1IsSingle(unsigned Opc) {
543 const VOPInfo *Info = getVOP1OpcodeHelper(Opc);
544 return !Info || Info->IsSingle;
545}
546
547bool getVOP2IsSingle(unsigned Opc) {
548 const VOPInfo *Info = getVOP2OpcodeHelper(Opc);
549 return !Info || Info->IsSingle;
550}
551
552bool getVOP3IsSingle(unsigned Opc) {
553 const VOPInfo *Info = getVOP3OpcodeHelper(Opc);
554 return !Info || Info->IsSingle;
555}
556
557bool isVOPC64DPP(unsigned Opc) {
558 return isVOPC64DPPOpcodeHelper(Opc) || isVOPC64DPP8OpcodeHelper(Opc);
559}
560
561bool isVOPCAsmOnly(unsigned Opc) { return isVOPCAsmOnlyOpcodeHelper(Opc); }
562
563bool getMAIIsDGEMM(unsigned Opc) {
564 const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
565 return Info && Info->is_dgemm;
566}
567
568bool getMAIIsGFX940XDL(unsigned Opc) {
569 const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
570 return Info && Info->is_gfx940_xdl;
571}
572
573bool getWMMAIsXDL(unsigned Opc) {
574 const WMMAInstInfo *Info = getWMMAInstInfoHelper(Opc);
575 return Info ? Info->is_wmma_xdl : false;
576}
577
579 switch (EncodingVal) {
582 return 6;
584 return 4;
587 default:
588 return 8;
589 }
590
591 llvm_unreachable("covered switch over mfma scale formats");
592}
593
595 unsigned BLGP,
596 unsigned F8F8Opcode) {
597 uint8_t SrcANumRegs = mfmaScaleF8F6F4FormatToNumRegs(CBSZ);
598 uint8_t SrcBNumRegs = mfmaScaleF8F6F4FormatToNumRegs(BLGP);
599 return getMFMA_F8F6F4_InstWithNumRegs(SrcANumRegs, SrcBNumRegs, F8F8Opcode);
600}
601
603 switch (Fmt) {
606 return 16;
609 return 12;
611 return 8;
612 }
613
614 llvm_unreachable("covered switch over wmma scale formats");
615}
616
618 unsigned FmtB,
619 unsigned F8F8Opcode) {
620 uint8_t SrcANumRegs = wmmaScaleF8F6F4FormatToNumRegs(FmtA);
621 uint8_t SrcBNumRegs = wmmaScaleF8F6F4FormatToNumRegs(FmtB);
622 return getMFMA_F8F6F4_InstWithNumRegs(SrcANumRegs, SrcBNumRegs, F8F8Opcode);
623}
624
625unsigned getVOPDEncodingFamily(const MCSubtargetInfo &ST) {
626 if (ST.hasFeature(AMDGPU::FeatureGFX1250Insts))
627 return SIEncodingFamily::GFX1250;
628 if (ST.hasFeature(AMDGPU::FeatureGFX12Insts))
629 return SIEncodingFamily::GFX12;
630 if (ST.hasFeature(AMDGPU::FeatureGFX11Insts))
631 return SIEncodingFamily::GFX11;
632 llvm_unreachable("Subtarget generation does not support VOPD!");
633}
634
635CanBeVOPD getCanBeVOPD(unsigned Opc, unsigned EncodingFamily, bool VOPD3) {
636 bool IsConvertibleToBitOp = VOPD3 ? getBitOp2(Opc) : 0;
637 Opc = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : Opc;
638 const VOPDComponentInfo *Info = getVOPDComponentHelper(Opc);
639 if (Info) {
640 // Check that Opc can be used as VOPDY for this encoding. V_MOV_B32 as a
641 // VOPDX is just a placeholder here, it is supported on all encodings.
642 // TODO: This can be optimized by creating tables of supported VOPDY
643 // opcodes per encoding.
644 unsigned VOPDMov = AMDGPU::getVOPDOpcode(AMDGPU::V_MOV_B32_e32, VOPD3);
645 bool CanBeVOPDY = getVOPDFull(VOPDMov, AMDGPU::getVOPDOpcode(Opc, VOPD3),
646 EncodingFamily, VOPD3) != -1;
647 return {VOPD3 ? Info->CanBeVOPD3X : Info->CanBeVOPDX, CanBeVOPDY};
648 }
649
650 return {false, false};
651}
652
653unsigned getVOPDOpcode(unsigned Opc, bool VOPD3) {
654 bool IsConvertibleToBitOp = VOPD3 ? getBitOp2(Opc) : 0;
655 Opc = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : Opc;
656 const VOPDComponentInfo *Info = getVOPDComponentHelper(Opc);
657 return Info ? Info->VOPDOp : ~0u;
658}
659
660bool isVOPD(unsigned Opc) {
661 return AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0X);
662}
663
664bool isMAC(unsigned Opc) {
665 return Opc == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
666 Opc == AMDGPU::V_MAC_F32_e64_gfx10 ||
667 Opc == AMDGPU::V_MAC_F32_e64_vi ||
668 Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx6_gfx7 ||
669 Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx10 ||
670 Opc == AMDGPU::V_MAC_F16_e64_vi ||
671 Opc == AMDGPU::V_FMAC_F64_e64_gfx90a ||
672 Opc == AMDGPU::V_FMAC_F64_e64_gfx12 ||
673 Opc == AMDGPU::V_FMAC_F32_e64_gfx10 ||
674 Opc == AMDGPU::V_FMAC_F32_e64_gfx11 ||
675 Opc == AMDGPU::V_FMAC_F32_e64_gfx12 ||
676 Opc == AMDGPU::V_FMAC_F32_e64_vi ||
677 Opc == AMDGPU::V_FMAC_LEGACY_F32_e64_gfx10 ||
678 Opc == AMDGPU::V_FMAC_DX9_ZERO_F32_e64_gfx11 ||
679 Opc == AMDGPU::V_FMAC_F16_e64_gfx10 ||
680 Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx11 ||
681 Opc == AMDGPU::V_FMAC_F16_fake16_e64_gfx11 ||
682 Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx12 ||
683 Opc == AMDGPU::V_FMAC_F16_fake16_e64_gfx12 ||
684 Opc == AMDGPU::V_DOT2C_F32_F16_e64_vi ||
685 Opc == AMDGPU::V_DOT2C_F32_BF16_e64_vi ||
686 Opc == AMDGPU::V_DOT2C_I32_I16_e64_vi ||
687 Opc == AMDGPU::V_DOT4C_I32_I8_e64_vi ||
688 Opc == AMDGPU::V_DOT8C_I32_I4_e64_vi;
689}
690
691bool isPermlane16(unsigned Opc) {
692 return Opc == AMDGPU::V_PERMLANE16_B32_gfx10 ||
693 Opc == AMDGPU::V_PERMLANEX16_B32_gfx10 ||
694 Opc == AMDGPU::V_PERMLANE16_B32_e64_gfx11 ||
695 Opc == AMDGPU::V_PERMLANEX16_B32_e64_gfx11 ||
696 Opc == AMDGPU::V_PERMLANE16_B32_e64_gfx12 ||
697 Opc == AMDGPU::V_PERMLANEX16_B32_e64_gfx12 ||
698 Opc == AMDGPU::V_PERMLANE16_VAR_B32_e64_gfx12 ||
699 Opc == AMDGPU::V_PERMLANEX16_VAR_B32_e64_gfx12;
700}
701
703 return Opc == AMDGPU::V_CVT_F32_BF8_e64_gfx12 ||
704 Opc == AMDGPU::V_CVT_F32_FP8_e64_gfx12 ||
705 Opc == AMDGPU::V_CVT_F32_BF8_e64_dpp_gfx12 ||
706 Opc == AMDGPU::V_CVT_F32_FP8_e64_dpp_gfx12 ||
707 Opc == AMDGPU::V_CVT_F32_BF8_e64_dpp8_gfx12 ||
708 Opc == AMDGPU::V_CVT_F32_FP8_e64_dpp8_gfx12 ||
709 Opc == AMDGPU::V_CVT_PK_F32_BF8_fake16_e64_gfx12 ||
710 Opc == AMDGPU::V_CVT_PK_F32_FP8_fake16_e64_gfx12 ||
711 Opc == AMDGPU::V_CVT_PK_F32_BF8_t16_e64_gfx12 ||
712 Opc == AMDGPU::V_CVT_PK_F32_FP8_t16_e64_gfx12;
713}
714
715bool isGenericAtomic(unsigned Opc) {
716 return Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SWAP ||
717 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_ADD ||
718 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB ||
719 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMIN ||
720 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMIN ||
721 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMAX ||
722 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMAX ||
723 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_AND ||
724 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_OR ||
725 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_XOR ||
726 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_INC ||
727 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_DEC ||
728 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD ||
729 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMIN ||
730 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMAX ||
731 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_CMPSWAP ||
732 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB_CLAMP_U32 ||
733 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_COND_SUB_U32 ||
734 Opc == AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG;
735}
736
737bool isAsyncStore(unsigned Opc) {
738 return Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B8_gfx1250 ||
739 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B32_gfx1250 ||
740 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B64_gfx1250 ||
741 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B128_gfx1250 ||
742 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B8_SADDR_gfx1250 ||
743 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B32_SADDR_gfx1250 ||
744 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B64_SADDR_gfx1250 ||
745 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B128_SADDR_gfx1250;
746}
747
748bool isTensorStore(unsigned Opc) {
749 return Opc == TENSOR_STORE_FROM_LDS_gfx1250 ||
750 Opc == TENSOR_STORE_FROM_LDS_D2_gfx1250;
751}
752
753unsigned getTemporalHintType(const MCInstrDesc TID) {
756 unsigned Opc = TID.getOpcode();
757 // Async and Tensor store should have the temporal hint type of TH_TYPE_STORE
758 if (TID.mayStore() &&
759 (isAsyncStore(Opc) || isTensorStore(Opc) || !TID.mayLoad()))
760 return CPol::TH_TYPE_STORE;
761
762 // This will default to returning TH_TYPE_LOAD when neither MayStore nor
763 // MayLoad flag is present which is the case with instructions like
764 // image_get_resinfo.
765 return CPol::TH_TYPE_LOAD;
766}
767
768bool isTrue16Inst(unsigned Opc) {
769 const VOPTrue16Info *Info = getTrue16OpcodeHelper(Opc);
770 return Info && Info->IsTrue16;
771}
772
774 const FP4FP8DstByteSelInfo *Info = getFP4FP8DstByteSelHelper(Opc);
775 if (!Info)
776 return FPType::None;
777 if (Info->HasFP8DstByteSel)
778 return FPType::FP8;
779 if (Info->HasFP4DstByteSel)
780 return FPType::FP4;
781
782 return FPType::None;
783}
784
785unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc) {
786 const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom2AddrOpcode(Opc);
787 return Info ? Info->Opcode3Addr : ~0u;
788}
789
790unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc) {
791 const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom3AddrOpcode(Opc);
792 return Info ? Info->Opcode2Addr : ~0u;
793}
794
795// Wrapper for Tablegen'd function. enum Subtarget is not defined in any
796// header files, so we need to wrap it in a function that takes unsigned
797// instead.
798int getMCOpcode(uint16_t Opcode, unsigned Gen) {
799 return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
800}
801
802unsigned getBitOp2(unsigned Opc) {
803 switch (Opc) {
804 default:
805 return 0;
806 case AMDGPU::V_AND_B32_e32:
807 return 0x40;
808 case AMDGPU::V_OR_B32_e32:
809 return 0x54;
810 case AMDGPU::V_XOR_B32_e32:
811 return 0x14;
812 case AMDGPU::V_XNOR_B32_e32:
813 return 0x41;
814 }
815}
816
817int getVOPDFull(unsigned OpX, unsigned OpY, unsigned EncodingFamily,
818 bool VOPD3) {
819 bool IsConvertibleToBitOp = VOPD3 ? getBitOp2(OpY) : 0;
820 OpY = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : OpY;
821 const VOPDInfo *Info =
822 getVOPDInfoFromComponentOpcodes(OpX, OpY, EncodingFamily, VOPD3);
823 return Info ? Info->Opcode : -1;
824}
825
826std::pair<unsigned, unsigned> getVOPDComponents(unsigned VOPDOpcode) {
827 const VOPDInfo *Info = getVOPDOpcodeHelper(VOPDOpcode);
828 assert(Info);
829 const auto *OpX = getVOPDBaseFromComponent(Info->OpX);
830 const auto *OpY = getVOPDBaseFromComponent(Info->OpY);
831 assert(OpX && OpY);
832 return {OpX->BaseVOP, OpY->BaseVOP};
833}
834
835namespace VOPD {
836
837ComponentProps::ComponentProps(const MCInstrDesc &OpDesc, bool VOP3Layout) {
839
842 auto TiedIdx = OpDesc.getOperandConstraint(Component::SRC2, MCOI::TIED_TO);
843 assert(TiedIdx == -1 || TiedIdx == Component::DST);
844 HasSrc2Acc = TiedIdx != -1;
845 Opcode = OpDesc.getOpcode();
846
847 IsVOP3 = VOP3Layout || (OpDesc.TSFlags & SIInstrFlags::VOP3);
848 SrcOperandsNum = AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src2) ? 3
849 : AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::imm) ? 3
850 : AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src1) ? 2
851 : 1;
852 assert(SrcOperandsNum <= Component::MAX_SRC_NUM);
853
854 if (Opcode == AMDGPU::V_CNDMASK_B32_e32 ||
855 Opcode == AMDGPU::V_CNDMASK_B32_e64) {
856 // CNDMASK is an awkward exception, it has FP modifiers, but not FP
857 // operands.
858 NumVOPD3Mods = 2;
859 if (IsVOP3)
860 SrcOperandsNum = 3;
861 } else if (isSISrcFPOperand(OpDesc,
862 getNamedOperandIdx(Opcode, OpName::src0))) {
863 // All FP VOPD instructions have Neg modifiers for all operands except
864 // for tied src2.
865 NumVOPD3Mods = SrcOperandsNum;
866 if (HasSrc2Acc)
867 --NumVOPD3Mods;
868 }
869
870 if (OpDesc.TSFlags & SIInstrFlags::VOP3)
871 return;
872
873 auto OperandsNum = OpDesc.getNumOperands();
874 unsigned CompOprIdx;
875 for (CompOprIdx = Component::SRC1; CompOprIdx < OperandsNum; ++CompOprIdx) {
876 if (OpDesc.operands()[CompOprIdx].OperandType == AMDGPU::OPERAND_KIMM32) {
877 MandatoryLiteralIdx = CompOprIdx;
878 break;
879 }
880 }
881}
882
884 return getNamedOperandIdx(Opcode, OpName::bitop3);
885}
886
887unsigned ComponentInfo::getIndexInParsedOperands(unsigned CompOprIdx) const {
888 assert(CompOprIdx < Component::MAX_OPR_NUM);
889
890 if (CompOprIdx == Component::DST)
891 return getIndexOfDstInParsedOperands();
892
892
893 auto CompSrcIdx = CompOprIdx - Component::DST_NUM;
894 if (CompSrcIdx < getCompParsedSrcOperandsNum())
895 return getIndexOfSrcInParsedOperands(CompSrcIdx);
896
897 // The specified operand does not exist.
898 return 0;
899}
900
901std::optional<unsigned> InstInfo::getInvalidCompOperandIndex(
902 std::function<MCRegister(unsigned, unsigned)> GetRegIdx,
903 const MCRegisterInfo &MRI, bool SkipSrc, bool AllowSameVGPR,
904 bool VOPD3) const {
905
906 auto OpXRegs = getRegIndices(ComponentIndex::X, GetRegIdx,
907 CompInfo[ComponentIndex::X].isVOP3());
908 auto OpYRegs = getRegIndices(ComponentIndex::Y, GetRegIdx,
909 CompInfo[ComponentIndex::Y].isVOP3());
910
911 const auto banksOverlap = [&MRI](MCRegister X, MCRegister Y,
912 unsigned BanksMask) -> bool {
913 MCRegister BaseX = MRI.getSubReg(X, AMDGPU::sub0);
914 MCRegister BaseY = MRI.getSubReg(Y, AMDGPU::sub0);
915 if (!BaseX)
916 BaseX = X;
917 if (!BaseY)
918 BaseY = Y;
919 if ((BaseX.id() & BanksMask) == (BaseY.id() & BanksMask))
920 return true;
921 if (BaseX != X /* This is 64-bit register */ &&
922 ((BaseX.id() + 1) & BanksMask) == (BaseY.id() & BanksMask))
923 return true;
924 if (BaseY != Y &&
925 (BaseX.id() & BanksMask) == ((BaseY.id() + 1) & BanksMask))
926 return true;
927
928 // If both are 64-bit registers, a bank conflict will already have been
929 // detected while checking the first subreg.
930 return false;
931 };
932
933 unsigned CompOprIdx;
934 for (CompOprIdx = 0; CompOprIdx < Component::MAX_OPR_NUM; ++CompOprIdx) {
935 unsigned BanksMasks = VOPD3 ? VOPD3_VGPR_BANK_MASKS[CompOprIdx]
936 : VOPD_VGPR_BANK_MASKS[CompOprIdx];
937 if (!OpXRegs[CompOprIdx] || !OpYRegs[CompOprIdx])
938 continue;
939
940 if (getVGPREncodingMSBs(OpXRegs[CompOprIdx], MRI) !=
941 getVGPREncodingMSBs(OpYRegs[CompOprIdx], MRI))
942 return CompOprIdx;
943
944 if (SkipSrc && CompOprIdx >= Component::DST_NUM)
945 continue;
946
947 if (CompOprIdx < Component::DST_NUM) {
948 // Even if we do not check vdst parity, vdst operands still shall not
949 // overlap.
950 if (MRI.regsOverlap(OpXRegs[CompOprIdx], OpYRegs[CompOprIdx]))
951 return CompOprIdx;
952 if (VOPD3) // No need to check dst parity.
953 continue;
954 }
955
956 if (banksOverlap(OpXRegs[CompOprIdx], OpYRegs[CompOprIdx], BanksMasks) &&
957 (!AllowSameVGPR || CompOprIdx < Component::DST_NUM ||
958 OpXRegs[CompOprIdx] != OpYRegs[CompOprIdx]))
959 return CompOprIdx;
960 }
961
962 return {};
963}
964
965// Return an array of VGPR registers [DST,SRC0,SRC1,SRC2] used
966// by the specified component. If an operand is unused
967// or is not a VGPR, the corresponding value is 0.
968//
969// GetRegIdx(Component, MCOperandIdx) must return a VGPR register index
970// for the specified component and MC operand. The callback must return 0
971// if the operand is not a register or not a VGPR.
973InstInfo::getRegIndices(unsigned CompIdx,
974 std::function<MCRegister(unsigned, unsigned)> GetRegIdx,
975 bool VOPD3) const {
976 assert(CompIdx < COMPONENTS_NUM);
977
978 const auto &Comp = CompInfo[CompIdx];
980
981 RegIndices[DST] = GetRegIdx(CompIdx, Comp.getIndexOfDstInMCOperands());
982
983 for (unsigned CompOprIdx : {SRC0, SRC1, SRC2}) {
984 unsigned CompSrcIdx = CompOprIdx - DST_NUM;
985 RegIndices[CompOprIdx] =
986 Comp.hasRegSrcOperand(CompSrcIdx)
987 ? GetRegIdx(CompIdx,
988 Comp.getIndexOfSrcInMCOperands(CompSrcIdx, VOPD3))
989 : MCRegister();
990 }
991 return RegIndices;
992}
993
994} // namespace VOPD
995
996VOPD::InstInfo getVOPDInstInfo(const MCInstrDesc &OpX, const MCInstrDesc &OpY) {
997 return VOPD::InstInfo(OpX, OpY);
998}
999
1000VOPD::InstInfo getVOPDInstInfo(unsigned VOPDOpcode,
1001 const MCInstrInfo *InstrInfo) {
1002 auto [OpX, OpY] = getVOPDComponents(VOPDOpcode);
1003 const auto &OpXDesc = InstrInfo->get(OpX);
1004 const auto &OpYDesc = InstrInfo->get(OpY);
1005 bool VOPD3 = InstrInfo->get(VOPDOpcode).TSFlags & SIInstrFlags::VOPD3;
1006 VOPD::ComponentInfo OpXInfo(OpXDesc, VOPD3);
1007 VOPD::ComponentInfo OpYInfo(OpYDesc, OpXInfo, VOPD3);
1008 return VOPD::InstInfo(OpXInfo, OpYInfo);
1009}
1010
1011namespace IsaInfo {
1012
1013AMDGPUTargetID::AMDGPUTargetID(const MCSubtargetInfo &STI)
1014 : STI(STI), XnackSetting(TargetIDSetting::Any),
1015 SramEccSetting(TargetIDSetting::Any) {
1016 if (!STI.getFeatureBits().test(FeatureSupportsXNACK))
1017 XnackSetting = TargetIDSetting::Unsupported;
1018 if (!STI.getFeatureBits().test(FeatureSupportsSRAMECC))
1019 SramEccSetting = TargetIDSetting::Unsupported;
1020}
1021
1022void AMDGPUTargetID::setTargetIDFromFeaturesString(StringRef FS) {
1023 // Check if xnack or sramecc is explicitly enabled or disabled. In the
1024 // absence of the target features we assume we must generate code that can run
1025 // in any environment.
1026 SubtargetFeatures Features(FS);
1027 std::optional<bool> XnackRequested;
1028 std::optional<bool> SramEccRequested;
1029
1030 for (const std::string &Feature : Features.getFeatures()) {
1031 if (Feature == "+xnack")
1032 XnackRequested = true;
1033 else if (Feature == "-xnack")
1034 XnackRequested = false;
1035 else if (Feature == "+sramecc")
1036 SramEccRequested = true;
1037 else if (Feature == "-sramecc")
1038 SramEccRequested = false;
1039 }
1040
1041 bool XnackSupported = isXnackSupported();
1042 bool SramEccSupported = isSramEccSupported();
1043
1044 if (XnackRequested) {
1045 if (XnackSupported) {
1046 XnackSetting =
1047 *XnackRequested ? TargetIDSetting::On : TargetIDSetting::Off;
1048 } else {
1049 // If a specific xnack setting was requested and this GPU does not support
1050 // xnack, emit a warning. The setting will remain "Unsupported".
1051 if (*XnackRequested) {
1052 errs() << "warning: xnack 'On' was requested for a processor that does "
1053 "not support it!\n";
1054 } else {
1055 errs() << "warning: xnack 'Off' was requested for a processor that "
1056 "does not support it!\n";
1057 }
1058 }
1059 }
1060
1061 if (SramEccRequested) {
1062 if (SramEccSupported) {
1063 SramEccSetting =
1064 *SramEccRequested ? TargetIDSetting::On : TargetIDSetting::Off;
1065 } else {
1066 // If a specific sramecc setting was requested and this GPU does not
1067 // support sramecc, emit a warning. The setting will remain
1068 // "Unsupported".
1069 if (*SramEccRequested) {
1070 errs() << "warning: sramecc 'On' was requested for a processor that "
1071 "does not support it!\n";
1072 } else {
1073 errs() << "warning: sramecc 'Off' was requested for a processor that "
1074 "does not support it!\n";
1075 }
1076 }
1077 }
1078}
1079
1080static TargetIDSetting
1082 if (FeatureString.ends_with("-"))
1083 return TargetIDSetting::Off;
1084 if (FeatureString.ends_with("+"))
1085 return TargetIDSetting::On;
1086
1087 llvm_unreachable("Malformed feature string");
1088}
1089
1090void AMDGPUTargetID::setTargetIDFromTargetIDStream(StringRef TargetID) {
1091 SmallVector<StringRef, 3> TargetIDSplit;
1092 TargetID.split(TargetIDSplit, ':');
1093
1094 for (const auto &FeatureString : TargetIDSplit) {
1095 if (FeatureString.starts_with("xnack"))
1096 XnackSetting = getTargetIDSettingFromFeatureString(FeatureString);
1097 if (FeatureString.starts_with("sramecc"))
1098 SramEccSetting = getTargetIDSettingFromFeatureString(FeatureString);
1099 }
1100}
1101
1102std::string AMDGPUTargetID::toString() const {
1103 std::string StringRep;
1104 raw_string_ostream StreamRep(StringRep);
1105
1106 auto TargetTriple = STI.getTargetTriple();
1107 auto Version = getIsaVersion(STI.getCPU());
1108
1109 StreamRep << TargetTriple.getArchName() << '-' << TargetTriple.getVendorName()
1110 << '-' << TargetTriple.getOSName() << '-'
1111 << TargetTriple.getEnvironmentName() << '-';
1112
1113 std::string Processor;
1114 // TODO: The following else branch exists because we used various alias
1115 // names for GPUs up until GFX9 (e.g. 'fiji' is the same as 'gfx803').
1116 // Remove it once all aliases are removed from GCNProcessors.td.
1117 if (Version.Major >= 9)
1118 Processor = STI.getCPU().str();
1119 else
1120 Processor = (Twine("gfx") + Twine(Version.Major) + Twine(Version.Minor) +
1121 Twine(Version.Stepping))
1122 .str();
1123
1124 std::string Features;
1125 if (STI.getTargetTriple().getOS() == Triple::AMDHSA) {
1126 // sramecc.
1127 if (getSramEccSetting() == TargetIDSetting::Off)
1128 Features += ":sramecc-";
1129 else if (getSramEccSetting() == TargetIDSetting::On)
1130 Features += ":sramecc+";
1131 // xnack.
1132 if (getXnackSetting() == TargetIDSetting::Off)
1133 Features += ":xnack-";
1134 else if (getXnackSetting() == TargetIDSetting::On)
1135 Features += ":xnack+";
1136 }
1137
1138 StreamRep << Processor << Features;
1139
1140 return StringRep;
1141}
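// Illustrative sketch, not part of the upstream file: for an amdhsa target
// such as gfx90a with sramecc enabled and xnack disabled, the string built
// above has the target-id form "amdgcn-amd-amdhsa--gfx90a:sramecc+:xnack-".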
1142
1143unsigned getWavefrontSize(const MCSubtargetInfo *STI) {
1144 if (STI->getFeatureBits().test(FeatureWavefrontSize16))
1145 return 16;
1146 if (STI->getFeatureBits().test(FeatureWavefrontSize32))
1147 return 32;
1148
1149 return 64;
1150}
1151
1152unsigned getLocalMemorySize(const MCSubtargetInfo *STI) {
1153 unsigned BytesPerCU = getAddressableLocalMemorySize(STI);
1154
1155 // "Per CU" really means "per whatever functional block the waves of a
1156 // workgroup must share". So the effective local memory size is doubled in
1157 // WGP mode on gfx10.
1158 if (isGFX10Plus(*STI) && !STI->getFeatureBits().test(FeatureCuMode))
1159 BytesPerCU *= 2;
1160
1161 return BytesPerCU;
1162}
1163
1164unsigned getAddressableLocalMemorySize(const MCSubtargetInfo *STI) {
1165 if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize32768))
1166 return 32768;
1167 if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize65536))
1168 return 65536;
1169 if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize163840))
1170 return 163840;
1171 if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize327680))
1172 return 327680;
1173 return 32768;
1174}
1175
1176unsigned getEUsPerCU(const MCSubtargetInfo *STI) {
1177 // "Per CU" really means "per whatever functional block the waves of a
1178 // workgroup must share".
1179
1180 // GFX12.5 only supports CU mode, which contains four SIMDs.
1181 if (isGFX1250(*STI)) {
1182 assert(STI->getFeatureBits().test(FeatureCuMode));
1183 return 4;
1184 }
1185
1186 // For gfx10 in CU mode the functional block is the CU, which contains
1187 // two SIMDs.
1188 if (isGFX10Plus(*STI) && STI->getFeatureBits().test(FeatureCuMode))
1189 return 2;
1190
1191 // Pre-gfx10 a CU contains four SIMDs. For gfx10 in WGP mode the WGP
1192 // contains two CUs, so a total of four SIMDs.
1193 return 4;
1194}
1195
1196unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
1197 unsigned FlatWorkGroupSize) {
1198 assert(FlatWorkGroupSize != 0);
1199 if (!STI->getTargetTriple().isAMDGCN())
1200 return 8;
1201 unsigned MaxWaves = getMaxWavesPerEU(STI) * getEUsPerCU(STI);
1202 unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize);
1203 if (N == 1) {
1204 // Single-wave workgroups don't consume barrier resources.
1205 return MaxWaves;
1206 }
1207
1208 unsigned MaxBarriers = 16;
1209 if (isGFX10Plus(*STI) && !STI->getFeatureBits().test(FeatureCuMode))
1210 MaxBarriers = 32;
1211
1212 return std::min(MaxWaves / N, MaxBarriers);
1213}
1214
1215unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) { return 1; }
1216
1217unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI) {
1218 // FIXME: Need to take scratch memory into account.
1219 if (isGFX90A(*STI))
1220 return 8;
1221 if (!isGFX10Plus(*STI))
1222 return 10;
1223 return hasGFX10_3Insts(*STI) ? 16 : 20;
1224}
1225
1226unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI,
1227 unsigned FlatWorkGroupSize) {
1228 return divideCeil(getWavesPerWorkGroup(STI, FlatWorkGroupSize),
1229 getEUsPerCU(STI));
1230}
1231
1232unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) { return 1; }
1233
1235 // Some subtargets allow encoding 2048, but this isn't tested or supported.
1236 return 1024;
1237}
1238
1239unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
1240 unsigned FlatWorkGroupSize) {
1241 return divideCeil(FlatWorkGroupSize, getWavefrontSize(STI));
1242}
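// Illustrative sketch, not part of the upstream file: on a wave64 target with
// FlatWorkGroupSize == 256, getWavesPerWorkGroup returns divideCeil(256, 64)
// == 4, and getWavesPerEUForWorkGroup then divides by getEUsPerCU(STI)
// (4 on a pre-gfx10 target) to give 1 wave per EU for that workgroup.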
1243
1244unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) {
1245 IsaVersion Version = getIsaVersion(STI->getCPU());
1246 if (Version.Major >= 10)
1247 return getAddressableNumSGPRs(STI);
1248 if (Version.Major >= 8)
1249 return 16;
1250 return 8;
1251}
1252
1253unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) { return 8; }
1254
1255unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) {
1257 if (Version.Major >= 8)
1258 return 800;
1259 return 512;
1260}
1261
1262unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) {
1263 if (STI->getFeatureBits().test(FeatureSGPRInitBug))
1264 return FIXED_NUM_SGPRS_FOR_INIT_BUG;
1265
1266 IsaVersion Version = getIsaVersion(STI->getCPU());
1267 if (Version.Major >= 10)
1268 return 106;
1269 if (Version.Major >= 8)
1270 return 102;
1271 return 104;
1272}
1273
1274unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
1275 assert(WavesPerEU != 0);
1276
1278 if (Version.Major >= 10)
1279 return 0;
1280
1281 if (WavesPerEU >= getMaxWavesPerEU(STI))
1282 return 0;
1283
1284 unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1);
1285 if (STI->getFeatureBits().test(FeatureTrapHandler))
1286 MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
1287 MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1;
1288 return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI));
1289}
1290
1291unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
1292 bool Addressable) {
1293 assert(WavesPerEU != 0);
1294
1295 unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI);
1297 if (Version.Major >= 10)
1298 return Addressable ? AddressableNumSGPRs : 108;
1299 if (Version.Major >= 8 && !Addressable)
1300 AddressableNumSGPRs = 112;
1301 unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU;
1302 if (STI->getFeatureBits().test(FeatureTrapHandler))
1303 MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
1304 MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI));
1305 return std::min(MaxNumSGPRs, AddressableNumSGPRs);
1306}
1307
1308unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
1309 bool FlatScrUsed, bool XNACKUsed) {
1310 unsigned ExtraSGPRs = 0;
1311 if (VCCUsed)
1312 ExtraSGPRs = 2;
1313
1315 if (Version.Major >= 10)
1316 return ExtraSGPRs;
1317
1318 if (Version.Major < 8) {
1319 if (FlatScrUsed)
1320 ExtraSGPRs = 4;
1321 } else {
1322 if (XNACKUsed)
1323 ExtraSGPRs = 4;
1324
1325 if (FlatScrUsed ||
1326 STI->getFeatureBits().test(AMDGPU::FeatureArchitectedFlatScratch))
1327 ExtraSGPRs = 6;
1328 }
1329
1330 return ExtraSGPRs;
1331}
1332
1333unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
1334 bool FlatScrUsed) {
1335 return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed,
1336 STI->getFeatureBits().test(AMDGPU::FeatureXNACK));
1337}
1338
1339static unsigned getGranulatedNumRegisterBlocks(unsigned NumRegs,
1340 unsigned Granule) {
1341 return divideCeil(std::max(1u, NumRegs), Granule);
1342}
1343
1344unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
1345 // SGPRBlocks is the actual number of SGPR blocks minus 1.
1346 return getGranulatedNumRegisterBlocks(NumSGPRs, getSGPREncodingGranule(STI)) -
1347 1;
1348}
1349
1350unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
1351 unsigned DynamicVGPRBlockSize,
1352 std::optional<bool> EnableWavefrontSize32) {
1353 if (STI->getFeatureBits().test(FeatureGFX90AInsts))
1354 return 8;
1355
1356 if (DynamicVGPRBlockSize != 0)
1357 return DynamicVGPRBlockSize;
1358
1359 bool IsWave32 = EnableWavefrontSize32
1360 ? *EnableWavefrontSize32
1361 : STI->getFeatureBits().test(FeatureWavefrontSize32);
1362
1363 if (STI->getFeatureBits().test(Feature1_5xVGPRs))
1364 return IsWave32 ? 24 : 12;
1365
1366 if (hasGFX10_3Insts(*STI))
1367 return IsWave32 ? 16 : 8;
1368
1369 return IsWave32 ? 8 : 4;
1370}
1371
1372unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
1373 std::optional<bool> EnableWavefrontSize32) {
1374 if (STI->getFeatureBits().test(FeatureGFX90AInsts))
1375 return 8;
1376
1377 bool IsWave32 = EnableWavefrontSize32
1378 ? *EnableWavefrontSize32
1379 : STI->getFeatureBits().test(FeatureWavefrontSize32);
1380
1381 if (STI->getFeatureBits().test(Feature1024AddressableVGPRs))
1382 return IsWave32 ? 16 : 8;
1383
1384 return IsWave32 ? 8 : 4;
1385}
1386
1387unsigned getArchVGPRAllocGranule() { return 4; }
1388
1389unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) {
1390 if (STI->getFeatureBits().test(FeatureGFX90AInsts))
1391 return 512;
1392 if (!isGFX10Plus(*STI))
1393 return 256;
1394 bool IsWave32 = STI->getFeatureBits().test(FeatureWavefrontSize32);
1395 if (STI->getFeatureBits().test(Feature1_5xVGPRs))
1396 return IsWave32 ? 1536 : 768;
1397 return IsWave32 ? 1024 : 512;
1398}
1399
1400unsigned getAddressableNumArchVGPRs(const MCSubtargetInfo *STI) {
1401 const auto &Features = STI->getFeatureBits();
1402 if (Features.test(Feature1024AddressableVGPRs))
1403 return Features.test(FeatureWavefrontSize32) ? 1024 : 512;
1404 return 256;
1405}
1406
1407unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI,
1408 unsigned DynamicVGPRBlockSize) {
1409 const auto &Features = STI->getFeatureBits();
1410 if (Features.test(FeatureGFX90AInsts))
1411 return 512;
1412
1413 if (DynamicVGPRBlockSize != 0)
1414 // On GFX12 we can allocate at most 8 blocks of VGPRs.
1415 return 8 * getVGPRAllocGranule(STI, DynamicVGPRBlockSize);
1416 return getAddressableNumArchVGPRs(STI);
1417}
1418
1419unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI,
1420 unsigned NumVGPRs,
1421 unsigned DynamicVGPRBlockSize) {
1422 return getNumWavesPerEUWithNumVGPRs(
1423 NumVGPRs, getVGPRAllocGranule(STI, DynamicVGPRBlockSize),
1424 getMaxWavesPerEU(STI), getTotalNumVGPRs(STI));
1425}
1426
1427unsigned getNumWavesPerEUWithNumVGPRs(unsigned NumVGPRs, unsigned Granule,
1428 unsigned MaxWaves,
1429 unsigned TotalNumVGPRs) {
1430 if (NumVGPRs < Granule)
1431 return MaxWaves;
1432 unsigned RoundedRegs = alignTo(NumVGPRs, Granule);
1433 return std::min(std::max(TotalNumVGPRs / RoundedRegs, 1u), MaxWaves);
1434}
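// Illustrative sketch, not part of the upstream file: with Granule == 8,
// TotalNumVGPRs == 512 and MaxWaves == 8, a request for 130 VGPRs rounds up
// to alignTo(130, 8) == 136, and 512 / 136 == 3, so at most 3 waves per EU
// fit at that VGPR demand.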
1435
1436unsigned getOccupancyWithNumSGPRs(unsigned SGPRs, unsigned MaxWaves,
1438 if (Gen >= AMDGPUSubtarget::GFX10)
1439 return MaxWaves;
1440
1442 if (SGPRs <= 80)
1443 return 10;
1444 if (SGPRs <= 88)
1445 return 9;
1446 if (SGPRs <= 100)
1447 return 8;
1448 return 7;
1449 }
1450 if (SGPRs <= 48)
1451 return 10;
1452 if (SGPRs <= 56)
1453 return 9;
1454 if (SGPRs <= 64)
1455 return 8;
1456 if (SGPRs <= 72)
1457 return 7;
1458 if (SGPRs <= 80)
1459 return 6;
1460 return 5;
1461}
1462
1463unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
1464 unsigned DynamicVGPRBlockSize) {
1465 assert(WavesPerEU != 0);
1466
1467 unsigned MaxWavesPerEU = getMaxWavesPerEU(STI);
1468 if (WavesPerEU >= MaxWavesPerEU)
1469 return 0;
1470
1471 unsigned TotNumVGPRs = getTotalNumVGPRs(STI);
1472 unsigned AddrsableNumVGPRs =
1473 getAddressableNumVGPRs(STI, DynamicVGPRBlockSize);
1474 unsigned Granule = getVGPRAllocGranule(STI, DynamicVGPRBlockSize);
1475 unsigned MaxNumVGPRs = alignDown(TotNumVGPRs / WavesPerEU, Granule);
1476
1477 if (MaxNumVGPRs == alignDown(TotNumVGPRs / MaxWavesPerEU, Granule))
1478 return 0;
1479
1480 unsigned MinWavesPerEU = getNumWavesPerEUWithNumVGPRs(STI, AddrsableNumVGPRs,
1481 DynamicVGPRBlockSize);
1482 if (WavesPerEU < MinWavesPerEU)
1483 return getMinNumVGPRs(STI, MinWavesPerEU, DynamicVGPRBlockSize);
1484
1485 unsigned MaxNumVGPRsNext = alignDown(TotNumVGPRs / (WavesPerEU + 1), Granule);
1486 unsigned MinNumVGPRs = 1 + std::min(MaxNumVGPRs - Granule, MaxNumVGPRsNext);
1487 return std::min(MinNumVGPRs, AddrsableNumVGPRs);
1488}
1489
1490unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
1491 unsigned DynamicVGPRBlockSize) {
1492 assert(WavesPerEU != 0);
1493
1494 unsigned MaxNumVGPRs =
1495 alignDown(getTotalNumVGPRs(STI) / WavesPerEU,
1496 getVGPRAllocGranule(STI, DynamicVGPRBlockSize));
1497 unsigned AddressableNumVGPRs =
1498 getAddressableNumVGPRs(STI, DynamicVGPRBlockSize);
1499 return std::min(MaxNumVGPRs, AddressableNumVGPRs);
1500}
1501
1502unsigned getEncodedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
1503 std::optional<bool> EnableWavefrontSize32) {
1504 return getGranulatedNumRegisterBlocks(
1505 NumVGPRs, getVGPREncodingGranule(STI, EnableWavefrontSize32)) -
1506 1;
1507}
1508
1509unsigned getAllocatedNumVGPRBlocks(const MCSubtargetInfo *STI,
1510 unsigned NumVGPRs,
1511 unsigned DynamicVGPRBlockSize,
1512 std::optional<bool> EnableWavefrontSize32) {
1513 return getGranulatedNumRegisterBlocks(
1514 NumVGPRs,
1515 getVGPRAllocGranule(STI, DynamicVGPRBlockSize, EnableWavefrontSize32));
1516}
1517} // end namespace IsaInfo
1518
1520 const MCSubtargetInfo *STI) {
1522 KernelCode.amd_kernel_code_version_major = 1;
1523 KernelCode.amd_kernel_code_version_minor = 2;
1524 KernelCode.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
1525 KernelCode.amd_machine_version_major = Version.Major;
1526 KernelCode.amd_machine_version_minor = Version.Minor;
1527 KernelCode.amd_machine_version_stepping = Version.Stepping;
1529 if (STI->getFeatureBits().test(FeatureWavefrontSize32)) {
1530 KernelCode.wavefront_size = 5;
1532 } else {
1533 KernelCode.wavefront_size = 6;
1534 }
1535
1536 // If the code object does not support indirect functions, then the value must
1537 // be 0xffffffff.
1538 KernelCode.call_convention = -1;
1539
1540 // These alignment values are specified in powers of two, so alignment =
1541 // 2^n. The minimum alignment is 2^4 = 16.
1542 KernelCode.kernarg_segment_alignment = 4;
1543 KernelCode.group_segment_alignment = 4;
1544 KernelCode.private_segment_alignment = 4;
1545
1546 if (Version.Major >= 10) {
1547 KernelCode.compute_pgm_resource_registers |=
1548 S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) |
1550 }
1551}
1552
1553bool isGroupSegment(const GlobalValue *GV) {
1554 return GV->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
1555}
1556
1557bool isGlobalSegment(const GlobalValue *GV) {
1558 return GV->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
1559}
1560
1561bool isReadOnlySegment(const GlobalValue *GV) {
1562 unsigned AS = GV->getAddressSpace();
1563 return AS == AMDGPUAS::CONSTANT_ADDRESS ||
1564 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
1565}
1566
1567bool shouldEmitConstantsToTextSection(const Triple &TT) {
1568 return TT.getArch() == Triple::r600;
1569}
1570
1571static bool isValidRegPrefix(char C) {
1572 return C == 'v' || C == 's' || C == 'a';
1573}
1574
1575std::tuple<char, unsigned, unsigned> parseAsmPhysRegName(StringRef RegName) {
1576 char Kind = RegName.front();
1577 if (!isValidRegPrefix(Kind))
1578 return {};
1579
1580 RegName = RegName.drop_front();
1581 if (RegName.consume_front("[")) {
1582 unsigned Idx, End;
1583 bool Failed = RegName.consumeInteger(10, Idx);
1584 Failed |= !RegName.consume_front(":");
1585 Failed |= RegName.consumeInteger(10, End);
1586 Failed |= !RegName.consume_back("]");
1587 if (!Failed) {
1588 unsigned NumRegs = End - Idx + 1;
1589 if (NumRegs > 1)
1590 return {Kind, Idx, NumRegs};
1591 }
1592 } else {
1593 unsigned Idx;
1594 bool Failed = RegName.getAsInteger(10, Idx);
1595 if (!Failed)
1596 return {Kind, Idx, 1};
1597 }
1598
1599 return {};
1600}
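// Illustrative sketch, not part of the upstream file:
// parseAsmPhysRegName("v[8:11]") yields {'v', 8, 4} (a 4-register VGPR tuple
// starting at v8), parseAsmPhysRegName("s17") yields {'s', 17, 1}, and any
// name that does not start with 'v', 's' or 'a' returns the empty tuple.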
1601
1602std::tuple<char, unsigned, unsigned>
1603parseAsmConstraintPhysReg(StringRef Constraint) {
1604 StringRef RegName = Constraint;
1605 if (!RegName.consume_front("{") || !RegName.consume_back("}"))
1606 return {};
1607 return parseAsmPhysRegName(RegName);
1608}
1609
1610std::pair<unsigned, unsigned>
1611getIntegerPairAttribute(const Function &F, StringRef Name,
1612 std::pair<unsigned, unsigned> Default,
1613 bool OnlyFirstRequired) {
1614 if (auto Attr = getIntegerPairAttribute(F, Name, OnlyFirstRequired))
1615 return {Attr->first, Attr->second.value_or(Default.second)};
1616 return Default;
1617}
1618
1619std::optional<std::pair<unsigned, std::optional<unsigned>>>
1620getIntegerPairAttribute(const Function &F, StringRef Name,
1621 bool OnlyFirstRequired) {
1622 Attribute A = F.getFnAttribute(Name);
1623 if (!A.isStringAttribute())
1624 return std::nullopt;
1625
1626 LLVMContext &Ctx = F.getContext();
1627 std::pair<unsigned, std::optional<unsigned>> Ints;
1628 std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
1629 if (Strs.first.trim().getAsInteger(0, Ints.first)) {
1630 Ctx.emitError("can't parse first integer attribute " + Name);
1631 return std::nullopt;
1632 }
1633 unsigned Second = 0;
1634 if (Strs.second.trim().getAsInteger(0, Second)) {
1635 if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
1636 Ctx.emitError("can't parse second integer attribute " + Name);
1637 return std::nullopt;
1638 }
1639 } else {
1640 Ints.second = Second;
1641 }
1642
1643 return Ints;
1644}
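// Illustrative sketch, not part of the upstream file: for a function carrying
//   "amdgpu-flat-work-group-size"="1,256"
// getIntegerPairAttribute(F, "amdgpu-flat-work-group-size", {1, 1024})
// returns {1, 256}; if the attribute is absent, the caller-supplied default
// pair is returned unchanged.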
1645
1647 unsigned Size,
1648 unsigned DefaultVal) {
1649 std::optional<SmallVector<unsigned>> R =
1651 return R.has_value() ? *R : SmallVector<unsigned>(Size, DefaultVal);
1652}
1653
1654std::optional<SmallVector<unsigned>>
1656 assert(Size > 2);
1657 LLVMContext &Ctx = F.getContext();
1658
1659 Attribute A = F.getFnAttribute(Name);
1660 if (!A.isValid())
1661 return std::nullopt;
1662 if (!A.isStringAttribute()) {
1663 Ctx.emitError(Name + " is not a string attribute");
1664 return std::nullopt;
1665 }
1666
1668
1669 StringRef S = A.getValueAsString();
1670 unsigned i = 0;
1671 for (; !S.empty() && i < Size; i++) {
1672 std::pair<StringRef, StringRef> Strs = S.split(',');
1673 unsigned IntVal;
1674 if (Strs.first.trim().getAsInteger(0, IntVal)) {
1675 Ctx.emitError("can't parse integer attribute " + Strs.first + " in " +
1676 Name);
1677 return std::nullopt;
1678 }
1679 Vals[i] = IntVal;
1680 S = Strs.second;
1681 }
1682
1683 if (!S.empty() || i < Size) {
1684 Ctx.emitError("attribute " + Name +
1685 " has incorrect number of integers; expected " +
1687 return std::nullopt;
1688 }
1689 return Vals;
1690}
1691
1692bool hasValueInRangeLikeMetadata(const MDNode &MD, int64_t Val) {
1693 assert((MD.getNumOperands() % 2 == 0) && "invalid number of operands!");
1694 for (unsigned I = 0, E = MD.getNumOperands() / 2; I != E; ++I) {
1695 auto Low =
1696 mdconst::extract<ConstantInt>(MD.getOperand(2 * I + 0))->getValue();
1697 auto High =
1698 mdconst::extract<ConstantInt>(MD.getOperand(2 * I + 1))->getValue();
1699 // There are two types of [A; B) ranges:
1700 // A < B, e.g. [4; 5) which is a range that only includes 4.
1701 // A > B, e.g. [5; 4) which is a range that wraps around and includes
1702 // everything except 4.
1703 if (Low.ult(High)) {
1704 if (Low.ule(Val) && High.ugt(Val))
1705 return true;
1706 } else {
1707 if (Low.uge(Val) && High.ult(Val))
1708 return true;
1709 }
1710 }
1711
1712 return false;
1713}
1714
1716 return (1 << (getVmcntBitWidthLo(Version.Major) +
1717 getVmcntBitWidthHi(Version.Major))) -
1718 1;
1719}
1720
1722 return (1 << getLoadcntBitWidth(Version.Major)) - 1;
1723}
1724
1726 return (1 << getSamplecntBitWidth(Version.Major)) - 1;
1727}
1728
1730 return (1 << getBvhcntBitWidth(Version.Major)) - 1;
1731}
1732
1734 return (1 << getExpcntBitWidth(Version.Major)) - 1;
1735}
1736
1738 return (1 << getLgkmcntBitWidth(Version.Major)) - 1;
1739}
1740
1742 return (1 << getDscntBitWidth(Version.Major)) - 1;
1743}
1744
1746 return (1 << getKmcntBitWidth(Version.Major)) - 1;
1747}
1748
1750 return (1 << getXcntBitWidth(Version.Major, Version.Minor)) - 1;
1751}
1752
1754 return (1 << getStorecntBitWidth(Version.Major)) - 1;
1755}
1756
1757unsigned getWaitcntBitMask(const IsaVersion &Version) {
1758 unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(Version.Major),
1759 getVmcntBitWidthLo(Version.Major));
1760 unsigned Expcnt = getBitMask(getExpcntBitShift(Version.Major),
1761 getExpcntBitWidth(Version.Major));
1762 unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(Version.Major),
1763 getLgkmcntBitWidth(Version.Major));
1764 unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(Version.Major),
1765 getVmcntBitWidthHi(Version.Major));
1766 return VmcntLo | Expcnt | Lgkmcnt | VmcntHi;
1767}
1768
1769unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) {
1770 unsigned VmcntLo = unpackBits(Waitcnt, getVmcntBitShiftLo(Version.Major),
1771 getVmcntBitWidthLo(Version.Major));
1772 unsigned VmcntHi = unpackBits(Waitcnt, getVmcntBitShiftHi(Version.Major),
1773 getVmcntBitWidthHi(Version.Major));
1774 return VmcntLo | VmcntHi << getVmcntBitWidthLo(Version.Major);
1775}
1776
1777unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) {
1778 return unpackBits(Waitcnt, getExpcntBitShift(Version.Major),
1779 getExpcntBitWidth(Version.Major));
1780}
1781
1782unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
1783 return unpackBits(Waitcnt, getLgkmcntBitShift(Version.Major),
1784 getLgkmcntBitWidth(Version.Major));
1785}
1786
1787void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt,
1788 unsigned &Expcnt, unsigned &Lgkmcnt) {
1789 Vmcnt = decodeVmcnt(Version, Waitcnt);
1790 Expcnt = decodeExpcnt(Version, Waitcnt);
1791 Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
1792}
1793
1794Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) {
1795 Waitcnt Decoded;
1796 Decoded.LoadCnt = decodeVmcnt(Version, Encoded);
1797 Decoded.ExpCnt = decodeExpcnt(Version, Encoded);
1798 Decoded.DsCnt = decodeLgkmcnt(Version, Encoded);
1799 return Decoded;
1800}
1801
1802unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
1803 unsigned Vmcnt) {
1804 Waitcnt = packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(Version.Major),
1805 getVmcntBitWidthLo(Version.Major));
1806 return packBits(Vmcnt >> getVmcntBitWidthLo(Version.Major), Waitcnt,
1807 getVmcntBitShiftHi(Version.Major),
1808 getVmcntBitWidthHi(Version.Major));
1809}
1810
1811unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
1812 unsigned Expcnt) {
1813 return packBits(Expcnt, Waitcnt, getExpcntBitShift(Version.Major),
1814 getExpcntBitWidth(Version.Major));
1815}
1816
1817unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
1818 unsigned Lgkmcnt) {
1819 return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(Version.Major),
1820 getLgkmcntBitWidth(Version.Major));
1821}
1822
1823unsigned encodeWaitcnt(const IsaVersion &Version, unsigned Vmcnt,
1824 unsigned Expcnt, unsigned Lgkmcnt) {
1825 unsigned Waitcnt = getWaitcntBitMask(Version);
1826 Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
1827 Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
1828 Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
1829 return Waitcnt;
1830}
1831
1832unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) {
1833 return encodeWaitcnt(Version, Decoded.LoadCnt, Decoded.ExpCnt, Decoded.DsCnt);
1834}
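// Illustrative sketch, not part of the upstream file: on a pre-gfx11 target
// such as gfx900 the legacy s_waitcnt layout is vmcnt[3:0], expcnt[6:4],
// lgkmcnt[11:8] and vmcnt-hi[15:14], so
//   IsaVersion V = getIsaVersion("gfx900");
//   unsigned Enc = encodeWaitcnt(V, /*Vmcnt=*/1, /*Expcnt=*/2, /*Lgkmcnt=*/3);
//   unsigned Vm, Exp, Lgkm;
//   decodeWaitcnt(V, Enc, Vm, Exp, Lgkm); // Vm == 1, Exp == 2, Lgkm == 3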
1835
1836static unsigned getCombinedCountBitMask(const IsaVersion &Version,
1837 bool IsStore) {
1838 unsigned Dscnt = getBitMask(getDscntBitShift(Version.Major),
1839 getDscntBitWidth(Version.Major));
1840 if (IsStore) {
1841 unsigned Storecnt = getBitMask(getLoadcntStorecntBitShift(Version.Major),
1842 getStorecntBitWidth(Version.Major));
1843 return Dscnt | Storecnt;
1844 }
1845 unsigned Loadcnt = getBitMask(getLoadcntStorecntBitShift(Version.Major),
1846 getLoadcntBitWidth(Version.Major));
1847 return Dscnt | Loadcnt;
1848}
1849
1850Waitcnt decodeLoadcntDscnt(const IsaVersion &Version, unsigned LoadcntDscnt) {
1851 Waitcnt Decoded;
1852 Decoded.LoadCnt =
1853 unpackBits(LoadcntDscnt, getLoadcntStorecntBitShift(Version.Major),
1854 getLoadcntBitWidth(Version.Major));
1855 Decoded.DsCnt = unpackBits(LoadcntDscnt, getDscntBitShift(Version.Major),
1856 getDscntBitWidth(Version.Major));
1857 return Decoded;
1858}
1859
1860Waitcnt decodeStorecntDscnt(const IsaVersion &Version, unsigned StorecntDscnt) {
1861 Waitcnt Decoded;
1862 Decoded.StoreCnt =
1863 unpackBits(StorecntDscnt, getLoadcntStorecntBitShift(Version.Major),
1864 getStorecntBitWidth(Version.Major));
1865 Decoded.DsCnt = unpackBits(StorecntDscnt, getDscntBitShift(Version.Major),
1866 getDscntBitWidth(Version.Major));
1867 return Decoded;
1868}
1869
1870static unsigned encodeLoadcnt(const IsaVersion &Version, unsigned Waitcnt,
1871 unsigned Loadcnt) {
1872 return packBits(Loadcnt, Waitcnt, getLoadcntStorecntBitShift(Version.Major),
1873 getLoadcntBitWidth(Version.Major));
1874}
1875
1876static unsigned encodeStorecnt(const IsaVersion &Version, unsigned Waitcnt,
1877 unsigned Storecnt) {
1878 return packBits(Storecnt, Waitcnt, getLoadcntStorecntBitShift(Version.Major),
1879 getStorecntBitWidth(Version.Major));
1880}
1881
1882static unsigned encodeDscnt(const IsaVersion &Version, unsigned Waitcnt,
1883 unsigned Dscnt) {
1884 return packBits(Dscnt, Waitcnt, getDscntBitShift(Version.Major),
1885 getDscntBitWidth(Version.Major));
1886}
1887
1888static unsigned encodeLoadcntDscnt(const IsaVersion &Version, unsigned Loadcnt,
1889 unsigned Dscnt) {
1890 unsigned Waitcnt = getCombinedCountBitMask(Version, false);
1891 Waitcnt = encodeLoadcnt(Version, Waitcnt, Loadcnt);
1892 Waitcnt = encodeDscnt(Version, Waitcnt, Dscnt);
1893 return Waitcnt;
1894}
1895
1896unsigned encodeLoadcntDscnt(const IsaVersion &Version, const Waitcnt &Decoded) {
1897 return encodeLoadcntDscnt(Version, Decoded.LoadCnt, Decoded.DsCnt);
1898}
1899
1900static unsigned encodeStorecntDscnt(const IsaVersion &Version,
1901 unsigned Storecnt, unsigned Dscnt) {
1902 unsigned Waitcnt = getCombinedCountBitMask(Version, true);
1903 Waitcnt = encodeStorecnt(Version, Waitcnt, Storecnt);
1904 Waitcnt = encodeDscnt(Version, Waitcnt, Dscnt);
1905 return Waitcnt;
1906}
1907
1908unsigned encodeStorecntDscnt(const IsaVersion &Version,
1909 const Waitcnt &Decoded) {
1910 return encodeStorecntDscnt(Version, Decoded.StoreCnt, Decoded.DsCnt);
1911}
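// Illustrative sketch, not part of the upstream file: on gfx12 the combined
// S_WAIT_LOADCNT_DSCNT immediate keeps dscnt in bits [5:0] and loadcnt in
// bits [13:8], so for example
//   Waitcnt W = decodeLoadcntDscnt(getIsaVersion("gfx1200"), 0x0201);
//   // W.LoadCnt == 2, W.DsCnt == 1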
1912
1913//===----------------------------------------------------------------------===//
1914// Custom Operand Values
1915//===----------------------------------------------------------------------===//
1916
1918 int Size,
1919 const MCSubtargetInfo &STI) {
1920 unsigned Enc = 0;
1921 for (int Idx = 0; Idx < Size; ++Idx) {
1922 const auto &Op = Opr[Idx];
1923 if (Op.isSupported(STI))
1924 Enc |= Op.encode(Op.Default);
1925 }
1926 return Enc;
1927}
1928
1930 int Size, unsigned Code,
1931 bool &HasNonDefaultVal,
1932 const MCSubtargetInfo &STI) {
1933 unsigned UsedOprMask = 0;
1934 HasNonDefaultVal = false;
1935 for (int Idx = 0; Idx < Size; ++Idx) {
1936 const auto &Op = Opr[Idx];
1937 if (!Op.isSupported(STI))
1938 continue;
1939 UsedOprMask |= Op.getMask();
1940 unsigned Val = Op.decode(Code);
1941 if (!Op.isValid(Val))
1942 return false;
1943 HasNonDefaultVal |= (Val != Op.Default);
1944 }
1945 return (Code & ~UsedOprMask) == 0;
1946}
1947
1948static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size,
1949 unsigned Code, int &Idx, StringRef &Name,
1950 unsigned &Val, bool &IsDefault,
1951 const MCSubtargetInfo &STI) {
1952 while (Idx < Size) {
1953 const auto &Op = Opr[Idx++];
1954 if (Op.isSupported(STI)) {
1955 Name = Op.Name;
1956 Val = Op.decode(Code);
1957 IsDefault = (Val == Op.Default);
1958 return true;
1959 }
1960 }
1961
1962 return false;
1963}
1964
1965 static int encodeCustomOperandVal(const CustomOperandVal &Op,
1966                                   int64_t InputVal) {
1967 if (InputVal < 0 || InputVal > Op.Max)
1968 return OPR_VAL_INVALID;
1969 return Op.encode(InputVal);
1970}
1971
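// encodeCustomOperand returns the encoded field value on success. It returns
// OPR_ID_UNKNOWN when no table entry matches Name, OPR_ID_UNSUPPORTED when the
// field exists but is not available on this subtarget, OPR_ID_DUPLICATE when
// the field was already encoded (tracked via UsedOprMask), and OPR_VAL_INVALID
// when InputVal is out of range for the field.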
1972static int encodeCustomOperand(const CustomOperandVal *Opr, int Size,
1973 const StringRef Name, int64_t InputVal,
1974 unsigned &UsedOprMask,
1975 const MCSubtargetInfo &STI) {
1976 int InvalidId = OPR_ID_UNKNOWN;
1977 for (int Idx = 0; Idx < Size; ++Idx) {
1978 const auto &Op = Opr[Idx];
1979 if (Op.Name == Name) {
1980 if (!Op.isSupported(STI)) {
1981 InvalidId = OPR_ID_UNSUPPORTED;
1982 continue;
1983 }
1984 auto OprMask = Op.getMask();
1985 if (OprMask & UsedOprMask)
1986 return OPR_ID_DUPLICATE;
1987 UsedOprMask |= OprMask;
1988 return encodeCustomOperandVal(Op, InputVal);
1989 }
1990 }
1991 return InvalidId;
1992}
1993
1994//===----------------------------------------------------------------------===//
1995// DepCtr
1996//===----------------------------------------------------------------------===//
1997
1998namespace DepCtr {
1999
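// The default depctr encoding is computed once from the first subtarget that
// queries it and cached in a function-local static; it is the bitwise OR of
// the default values of all fields supported by that subtarget.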
2000 int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI) {
2001   static int Default = -1;
2002   if (Default == -1)
2003     Default = getDefaultCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, STI);
2004   return Default;
2005}
2006
2007bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal,
2008 const MCSubtargetInfo &STI) {
2009   return isSymbolicCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, Code,
2010                                          HasNonDefaultVal, STI);
2011}
2012
2013bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val,
2014 bool &IsDefault, const MCSubtargetInfo &STI) {
2015 return decodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Code, Id, Name, Val,
2016 IsDefault, STI);
2017}
2018
2019int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask,
2020 const MCSubtargetInfo &STI) {
2021 return encodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Name, Val, UsedOprMask,
2022 STI);
2023}
2024
2025unsigned decodeFieldVmVsrc(unsigned Encoded) {
2026 return unpackBits(Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());
2027}
2028
2029unsigned decodeFieldVaVdst(unsigned Encoded) {
2030 return unpackBits(Encoded, getVaVdstBitShift(), getVaVdstBitWidth());
2031}
2032
2033unsigned decodeFieldSaSdst(unsigned Encoded) {
2034 return unpackBits(Encoded, getSaSdstBitShift(), getSaSdstBitWidth());
2035}
2036
2037unsigned decodeFieldVaSdst(unsigned Encoded) {
2038 return unpackBits(Encoded, getVaSdstBitShift(), getVaSdstBitWidth());
2039}
2040
2041unsigned decodeFieldVaVcc(unsigned Encoded) {
2042 return unpackBits(Encoded, getVaVccBitShift(), getVaVccBitWidth());
2043}
2044
2045unsigned decodeFieldVaSsrc(unsigned Encoded) {
2046 return unpackBits(Encoded, getVaSsrcBitShift(), getVaSsrcBitWidth());
2047}
2048
2049unsigned decodeFieldHoldCnt(unsigned Encoded) {
2050 return unpackBits(Encoded, getHoldCntBitShift(), getHoldCntWidth());
2051}
2052
2053unsigned encodeFieldVmVsrc(unsigned Encoded, unsigned VmVsrc) {
2054 return packBits(VmVsrc, Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());
2055}
2056
2057unsigned encodeFieldVmVsrc(unsigned VmVsrc, const MCSubtargetInfo &STI) {
2058 unsigned Encoded = getDefaultDepCtrEncoding(STI);
2059 return encodeFieldVmVsrc(Encoded, VmVsrc);
2060}
2061
2062unsigned encodeFieldVaVdst(unsigned Encoded, unsigned VaVdst) {
2063 return packBits(VaVdst, Encoded, getVaVdstBitShift(), getVaVdstBitWidth());
2064}
2065
2066unsigned encodeFieldVaVdst(unsigned VaVdst, const MCSubtargetInfo &STI) {
2067 unsigned Encoded = getDefaultDepCtrEncoding(STI);
2068 return encodeFieldVaVdst(Encoded, VaVdst);
2069}
2070
2071unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst) {
2072 return packBits(SaSdst, Encoded, getSaSdstBitShift(), getSaSdstBitWidth());
2073}
2074
2075unsigned encodeFieldSaSdst(unsigned SaSdst, const MCSubtargetInfo &STI) {
2076 unsigned Encoded = getDefaultDepCtrEncoding(STI);
2077 return encodeFieldSaSdst(Encoded, SaSdst);
2078}
2079
2080unsigned encodeFieldVaSdst(unsigned Encoded, unsigned VaSdst) {
2081 return packBits(VaSdst, Encoded, getVaSdstBitShift(), getVaSdstBitWidth());
2082}
2083
2084unsigned encodeFieldVaSdst(unsigned VaSdst, const MCSubtargetInfo &STI) {
2085 unsigned Encoded = getDefaultDepCtrEncoding(STI);
2086 return encodeFieldVaSdst(Encoded, VaSdst);
2087}
2088
2089unsigned encodeFieldVaVcc(unsigned Encoded, unsigned VaVcc) {
2090 return packBits(VaVcc, Encoded, getVaVccBitShift(), getVaVccBitWidth());
2091}
2092
2093unsigned encodeFieldVaVcc(unsigned VaVcc, const MCSubtargetInfo &STI) {
2094 unsigned Encoded = getDefaultDepCtrEncoding(STI);
2095 return encodeFieldVaVcc(Encoded, VaVcc);
2096}
2097
2098unsigned encodeFieldVaSsrc(unsigned Encoded, unsigned VaSsrc) {
2099 return packBits(VaSsrc, Encoded, getVaSsrcBitShift(), getVaSsrcBitWidth());
2100}
2101
2102unsigned encodeFieldVaSsrc(unsigned VaSsrc, const MCSubtargetInfo &STI) {
2103 unsigned Encoded = getDefaultDepCtrEncoding(STI);
2104 return encodeFieldVaSsrc(Encoded, VaSsrc);
2105}
2106
2107unsigned encodeFieldHoldCnt(unsigned Encoded, unsigned HoldCnt) {
2108 return packBits(HoldCnt, Encoded, getHoldCntBitShift(), getHoldCntWidth());
2109}
2110
2111unsigned encodeFieldHoldCnt(unsigned HoldCnt, const MCSubtargetInfo &STI) {
2112 unsigned Encoded = getDefaultDepCtrEncoding(STI);
2113 return encodeFieldHoldCnt(Encoded, HoldCnt);
2114}
2115
2116} // namespace DepCtr
2117
2118//===----------------------------------------------------------------------===//
2119// exp tgt
2120//===----------------------------------------------------------------------===//
2121
2122namespace Exp {
2123
2124struct ExpTgt {
2125   StringLiteral Name;
2126   unsigned Tgt;
2127 unsigned MaxIndex;
2128};
2129
2130// clang-format off
2131static constexpr ExpTgt ExpTgtInfo[] = {
2132 {{"null"}, ET_NULL, ET_NULL_MAX_IDX},
2133 {{"mrtz"}, ET_MRTZ, ET_MRTZ_MAX_IDX},
2134 {{"prim"}, ET_PRIM, ET_PRIM_MAX_IDX},
2135 {{"mrt"}, ET_MRT0, ET_MRT_MAX_IDX},
2136 {{"pos"}, ET_POS0, ET_POS_MAX_IDX},
2137 {{"dual_src_blend"},ET_DUAL_SRC_BLEND0, ET_DUAL_SRC_BLEND_MAX_IDX},
2138 {{"param"}, ET_PARAM0, ET_PARAM_MAX_IDX},
2139};
2140// clang-format on
2141
2142bool getTgtName(unsigned Id, StringRef &Name, int &Index) {
2143 for (const ExpTgt &Val : ExpTgtInfo) {
2144 if (Val.Tgt <= Id && Id <= Val.Tgt + Val.MaxIndex) {
2145 Index = (Val.MaxIndex == 0) ? -1 : (Id - Val.Tgt);
2146 Name = Val.Name;
2147 return true;
2148 }
2149 }
2150 return false;
2151}
2152
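// Maps a symbolic export target such as "pos3" to its id (ET_POS0 + 3).
// Targets whose MaxIndex is 0 must match the bare name exactly, and zero-padded
// indices such as "mrt07" are rejected by the leading-zero check below.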
2153unsigned getTgtId(const StringRef Name) {
2154
2155 for (const ExpTgt &Val : ExpTgtInfo) {
2156 if (Val.MaxIndex == 0 && Name == Val.Name)
2157 return Val.Tgt;
2158
2159 if (Val.MaxIndex > 0 && Name.starts_with(Val.Name)) {
2160 StringRef Suffix = Name.drop_front(Val.Name.size());
2161
2162 unsigned Id;
2163 if (Suffix.getAsInteger(10, Id) || Id > Val.MaxIndex)
2164 return ET_INVALID;
2165
2166 // Disable leading zeroes
2167 if (Suffix.size() > 1 && Suffix[0] == '0')
2168 return ET_INVALID;
2169
2170 return Val.Tgt + Id;
2171 }
2172 }
2173 return ET_INVALID;
2174}
2175
2176bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI) {
2177 switch (Id) {
2178 case ET_NULL:
2179 return !isGFX11Plus(STI);
2180 case ET_POS4:
2181 case ET_PRIM:
2182 return isGFX10Plus(STI);
2183 case ET_DUAL_SRC_BLEND0:
2184 case ET_DUAL_SRC_BLEND1:
2185 return isGFX11Plus(STI);
2186 default:
2187 if (Id >= ET_PARAM0 && Id <= ET_PARAM31)
2188 return !isGFX11Plus(STI);
2189 return true;
2190 }
2191}
2192
2193} // namespace Exp
2194
2195//===----------------------------------------------------------------------===//
2196// MTBUF Format
2197//===----------------------------------------------------------------------===//
2198
2199namespace MTBUFFormat {
2200
2201int64_t getDfmt(const StringRef Name) {
2202 for (int Id = DFMT_MIN; Id <= DFMT_MAX; ++Id) {
2203 if (Name == DfmtSymbolic[Id])
2204 return Id;
2205 }
2206 return DFMT_UNDEF;
2207}
2208
2209 StringRef getDfmtName(unsigned Id) {
2210   assert(Id <= DFMT_MAX);
2211 return DfmtSymbolic[Id];
2212}
2213
2214 static StringLiteral const *getNfmtLookupTable(const MCSubtargetInfo &STI) {
2215   if (isSI(STI) || isCI(STI))
2216 return NfmtSymbolicSICI;
2217 if (isVI(STI) || isGFX9(STI))
2218 return NfmtSymbolicVI;
2219 return NfmtSymbolicGFX10;
2220}
2221
2222int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI) {
2223 const auto *lookupTable = getNfmtLookupTable(STI);
2224 for (int Id = NFMT_MIN; Id <= NFMT_MAX; ++Id) {
2225 if (Name == lookupTable[Id])
2226 return Id;
2227 }
2228 return NFMT_UNDEF;
2229}
2230
2231StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI) {
2232 assert(Id <= NFMT_MAX);
2233 return getNfmtLookupTable(STI)[Id];
2234}
2235
2236bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI) {
2237 unsigned Dfmt;
2238 unsigned Nfmt;
2239 decodeDfmtNfmt(Id, Dfmt, Nfmt);
2240 return isValidNfmt(Nfmt, STI);
2241}
2242
2243bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI) {
2244 return !getNfmtName(Id, STI).empty();
2245}
2246
2247int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt) {
2248 return (Dfmt << DFMT_SHIFT) | (Nfmt << NFMT_SHIFT);
2249}
2250
2251void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt) {
2252 Dfmt = (Format >> DFMT_SHIFT) & DFMT_MASK;
2253 Nfmt = (Format >> NFMT_SHIFT) & NFMT_MASK;
2254}
2255
2256int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI) {
2257 if (isGFX11Plus(STI)) {
2258 for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
2259 if (Name == UfmtSymbolicGFX11[Id])
2260 return Id;
2261 }
2262 } else {
2263 for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
2264 if (Name == UfmtSymbolicGFX10[Id])
2265 return Id;
2266 }
2267 }
2268 return UFMT_UNDEF;
2269}
2270
2271 StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI) {
2272   if (isValidUnifiedFormat(Id, STI))
2273 return isGFX10(STI) ? UfmtSymbolicGFX10[Id] : UfmtSymbolicGFX11[Id];
2274 return "";
2275}
2276
2277bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI) {
2278 return isGFX10(STI) ? Id <= UfmtGFX10::UFMT_LAST : Id <= UfmtGFX11::UFMT_LAST;
2279}
2280
2281int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt,
2282 const MCSubtargetInfo &STI) {
2283 int64_t Fmt = encodeDfmtNfmt(Dfmt, Nfmt);
2284 if (isGFX11Plus(STI)) {
2285 for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
2286 if (Fmt == DfmtNfmt2UFmtGFX11[Id])
2287 return Id;
2288 }
2289 } else {
2290 for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
2291 if (Fmt == DfmtNfmt2UFmtGFX10[Id])
2292 return Id;
2293 }
2294 }
2295 return UFMT_UNDEF;
2296}
2297
2298bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI) {
2299 return isGFX10Plus(STI) ? (Val <= UFMT_MAX) : (Val <= DFMT_NFMT_MAX);
2300}
2301
2302 unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI) {
2303   if (isGFX10Plus(STI))
2304 return UFMT_DEFAULT;
2305 return DFMT_NFMT_DEFAULT;
2306}
2307
2308} // namespace MTBUFFormat
2309
2310//===----------------------------------------------------------------------===//
2311// SendMsg
2312//===----------------------------------------------------------------------===//
2313
2314namespace SendMsg {
2315
2315
2316 static uint64_t getMsgIdMask(const MCSubtargetInfo &STI) {
2317   return isGFX11Plus(STI) ? ID_MASK_GFX11Plus_ : ID_MASK_PreGFX11_;
2318 }
2319
2320bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI) {
2321 return (MsgId & ~(getMsgIdMask(STI))) == 0;
2322}
2323
2324bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI,
2325 bool Strict) {
2326 assert(isValidMsgId(MsgId, STI));
2327
2328 if (!Strict)
2329 return 0 <= OpId && isUInt<OP_WIDTH_>(OpId);
2330
2331 if (msgRequiresOp(MsgId, STI)) {
2332 if (MsgId == ID_GS_PreGFX11 && OpId == OP_GS_NOP)
2333 return false;
2334
2335 return !getMsgOpName(MsgId, OpId, STI).empty();
2336 }
2337
2338 return OpId == OP_NONE_;
2339}
2340
2341bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId,
2342 const MCSubtargetInfo &STI, bool Strict) {
2343 assert(isValidMsgOp(MsgId, OpId, STI, Strict));
2344
2345 if (!Strict)
2346     return 0 <= StreamId && isUInt<STREAM_ID_WIDTH_>(StreamId);
2347
2348 if (!isGFX11Plus(STI)) {
2349 switch (MsgId) {
2350 case ID_GS_PreGFX11:
2351       return STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_;
2352     case ID_GS_DONE_PreGFX11:
2353       return (OpId == OP_GS_NOP)
2354                  ? (StreamId == STREAM_ID_NONE_)
2355                  : (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_);
2356     }
2357 }
2358 return StreamId == STREAM_ID_NONE_;
2359}
2360
2361bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI) {
2362 return MsgId == ID_SYSMSG ||
2363 (!isGFX11Plus(STI) &&
2364 (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11));
2365}
2366
2367bool msgSupportsStream(int64_t MsgId, int64_t OpId,
2368 const MCSubtargetInfo &STI) {
2369 return !isGFX11Plus(STI) &&
2370 (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11) &&
2371 OpId != OP_GS_NOP;
2372}
2373
2374void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId,
2375 uint16_t &StreamId, const MCSubtargetInfo &STI) {
2376 MsgId = Val & getMsgIdMask(STI);
2377 if (isGFX11Plus(STI)) {
2378 OpId = 0;
2379 StreamId = 0;
2380 } else {
2381 OpId = (Val & OP_MASK_) >> OP_SHIFT_;
2382     StreamId = (Val & STREAM_ID_MASK_) >> STREAM_ID_SHIFT_;
2383   }
2384}
2385
2386 uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId) {
2387   return MsgId | (OpId << OP_SHIFT_) | (StreamId << STREAM_ID_SHIFT_);
2388}
2389
2390} // namespace SendMsg
2391
2392//===----------------------------------------------------------------------===//
2393//
2394//===----------------------------------------------------------------------===//
2395
2396 unsigned getInitialPSInputAddr(const Function &F) {
2397   return F.getFnAttributeAsParsedInteger("InitialPSInputAddr", 0);
2398}
2399
2400 bool psHasColorExports(const Function &F) {
2401   // As a safe default always respond as if PS has color exports.
2402 return F.getFnAttributeAsParsedInteger(
2403 "amdgpu-color-export",
2404 F.getCallingConv() == CallingConv::AMDGPU_PS ? 1 : 0) != 0;
2405}
2406
2407 bool psHasDepthExport(const Function &F) {
2408   return F.getFnAttributeAsParsedInteger("amdgpu-depth-export", 0) != 0;
2409}
2410
2411 unsigned getDynamicVGPRBlockSize(const Function &F) {
2412   unsigned BlockSize =
2413 F.getFnAttributeAsParsedInteger("amdgpu-dynamic-vgpr-block-size", 0);
2414
2415 if (BlockSize == 16 || BlockSize == 32)
2416 return BlockSize;
2417
2418 return 0;
2419}
2420
2421bool hasXNACK(const MCSubtargetInfo &STI) {
2422 return STI.hasFeature(AMDGPU::FeatureXNACK);
2423}
2424
2425bool hasSRAMECC(const MCSubtargetInfo &STI) {
2426 return STI.hasFeature(AMDGPU::FeatureSRAMECC);
2427}
2428
2429 bool hasMIMG_R128(const MCSubtargetInfo &STI) {
2430   return STI.hasFeature(AMDGPU::FeatureMIMG_R128) &&
2431 !STI.hasFeature(AMDGPU::FeatureR128A16);
2432}
2433
2434bool hasA16(const MCSubtargetInfo &STI) {
2435 return STI.hasFeature(AMDGPU::FeatureA16);
2436}
2437
2438bool hasG16(const MCSubtargetInfo &STI) {
2439 return STI.hasFeature(AMDGPU::FeatureG16);
2440}
2441
2442 bool hasPackedD16(const MCSubtargetInfo &STI) {
2443   return !STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem) && !isCI(STI) &&
2444 !isSI(STI);
2445}
2446
2447bool hasGDS(const MCSubtargetInfo &STI) {
2448 return STI.hasFeature(AMDGPU::FeatureGDS);
2449}
2450
2451unsigned getNSAMaxSize(const MCSubtargetInfo &STI, bool HasSampler) {
2452 auto Version = getIsaVersion(STI.getCPU());
2453 if (Version.Major == 10)
2454 return Version.Minor >= 3 ? 13 : 5;
2455 if (Version.Major == 11)
2456 return 5;
2457 if (Version.Major >= 12)
2458 return HasSampler ? 4 : 5;
2459 return 0;
2460}
2461
2463 if (isGFX1250(STI))
2464 return 32;
2465 return 16;
2466}
2467
2468bool isSI(const MCSubtargetInfo &STI) {
2469 return STI.hasFeature(AMDGPU::FeatureSouthernIslands);
2470}
2471
2472bool isCI(const MCSubtargetInfo &STI) {
2473 return STI.hasFeature(AMDGPU::FeatureSeaIslands);
2474}
2475
2476bool isVI(const MCSubtargetInfo &STI) {
2477 return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);
2478}
2479
2480bool isGFX9(const MCSubtargetInfo &STI) {
2481 return STI.hasFeature(AMDGPU::FeatureGFX9);
2482}
2483
2484 bool isGFX9_GFX10(const MCSubtargetInfo &STI) {
2485   return isGFX9(STI) || isGFX10(STI);
2486}
2487
2488 bool isGFX9_GFX10_GFX11(const MCSubtargetInfo &STI) {
2489   return isGFX9(STI) || isGFX10(STI) || isGFX11(STI);
2490}
2491
2492 bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI) {
2493   return isVI(STI) || isGFX9(STI) || isGFX10(STI);
2494}
2495
2496bool isGFX8Plus(const MCSubtargetInfo &STI) {
2497 return isVI(STI) || isGFX9Plus(STI);
2498}
2499
2500bool isGFX9Plus(const MCSubtargetInfo &STI) {
2501 return isGFX9(STI) || isGFX10Plus(STI);
2502}
2503
2504bool isNotGFX9Plus(const MCSubtargetInfo &STI) { return !isGFX9Plus(STI); }
2505
2506bool isGFX10(const MCSubtargetInfo &STI) {
2507 return STI.hasFeature(AMDGPU::FeatureGFX10);
2508}
2509
2510 bool isGFX10_GFX11(const MCSubtargetInfo &STI) {
2511   return isGFX10(STI) || isGFX11(STI);
2512}
2513
2514 bool isGFX10Plus(const MCSubtargetInfo &STI) {
2515   return isGFX10(STI) || isGFX11Plus(STI);
2516}
2517
2518bool isGFX11(const MCSubtargetInfo &STI) {
2519 return STI.hasFeature(AMDGPU::FeatureGFX11);
2520}
2521
2522 bool isGFX11Plus(const MCSubtargetInfo &STI) {
2523   return isGFX11(STI) || isGFX12Plus(STI);
2524}
2525
2526bool isGFX12(const MCSubtargetInfo &STI) {
2527 return STI.getFeatureBits()[AMDGPU::FeatureGFX12];
2528}
2529
2530bool isGFX12Plus(const MCSubtargetInfo &STI) { return isGFX12(STI); }
2531
2532bool isNotGFX12Plus(const MCSubtargetInfo &STI) { return !isGFX12Plus(STI); }
2533
2534bool isGFX1250(const MCSubtargetInfo &STI) {
2535 return STI.getFeatureBits()[AMDGPU::FeatureGFX1250Insts];
2536}
2537
2539 if (isGFX1250(STI))
2540 return false;
2541 return isGFX10Plus(STI);
2542}
2543
2544bool isNotGFX11Plus(const MCSubtargetInfo &STI) { return !isGFX11Plus(STI); }
2545
2546 bool isNotGFX10Plus(const MCSubtargetInfo &STI) {
2547   return isSI(STI) || isCI(STI) || isVI(STI) || isGFX9(STI);
2548}
2549
2550 bool isGFX10Before1030(const MCSubtargetInfo &STI) {
2551   return isGFX10(STI) && !AMDGPU::isGFX10_BEncoding(STI);
2552}
2553
2554 bool isGCN3Encoding(const MCSubtargetInfo &STI) {
2555   return STI.hasFeature(AMDGPU::FeatureGCN3Encoding);
2556}
2557
2558 bool isGFX10_AEncoding(const MCSubtargetInfo &STI) {
2559   return STI.hasFeature(AMDGPU::FeatureGFX10_AEncoding);
2560}
2561
2562 bool isGFX10_BEncoding(const MCSubtargetInfo &STI) {
2563   return STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding);
2564}
2565
2566 bool hasGFX10_3Insts(const MCSubtargetInfo &STI) {
2567   return STI.hasFeature(AMDGPU::FeatureGFX10_3Insts);
2568}
2569
2570 bool isGFX10_3_GFX11(const MCSubtargetInfo &STI) {
2571   return isGFX10_BEncoding(STI) && !isGFX12Plus(STI);
2572}
2573
2574bool isGFX90A(const MCSubtargetInfo &STI) {
2575 return STI.hasFeature(AMDGPU::FeatureGFX90AInsts);
2576}
2577
2578bool isGFX940(const MCSubtargetInfo &STI) {
2579 return STI.hasFeature(AMDGPU::FeatureGFX940Insts);
2580}
2581
2582 bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI) {
2583   return STI.hasFeature(AMDGPU::FeatureArchitectedFlatScratch);
2584}
2585
2586 bool hasMAIInsts(const MCSubtargetInfo &STI) {
2587   return STI.hasFeature(AMDGPU::FeatureMAIInsts);
2588}
2589
2590bool hasVOPD(const MCSubtargetInfo &STI) {
2591 return STI.hasFeature(AMDGPU::FeatureVOPD);
2592}
2593
2594 bool hasDPPSrc1SGPR(const MCSubtargetInfo &STI) {
2595   return STI.hasFeature(AMDGPU::FeatureDPPSrc1SGPR);
2596}
2597
2598 bool hasKernargPreload(const MCSubtargetInfo &STI) {
2599   return STI.hasFeature(AMDGPU::FeatureKernargPreload);
2600}
2601
2602int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR,
2603 int32_t ArgNumVGPR) {
2604 if (has90AInsts && ArgNumAGPR)
2605 return alignTo(ArgNumVGPR, 4) + ArgNumAGPR;
2606 return std::max(ArgNumVGPR, ArgNumAGPR);
2607}
2608
2609 bool isSGPR(MCRegister Reg, const MCRegisterInfo *TRI) {
2610   const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
2611 const MCRegister FirstSubReg = TRI->getSubReg(Reg, AMDGPU::sub0);
2612 return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
2613 Reg == AMDGPU::SCC;
2614}
2615
2616 bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI) {
2617   return MRI.getEncodingValue(Reg) & AMDGPU::HWEncoding::IS_HI16;
2618}
2619
2620#define MAP_REG2REG \
2621 using namespace AMDGPU; \
2622 switch (Reg.id()) { \
2623 default: \
2624 return Reg; \
2625 CASE_CI_VI(FLAT_SCR) \
2626 CASE_CI_VI(FLAT_SCR_LO) \
2627 CASE_CI_VI(FLAT_SCR_HI) \
2628 CASE_VI_GFX9PLUS(TTMP0) \
2629 CASE_VI_GFX9PLUS(TTMP1) \
2630 CASE_VI_GFX9PLUS(TTMP2) \
2631 CASE_VI_GFX9PLUS(TTMP3) \
2632 CASE_VI_GFX9PLUS(TTMP4) \
2633 CASE_VI_GFX9PLUS(TTMP5) \
2634 CASE_VI_GFX9PLUS(TTMP6) \
2635 CASE_VI_GFX9PLUS(TTMP7) \
2636 CASE_VI_GFX9PLUS(TTMP8) \
2637 CASE_VI_GFX9PLUS(TTMP9) \
2638 CASE_VI_GFX9PLUS(TTMP10) \
2639 CASE_VI_GFX9PLUS(TTMP11) \
2640 CASE_VI_GFX9PLUS(TTMP12) \
2641 CASE_VI_GFX9PLUS(TTMP13) \
2642 CASE_VI_GFX9PLUS(TTMP14) \
2643 CASE_VI_GFX9PLUS(TTMP15) \
2644 CASE_VI_GFX9PLUS(TTMP0_TTMP1) \
2645 CASE_VI_GFX9PLUS(TTMP2_TTMP3) \
2646 CASE_VI_GFX9PLUS(TTMP4_TTMP5) \
2647 CASE_VI_GFX9PLUS(TTMP6_TTMP7) \
2648 CASE_VI_GFX9PLUS(TTMP8_TTMP9) \
2649 CASE_VI_GFX9PLUS(TTMP10_TTMP11) \
2650 CASE_VI_GFX9PLUS(TTMP12_TTMP13) \
2651 CASE_VI_GFX9PLUS(TTMP14_TTMP15) \
2652 CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \
2653 CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \
2654 CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \
2655 CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \
2656 CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
2657 CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
2658 CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
2659 CASE_VI_GFX9PLUS( \
2660 TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
2661 CASE_GFXPRE11_GFX11PLUS(M0) \
2662 CASE_GFXPRE11_GFX11PLUS(SGPR_NULL) \
2663 CASE_GFXPRE11_GFX11PLUS_TO(SGPR_NULL64, SGPR_NULL) \
2664 }
2665
2666#define CASE_CI_VI(node) \
2667 assert(!isSI(STI)); \
2668 case node: \
2669 return isCI(STI) ? node##_ci : node##_vi;
2670
2671#define CASE_VI_GFX9PLUS(node) \
2672 case node: \
2673 return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi;
2674
2675#define CASE_GFXPRE11_GFX11PLUS(node) \
2676 case node: \
2677 return isGFX11Plus(STI) ? node##_gfx11plus : node##_gfxpre11;
2678
2679#define CASE_GFXPRE11_GFX11PLUS_TO(node, result) \
2680 case node: \
2681 return isGFX11Plus(STI) ? result##_gfx11plus : result##_gfxpre11;
2682
2683 MCRegister getMCReg(MCRegister Reg, const MCSubtargetInfo &STI) {
2684   if (STI.getTargetTriple().getArch() == Triple::r600)
2685     return Reg;
2686   MAP_REG2REG
2687 }
2688
2689#undef CASE_CI_VI
2690#undef CASE_VI_GFX9PLUS
2691#undef CASE_GFXPRE11_GFX11PLUS
2692#undef CASE_GFXPRE11_GFX11PLUS_TO
2693
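// The CASE_* macros are redefined below so that the same MAP_REG2REG switch
// body now maps the per-generation registers (..._ci, ..._vi, ..._gfx9plus,
// ..._gfxpre11, ..._gfx11plus) back to their generic pseudo registers; this
// reverse mapping is used by mc2PseudoReg.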
2694#define CASE_CI_VI(node) \
2695 case node##_ci: \
2696 case node##_vi: \
2697 return node;
2698#define CASE_VI_GFX9PLUS(node) \
2699 case node##_vi: \
2700 case node##_gfx9plus: \
2701 return node;
2702#define CASE_GFXPRE11_GFX11PLUS(node) \
2703 case node##_gfx11plus: \
2704 case node##_gfxpre11: \
2705 return node;
2706#define CASE_GFXPRE11_GFX11PLUS_TO(node, result)
2707
2708 MCRegister mc2PseudoReg(MCRegister Reg) { MAP_REG2REG }
2709
2710 bool isInlineValue(MCRegister Reg) {
2711 switch (Reg.id()) {
2712 case AMDGPU::SRC_SHARED_BASE_LO:
2713 case AMDGPU::SRC_SHARED_BASE:
2714 case AMDGPU::SRC_SHARED_LIMIT_LO:
2715 case AMDGPU::SRC_SHARED_LIMIT:
2716 case AMDGPU::SRC_PRIVATE_BASE_LO:
2717 case AMDGPU::SRC_PRIVATE_BASE:
2718 case AMDGPU::SRC_PRIVATE_LIMIT_LO:
2719 case AMDGPU::SRC_PRIVATE_LIMIT:
2720 case AMDGPU::SRC_FLAT_SCRATCH_BASE_LO:
2721 case AMDGPU::SRC_FLAT_SCRATCH_BASE_HI:
2722 case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
2723 return true;
2724 case AMDGPU::SRC_VCCZ:
2725 case AMDGPU::SRC_EXECZ:
2726 case AMDGPU::SRC_SCC:
2727 return true;
2728 case AMDGPU::SGPR_NULL:
2729 return true;
2730 default:
2731 return false;
2732 }
2733}
2734
2735#undef CASE_CI_VI
2736#undef CASE_VI_GFX9PLUS
2737#undef CASE_GFXPRE11_GFX11PLUS
2738#undef CASE_GFXPRE11_GFX11PLUS_TO
2739#undef MAP_REG2REG
2740
2741bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2742 assert(OpNo < Desc.NumOperands);
2743 unsigned OpType = Desc.operands()[OpNo].OperandType;
2744 return OpType >= AMDGPU::OPERAND_KIMM_FIRST &&
2745 OpType <= AMDGPU::OPERAND_KIMM_LAST;
2746}
2747
2748bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2749 assert(OpNo < Desc.NumOperands);
2750 unsigned OpType = Desc.operands()[OpNo].OperandType;
2751 switch (OpType) {
2764 return true;
2765 default:
2766 return false;
2767 }
2768}
2769
2770bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2771 assert(OpNo < Desc.NumOperands);
2772 unsigned OpType = Desc.operands()[OpNo].OperandType;
2773 return (OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
2774           OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST) ||
2775          (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST &&
2776           OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST);
2777 }
2778
2779// Avoid using MCRegisterClass::getSize, since that function will go away
2780// (move from MC* level to Target* level). Return size in bits.
2781unsigned getRegBitWidth(unsigned RCID) {
2782 switch (RCID) {
2783 case AMDGPU::VGPR_16RegClassID:
2784 case AMDGPU::VGPR_16_Lo128RegClassID:
2785 case AMDGPU::SGPR_LO16RegClassID:
2786 case AMDGPU::AGPR_LO16RegClassID:
2787 return 16;
2788 case AMDGPU::SGPR_32RegClassID:
2789 case AMDGPU::VGPR_32RegClassID:
2790 case AMDGPU::VGPR_32_Lo256RegClassID:
2791 case AMDGPU::VRegOrLds_32RegClassID:
2792 case AMDGPU::AGPR_32RegClassID:
2793 case AMDGPU::VS_32RegClassID:
2794 case AMDGPU::AV_32RegClassID:
2795 case AMDGPU::SReg_32RegClassID:
2796 case AMDGPU::SReg_32_XM0RegClassID:
2797 case AMDGPU::SRegOrLds_32RegClassID:
2798 return 32;
2799 case AMDGPU::SGPR_64RegClassID:
2800 case AMDGPU::VS_64RegClassID:
2801 case AMDGPU::SReg_64RegClassID:
2802 case AMDGPU::VReg_64RegClassID:
2803 case AMDGPU::AReg_64RegClassID:
2804 case AMDGPU::SReg_64_XEXECRegClassID:
2805 case AMDGPU::VReg_64_Align2RegClassID:
2806 case AMDGPU::AReg_64_Align2RegClassID:
2807 case AMDGPU::AV_64RegClassID:
2808 case AMDGPU::AV_64_Align2RegClassID:
2809 case AMDGPU::VReg_64_Lo256_Align2RegClassID:
2810 case AMDGPU::VS_64_Lo256RegClassID:
2811 return 64;
2812 case AMDGPU::SGPR_96RegClassID:
2813 case AMDGPU::SReg_96RegClassID:
2814 case AMDGPU::VReg_96RegClassID:
2815 case AMDGPU::AReg_96RegClassID:
2816 case AMDGPU::VReg_96_Align2RegClassID:
2817 case AMDGPU::AReg_96_Align2RegClassID:
2818 case AMDGPU::AV_96RegClassID:
2819 case AMDGPU::AV_96_Align2RegClassID:
2820 case AMDGPU::VReg_96_Lo256_Align2RegClassID:
2821 return 96;
2822 case AMDGPU::SGPR_128RegClassID:
2823 case AMDGPU::SReg_128RegClassID:
2824 case AMDGPU::VReg_128RegClassID:
2825 case AMDGPU::AReg_128RegClassID:
2826 case AMDGPU::VReg_128_Align2RegClassID:
2827 case AMDGPU::AReg_128_Align2RegClassID:
2828 case AMDGPU::AV_128RegClassID:
2829 case AMDGPU::AV_128_Align2RegClassID:
2830 case AMDGPU::SReg_128_XNULLRegClassID:
2831 case AMDGPU::VReg_128_Lo256_Align2RegClassID:
2832 return 128;
2833 case AMDGPU::SGPR_160RegClassID:
2834 case AMDGPU::SReg_160RegClassID:
2835 case AMDGPU::VReg_160RegClassID:
2836 case AMDGPU::AReg_160RegClassID:
2837 case AMDGPU::VReg_160_Align2RegClassID:
2838 case AMDGPU::AReg_160_Align2RegClassID:
2839 case AMDGPU::AV_160RegClassID:
2840 case AMDGPU::AV_160_Align2RegClassID:
2841 case AMDGPU::VReg_160_Lo256_Align2RegClassID:
2842 return 160;
2843 case AMDGPU::SGPR_192RegClassID:
2844 case AMDGPU::SReg_192RegClassID:
2845 case AMDGPU::VReg_192RegClassID:
2846 case AMDGPU::AReg_192RegClassID:
2847 case AMDGPU::VReg_192_Align2RegClassID:
2848 case AMDGPU::AReg_192_Align2RegClassID:
2849 case AMDGPU::AV_192RegClassID:
2850 case AMDGPU::AV_192_Align2RegClassID:
2851 case AMDGPU::VReg_192_Lo256_Align2RegClassID:
2852 return 192;
2853 case AMDGPU::SGPR_224RegClassID:
2854 case AMDGPU::SReg_224RegClassID:
2855 case AMDGPU::VReg_224RegClassID:
2856 case AMDGPU::AReg_224RegClassID:
2857 case AMDGPU::VReg_224_Align2RegClassID:
2858 case AMDGPU::AReg_224_Align2RegClassID:
2859 case AMDGPU::AV_224RegClassID:
2860 case AMDGPU::AV_224_Align2RegClassID:
2861 case AMDGPU::VReg_224_Lo256_Align2RegClassID:
2862 return 224;
2863 case AMDGPU::SGPR_256RegClassID:
2864 case AMDGPU::SReg_256RegClassID:
2865 case AMDGPU::VReg_256RegClassID:
2866 case AMDGPU::AReg_256RegClassID:
2867 case AMDGPU::VReg_256_Align2RegClassID:
2868 case AMDGPU::AReg_256_Align2RegClassID:
2869 case AMDGPU::AV_256RegClassID:
2870 case AMDGPU::AV_256_Align2RegClassID:
2871 case AMDGPU::SReg_256_XNULLRegClassID:
2872 case AMDGPU::VReg_256_Lo256_Align2RegClassID:
2873 return 256;
2874 case AMDGPU::SGPR_288RegClassID:
2875 case AMDGPU::SReg_288RegClassID:
2876 case AMDGPU::VReg_288RegClassID:
2877 case AMDGPU::AReg_288RegClassID:
2878 case AMDGPU::VReg_288_Align2RegClassID:
2879 case AMDGPU::AReg_288_Align2RegClassID:
2880 case AMDGPU::AV_288RegClassID:
2881 case AMDGPU::AV_288_Align2RegClassID:
2882 case AMDGPU::VReg_288_Lo256_Align2RegClassID:
2883 return 288;
2884 case AMDGPU::SGPR_320RegClassID:
2885 case AMDGPU::SReg_320RegClassID:
2886 case AMDGPU::VReg_320RegClassID:
2887 case AMDGPU::AReg_320RegClassID:
2888 case AMDGPU::VReg_320_Align2RegClassID:
2889 case AMDGPU::AReg_320_Align2RegClassID:
2890 case AMDGPU::AV_320RegClassID:
2891 case AMDGPU::AV_320_Align2RegClassID:
2892 case AMDGPU::VReg_320_Lo256_Align2RegClassID:
2893 return 320;
2894 case AMDGPU::SGPR_352RegClassID:
2895 case AMDGPU::SReg_352RegClassID:
2896 case AMDGPU::VReg_352RegClassID:
2897 case AMDGPU::AReg_352RegClassID:
2898 case AMDGPU::VReg_352_Align2RegClassID:
2899 case AMDGPU::AReg_352_Align2RegClassID:
2900 case AMDGPU::AV_352RegClassID:
2901 case AMDGPU::AV_352_Align2RegClassID:
2902 case AMDGPU::VReg_352_Lo256_Align2RegClassID:
2903 return 352;
2904 case AMDGPU::SGPR_384RegClassID:
2905 case AMDGPU::SReg_384RegClassID:
2906 case AMDGPU::VReg_384RegClassID:
2907 case AMDGPU::AReg_384RegClassID:
2908 case AMDGPU::VReg_384_Align2RegClassID:
2909 case AMDGPU::AReg_384_Align2RegClassID:
2910 case AMDGPU::AV_384RegClassID:
2911 case AMDGPU::AV_384_Align2RegClassID:
2912 case AMDGPU::VReg_384_Lo256_Align2RegClassID:
2913 return 384;
2914 case AMDGPU::SGPR_512RegClassID:
2915 case AMDGPU::SReg_512RegClassID:
2916 case AMDGPU::VReg_512RegClassID:
2917 case AMDGPU::AReg_512RegClassID:
2918 case AMDGPU::VReg_512_Align2RegClassID:
2919 case AMDGPU::AReg_512_Align2RegClassID:
2920 case AMDGPU::AV_512RegClassID:
2921 case AMDGPU::AV_512_Align2RegClassID:
2922 case AMDGPU::VReg_512_Lo256_Align2RegClassID:
2923 return 512;
2924 case AMDGPU::SGPR_1024RegClassID:
2925 case AMDGPU::SReg_1024RegClassID:
2926 case AMDGPU::VReg_1024RegClassID:
2927 case AMDGPU::AReg_1024RegClassID:
2928 case AMDGPU::VReg_1024_Align2RegClassID:
2929 case AMDGPU::AReg_1024_Align2RegClassID:
2930 case AMDGPU::AV_1024RegClassID:
2931 case AMDGPU::AV_1024_Align2RegClassID:
2932 case AMDGPU::VReg_1024_Lo256_Align2RegClassID:
2933 return 1024;
2934 default:
2935 llvm_unreachable("Unexpected register class");
2936 }
2937}
2938
2939unsigned getRegBitWidth(const MCRegisterClass &RC) {
2940 return getRegBitWidth(RC.getID());
2941}
2942
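// In isInlinableLiteral64/isInlinableLiteral32 below, 0x3fc45f306dc9c882 and
// 0x3e22f983 are the double- and single-precision bit patterns of
// 1.0 / (2.0 * pi); they are accepted as inline constants only when the
// subtarget reports HasInv2Pi.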
2943bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
2944   if (isInlinableIntLiteral(Literal))
2945     return true;
2946
2947 uint64_t Val = static_cast<uint64_t>(Literal);
2948 return (Val == llvm::bit_cast<uint64_t>(0.0)) ||
2949 (Val == llvm::bit_cast<uint64_t>(1.0)) ||
2950 (Val == llvm::bit_cast<uint64_t>(-1.0)) ||
2951 (Val == llvm::bit_cast<uint64_t>(0.5)) ||
2952 (Val == llvm::bit_cast<uint64_t>(-0.5)) ||
2953 (Val == llvm::bit_cast<uint64_t>(2.0)) ||
2954 (Val == llvm::bit_cast<uint64_t>(-2.0)) ||
2955 (Val == llvm::bit_cast<uint64_t>(4.0)) ||
2956 (Val == llvm::bit_cast<uint64_t>(-4.0)) ||
2957 (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
2958}
2959
2960bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
2961   if (isInlinableIntLiteral(Literal))
2962     return true;
2963
2964 // The actual type of the operand does not seem to matter as long
2965 // as the bits match one of the inline immediate values. For example:
2966 //
2967 // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
2968 // so it is a legal inline immediate.
2969 //
2970 // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
2971 // floating-point, so it is a legal inline immediate.
2972
2973 uint32_t Val = static_cast<uint32_t>(Literal);
2974 return (Val == llvm::bit_cast<uint32_t>(0.0f)) ||
2975 (Val == llvm::bit_cast<uint32_t>(1.0f)) ||
2976 (Val == llvm::bit_cast<uint32_t>(-1.0f)) ||
2977 (Val == llvm::bit_cast<uint32_t>(0.5f)) ||
2978 (Val == llvm::bit_cast<uint32_t>(-0.5f)) ||
2979 (Val == llvm::bit_cast<uint32_t>(2.0f)) ||
2980 (Val == llvm::bit_cast<uint32_t>(-2.0f)) ||
2981 (Val == llvm::bit_cast<uint32_t>(4.0f)) ||
2982 (Val == llvm::bit_cast<uint32_t>(-4.0f)) ||
2983 (Val == 0x3e22f983 && HasInv2Pi);
2984}
2985
2986bool isInlinableLiteralBF16(int16_t Literal, bool HasInv2Pi) {
2987 if (!HasInv2Pi)
2988 return false;
2989   if (isInlinableIntLiteral(Literal))
2990     return true;
2991 uint16_t Val = static_cast<uint16_t>(Literal);
2992 return Val == 0x3F00 || // 0.5
2993 Val == 0xBF00 || // -0.5
2994 Val == 0x3F80 || // 1.0
2995 Val == 0xBF80 || // -1.0
2996 Val == 0x4000 || // 2.0
2997 Val == 0xC000 || // -2.0
2998 Val == 0x4080 || // 4.0
2999 Val == 0xC080 || // -4.0
3000 Val == 0x3E22; // 1.0 / (2.0 * pi)
3001}
3002
3003bool isInlinableLiteralI16(int32_t Literal, bool HasInv2Pi) {
3004 return isInlinableLiteral32(Literal, HasInv2Pi);
3005}
3006
3007bool isInlinableLiteralFP16(int16_t Literal, bool HasInv2Pi) {
3008 if (!HasInv2Pi)
3009 return false;
3010   if (isInlinableIntLiteral(Literal))
3011     return true;
3012 uint16_t Val = static_cast<uint16_t>(Literal);
3013 return Val == 0x3C00 || // 1.0
3014 Val == 0xBC00 || // -1.0
3015 Val == 0x3800 || // 0.5
3016 Val == 0xB800 || // -0.5
3017 Val == 0x4000 || // 2.0
3018 Val == 0xC000 || // -2.0
3019 Val == 0x4400 || // 4.0
3020 Val == 0xC400 || // -4.0
3021 Val == 0x3118; // 1/2pi
3022}
3023
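// The values returned by the getInlineEncoding* helpers below are the hardware
// inline-constant source encodings: 128..192 for the integers 0..64, 193..208
// for -1..-16, and 240..248 for the supported floating-point constants.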
3024std::optional<unsigned> getInlineEncodingV216(bool IsFloat, uint32_t Literal) {
3025 // Unfortunately, the Instruction Set Architecture Reference Guide is
3026 // misleading about how the inline operands work for (packed) 16-bit
3027 // instructions. In a nutshell, the actual HW behavior is:
3028 //
3029 // - integer encodings (-16 .. 64) are always produced as sign-extended
3030 // 32-bit values
3031 // - float encodings are produced as:
3032 // - for F16 instructions: corresponding half-precision float values in
3033 // the LSBs, 0 in the MSBs
3034 // - for UI16 instructions: corresponding single-precision float value
3035 int32_t Signed = static_cast<int32_t>(Literal);
3036 if (Signed >= 0 && Signed <= 64)
3037 return 128 + Signed;
3038
3039 if (Signed >= -16 && Signed <= -1)
3040 return 192 + std::abs(Signed);
3041
3042 if (IsFloat) {
3043 // clang-format off
3044 switch (Literal) {
3045 case 0x3800: return 240; // 0.5
3046 case 0xB800: return 241; // -0.5
3047 case 0x3C00: return 242; // 1.0
3048 case 0xBC00: return 243; // -1.0
3049 case 0x4000: return 244; // 2.0
3050 case 0xC000: return 245; // -2.0
3051 case 0x4400: return 246; // 4.0
3052 case 0xC400: return 247; // -4.0
3053 case 0x3118: return 248; // 1.0 / (2.0 * pi)
3054 default: break;
3055 }
3056 // clang-format on
3057 } else {
3058 // clang-format off
3059 switch (Literal) {
3060 case 0x3F000000: return 240; // 0.5
3061 case 0xBF000000: return 241; // -0.5
3062 case 0x3F800000: return 242; // 1.0
3063 case 0xBF800000: return 243; // -1.0
3064 case 0x40000000: return 244; // 2.0
3065 case 0xC0000000: return 245; // -2.0
3066 case 0x40800000: return 246; // 4.0
3067 case 0xC0800000: return 247; // -4.0
3068 case 0x3E22F983: return 248; // 1.0 / (2.0 * pi)
3069 default: break;
3070 }
3071 // clang-format on
3072 }
3073
3074 return {};
3075}
3076
3077// Encoding of the literal as an inline constant for a V_PK_*_IU16 instruction
3078// or nullopt.
3079std::optional<unsigned> getInlineEncodingV2I16(uint32_t Literal) {
3080 return getInlineEncodingV216(false, Literal);
3081}
3082
3083// Encoding of the literal as an inline constant for a V_PK_*_BF16 instruction
3084// or nullopt.
3085std::optional<unsigned> getInlineEncodingV2BF16(uint32_t Literal) {
3086 int32_t Signed = static_cast<int32_t>(Literal);
3087 if (Signed >= 0 && Signed <= 64)
3088 return 128 + Signed;
3089
3090 if (Signed >= -16 && Signed <= -1)
3091 return 192 + std::abs(Signed);
3092
3093 // clang-format off
3094 switch (Literal) {
3095 case 0x3F00: return 240; // 0.5
3096 case 0xBF00: return 241; // -0.5
3097 case 0x3F80: return 242; // 1.0
3098 case 0xBF80: return 243; // -1.0
3099 case 0x4000: return 244; // 2.0
3100 case 0xC000: return 245; // -2.0
3101 case 0x4080: return 246; // 4.0
3102 case 0xC080: return 247; // -4.0
3103 case 0x3E22: return 248; // 1.0 / (2.0 * pi)
3104 default: break;
3105 }
3106 // clang-format on
3107
3108 return std::nullopt;
3109}
3110
3111// Encoding of the literal as an inline constant for a V_PK_*_F16 instruction
3112// or nullopt.
3113std::optional<unsigned> getInlineEncodingV2F16(uint32_t Literal) {
3114 return getInlineEncodingV216(true, Literal);
3115}
3116
3117// Whether the given literal can be inlined for a V_PK_* instruction.
3118 bool isInlinableLiteralV216(uint32_t Literal, uint8_t OpType) {
3119   switch (OpType) {
3120   case AMDGPU::OPERAND_REG_IMM_V2INT16:
3121   case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
3122     return getInlineEncodingV216(false, Literal).has_value();
3123   case AMDGPU::OPERAND_REG_IMM_V2FP16:
3124   case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
3125     return getInlineEncodingV216(true, Literal).has_value();
3130 return false;
3131 default:
3132 llvm_unreachable("bad packed operand type");
3133 }
3134}
3135
3136// Whether the given literal can be inlined for a V_PK_*_IU16 instruction.
3137 bool isInlinableLiteralV2I16(uint32_t Literal) {
3138   return getInlineEncodingV2I16(Literal).has_value();
3139 }
3140
3141// Whether the given literal can be inlined for a V_PK_*_BF16 instruction.
3142 bool isInlinableLiteralV2BF16(uint32_t Literal) {
3143   return getInlineEncodingV2BF16(Literal).has_value();
3144 }
3145
3146// Whether the given literal can be inlined for a V_PK_*_F16 instruction.
3147 bool isInlinableLiteralV2F16(uint32_t Literal) {
3148   return getInlineEncodingV2F16(Literal).has_value();
3149 }
3150
3151bool isValid32BitLiteral(uint64_t Val, bool IsFP64) {
3152 if (IsFP64)
3153 return !Lo_32(Val);
3154
3155 return isUInt<32>(Val) || isInt<32>(Val);
3156}
3157
3158int64_t encode32BitLiteral(int64_t Imm, OperandType Type, bool IsLit) {
3159 switch (Type) {
3160 default:
3161 break;
3166 return Imm & 0xffff;
3179 return Lo_32(Imm);
3181 return IsLit ? Imm : Hi_32(Imm);
3182 }
3183 return Imm;
3184}
3185
3186 bool isArgPassedInSGPR(const Argument *A) {
3187   const Function *F = A->getParent();
3188
3189 // Arguments to compute shaders are never a source of divergence.
3190 CallingConv::ID CC = F->getCallingConv();
3191 switch (CC) {
3192   case CallingConv::AMDGPU_KERNEL:
3193   case CallingConv::SPIR_KERNEL:
3194     return true;
3205 // For non-compute shaders, SGPR inputs are marked with either inreg or
3206 // byval. Everything else is in VGPRs.
3207 return A->hasAttribute(Attribute::InReg) ||
3208 A->hasAttribute(Attribute::ByVal);
3209 default:
3210 // TODO: treat i1 as divergent?
3211 return A->hasAttribute(Attribute::InReg);
3212 }
3213}
3214
3215bool isArgPassedInSGPR(const CallBase *CB, unsigned ArgNo) {
3216 // Arguments to compute shaders are never a source of divergence.
3217   CallingConv::ID CC = CB->getCallingConv();
3218   switch (CC) {
3219   case CallingConv::AMDGPU_KERNEL:
3220   case CallingConv::SPIR_KERNEL:
3221     return true;
3232 // For non-compute shaders, SGPR inputs are marked with either inreg or
3233 // byval. Everything else is in VGPRs.
3234 return CB->paramHasAttr(ArgNo, Attribute::InReg) ||
3235 CB->paramHasAttr(ArgNo, Attribute::ByVal);
3236 default:
3237 return CB->paramHasAttr(ArgNo, Attribute::InReg);
3238 }
3239}
3240
3241static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) {
3242 return isGCN3Encoding(ST) || isGFX10Plus(ST);
3243}
3244
3245 bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST,
3246                                       int64_t EncodedOffset) {
3247 if (isGFX12Plus(ST))
3248 return isUInt<23>(EncodedOffset);
3249
3250 return hasSMEMByteOffset(ST) ? isUInt<20>(EncodedOffset)
3251 : isUInt<8>(EncodedOffset);
3252}
3253
3254 bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST,
3255                                     int64_t EncodedOffset, bool IsBuffer) {
3256 if (isGFX12Plus(ST)) {
3257 if (IsBuffer && EncodedOffset < 0)
3258 return false;
3259 return isInt<24>(EncodedOffset);
3260 }
3261
3262 return !IsBuffer && hasSMRDSignedImmOffset(ST) && isInt<21>(EncodedOffset);
3263}
3264
3265static bool isDwordAligned(uint64_t ByteOffset) {
3266 return (ByteOffset & 3) == 0;
3267}
3268
3269 uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST,
3270                                 uint64_t ByteOffset) {
3271 if (hasSMEMByteOffset(ST))
3272 return ByteOffset;
3273
3274 assert(isDwordAligned(ByteOffset));
3275 return ByteOffset >> 2;
3276}
3277
3278std::optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
3279 int64_t ByteOffset, bool IsBuffer,
3280 bool HasSOffset) {
3281 // For unbuffered smem loads, it is illegal for the Immediate Offset to be
3282 // negative if the resulting (Offset + (M0 or SOffset or zero) is negative.
3283 // Handle case where SOffset is not present.
3284 if (!IsBuffer && !HasSOffset && ByteOffset < 0 && hasSMRDSignedImmOffset(ST))
3285 return std::nullopt;
3286
3287 if (isGFX12Plus(ST)) // 24 bit signed offsets
3288 return isInt<24>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
3289 : std::nullopt;
3290
3291 // The signed version is always a byte offset.
3292 if (!IsBuffer && hasSMRDSignedImmOffset(ST)) {
3293     assert(hasSMEMByteOffset(ST));
3294     return isInt<20>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
3295 : std::nullopt;
3296 }
3297
3298 if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST))
3299 return std::nullopt;
3300
3301 int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
3302 return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset)
3303 ? std::optional<int64_t>(EncodedOffset)
3304 : std::nullopt;
3305}
3306
3307std::optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
3308 int64_t ByteOffset) {
3309 if (!isCI(ST) || !isDwordAligned(ByteOffset))
3310 return std::nullopt;
3311
3312 int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
3313 return isUInt<32>(EncodedOffset) ? std::optional<int64_t>(EncodedOffset)
3314 : std::nullopt;
3315}
3316
3317 unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST) {
3318   if (AMDGPU::isGFX10(ST))
3319 return 12;
3320
3321 if (AMDGPU::isGFX12(ST))
3322 return 24;
3323 return 13;
3324}
3325
3326namespace {
3327
3328struct SourceOfDivergence {
3329 unsigned Intr;
3330};
3331const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);
3332
3333struct AlwaysUniform {
3334 unsigned Intr;
3335};
3336const AlwaysUniform *lookupAlwaysUniform(unsigned Intr);
3337
3338#define GET_SourcesOfDivergence_IMPL
3339#define GET_UniformIntrinsics_IMPL
3340#define GET_Gfx9BufferFormat_IMPL
3341#define GET_Gfx10BufferFormat_IMPL
3342#define GET_Gfx11PlusBufferFormat_IMPL
3343
3344#include "AMDGPUGenSearchableTables.inc"
3345
3346} // end anonymous namespace
3347
3348bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
3349 return lookupSourceOfDivergence(IntrID);
3350}
3351
3352bool isIntrinsicAlwaysUniform(unsigned IntrID) {
3353 return lookupAlwaysUniform(IntrID);
3354}
3355
3356 const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t BitsPerComp,
3357                                                   uint8_t NumComponents,
3358 uint8_t NumFormat,
3359 const MCSubtargetInfo &STI) {
3360 return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(
3361 BitsPerComp, NumComponents, NumFormat)
3362 : isGFX10(STI)
3363 ? getGfx10BufferFormatInfo(BitsPerComp, NumComponents, NumFormat)
3364 : getGfx9BufferFormatInfo(BitsPerComp, NumComponents, NumFormat);
3365}
3366
3367 const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t Format,
3368                                                   const MCSubtargetInfo &STI) {
3369 return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(Format)
3370 : isGFX10(STI) ? getGfx10BufferFormatInfo(Format)
3371 : getGfx9BufferFormatInfo(Format);
3372}
3373
3375 const MCRegisterInfo &MRI) {
3376 const unsigned VGPRClasses[] = {
3377 AMDGPU::VGPR_16RegClassID, AMDGPU::VGPR_32RegClassID,
3378 AMDGPU::VReg_64RegClassID, AMDGPU::VReg_96RegClassID,
3379 AMDGPU::VReg_128RegClassID, AMDGPU::VReg_160RegClassID,
3380 AMDGPU::VReg_192RegClassID, AMDGPU::VReg_224RegClassID,
3381 AMDGPU::VReg_256RegClassID, AMDGPU::VReg_288RegClassID,
3382 AMDGPU::VReg_320RegClassID, AMDGPU::VReg_352RegClassID,
3383 AMDGPU::VReg_384RegClassID, AMDGPU::VReg_512RegClassID,
3384 AMDGPU::VReg_1024RegClassID};
3385
3386 for (unsigned RCID : VGPRClasses) {
3387 const MCRegisterClass &RC = MRI.getRegClass(RCID);
3388 if (RC.contains(Reg))
3389 return &RC;
3390 }
3391
3392 return nullptr;
3393}
3394
3396 unsigned Enc = MRI.getEncodingValue(Reg);
3397 unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
3398 return Idx >> 8;
3399}
3400
3402 const MCRegisterInfo &MRI) {
3403 unsigned Enc = MRI.getEncodingValue(Reg);
3404 unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
3405 if (Idx >= 0x100)
3406 return MCRegister();
3407
3409 if (!RC)
3410 return MCRegister();
3411
3412 Idx |= MSBs << 8;
3413 if (RC->getID() == AMDGPU::VGPR_16RegClassID) {
3414 // This class has 2048 registers with interleaved lo16 and hi16.
3415 Idx *= 2;
3416     if (isHi16Reg(Reg, MRI))
3417       ++Idx;
3418 }
3419
3420 return RC->getRegister(Idx);
3421}
3422
3423std::pair<const AMDGPU::OpName *, const AMDGPU::OpName *>
3425 static const AMDGPU::OpName VOPOps[4] = {
3426 AMDGPU::OpName::src0, AMDGPU::OpName::src1, AMDGPU::OpName::src2,
3427 AMDGPU::OpName::vdst};
3428 static const AMDGPU::OpName VDSOps[4] = {
3429 AMDGPU::OpName::addr, AMDGPU::OpName::data0, AMDGPU::OpName::data1,
3430 AMDGPU::OpName::vdst};
3431 static const AMDGPU::OpName FLATOps[4] = {
3432 AMDGPU::OpName::vaddr, AMDGPU::OpName::vdata,
3433 AMDGPU::OpName::NUM_OPERAND_NAMES, AMDGPU::OpName::vdst};
3434 static const AMDGPU::OpName BUFOps[4] = {
3435 AMDGPU::OpName::vaddr, AMDGPU::OpName::NUM_OPERAND_NAMES,
3436 AMDGPU::OpName::NUM_OPERAND_NAMES, AMDGPU::OpName::vdata};
3437 static const AMDGPU::OpName VIMGOps[4] = {
3438 AMDGPU::OpName::vaddr0, AMDGPU::OpName::vaddr1, AMDGPU::OpName::vaddr2,
3439 AMDGPU::OpName::vdata};
3440
3441 // For VOPD instructions MSB of a corresponding Y component operand VGPR
3442 // address is supposed to match X operand, otherwise VOPD shall not be
3443 // combined.
3444 static const AMDGPU::OpName VOPDOpsX[4] = {
3445 AMDGPU::OpName::src0X, AMDGPU::OpName::vsrc1X, AMDGPU::OpName::vsrc2X,
3446 AMDGPU::OpName::vdstX};
3447 static const AMDGPU::OpName VOPDOpsY[4] = {
3448 AMDGPU::OpName::src0Y, AMDGPU::OpName::vsrc1Y, AMDGPU::OpName::vsrc2Y,
3449 AMDGPU::OpName::vdstY};
3450
3451 // VOP2 MADMK instructions use src0, imm, src1 scheme.
3452 static const AMDGPU::OpName VOP2MADMKOps[4] = {
3453 AMDGPU::OpName::src0, AMDGPU::OpName::NUM_OPERAND_NAMES,
3454 AMDGPU::OpName::src1, AMDGPU::OpName::vdst};
3455 static const AMDGPU::OpName VOPDFMAMKOpsX[4] = {
3456 AMDGPU::OpName::src0X, AMDGPU::OpName::NUM_OPERAND_NAMES,
3457 AMDGPU::OpName::vsrc1X, AMDGPU::OpName::vdstX};
3458 static const AMDGPU::OpName VOPDFMAMKOpsY[4] = {
3459 AMDGPU::OpName::src0Y, AMDGPU::OpName::NUM_OPERAND_NAMES,
3460 AMDGPU::OpName::vsrc1Y, AMDGPU::OpName::vdstY};
3461
3462 unsigned TSFlags = Desc.TSFlags;
3463
3464 if (TSFlags &
3467 switch (Desc.getOpcode()) {
3468 // LD_SCALE operands ignore MSB.
3469 case AMDGPU::V_WMMA_LD_SCALE_PAIRED_B32:
3470 case AMDGPU::V_WMMA_LD_SCALE_PAIRED_B32_gfx1250:
3471 case AMDGPU::V_WMMA_LD_SCALE16_PAIRED_B64:
3472 case AMDGPU::V_WMMA_LD_SCALE16_PAIRED_B64_gfx1250:
3473 return {};
3474 case AMDGPU::V_FMAMK_F16:
3475 case AMDGPU::V_FMAMK_F16_t16:
3476 case AMDGPU::V_FMAMK_F16_t16_gfx12:
3477 case AMDGPU::V_FMAMK_F16_fake16:
3478 case AMDGPU::V_FMAMK_F16_fake16_gfx12:
3479 case AMDGPU::V_FMAMK_F32:
3480 case AMDGPU::V_FMAMK_F32_gfx12:
3481 case AMDGPU::V_FMAMK_F64:
3482 case AMDGPU::V_FMAMK_F64_gfx1250:
3483 return {VOP2MADMKOps, nullptr};
3484 default:
3485 break;
3486 }
3487 return {VOPOps, nullptr};
3488 }
3489
3490 if (TSFlags & SIInstrFlags::DS)
3491 return {VDSOps, nullptr};
3492
3493 if (TSFlags & SIInstrFlags::FLAT)
3494 return {FLATOps, nullptr};
3495
3496 if (TSFlags & (SIInstrFlags::MUBUF | SIInstrFlags::MTBUF))
3497 return {BUFOps, nullptr};
3498
3499 if (TSFlags & SIInstrFlags::VIMAGE)
3500 return {VIMGOps, nullptr};
3501
3502 if (AMDGPU::isVOPD(Desc.getOpcode())) {
3503 auto [OpX, OpY] = getVOPDComponents(Desc.getOpcode());
3504 return {(OpX == AMDGPU::V_FMAMK_F32) ? VOPDFMAMKOpsX : VOPDOpsX,
3505 (OpY == AMDGPU::V_FMAMK_F32) ? VOPDFMAMKOpsY : VOPDOpsY};
3506 }
3507
3508 assert(!(TSFlags & SIInstrFlags::MIMG));
3509
3510 if (TSFlags & (SIInstrFlags::VSAMPLE | SIInstrFlags::EXP))
3511 llvm_unreachable("Sample and export VGPR lowering is not implemented and"
3512 " these instructions are not expected on gfx1250");
3513
3514 return {};
3515}
3516
3517bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode) {
3518 uint64_t TSFlags = MII.get(Opcode).TSFlags;
3519
3520 if (TSFlags & SIInstrFlags::SMRD)
3521 return !getSMEMIsBuffer(Opcode);
3522 if (!(TSFlags & SIInstrFlags::FLAT))
3523 return false;
3524
3525 // Only SV and SVS modes are supported.
3526 if (TSFlags & SIInstrFlags::FlatScratch)
3527 return hasNamedOperand(Opcode, OpName::vaddr);
3528
3529 // Only GVS mode is supported.
3530 return hasNamedOperand(Opcode, OpName::vaddr) &&
3531 hasNamedOperand(Opcode, OpName::saddr);
3532
3533 return false;
3534}
3535
3536bool hasAny64BitVGPROperands(const MCInstrDesc &OpDesc, const MCInstrInfo &MII,
3537 const MCSubtargetInfo &ST) {
3538 for (auto OpName : {OpName::vdst, OpName::src0, OpName::src1, OpName::src2}) {
3539 int Idx = getNamedOperandIdx(OpDesc.getOpcode(), OpName);
3540 if (Idx == -1)
3541 continue;
3542
3543 const MCOperandInfo &OpInfo = OpDesc.operands()[Idx];
3544 int16_t RegClass = MII.getOpRegClassID(
3545 OpInfo, ST.getHwMode(MCSubtargetInfo::HwMode_RegInfo));
3546 if (RegClass == AMDGPU::VReg_64RegClassID ||
3547 RegClass == AMDGPU::VReg_64_Align2RegClassID)
3548 return true;
3549 }
3550
3551 return false;
3552}
3553
3554bool isDPALU_DPP32BitOpc(unsigned Opc) {
3555 switch (Opc) {
3556 case AMDGPU::V_MUL_LO_U32_e64:
3557 case AMDGPU::V_MUL_LO_U32_e64_dpp:
3558 case AMDGPU::V_MUL_LO_U32_e64_dpp_gfx1250:
3559 case AMDGPU::V_MUL_HI_U32_e64:
3560 case AMDGPU::V_MUL_HI_U32_e64_dpp:
3561 case AMDGPU::V_MUL_HI_U32_e64_dpp_gfx1250:
3562 case AMDGPU::V_MUL_HI_I32_e64:
3563 case AMDGPU::V_MUL_HI_I32_e64_dpp:
3564 case AMDGPU::V_MUL_HI_I32_e64_dpp_gfx1250:
3565 case AMDGPU::V_MAD_U32_e64:
3566 case AMDGPU::V_MAD_U32_e64_dpp:
3567 case AMDGPU::V_MAD_U32_e64_dpp_gfx1250:
3568 return true;
3569 default:
3570 return false;
3571 }
3572}
3573
3574bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCInstrInfo &MII,
3575 const MCSubtargetInfo &ST) {
3576 if (!ST.hasFeature(AMDGPU::FeatureDPALU_DPP))
3577 return false;
3578
3579 if (isDPALU_DPP32BitOpc(OpDesc.getOpcode()))
3580 return ST.hasFeature(AMDGPU::FeatureGFX1250Insts);
3581
3582 return hasAny64BitVGPROperands(OpDesc, MII, ST);
3583}
3584
3586 if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize32768))
3587 return 64;
3588 if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize65536))
3589 return 128;
3590 if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize163840))
3591 return 320;
3592 if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize327680))
3593 return 512;
3594 return 64; // In sync with getAddressableLocalMemorySize
3595}
3596
3597bool isPackedFP32Inst(unsigned Opc) {
3598 switch (Opc) {
3599 case AMDGPU::V_PK_ADD_F32:
3600 case AMDGPU::V_PK_ADD_F32_gfx12:
3601 case AMDGPU::V_PK_MUL_F32:
3602 case AMDGPU::V_PK_MUL_F32_gfx12:
3603 case AMDGPU::V_PK_FMA_F32:
3604 case AMDGPU::V_PK_FMA_F32_gfx12:
3605 return true;
3606 default:
3607 return false;
3608 }
3609}
3610
3611const std::array<unsigned, 3> &ClusterDimsAttr::getDims() const {
3612 assert(isFixedDims() && "expect kind to be FixedDims");
3613 return Dims;
3614}
3615
3616std::string ClusterDimsAttr::to_string() const {
3617 SmallString<10> Buffer;
3618 raw_svector_ostream OS(Buffer);
3619
3620 switch (getKind()) {
3621 case Kind::Unknown:
3622 return "";
3623 case Kind::NoCluster: {
3624 OS << EncoNoCluster << ',' << EncoNoCluster << ',' << EncoNoCluster;
3625 return Buffer.c_str();
3626 }
3627 case Kind::VariableDims: {
3628 OS << EncoVariableDims << ',' << EncoVariableDims << ','
3629 << EncoVariableDims;
3630 return Buffer.c_str();
3631 }
3632 case Kind::FixedDims: {
3633 OS << Dims[0] << ',' << Dims[1] << ',' << Dims[2];
3634 return Buffer.c_str();
3635 }
3636 }
3637 llvm_unreachable("Unknown ClusterDimsAttr kind");
3638}
3639
3640 ClusterDimsAttr ClusterDimsAttr::get(const Function &F) {
3641   std::optional<SmallVector<unsigned>> Attr =
3642 getIntegerVecAttribute(F, "amdgpu-cluster-dims", /*Size=*/3);
3643   Kind AttrKind = Kind::FixedDims;
3644
3645 if (!Attr.has_value())
3646 AttrKind = Kind::Unknown;
3647 else if (all_of(*Attr, [](unsigned V) { return V == EncoNoCluster; }))
3648 AttrKind = Kind::NoCluster;
3649 else if (all_of(*Attr, [](unsigned V) { return V == EncoVariableDims; }))
3650 AttrKind = Kind::VariableDims;
3651
3652 ClusterDimsAttr A(AttrKind);
3653 if (AttrKind == Kind::FixedDims)
3654 A.Dims = {(*Attr)[0], (*Attr)[1], (*Attr)[2]};
3655
3656 return A;
3657}
3658
3659} // namespace AMDGPU
3660
3661 raw_ostream &operator<<(raw_ostream &OS,
3662                         const AMDGPU::IsaInfo::TargetIDSetting S) {
3663   switch (S) {
3664   case (AMDGPU::IsaInfo::TargetIDSetting::Unsupported):
3665     OS << "Unsupported";
3666 break;
3667   case (AMDGPU::IsaInfo::TargetIDSetting::Any):
3668     OS << "Any";
3669 break;
3670   case (AMDGPU::IsaInfo::TargetIDSetting::Off):
3671     OS << "Off";
3672 break;
3673   case (AMDGPU::IsaInfo::TargetIDSetting::On):
3674     OS << "On";
3675 break;
3676 }
3677 return OS;
3678}
3679
3680} // namespace llvm
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static llvm::cl::opt< unsigned > DefaultAMDHSACodeObjectVersion("amdhsa-code-object-version", llvm::cl::Hidden, llvm::cl::init(llvm::AMDGPU::AMDHSA_COV6), llvm::cl::desc("Set default AMDHSA Code Object Version (module flag " "or asm directive still take priority if present)"))
#define MAP_REG2REG
Provides AMDGPU specific target descriptions.
MC layer struct for AMDGPUMCKernelCodeT, provides MCExpr functionality where required.
@ AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
#define RegName(no)
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
Register const TargetRegisterInfo * TRI
This file contains the declarations for metadata subclasses.
#define T
uint64_t High
if(PassOpts->AAPipeline)
#define S_00B848_MEM_ORDERED(x)
Definition SIDefines.h:1237
#define S_00B848_WGP_MODE(x)
Definition SIDefines.h:1234
#define S_00B848_FWD_PROGRESS(x)
Definition SIDefines.h:1240
unsigned unsigned DefaultVal
This file contains some functions that are useful when dealing with strings.
static const int BlockSize
Definition TarWriter.cpp:33
static ClusterDimsAttr get(const Function &F)
const std::array< unsigned, 3 > & getDims() const
TargetIDSetting getXnackSetting() const
AMDGPUTargetID(const MCSubtargetInfo &STI)
void setTargetIDFromTargetIDStream(StringRef TargetID)
TargetIDSetting getSramEccSetting() const
unsigned getIndexInParsedOperands(unsigned CompOprIdx) const
unsigned getIndexOfSrcInParsedOperands(unsigned CompSrcIdx) const
std::optional< unsigned > getInvalidCompOperandIndex(std::function< MCRegister(unsigned, unsigned)> GetRegIdx, const MCRegisterInfo &MRI, bool SkipSrc=false, bool AllowSameVGPR=false, bool VOPD3=false) const
std::array< MCRegister, Component::MAX_OPR_NUM > RegIndices
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
constexpr bool test(unsigned I) const
unsigned getAddressSpace() const
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool mayStore() const
Return true if this instruction could possibly modify memory.
bool mayLoad() const
Return true if this instruction could possibly read memory.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
unsigned getOpcode() const
Return the opcode number for this descriptor.
Interface to description of machine instruction set.
Definition MCInstrInfo.h:27
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition MCInstrInfo.h:90
int16_t getOpRegClassID(const MCOperandInfo &OpInfo, unsigned HwModeId) const
Return the ID of the register class to use for OpInfo, for the active HwMode HwModeId.
Definition MCInstrInfo.h:80
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition MCInstrDesc.h:86
MCRegisterClass - Base class of TargetRegisterClass.
unsigned getID() const
getID() - Return the register class ID number.
MCRegister getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
constexpr unsigned id() const
Definition MCRegister.h:82
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const Triple & getTargetTriple() const
const FeatureBitset & getFeatureBits() const
StringRef getCPU() const
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition SmallString.h:26
const char * c_str()
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
A wrapper around a string literal that serves as a proxy for constructing global tables of StringRefs...
Definition StringRef.h:854
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:702
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:146
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition StringRef.h:273
Manages the enabling and disabling of subtarget specific features.
const std::vector< std::string > & getFeatures() const
Returns the vector of individual subtarget features.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
OSType getOS() const
Get the parsed operating system type of this triple.
Definition Triple.h:422
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition Triple.h:413
bool isAMDGCN() const
Tests whether the target is AMDGCN.
Definition Triple.h:927
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
std::string & str()
Returns the string's reference.
A raw_ostream that writes to an SmallVector or SmallString.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ CONSTANT_ADDRESS_32BIT
Address space for 32-bit constant memory.
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
unsigned decodeFieldVaVcc(unsigned Encoded)
unsigned encodeFieldVaVcc(unsigned Encoded, unsigned VaVcc)
unsigned encodeFieldHoldCnt(unsigned Encoded, unsigned HoldCnt)
bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
unsigned encodeFieldVaSsrc(unsigned Encoded, unsigned VaSsrc)
unsigned encodeFieldVaVdst(unsigned Encoded, unsigned VaVdst)
unsigned decodeFieldSaSdst(unsigned Encoded)
unsigned decodeFieldVaSdst(unsigned Encoded)
unsigned encodeFieldVmVsrc(unsigned Encoded, unsigned VmVsrc)
unsigned decodeFieldVaSsrc(unsigned Encoded)
int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst)
const CustomOperandVal DepCtrInfo[]
bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
unsigned decodeFieldVaVdst(unsigned Encoded)
unsigned decodeFieldHoldCnt(unsigned Encoded)
int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI)
unsigned decodeFieldVmVsrc(unsigned Encoded)
unsigned encodeFieldVaSdst(unsigned Encoded, unsigned VaSdst)
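The depctr field helpers above pack and unpack individual wait fields (va_vdst, vm_vsrc, sa_sdst, ...) inside a single encoded operand. A minimal round-trip sketch, assuming these helpers live in the llvm::AMDGPU::DepCtr namespace and that the starting encoding comes from the caller (illustrative only, not code from this file):
// Update the va_vdst field of a packed depctr immediate and verify the
// encode/decode round trip. The wrapper name setVaVdst is hypothetical.
#include <cassert>
unsigned setVaVdst(unsigned Encoded, unsigned VaVdst) {
  unsigned Updated = llvm::AMDGPU::DepCtr::encodeFieldVaVdst(Encoded, VaVdst);
  assert(llvm::AMDGPU::DepCtr::decodeFieldVaVdst(Updated) == VaVdst);
  return Updated;
}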
bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI)
static constexpr ExpTgt ExpTgtInfo[]
bool getTgtName(unsigned Id, StringRef &Name, int &Index)
unsigned getTgtId(const StringRef Name)
constexpr uint32_t VersionMinor
HSA metadata minor version.
constexpr uint32_t VersionMajor
HSA metadata major version.
unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI, std::optional< bool > EnableWavefrontSize32)
unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI)
unsigned getArchVGPRAllocGranule()
For subtargets with a unified VGPR file and mixed ArchVGPR/AGPR usage, returns the allocation granule...
unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getWavefrontSize(const MCSubtargetInfo *STI)
unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI, unsigned NumVGPRs, unsigned DynamicVGPRBlockSize)
unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI)
unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI)
unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed, bool FlatScrUsed, bool XNACKUsed)
unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI)
unsigned getLocalMemorySize(const MCSubtargetInfo *STI)
unsigned getAddressableLocalMemorySize(const MCSubtargetInfo *STI)
unsigned getEUsPerCU(const MCSubtargetInfo *STI)
unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI)
unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU)
static TargetIDSetting getTargetIDSettingFromFeatureString(StringRef FeatureString)
unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI)
unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI, unsigned DynamicVGPRBlockSize, std::optional< bool > EnableWavefrontSize32)
unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, bool Addressable)
unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs)
unsigned getMinWavesPerEU(const MCSubtargetInfo *STI)
unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, unsigned DynamicVGPRBlockSize)
unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI)
unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, unsigned DynamicVGPRBlockSize)
unsigned getAllocatedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, unsigned DynamicVGPRBlockSize, std::optional< bool > EnableWavefrontSize32)
unsigned getEncodedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, std::optional< bool > EnableWavefrontSize32)
unsigned getOccupancyWithNumSGPRs(unsigned SGPRs, unsigned MaxWaves, AMDGPUSubtarget::Generation Gen)
static unsigned getGranulatedNumRegisterBlocks(unsigned NumRegs, unsigned Granule)
unsigned getAddressableNumArchVGPRs(const MCSubtargetInfo *STI)
unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI)
unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI, unsigned DynamicVGPRBlockSize)
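The IsaInfo helpers above expose per-subtarget wave and register limits. A small sketch of how they compose, assuming the caller supplies the MCSubtargetInfo and that dynamic VGPRs are disabled (DynamicVGPRBlockSize = 0 is an assumption of this example); the struct and helper names are hypothetical:
// Collect per-EU limits at maximum occupancy.
struct EULimits {
  unsigned MaxWaves;
  unsigned VGPRsAtMaxWaves;
  unsigned SGPRsAtMaxWaves;
};
EULimits computeEULimits(const llvm::MCSubtargetInfo *STI) {
  namespace IsaInfo = llvm::AMDGPU::IsaInfo;
  EULimits L;
  L.MaxWaves = IsaInfo::getMaxWavesPerEU(STI);
  L.VGPRsAtMaxWaves =
      IsaInfo::getMaxNumVGPRs(STI, L.MaxWaves, /*DynamicVGPRBlockSize=*/0);
  L.SGPRsAtMaxWaves =
      IsaInfo::getMaxNumSGPRs(STI, L.MaxWaves, /*Addressable=*/true);
  return L;
}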
StringLiteral const UfmtSymbolicGFX11[]
bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI)
unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI)
StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI)
unsigned const DfmtNfmt2UFmtGFX10[]
StringLiteral const DfmtSymbolic[]
static StringLiteral const * getNfmtLookupTable(const MCSubtargetInfo &STI)
bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI)
StringLiteral const NfmtSymbolicGFX10[]
bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI)
int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt, const MCSubtargetInfo &STI)
StringRef getDfmtName(unsigned Id)
int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt)
int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI)
bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI)
StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI)
unsigned const DfmtNfmt2UFmtGFX11[]
StringLiteral const NfmtSymbolicVI[]
StringLiteral const NfmtSymbolicSICI[]
int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI)
int64_t getDfmt(const StringRef Name)
StringLiteral const UfmtSymbolicGFX10[]
void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt)
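The MTBUF format helpers above translate between symbolic dfmt/nfmt names and their packed numeric encoding. A round-trip sketch with arbitrary in-range sample values (illustrative only, assuming the declarations from AMDGPUBaseInfo.h are visible):
// Pack dfmt/nfmt into a combined buffer-format value and decode it back.
#include <cassert>
void dfmtNfmtRoundTrip() {
  unsigned Dfmt = 4, Nfmt = 7; // sample values, assumed valid for the target
  int64_t Packed = llvm::AMDGPU::MTBUFFormat::encodeDfmtNfmt(Dfmt, Nfmt);
  unsigned OutDfmt = 0, OutNfmt = 0;
  llvm::AMDGPU::MTBUFFormat::decodeDfmtNfmt(static_cast<unsigned>(Packed),
                                            OutDfmt, OutNfmt);
  assert(OutDfmt == Dfmt && OutNfmt == Nfmt);
}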
uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId)
bool msgSupportsStream(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI)
void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId, uint16_t &StreamId, const MCSubtargetInfo &STI)
bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI)
bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId, const MCSubtargetInfo &STI, bool Strict)
StringRef getMsgOpName(int64_t MsgId, uint64_t Encoding, const MCSubtargetInfo &STI)
Map from an encoding to the symbolic name for a sendmsg operation.
static uint64_t getMsgIdMask(const MCSubtargetInfo &STI)
bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI)
bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI, bool Strict)
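The SendMsg helpers above validate and pack s_sendmsg immediates. A hedged sketch of the intended call pattern; the validity checks and strictness flag follow the declarations above, while the wrapper function itself is hypothetical:
// Validate MsgId/OpId/StreamId for the given subtarget, then pack them.
#include <cstdint>
#include <optional>
std::optional<uint64_t> packSendMsg(int64_t MsgId, int64_t OpId,
                                    int64_t StreamId,
                                    const llvm::MCSubtargetInfo &STI) {
  using namespace llvm::AMDGPU::SendMsg;
  if (!isValidMsgId(MsgId, STI) ||
      !isValidMsgOp(MsgId, OpId, STI, /*Strict=*/true) ||
      !isValidMsgStream(MsgId, OpId, StreamId, STI, /*Strict=*/true))
    return std::nullopt;
  return encodeMsg(MsgId, OpId, StreamId);
}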
constexpr unsigned VOPD_VGPR_BANK_MASKS[]
constexpr unsigned COMPONENTS_NUM
constexpr unsigned VOPD3_VGPR_BANK_MASKS[]
bool isPackedFP32Inst(unsigned Opc)
bool isGCN3Encoding(const MCSubtargetInfo &STI)
bool isInlinableLiteralBF16(int16_t Literal, bool HasInv2Pi)
bool isGFX10_BEncoding(const MCSubtargetInfo &STI)
bool isInlineValue(MCRegister Reg)
bool isGFX10_GFX11(const MCSubtargetInfo &STI)
bool isInlinableLiteralV216(uint32_t Literal, uint8_t OpType)
LLVM_READONLY const MIMGInfo * getMIMGInfo(unsigned Opc)
void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt)
Decodes Vmcnt, Expcnt and Lgkmcnt from given Waitcnt for given isa Version, and writes decoded values...
bool isInlinableLiteralFP16(int16_t Literal, bool HasInv2Pi)
bool isSGPR(MCRegister Reg, const MCRegisterInfo *TRI)
Return true if Reg is a scalar (SGPR) register.
uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST, uint64_t ByteOffset)
Convert ByteOffset to dwords if the subtarget uses dword SMRD immediate offsets.
static unsigned encodeStorecnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Storecnt)
MCRegister getMCReg(MCRegister Reg, const MCSubtargetInfo &STI)
If Reg is a pseudo register, return the correct hardware register for the given STI; otherwise return Reg.
static bool hasSMEMByteOffset(const MCSubtargetInfo &ST)
bool isVOPCAsmOnly(unsigned Opc)
int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding, unsigned VDataDwords, unsigned VAddrDwords)
bool getMTBUFHasSrsrc(unsigned Opc)
std::optional< int64_t > getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST, int64_t ByteOffset)
bool getWMMAIsXDL(unsigned Opc)
uint8_t wmmaScaleF8F6F4FormatToNumRegs(unsigned Fmt)
static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr, int Size, unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
bool isGFX10Before1030(const MCSubtargetInfo &STI)
bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo)
Does this operand support only inlinable literals?
unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc)
const int OPR_ID_UNSUPPORTED
bool shouldEmitConstantsToTextSection(const Triple &TT)
bool isInlinableLiteralV2I16(uint32_t Literal)
int getMTBUFElements(unsigned Opc)
bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI)
static int encodeCustomOperandVal(const CustomOperandVal &Op, int64_t InputVal)
unsigned getTemporalHintType(const MCInstrDesc TID)
int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR, int32_t ArgNumVGPR)
bool isGFX10(const MCSubtargetInfo &STI)
bool isInlinableLiteralV2BF16(uint32_t Literal)
unsigned getMaxNumUserSGPRs(const MCSubtargetInfo &STI)
std::optional< unsigned > getInlineEncodingV216(bool IsFloat, uint32_t Literal)
FPType getFPDstSelType(unsigned Opc)
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST)
For pre-GFX12 FLAT instructions the offset must be positive; MSB is ignored and forced to zero.
bool hasA16(const MCSubtargetInfo &STI)
bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset, bool IsBuffer)
bool isGFX12Plus(const MCSubtargetInfo &STI)
unsigned getNSAMaxSize(const MCSubtargetInfo &STI, bool HasSampler)
const MCRegisterClass * getVGPRPhysRegClass(MCRegister Reg, const MCRegisterInfo &MRI)
bool hasPackedD16(const MCSubtargetInfo &STI)
unsigned getStorecntBitMask(const IsaVersion &Version)
unsigned getLdsDwGranularity(const MCSubtargetInfo &ST)
bool isGFX940(const MCSubtargetInfo &STI)
bool isInlinableLiteralV2F16(uint32_t Literal)
bool isHsaAbi(const MCSubtargetInfo &STI)
bool isGFX11(const MCSubtargetInfo &STI)
const int OPR_VAL_INVALID
bool getSMEMIsBuffer(unsigned Opc)
bool isGFX10_3_GFX11(const MCSubtargetInfo &STI)
bool hasValueInRangeLikeMetadata(const MDNode &MD, int64_t Val)
Checks if Val is inside MD, a !range-like metadata.
uint8_t mfmaScaleF8F6F4FormatToNumRegs(unsigned EncodingVal)
unsigned getVOPDOpcode(unsigned Opc, bool VOPD3)
bool isGroupSegment(const GlobalValue *GV)
LLVM_ABI IsaVersion getIsaVersion(StringRef GPU)
bool getMTBUFHasSoffset(unsigned Opc)
bool hasXNACK(const MCSubtargetInfo &STI)
bool isValid32BitLiteral(uint64_t Val, bool IsFP64)
static unsigned getCombinedCountBitMask(const IsaVersion &Version, bool IsStore)
CanBeVOPD getCanBeVOPD(unsigned Opc, unsigned EncodingFamily, bool VOPD3)
unsigned encodeWaitcnt(const IsaVersion &Version, unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt)
Encodes Vmcnt, Expcnt and Lgkmcnt into Waitcnt for given isa Version.
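encodeWaitcnt and decodeWaitcnt (above) convert between the separate vmcnt/expcnt/lgkmcnt counters and the packed s_waitcnt immediate for a given ISA version. A round-trip sketch; the GPU name and counter values are arbitrary examples assumed to fit that target's field widths:
// Encode counters into a waitcnt immediate for gfx900 and decode them back.
#include <cassert>
void waitcntRoundTrip() {
  using namespace llvm::AMDGPU;
  IsaVersion IV = getIsaVersion("gfx900");
  unsigned Enc = encodeWaitcnt(IV, /*Vmcnt=*/3, /*Expcnt=*/0, /*Lgkmcnt=*/15);
  unsigned Vmcnt = 0, Expcnt = 0, Lgkmcnt = 0;
  decodeWaitcnt(IV, Enc, Vmcnt, Expcnt, Lgkmcnt);
  assert(Vmcnt == 3 && Expcnt == 0 && Lgkmcnt == 15);
}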
bool isVOPC64DPP(unsigned Opc)
int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements)
bool getMAIIsGFX940XDL(unsigned Opc)
bool isSI(const MCSubtargetInfo &STI)
unsigned getDefaultAMDHSACodeObjectVersion()
bool isReadOnlySegment(const GlobalValue *GV)
bool isArgPassedInSGPR(const Argument *A)
bool isIntrinsicAlwaysUniform(unsigned IntrID)
int getMUBUFBaseOpcode(unsigned Opc)
unsigned getAMDHSACodeObjectVersion(const Module &M)
unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt)
unsigned getWaitcntBitMask(const IsaVersion &Version)
LLVM_READONLY bool hasNamedOperand(uint64_t Opcode, OpName NamedIdx)
bool getVOP3IsSingle(unsigned Opc)
bool isGFX9(const MCSubtargetInfo &STI)
bool isDPALU_DPP32BitOpc(unsigned Opc)
bool getVOP1IsSingle(unsigned Opc)
static bool isDwordAligned(uint64_t ByteOffset)
unsigned getVOPDEncodingFamily(const MCSubtargetInfo &ST)
bool isGFX10_AEncoding(const MCSubtargetInfo &STI)
bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this a KImm operand?
bool getHasColorExport(const Function &F)
int getMTBUFBaseOpcode(unsigned Opc)
bool isGFX90A(const MCSubtargetInfo &STI)
unsigned getSamplecntBitMask(const IsaVersion &Version)
unsigned getDefaultQueueImplicitArgPosition(unsigned CodeObjectVersion)
std::tuple< char, unsigned, unsigned > parseAsmPhysRegName(StringRef RegName)
Returns a valid character code (or 0) in the first entry if this is a valid physical register name.
bool hasSRAMECC(const MCSubtargetInfo &STI)
bool getHasDepthExport(const Function &F)
bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI)
bool getMUBUFHasVAddr(unsigned Opc)
bool isTrue16Inst(unsigned Opc)
unsigned getVGPREncodingMSBs(MCRegister Reg, const MCRegisterInfo &MRI)
std::pair< unsigned, unsigned > getVOPDComponents(unsigned VOPDOpcode)
bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi)
bool isGFX12(const MCSubtargetInfo &STI)
unsigned getInitialPSInputAddr(const Function &F)
unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Expcnt)
bool isAsyncStore(unsigned Opc)
unsigned getDynamicVGPRBlockSize(const Function &F)
unsigned getKmcntBitMask(const IsaVersion &Version)
MCRegister getVGPRWithMSBs(MCRegister Reg, unsigned MSBs, const MCRegisterInfo &MRI)
If Reg is a low VGPR, return a corresponding high VGPR with MSBs set.
unsigned getVmcntBitMask(const IsaVersion &Version)
bool isNotGFX10Plus(const MCSubtargetInfo &STI)
bool hasMAIInsts(const MCSubtargetInfo &STI)
unsigned getBitOp2(unsigned Opc)
bool isIntrinsicSourceOfDivergence(unsigned IntrID)
unsigned getXcntBitMask(const IsaVersion &Version)
bool isGenericAtomic(unsigned Opc)
const MFMA_F8F6F4_Info * getWMMA_F8F6F4_WithFormatArgs(unsigned FmtA, unsigned FmtB, unsigned F8F8Opcode)
Waitcnt decodeStorecntDscnt(const IsaVersion &Version, unsigned StorecntDscnt)
bool isGFX8Plus(const MCSubtargetInfo &STI)
LLVM_READNONE bool isInlinableIntLiteral(int64_t Literal)
Is this literal inlinable, and not one of the values intended for floating-point values?
unsigned getLgkmcntBitMask(const IsaVersion &Version)
bool getMUBUFTfe(unsigned Opc)
unsigned getBvhcntBitMask(const IsaVersion &Version)
bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST)
bool hasMIMG_R128(const MCSubtargetInfo &STI)
bool hasGFX10_3Insts(const MCSubtargetInfo &STI)
std::pair< const AMDGPU::OpName *, const AMDGPU::OpName * > getVGPRLoweringOperandTables(const MCInstrDesc &Desc)
bool hasG16(const MCSubtargetInfo &STI)
unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode, const MIMGDimInfo *Dim, bool IsA16, bool IsG16Supported)
int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements)
unsigned getExpcntBitMask(const IsaVersion &Version)
bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI)
bool getMUBUFHasSoffset(unsigned Opc)
bool isNotGFX11Plus(const MCSubtargetInfo &STI)
bool isGFX11Plus(const MCSubtargetInfo &STI)
std::optional< unsigned > getInlineEncodingV2F16(uint32_t Literal)
bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this a floating-point operand?
std::tuple< char, unsigned, unsigned > parseAsmConstraintPhysReg(StringRef Constraint)
Returns a valid character code (or 0) in the first entry if this is a valid physical register constraint.
unsigned getHostcallImplicitArgPosition(unsigned CodeObjectVersion)
static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr, int Size, const MCSubtargetInfo &STI)
static unsigned encodeLoadcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Loadcnt)
bool isGFX10Plus(const MCSubtargetInfo &STI)
static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size, unsigned Code, int &Idx, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
static bool isValidRegPrefix(char C)
std::optional< int64_t > getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset, bool IsBuffer, bool HasSOffset)
bool isGlobalSegment(const GlobalValue *GV)
int64_t encode32BitLiteral(int64_t Imm, OperandType Type, bool IsLit)
@ OPERAND_KIMM32
Operand with 32-bit immediate that uses the constant bus.
Definition SIDefines.h:231
@ OPERAND_REG_INLINE_C_LAST
Definition SIDefines.h:254
@ OPERAND_REG_IMM_V2FP16
Definition SIDefines.h:209
@ OPERAND_REG_INLINE_C_FP64
Definition SIDefines.h:222
@ OPERAND_REG_INLINE_C_BF16
Definition SIDefines.h:219
@ OPERAND_REG_INLINE_C_V2BF16
Definition SIDefines.h:224
@ OPERAND_REG_IMM_V2INT16
Definition SIDefines.h:210
@ OPERAND_REG_IMM_BF16
Definition SIDefines.h:206
@ OPERAND_REG_IMM_INT32
Operands with register, 32-bit, or 64-bit immediate.
Definition SIDefines.h:201
@ OPERAND_REG_IMM_V2BF16
Definition SIDefines.h:208
@ OPERAND_REG_INLINE_AC_FIRST
Definition SIDefines.h:256
@ OPERAND_REG_IMM_FP16
Definition SIDefines.h:207
@ OPERAND_REG_IMM_NOINLINE_V2FP16
Definition SIDefines.h:211
@ OPERAND_REG_IMM_FP64
Definition SIDefines.h:205
@ OPERAND_REG_INLINE_C_V2FP16
Definition SIDefines.h:225
@ OPERAND_REG_INLINE_AC_INT32
Operands with an AccVGPR register or inline constant.
Definition SIDefines.h:236
@ OPERAND_REG_INLINE_AC_FP32
Definition SIDefines.h:237
@ OPERAND_REG_IMM_V2INT32
Definition SIDefines.h:212
@ OPERAND_REG_IMM_FP32
Definition SIDefines.h:204
@ OPERAND_REG_INLINE_C_FIRST
Definition SIDefines.h:253
@ OPERAND_REG_INLINE_C_FP32
Definition SIDefines.h:221
@ OPERAND_REG_INLINE_AC_LAST
Definition SIDefines.h:257
@ OPERAND_REG_INLINE_C_INT32
Definition SIDefines.h:217
@ OPERAND_REG_INLINE_C_V2INT16
Definition SIDefines.h:223
@ OPERAND_REG_IMM_V2FP32
Definition SIDefines.h:213
@ OPERAND_REG_INLINE_AC_FP64
Definition SIDefines.h:238
@ OPERAND_REG_INLINE_C_FP16
Definition SIDefines.h:220
@ OPERAND_INLINE_SPLIT_BARRIER_INT32
Definition SIDefines.h:228
void initDefaultAMDKernelCodeT(AMDGPUMCKernelCodeT &KernelCode, const MCSubtargetInfo *STI)
bool isNotGFX9Plus(const MCSubtargetInfo &STI)
bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
bool hasGDS(const MCSubtargetInfo &STI)
bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset)
bool isGFX9Plus(const MCSubtargetInfo &STI)
bool hasDPPSrc1SGPR(const MCSubtargetInfo &STI)
const int OPR_ID_DUPLICATE
bool isVOPD(unsigned Opc)
VOPD::InstInfo getVOPDInstInfo(const MCInstrDesc &OpX, const MCInstrDesc &OpY)
unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Vmcnt)
unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt)
bool isCvt_F32_Fp8_Bf8_e64(unsigned Opc)
Waitcnt decodeLoadcntDscnt(const IsaVersion &Version, unsigned LoadcntDscnt)
std::optional< unsigned > getInlineEncodingV2I16(uint32_t Literal)
unsigned getRegBitWidth(const TargetRegisterClass &RC)
Get the size in bits of a register from the register class RC.
static unsigned encodeStorecntDscnt(const IsaVersion &Version, unsigned Storecnt, unsigned Dscnt)
bool isGFX1250(const MCSubtargetInfo &STI)
int getMCOpcode(uint16_t Opcode, unsigned Gen)
const MIMGBaseOpcodeInfo * getMIMGBaseOpcode(unsigned Opc)
bool isVI(const MCSubtargetInfo &STI)
bool isTensorStore(unsigned Opc)
bool getMUBUFIsBufferInv(unsigned Opc)
bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode)
MCRegister mc2PseudoReg(MCRegister Reg)
Convert hardware register Reg to a pseudo register.
std::optional< unsigned > getInlineEncodingV2BF16(uint32_t Literal)
static int encodeCustomOperand(const CustomOperandVal *Opr, int Size, const StringRef Name, int64_t InputVal, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
unsigned hasKernargPreload(const MCSubtargetInfo &STI)
bool supportsWGP(const MCSubtargetInfo &STI)
bool isMAC(unsigned Opc)
bool isCI(const MCSubtargetInfo &STI)
unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Lgkmcnt)
bool getVOP2IsSingle(unsigned Opc)
bool getMAIIsDGEMM(unsigned Opc)
Returns true if the MAI operation is a double-precision GEMM.
LLVM_READONLY const MIMGBaseOpcodeInfo * getMIMGBaseOpcodeInfo(unsigned BaseOpcode)
const int OPR_ID_UNKNOWN
unsigned getCompletionActionImplicitArgPosition(unsigned CodeObjectVersion)
SmallVector< unsigned > getIntegerVecAttribute(const Function &F, StringRef Name, unsigned Size, unsigned DefaultVal)
int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels)
bool isNotGFX12Plus(const MCSubtargetInfo &STI)
bool getMTBUFHasVAddr(unsigned Opc)
unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt)
uint8_t getELFABIVersion(const Triple &T, unsigned CodeObjectVersion)
std::pair< unsigned, unsigned > getIntegerPairAttribute(const Function &F, StringRef Name, std::pair< unsigned, unsigned > Default, bool OnlyFirstRequired)
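getIntegerPairAttribute (above) parses a pair of integers from a string function attribute, falling back to the supplied default. A sketch of a typical query; the attribute name and default values here are illustrative assumptions:
// Read a "first[,second]" style attribute, falling back to {1, 0}.
#include <utility>
std::pair<unsigned, unsigned> getWavesPerEUHint(const llvm::Function &F) {
  return llvm::AMDGPU::getIntegerPairAttribute(
      F, "amdgpu-waves-per-eu", /*Default=*/{1, 0}, /*OnlyFirstRequired=*/true);
}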
unsigned getLoadcntBitMask(const IsaVersion &Version)
bool isInlinableLiteralI16(int32_t Literal, bool HasInv2Pi)
bool hasVOPD(const MCSubtargetInfo &STI)
int getVOPDFull(unsigned OpX, unsigned OpY, unsigned EncodingFamily, bool VOPD3)
static unsigned encodeDscnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Dscnt)
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi)
Is this literal inlinable?
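isInlinableLiteral32 and isInlinableLiteral64 (above) test whether a bit pattern matches one of the hardware inline constants. A small usage sketch; the HasInv2Pi flag is subtarget-dependent and assumed to be known by the caller:
// Check whether a host float could be emitted as an inline constant
// instead of a 32-bit literal.
#include <cstdint>
bool fitsInlineConstant(float F, bool HasInv2Pi) {
  int32_t Bits = llvm::bit_cast<int32_t>(F);
  return llvm::AMDGPU::isInlinableLiteral32(Bits, HasInv2Pi);
}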
const MFMA_F8F6F4_Info * getMFMA_F8F6F4_WithFormatArgs(unsigned CBSZ, unsigned BLGP, unsigned F8F8Opcode)
unsigned getMultigridSyncArgImplicitArgPosition(unsigned CodeObjectVersion)
bool isGFX9_GFX10_GFX11(const MCSubtargetInfo &STI)
bool isGFX9_GFX10(const MCSubtargetInfo &STI)
int getMUBUFElements(unsigned Opc)
static unsigned encodeLoadcntDscnt(const IsaVersion &Version, unsigned Loadcnt, unsigned Dscnt)
const GcnBufferFormatInfo * getGcnBufferFormatInfo(uint8_t BitsPerComp, uint8_t NumComponents, uint8_t NumFormat, const MCSubtargetInfo &STI)
unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc)
bool isPermlane16(unsigned Opc)
bool getMUBUFHasSrsrc(unsigned Opc)
unsigned getDscntBitMask(const IsaVersion &Version)
bool hasAny64BitVGPROperands(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition CallingConv.h:24
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
@ AMDGPU_VS
Used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (vertex shader if tess...
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
@ AMDGPU_Gfx
Used for AMD graphics targets.
@ AMDGPU_CS_ChainPreserve
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ AMDGPU_ES
Used for AMDPAL shader stage before geometry shader if geometry is in use.
@ AMDGPU_LS
Used for AMDPAL vertex shader if tessellation is in use.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ ELFABIVERSION_AMDGPU_HSA_V4
Definition ELF.h:384
@ ELFABIVERSION_AMDGPU_HSA_V5
Definition ELF.h:385
@ ELFABIVERSION_AMDGPU_HSA_V6
Definition ELF.h:386
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
Definition Metadata.h:682
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer that is less than or equal to Value and is congruent to Skew mod Align.
Definition MathExtras.h:546
std::string utostr(uint64_t X, bool isNeg=false)
Op::Description Desc
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64-bit value.
Definition MathExtras.h:150
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64-bit value.
Definition MathExtras.h:155
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:394
To bit_cast(const From &from) noexcept
Definition bit.h:90
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
DWARFExpression::Operation Op
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
@ AlwaysUniform
The result values are always uniform.
Definition Uniformity.h:23
@ Default
The result values are uniform if and only if all operands are uniform.
Definition Uniformity.h:20
#define N
AMD Kernel Code Object (amd_kernel_code_t).
Instruction set architecture version.
Represents the counter values to wait for in an s_waitcnt instruction.