//===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUBaseInfo.h"
#include "AMDGPU.h"
#include "AMDGPUAsmUtils.h"
#include "AMDKernelCodeT.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
#include <optional>

#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

37 "amdhsa-code-object-version", llvm::cl::Hidden,
39 llvm::cl::desc("Set default AMDHSA Code Object Version (module flag "
40 "or asm directive still take priority if present)"));
41
namespace {

/// \returns Bit mask for given bit \p Shift and bit \p Width.
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1 << Width) - 1) << Shift;
}

/// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
///
/// \returns Packed \p Dst.
unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  unsigned Mask = getBitMask(Shift, Width);
  return ((Src << Shift) & Mask) | (Dst & ~Mask);
}

/// Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
///
/// \returns Unpacked bits.
unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}

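// Illustrative example (not part of the original file): packing and unpacking
// a 3-bit field at bit offset 4.
//   getBitMask(4, 3)              == 0b0111'0000
//   packBits(0b101, 0, 4, 3)      == 0b0101'0000
//   unpackBits(0b0101'0000, 4, 3) == 0b101
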
/// \returns Vmcnt bit shift (lower bits).
unsigned getVmcntBitShiftLo(unsigned VersionMajor) {
  return VersionMajor >= 11 ? 10 : 0;
}

/// \returns Vmcnt bit width (lower bits).
unsigned getVmcntBitWidthLo(unsigned VersionMajor) {
  return VersionMajor >= 11 ? 6 : 4;
}

/// \returns Expcnt bit shift.
unsigned getExpcntBitShift(unsigned VersionMajor) {
  return VersionMajor >= 11 ? 0 : 4;
}

/// \returns Expcnt bit width.
unsigned getExpcntBitWidth(unsigned VersionMajor) { return 3; }

/// \returns Lgkmcnt bit shift.
unsigned getLgkmcntBitShift(unsigned VersionMajor) {
  return VersionMajor >= 11 ? 4 : 8;
}

/// \returns Lgkmcnt bit width.
unsigned getLgkmcntBitWidth(unsigned VersionMajor) {
  return VersionMajor >= 10 ? 6 : 4;
}

/// \returns Vmcnt bit shift (higher bits).
unsigned getVmcntBitShiftHi(unsigned VersionMajor) { return 14; }

/// \returns Vmcnt bit width (higher bits).
unsigned getVmcntBitWidthHi(unsigned VersionMajor) {
  return (VersionMajor == 9 || VersionMajor == 10) ? 2 : 0;
}

/// \returns Loadcnt bit width.
unsigned getLoadcntBitWidth(unsigned VersionMajor) {
  return VersionMajor >= 12 ? 6 : 0;
}

/// \returns Samplecnt bit width.
unsigned getSamplecntBitWidth(unsigned VersionMajor) {
  return VersionMajor >= 12 ? 6 : 0;
}

/// \returns Bvhcnt bit width.
unsigned getBvhcntBitWidth(unsigned VersionMajor) {
  return VersionMajor >= 12 ? 3 : 0;
}

/// \returns Dscnt bit width.
unsigned getDscntBitWidth(unsigned VersionMajor) {
  return VersionMajor >= 12 ? 6 : 0;
}

/// \returns Dscnt bit shift in combined S_WAIT instructions.
unsigned getDscntBitShift(unsigned VersionMajor) { return 0; }

/// \returns Storecnt or Vscnt bit width, depending on VersionMajor.
unsigned getStorecntBitWidth(unsigned VersionMajor) {
  return VersionMajor >= 10 ? 6 : 0;
}

/// \returns Kmcnt bit width.
unsigned getKmcntBitWidth(unsigned VersionMajor) {
  return VersionMajor >= 12 ? 5 : 0;
}

/// \returns Xcnt bit width.
unsigned getXcntBitWidth(unsigned VersionMajor, unsigned VersionMinor) {
  return VersionMajor == 12 && VersionMinor == 5 ? 6 : 0;
}

/// \returns Shift for Loadcnt/Storecnt in combined S_WAIT instructions.
unsigned getLoadcntStorecntBitShift(unsigned VersionMajor) {
  return VersionMajor >= 12 ? 8 : 0;
}

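// Taken together (an illustrative summary, not from the original file), on
// gfx12 the combined S_WAIT_LOADCNT_DSCNT immediate keeps dscnt in bits [5:0]
// and loadcnt in bits [13:8]; the same shifts place storecnt in
// S_WAIT_STORECNT_DSCNT.
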
/// \returns VaSdst bit width.
inline unsigned getVaSdstBitWidth() { return 3; }

/// \returns VaSdst bit shift.
inline unsigned getVaSdstBitShift() { return 9; }

/// \returns VmVsrc bit width.
inline unsigned getVmVsrcBitWidth() { return 3; }

/// \returns VmVsrc bit shift.
inline unsigned getVmVsrcBitShift() { return 2; }

/// \returns VaVdst bit width.
inline unsigned getVaVdstBitWidth() { return 4; }

/// \returns VaVdst bit shift.
inline unsigned getVaVdstBitShift() { return 12; }

/// \returns VaVcc bit width.
inline unsigned getVaVccBitWidth() { return 1; }

/// \returns VaVcc bit shift.
inline unsigned getVaVccBitShift() { return 1; }

/// \returns SaSdst bit width.
inline unsigned getSaSdstBitWidth() { return 1; }

/// \returns SaSdst bit shift.
inline unsigned getSaSdstBitShift() { return 0; }

/// \returns VaSsrc bit width.
inline unsigned getVaSsrcBitWidth() { return 1; }

/// \returns VaSsrc bit shift.
inline unsigned getVaSsrcBitShift() { return 8; }

/// \returns HoldCnt bit width.
inline unsigned getHoldCntWidth() { return 1; }

/// \returns HoldCnt bit shift.
inline unsigned getHoldCntBitShift() { return 7; }
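
// Summary of the DEP_CTR immediate layout implied by the helpers above
// (illustrative, not part of the original file): sa_sdst in bit 0, va_vcc in
// bit 1, vm_vsrc in bits [4:2], hold_cnt in bit 7, va_ssrc in bit 8, va_sdst
// in bits [11:9] and va_vdst in bits [15:12].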

} // end anonymous namespace

namespace llvm {

namespace AMDGPU {

/// \returns true if the target supports signed immediate offset for SMRD
/// instructions.
bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST) {
  return isGFX9Plus(ST);
}

/// \returns True if \p STI is AMDHSA.
bool isHsaAbi(const MCSubtargetInfo &STI) {
  return STI.getTargetTriple().getOS() == Triple::AMDHSA;
}

unsigned getAMDHSACodeObjectVersion(const Module &M) {
  if (auto *Ver = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("amdhsa_code_object_version"))) {
    return (unsigned)Ver->getZExtValue() / 100;
  }

  return getDefaultAMDHSACodeObjectVersion();
}

unsigned getDefaultAMDHSACodeObjectVersion() {
  return DefaultAMDHSACodeObjectVersion;
}

unsigned getAMDHSACodeObjectVersion(unsigned ABIVersion) {
  switch (ABIVersion) {
  case ELF::ELFABIVERSION_AMDGPU_HSA_V4:
    return 4;
  case ELF::ELFABIVERSION_AMDGPU_HSA_V5:
    return 5;
  case ELF::ELFABIVERSION_AMDGPU_HSA_V6:
    return 6;
  default:
    return getDefaultAMDHSACodeObjectVersion();
  }
}

uint8_t getELFABIVersion(const Triple &T, unsigned CodeObjectVersion) {
  if (T.getOS() != Triple::AMDHSA)
    return 0;

  switch (CodeObjectVersion) {
  case 4:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V4;
  case 5:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V5;
  case 6:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V6;
  default:
    report_fatal_error("Unsupported AMDHSA Code Object Version " +
                       Twine(CodeObjectVersion));
  }
}

unsigned getMultigridSyncArgImplicitArgPosition(unsigned CodeObjectVersion) {
  switch (CodeObjectVersion) {
  case AMDHSA_COV4:
    return 48;
  case AMDHSA_COV5:
  case AMDHSA_COV6:
  default:
    return AMDGPU::ImplicitArg::MULTIGRID_SYNC_ARG_OFFSET;
  }
}

// FIXME: All such magic numbers about the ABI should be in a
// central TD file.
unsigned getHostcallImplicitArgPosition(unsigned CodeObjectVersion) {
  switch (CodeObjectVersion) {
  case AMDHSA_COV4:
    return 24;
  case AMDHSA_COV5:
  case AMDHSA_COV6:
  default:
    return AMDGPU::ImplicitArg::HOSTCALL_PTR_OFFSET;
  }
}

unsigned getDefaultQueueImplicitArgPosition(unsigned CodeObjectVersion) {
  switch (CodeObjectVersion) {
  case AMDHSA_COV4:
    return 32;
  case AMDHSA_COV5:
  case AMDHSA_COV6:
  default:
    return AMDGPU::ImplicitArg::DEFAULT_QUEUE_OFFSET;
  }
}

unsigned getCompletionActionImplicitArgPosition(unsigned CodeObjectVersion) {
  switch (CodeObjectVersion) {
  case AMDHSA_COV4:
    return 40;
  case AMDHSA_COV5:
  case AMDHSA_COV6:
  default:
    return AMDGPU::ImplicitArg::COMPLETION_ACTION_OFFSET;
  }
}

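// For example, on code object v4 the hostcall pointer lives at byte offset 24
// of the implicit kernarg area, the default queue at 32, the completion
// action at 40 and the multigrid sync arg at 48; for v5 and later the offsets
// come from the AMDGPU::ImplicitArg enumeration instead.
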
#define GET_MIMGBaseOpcodesTable_IMPL
#define GET_MIMGDimInfoTable_IMPL
#define GET_MIMGInfoTable_IMPL
#define GET_MIMGLZMappingTable_IMPL
#define GET_MIMGMIPMappingTable_IMPL
#define GET_MIMGBiasMappingTable_IMPL
#define GET_MIMGOffsetMappingTable_IMPL
#define GET_MIMGG16MappingTable_IMPL
#define GET_MAIInstInfoTable_IMPL
#define GET_WMMAInstInfoTable_IMPL
#include "AMDGPUGenSearchableTables.inc"

int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
                  unsigned VDataDwords, unsigned VAddrDwords) {
  const MIMGInfo *Info =
      getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding, VDataDwords, VAddrDwords);
  return Info ? Info->Opcode : -1;
}

const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) {
  const MIMGInfo *Info = getMIMGInfo(Opc);
  return Info ? getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr;
}

int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
  const MIMGInfo *OrigInfo = getMIMGInfo(Opc);
  const MIMGInfo *NewInfo =
      getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding,
                          NewChannels, OrigInfo->VAddrDwords);
  return NewInfo ? NewInfo->Opcode : -1;
}

unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode,
                           const MIMGDimInfo *Dim, bool IsA16,
                           bool IsG16Supported) {
  unsigned AddrWords = BaseOpcode->NumExtraArgs;
  unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
                            (BaseOpcode->LodOrClampOrMip ? 1 : 0);
  if (IsA16)
    AddrWords += divideCeil(AddrComponents, 2);
  else
    AddrWords += AddrComponents;

  // Note: For subtargets that support A16 but not G16, enabling A16 also
  // enables 16 bit gradients.
  // For subtargets that support A16 (operand) and G16 (done with a different
  // instruction encoding), they are independent.

  if (BaseOpcode->Gradients) {
    if ((IsA16 && !IsG16Supported) || BaseOpcode->G16)
      // There are two gradients per coordinate, and we pack them separately.
      // For the 3d case,
      // we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv)
      AddrWords += alignTo<2>(Dim->NumGradients / 2);
    else
      AddrWords += Dim->NumGradients;
  }
  return AddrWords;
}

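// Worked example (illustrative, not from the original file): a sample_d on a
// 2D dim (NumCoords = 2, NumGradients = 4, no extra args) with IsA16 = false
// needs 2 coordinate dwords + 4 gradient dwords = 6 address words; with
// IsA16 = true on a target without G16, the coordinates pack into 1 dword and
// the gradients into alignTo<2>(4 / 2) = 2 dwords, i.e. 3 address words.
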
#define GET_FP4FP8DstByteSelTable_DECL
#define GET_FP4FP8DstByteSelTable_IMPL

#define GET_DPMACCInstructionTable_DECL
#define GET_DPMACCInstructionTable_IMPL
#define GET_MTBUFInfoTable_DECL
#define GET_MTBUFInfoTable_IMPL
#define GET_MUBUFInfoTable_DECL
#define GET_MUBUFInfoTable_IMPL
#define GET_SMInfoTable_DECL
#define GET_SMInfoTable_IMPL
#define GET_VOP1InfoTable_DECL
#define GET_VOP1InfoTable_IMPL
#define GET_VOP2InfoTable_DECL
#define GET_VOP2InfoTable_IMPL
#define GET_VOP3InfoTable_DECL
#define GET_VOP3InfoTable_IMPL
#define GET_VOPC64DPPTable_DECL
#define GET_VOPC64DPPTable_IMPL
#define GET_VOPC64DPP8Table_DECL
#define GET_VOPC64DPP8Table_IMPL
#define GET_VOPCAsmOnlyInfoTable_DECL
#define GET_VOPCAsmOnlyInfoTable_IMPL
#define GET_VOP3CAsmOnlyInfoTable_DECL
#define GET_VOP3CAsmOnlyInfoTable_IMPL
#define GET_VOPDComponentTable_DECL
#define GET_VOPDComponentTable_IMPL
#define GET_VOPDPairs_DECL
#define GET_VOPDPairs_IMPL
#define GET_VOPTrue16Table_DECL
#define GET_VOPTrue16Table_IMPL
#define GET_True16D16Table_IMPL
#define GET_WMMAOpcode2AddrMappingTable_DECL
#define GET_WMMAOpcode2AddrMappingTable_IMPL
#define GET_WMMAOpcode3AddrMappingTable_DECL
#define GET_WMMAOpcode3AddrMappingTable_IMPL
#define GET_getMFMA_F8F6F4_WithSize_DECL
#define GET_getMFMA_F8F6F4_WithSize_IMPL
#define GET_isMFMA_F8F6F4Table_IMPL
#define GET_isCvtScaleF32_F32F16ToF8F4Table_IMPL

#include "AMDGPUGenSearchableTables.inc"

int getMTBUFBaseOpcode(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc);
  return Info ? Info->BaseOpcode : -1;
}

int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements) {
  const MTBUFInfo *Info =
      getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
  return Info ? Info->Opcode : -1;
}

int getMTBUFElements(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info ? Info->elements : 0;
}

bool getMTBUFHasVAddr(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info && Info->has_vaddr;
}

bool getMTBUFHasSrsrc(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info && Info->has_srsrc;
}

bool getMTBUFHasSoffset(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info && Info->has_soffset;
}

int getMUBUFBaseOpcode(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
  return Info ? Info->BaseOpcode : -1;
}

int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements) {
  const MUBUFInfo *Info =
      getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
  return Info ? Info->Opcode : -1;
}

int getMUBUFElements(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->elements : 0;
}

bool getMUBUFHasVAddr(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info && Info->has_vaddr;
}

bool getMUBUFHasSrsrc(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info && Info->has_srsrc;
}

bool getMUBUFHasSoffset(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info && Info->has_soffset;
}

bool getMUBUFIsBufferInv(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info && Info->IsBufferInv;
}

bool getMUBUFTfe(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info && Info->tfe;
}

bool getSMEMIsBuffer(unsigned Opc) {
  const SMInfo *Info = getSMEMOpcodeHelper(Opc);
  return Info && Info->IsBuffer;
}

bool getVOP1IsSingle(unsigned Opc) {
  const VOPInfo *Info = getVOP1OpcodeHelper(Opc);
  return !Info || Info->IsSingle;
}

bool getVOP2IsSingle(unsigned Opc) {
  const VOPInfo *Info = getVOP2OpcodeHelper(Opc);
  return !Info || Info->IsSingle;
}

bool getVOP3IsSingle(unsigned Opc) {
  const VOPInfo *Info = getVOP3OpcodeHelper(Opc);
  return !Info || Info->IsSingle;
}

bool isVOPC64DPP(unsigned Opc) {
  return isVOPC64DPPOpcodeHelper(Opc) || isVOPC64DPP8OpcodeHelper(Opc);
}

bool isVOPCAsmOnly(unsigned Opc) { return isVOPCAsmOnlyOpcodeHelper(Opc); }

bool getMAIIsDGEMM(unsigned Opc) {
  const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
  return Info && Info->is_dgemm;
}

bool getMAIIsGFX940XDL(unsigned Opc) {
  const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
  return Info && Info->is_gfx940_xdl;
}

bool getWMMAIsXDL(unsigned Opc) {
  const WMMAInstInfo *Info = getWMMAInstInfoHelper(Opc);
  return Info ? Info->is_wmma_xdl : false;
}

unsigned mfmaScaleF8F6F4FormatToNumRegs(unsigned EncodingVal) {
  switch (EncodingVal) {
  case MFMAScaleFormats::FP6_E2M3:
  case MFMAScaleFormats::FP6_E3M2:
    return 6;
  case MFMAScaleFormats::FP4_E2M1:
    return 4;
  case MFMAScaleFormats::FP8_E4M3:
  case MFMAScaleFormats::FP8_E5M2:
  default:
    return 8;
  }

  llvm_unreachable("covered switch over mfma scale formats");
}

const MFMA_F8F6F4_Info *getMFMA_F8F6F4_WithFormatArgs(unsigned CBSZ,
                                                      unsigned BLGP,
                                                      unsigned F8F8Opcode) {
  uint8_t SrcANumRegs = mfmaScaleF8F6F4FormatToNumRegs(CBSZ);
  uint8_t SrcBNumRegs = mfmaScaleF8F6F4FormatToNumRegs(BLGP);
  return getMFMA_F8F6F4_InstWithNumRegs(SrcANumRegs, SrcBNumRegs, F8F8Opcode);
}

unsigned wmmaScaleF8F6F4FormatToNumRegs(unsigned Fmt) {
  switch (Fmt) {
  case WMMA::MATRIX_FMT_FP8:
  case WMMA::MATRIX_FMT_BF8:
    return 16;
  case WMMA::MATRIX_FMT_FP6:
  case WMMA::MATRIX_FMT_BF6:
    return 12;
  case WMMA::MATRIX_FMT_FP4:
    return 8;
  }

  llvm_unreachable("covered switch over wmma scale formats");
}

const MFMA_F8F6F4_Info *getWMMA_F8F6F4_WithFormatArgs(unsigned FmtA,
                                                      unsigned FmtB,
                                                      unsigned F8F8Opcode) {
  uint8_t SrcANumRegs = wmmaScaleF8F6F4FormatToNumRegs(FmtA);
  uint8_t SrcBNumRegs = wmmaScaleF8F6F4FormatToNumRegs(FmtB);
  return getMFMA_F8F6F4_InstWithNumRegs(SrcANumRegs, SrcBNumRegs, F8F8Opcode);
}

unsigned getVOPDEncodingFamily(const MCSubtargetInfo &ST) {
  if (ST.hasFeature(AMDGPU::FeatureGFX1250Insts))
    return SIEncodingFamily::GFX1250;
  if (ST.hasFeature(AMDGPU::FeatureGFX12Insts))
    return SIEncodingFamily::GFX12;
  if (ST.hasFeature(AMDGPU::FeatureGFX11Insts))
    return SIEncodingFamily::GFX11;
  llvm_unreachable("Subtarget generation does not support VOPD!");
}

CanBeVOPD getCanBeVOPD(unsigned Opc, unsigned EncodingFamily, bool VOPD3) {
  bool IsConvertibleToBitOp = VOPD3 ? getBitOp2(Opc) : 0;
  Opc = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : Opc;
  const VOPDComponentInfo *Info = getVOPDComponentHelper(Opc);
  if (Info) {
    // Check that Opc can be used as VOPDY for this encoding. V_MOV_B32 as a
    // VOPDX is just a placeholder here; it is supported on all encodings.
    // TODO: This can be optimized by creating tables of supported VOPDY
    // opcodes per encoding.
    unsigned VOPDMov = AMDGPU::getVOPDOpcode(AMDGPU::V_MOV_B32_e32, VOPD3);
    bool CanBeVOPDY = getVOPDFull(VOPDMov, AMDGPU::getVOPDOpcode(Opc, VOPD3),
                                  EncodingFamily, VOPD3) != -1;
    return {VOPD3 ? Info->CanBeVOPD3X : Info->CanBeVOPDX, CanBeVOPDY};
  }

  return {false, false};
}

unsigned getVOPDOpcode(unsigned Opc, bool VOPD3) {
  bool IsConvertibleToBitOp = VOPD3 ? getBitOp2(Opc) : 0;
  Opc = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : Opc;
  const VOPDComponentInfo *Info = getVOPDComponentHelper(Opc);
  return Info ? Info->VOPDOp : ~0u;
}

bool isVOPD(unsigned Opc) {
  return AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0X);
}

bool isMAC(unsigned Opc) {
  return Opc == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
         Opc == AMDGPU::V_MAC_F32_e64_gfx10 ||
         Opc == AMDGPU::V_MAC_F32_e64_vi ||
         Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx6_gfx7 ||
         Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx10 ||
         Opc == AMDGPU::V_MAC_F16_e64_vi ||
         Opc == AMDGPU::V_FMAC_F64_e64_gfx90a ||
         Opc == AMDGPU::V_FMAC_F64_e64_gfx12 ||
         Opc == AMDGPU::V_FMAC_F32_e64_gfx10 ||
         Opc == AMDGPU::V_FMAC_F32_e64_gfx11 ||
         Opc == AMDGPU::V_FMAC_F32_e64_gfx12 ||
         Opc == AMDGPU::V_FMAC_F32_e64_vi ||
         Opc == AMDGPU::V_FMAC_LEGACY_F32_e64_gfx10 ||
         Opc == AMDGPU::V_FMAC_DX9_ZERO_F32_e64_gfx11 ||
         Opc == AMDGPU::V_FMAC_F16_e64_gfx10 ||
         Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx11 ||
         Opc == AMDGPU::V_FMAC_F16_fake16_e64_gfx11 ||
         Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx12 ||
         Opc == AMDGPU::V_FMAC_F16_fake16_e64_gfx12 ||
         Opc == AMDGPU::V_DOT2C_F32_F16_e64_vi ||
         Opc == AMDGPU::V_DOT2C_F32_BF16_e64_vi ||
         Opc == AMDGPU::V_DOT2C_I32_I16_e64_vi ||
         Opc == AMDGPU::V_DOT4C_I32_I8_e64_vi ||
         Opc == AMDGPU::V_DOT8C_I32_I4_e64_vi;
}

bool isPermlane16(unsigned Opc) {
  return Opc == AMDGPU::V_PERMLANE16_B32_gfx10 ||
         Opc == AMDGPU::V_PERMLANEX16_B32_gfx10 ||
         Opc == AMDGPU::V_PERMLANE16_B32_e64_gfx11 ||
         Opc == AMDGPU::V_PERMLANEX16_B32_e64_gfx11 ||
         Opc == AMDGPU::V_PERMLANE16_B32_e64_gfx12 ||
         Opc == AMDGPU::V_PERMLANEX16_B32_e64_gfx12 ||
         Opc == AMDGPU::V_PERMLANE16_VAR_B32_e64_gfx12 ||
         Opc == AMDGPU::V_PERMLANEX16_VAR_B32_e64_gfx12;
}

bool isCvt_F32_Fp8_Bf8_e64(unsigned Opc) {
  return Opc == AMDGPU::V_CVT_F32_BF8_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_FP8_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_BF8_e64_dpp_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_FP8_e64_dpp_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_BF8_e64_dpp8_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_FP8_e64_dpp8_gfx12 ||
         Opc == AMDGPU::V_CVT_PK_F32_BF8_fake16_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_PK_F32_FP8_fake16_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_PK_F32_BF8_t16_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_PK_F32_FP8_t16_e64_gfx12;
}

bool isGenericAtomic(unsigned Opc) {
  return Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SWAP ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_ADD ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMIN ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMIN ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMAX ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMAX ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_AND ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_OR ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_XOR ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_INC ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_DEC ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMIN ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMAX ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_CMPSWAP ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB_CLAMP_U32 ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_COND_SUB_U32 ||
         Opc == AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG;
}

bool isAsyncStore(unsigned Opc) {
  return Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B8_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B32_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B64_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B128_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B8_SADDR_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B32_SADDR_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B64_SADDR_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B128_SADDR_gfx1250;
}

bool isTensorStore(unsigned Opc) {
  return Opc == TENSOR_STORE_FROM_LDS_gfx1250 ||
         Opc == TENSOR_STORE_FROM_LDS_D2_gfx1250;
}

unsigned getTemporalHintType(const MCInstrDesc TID) {
  unsigned Opc = TID.getOpcode();
  // Async and tensor stores should have the temporal hint type of
  // TH_TYPE_STORE.
  if (TID.mayStore() &&
      (isAsyncStore(Opc) || isTensorStore(Opc) || !TID.mayLoad()))
    return CPol::TH_TYPE_STORE;

  // This will default to returning TH_TYPE_LOAD when neither the MayStore nor
  // the MayLoad flag is present, which is the case with instructions like
  // image_get_resinfo.
  return CPol::TH_TYPE_LOAD;
}

bool isTrue16Inst(unsigned Opc) {
  const VOPTrue16Info *Info = getTrue16OpcodeHelper(Opc);
  return Info && Info->IsTrue16;
}

FPType getFPDstSelType(unsigned Opc) {
  const FP4FP8DstByteSelInfo *Info = getFP4FP8DstByteSelHelper(Opc);
  if (!Info)
    return FPType::None;
  if (Info->HasFP8DstByteSel)
    return FPType::FP8;
  if (Info->HasFP4DstByteSel)
    return FPType::FP4;

  return FPType::None;
}

bool isDPMACCInstruction(unsigned Opc) {
  const DPMACCInstructionInfo *Info = getDPMACCInstructionHelper(Opc);
  return Info && Info->IsDPMACCInstruction;
}

unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc) {
  const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom2AddrOpcode(Opc);
  return Info ? Info->Opcode3Addr : ~0u;
}

unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc) {
  const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom3AddrOpcode(Opc);
  return Info ? Info->Opcode2Addr : ~0u;
}

// Wrapper for the Tablegen'd function. enum Subtarget is not defined in any
// header file, so we need to wrap it in a function that takes unsigned
// instead.
int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
}

unsigned getBitOp2(unsigned Opc) {
  switch (Opc) {
  default:
    return 0;
  case AMDGPU::V_AND_B32_e32:
    return 0x40;
  case AMDGPU::V_OR_B32_e32:
    return 0x54;
  case AMDGPU::V_XOR_B32_e32:
    return 0x14;
  case AMDGPU::V_XNOR_B32_e32:
    return 0x41;
  }
}

int getVOPDFull(unsigned OpX, unsigned OpY, unsigned EncodingFamily,
                bool VOPD3) {
  bool IsConvertibleToBitOp = VOPD3 ? getBitOp2(OpY) : 0;
  OpY = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : OpY;
  const VOPDInfo *Info =
      getVOPDInfoFromComponentOpcodes(OpX, OpY, EncodingFamily, VOPD3);
  return Info ? Info->Opcode : -1;
}

std::pair<unsigned, unsigned> getVOPDComponents(unsigned VOPDOpcode) {
  const VOPDInfo *Info = getVOPDOpcodeHelper(VOPDOpcode);
  assert(Info);
  const auto *OpX = getVOPDBaseFromComponent(Info->OpX);
  const auto *OpY = getVOPDBaseFromComponent(Info->OpY);
  assert(OpX && OpY);
  return {OpX->BaseVOP, OpY->BaseVOP};
}

namespace VOPD {

ComponentProps::ComponentProps(const MCInstrDesc &OpDesc, bool VOP3Layout) {

  auto TiedIdx = OpDesc.getOperandConstraint(Component::SRC2, MCOI::TIED_TO);
  assert(TiedIdx == -1 || TiedIdx == Component::DST);
  HasSrc2Acc = TiedIdx != -1;
  Opcode = OpDesc.getOpcode();

  IsVOP3 = VOP3Layout || (OpDesc.TSFlags & SIInstrFlags::VOP3);
  SrcOperandsNum = AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src2)   ? 3
                   : AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::imm)  ? 3
                   : AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src1) ? 2
                                                                           : 1;
  assert(SrcOperandsNum <= Component::MAX_SRC_NUM);

  if (Opcode == AMDGPU::V_CNDMASK_B32_e32 ||
      Opcode == AMDGPU::V_CNDMASK_B32_e64) {
    // CNDMASK is an awkward exception: it has FP modifiers, but not FP
    // operands.
    NumVOPD3Mods = 2;
    if (IsVOP3)
      SrcOperandsNum = 3;
  } else if (isSISrcFPOperand(OpDesc,
                              getNamedOperandIdx(Opcode, OpName::src0))) {
    // All FP VOPD instructions have Neg modifiers for all operands except
    // for tied src2.
    NumVOPD3Mods = SrcOperandsNum;
    if (HasSrc2Acc)
      --NumVOPD3Mods;
  }

  if (OpDesc.TSFlags & SIInstrFlags::VOP3)
    return;

  auto OperandsNum = OpDesc.getNumOperands();
  unsigned CompOprIdx;
  for (CompOprIdx = Component::SRC1; CompOprIdx < OperandsNum; ++CompOprIdx) {
    if (OpDesc.operands()[CompOprIdx].OperandType == AMDGPU::OPERAND_KIMM32) {
      MandatoryLiteralIdx = CompOprIdx;
      break;
    }
  }
}

int ComponentProps::getBitOp3OperandIdx() const {
  return getNamedOperandIdx(Opcode, OpName::bitop3);
}

unsigned ComponentInfo::getIndexInParsedOperands(unsigned CompOprIdx) const {
  assert(CompOprIdx < Component::MAX_OPR_NUM);

  if (CompOprIdx == Component::DST)
    return getIndexOfDstInParsedOperands();

  auto CompSrcIdx = CompOprIdx - Component::DST_NUM;
  if (CompSrcIdx < getCompParsedSrcOperandsNum())
    return getIndexOfSrcInParsedOperands(CompSrcIdx);

  // The specified operand does not exist.
  return 0;
}

std::optional<unsigned> InstInfo::getInvalidCompOperandIndex(
    std::function<MCRegister(unsigned, unsigned)> GetRegIdx,
    const MCRegisterInfo &MRI, bool SkipSrc, bool AllowSameVGPR,
    bool VOPD3) const {

  auto OpXRegs = getRegIndices(ComponentIndex::X, GetRegIdx,
                               CompInfo[ComponentIndex::X].isVOP3());
  auto OpYRegs = getRegIndices(ComponentIndex::Y, GetRegIdx,
                               CompInfo[ComponentIndex::Y].isVOP3());

  const auto banksOverlap = [&MRI](MCRegister X, MCRegister Y,
                                   unsigned BanksMask) -> bool {
    MCRegister BaseX = MRI.getSubReg(X, AMDGPU::sub0);
    MCRegister BaseY = MRI.getSubReg(Y, AMDGPU::sub0);
    if (!BaseX)
      BaseX = X;
    if (!BaseY)
      BaseY = Y;
    if ((BaseX.id() & BanksMask) == (BaseY.id() & BanksMask))
      return true;
    if (BaseX != X /* This is a 64-bit register */ &&
        ((BaseX.id() + 1) & BanksMask) == (BaseY.id() & BanksMask))
      return true;
    if (BaseY != Y &&
        (BaseX.id() & BanksMask) == ((BaseY.id() + 1) & BanksMask))
      return true;

    // If both are 64-bit, the bank conflict will already have been detected
    // while checking the first subreg.
    return false;
  };

  unsigned CompOprIdx;
  for (CompOprIdx = 0; CompOprIdx < Component::MAX_OPR_NUM; ++CompOprIdx) {
    unsigned BanksMasks = VOPD3 ? VOPD3_VGPR_BANK_MASKS[CompOprIdx]
                                : VOPD_VGPR_BANK_MASKS[CompOprIdx];
    if (!OpXRegs[CompOprIdx] || !OpYRegs[CompOprIdx])
      continue;

    if (getVGPREncodingMSBs(OpXRegs[CompOprIdx], MRI) !=
        getVGPREncodingMSBs(OpYRegs[CompOprIdx], MRI))
      return CompOprIdx;

    if (SkipSrc && CompOprIdx >= Component::DST_NUM)
      continue;

    if (CompOprIdx < Component::DST_NUM) {
      // Even if we do not check vdst parity, vdst operands still shall not
      // overlap.
      if (MRI.regsOverlap(OpXRegs[CompOprIdx], OpYRegs[CompOprIdx]))
        return CompOprIdx;
      if (VOPD3) // No need to check dst parity.
        continue;
    }

    if (banksOverlap(OpXRegs[CompOprIdx], OpYRegs[CompOprIdx], BanksMasks) &&
        (!AllowSameVGPR || CompOprIdx < Component::DST_NUM ||
         OpXRegs[CompOprIdx] != OpYRegs[CompOprIdx]))
      return CompOprIdx;
  }

  return {};
}

// Return an array of VGPR registers [DST,SRC0,SRC1,SRC2] used
// by the specified component. If an operand is unused
// or is not a VGPR, the corresponding value is 0.
//
// GetRegIdx(Component, MCOperandIdx) must return a VGPR register index
// for the specified component and MC operand. The callback must return 0
// if the operand is not a register or not a VGPR.
InstInfo::RegIndices
InstInfo::getRegIndices(unsigned CompIdx,
                        std::function<MCRegister(unsigned, unsigned)> GetRegIdx,
                        bool VOPD3) const {
  assert(CompIdx < COMPONENTS_NUM);

  const auto &Comp = CompInfo[CompIdx];
  InstInfo::RegIndices RegIndices;

  RegIndices[DST] = GetRegIdx(CompIdx, Comp.getIndexOfDstInMCOperands());

  for (unsigned CompOprIdx : {SRC0, SRC1, SRC2}) {
    unsigned CompSrcIdx = CompOprIdx - DST_NUM;
    RegIndices[CompOprIdx] =
        Comp.hasRegSrcOperand(CompSrcIdx)
            ? GetRegIdx(CompIdx,
                        Comp.getIndexOfSrcInMCOperands(CompSrcIdx, VOPD3))
            : MCRegister();
  }
  return RegIndices;
}

} // namespace VOPD

VOPD::InstInfo getVOPDInstInfo(const MCInstrDesc &OpX, const MCInstrDesc &OpY) {
  return VOPD::InstInfo(OpX, OpY);
}

VOPD::InstInfo getVOPDInstInfo(unsigned VOPDOpcode,
                               const MCInstrInfo *InstrInfo) {
  auto [OpX, OpY] = getVOPDComponents(VOPDOpcode);
  const auto &OpXDesc = InstrInfo->get(OpX);
  const auto &OpYDesc = InstrInfo->get(OpY);
  bool VOPD3 = InstrInfo->get(VOPDOpcode).TSFlags & SIInstrFlags::VOPD3;
  VOPD::ComponentInfo OpXInfo(OpXDesc, VOPD3);
  VOPD::ComponentInfo OpYInfo(OpYDesc, OpXInfo, VOPD3);
  return VOPD::InstInfo(OpXInfo, OpYInfo);
}

namespace IsaInfo {

AMDGPUTargetID::AMDGPUTargetID(const MCSubtargetInfo &STI)
    : STI(STI), XnackSetting(TargetIDSetting::Any),
      SramEccSetting(TargetIDSetting::Any) {
  if (!STI.getFeatureBits().test(FeatureSupportsXNACK))
    XnackSetting = TargetIDSetting::Unsupported;
  if (!STI.getFeatureBits().test(FeatureSupportsSRAMECC))
    SramEccSetting = TargetIDSetting::Unsupported;
}

void AMDGPUTargetID::setTargetIDFromFeaturesString(StringRef FS) {
  // Check if xnack or sramecc is explicitly enabled or disabled. In the
  // absence of the target features we assume we must generate code that can
  // run in any environment.
  SubtargetFeatures Features(FS);
  std::optional<bool> XnackRequested;
  std::optional<bool> SramEccRequested;

  for (const std::string &Feature : Features.getFeatures()) {
    if (Feature == "+xnack")
      XnackRequested = true;
    else if (Feature == "-xnack")
      XnackRequested = false;
    else if (Feature == "+sramecc")
      SramEccRequested = true;
    else if (Feature == "-sramecc")
      SramEccRequested = false;
  }

  bool XnackSupported = isXnackSupported();
  bool SramEccSupported = isSramEccSupported();

  if (XnackRequested) {
    if (XnackSupported) {
      XnackSetting =
          *XnackRequested ? TargetIDSetting::On : TargetIDSetting::Off;
    } else {
      // If a specific xnack setting was requested and this GPU does not
      // support xnack, emit a warning. The setting remains "Unsupported".
      if (*XnackRequested) {
        errs() << "warning: xnack 'On' was requested for a processor that "
                  "does not support it!\n";
      } else {
        errs() << "warning: xnack 'Off' was requested for a processor that "
                  "does not support it!\n";
      }
    }
  }

  if (SramEccRequested) {
    if (SramEccSupported) {
      SramEccSetting =
          *SramEccRequested ? TargetIDSetting::On : TargetIDSetting::Off;
    } else {
      // If a specific sramecc setting was requested and this GPU does not
      // support sramecc, emit a warning. The setting remains "Unsupported".
      if (*SramEccRequested) {
        errs() << "warning: sramecc 'On' was requested for a processor that "
                  "does not support it!\n";
      } else {
        errs() << "warning: sramecc 'Off' was requested for a processor that "
                  "does not support it!\n";
      }
    }
  }
}

static TargetIDSetting
getTargetIDSettingFromFeatureString(StringRef FeatureString) {
  if (FeatureString.ends_with("-"))
    return TargetIDSetting::Off;
  if (FeatureString.ends_with("+"))
    return TargetIDSetting::On;

  llvm_unreachable("Malformed feature string");
}

void AMDGPUTargetID::setTargetIDFromTargetIDStream(StringRef TargetID) {
  SmallVector<StringRef, 3> TargetIDSplit;
  TargetID.split(TargetIDSplit, ':');

  for (const auto &FeatureString : TargetIDSplit) {
    if (FeatureString.starts_with("xnack"))
      XnackSetting = getTargetIDSettingFromFeatureString(FeatureString);
    if (FeatureString.starts_with("sramecc"))
      SramEccSetting = getTargetIDSettingFromFeatureString(FeatureString);
  }
}

std::string AMDGPUTargetID::toString() const {
  std::string StringRep;
  raw_string_ostream StreamRep(StringRep);

  auto TargetTriple = STI.getTargetTriple();
  auto Version = getIsaVersion(STI.getCPU());

  StreamRep << TargetTriple.getArchName() << '-'
            << TargetTriple.getVendorName() << '-'
            << TargetTriple.getOSName() << '-'
            << TargetTriple.getEnvironmentName() << '-';

  std::string Processor;
  // TODO: The else statement is present here because we used various alias
  // names for GPUs up until GFX9 (e.g. 'fiji' is the same as 'gfx803').
  // Remove it once all aliases are removed from GCNProcessors.td.
  if (Version.Major >= 9)
    Processor = STI.getCPU().str();
  else
    Processor = (Twine("gfx") + Twine(Version.Major) + Twine(Version.Minor) +
                 Twine(Version.Stepping))
                    .str();

  std::string Features;
  if (STI.getTargetTriple().getOS() == Triple::AMDHSA) {
    // sramecc.
    if (getSramEccSetting() == TargetIDSetting::Off)
      Features += ":sramecc-";
    else if (getSramEccSetting() == TargetIDSetting::On)
      Features += ":sramecc+";
    // xnack.
    if (getXnackSetting() == TargetIDSetting::Off)
      Features += ":xnack-";
    else if (getXnackSetting() == TargetIDSetting::On)
      Features += ":xnack+";
  }

  StreamRep << Processor << Features;

  return StringRep;
}

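// For example (illustrative, not from the original file), an HSA target with
// sramecc enabled and xnack disabled prints as
//   amdgcn-amd-amdhsa--gfx906:sramecc+:xnack-
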
unsigned getWavefrontSize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureWavefrontSize16))
    return 16;
  if (STI->getFeatureBits().test(FeatureWavefrontSize32))
    return 32;

  return 64;
}

unsigned getLocalMemorySize(const MCSubtargetInfo *STI) {
  unsigned BytesPerCU = getAddressableLocalMemorySize(STI);

  // "Per CU" really means "per whatever functional block the waves of a
  // workgroup must share". So the effective local memory size is doubled in
  // WGP mode on gfx10.
  if (isGFX10Plus(*STI) && !STI->getFeatureBits().test(FeatureCuMode))
    BytesPerCU *= 2;

  return BytesPerCU;
}

unsigned getAddressableLocalMemorySize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize32768))
    return 32768;
  if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize65536))
    return 65536;
  if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize163840))
    return 163840;
  if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize327680))
    return 327680;
  return 32768;
}

unsigned getEUsPerCU(const MCSubtargetInfo *STI) {
  // "Per CU" really means "per whatever functional block the waves of a
  // workgroup must share".

  // GFX12.5 only supports CU mode, where a CU contains four SIMDs.
  if (isGFX1250(*STI)) {
    assert(STI->getFeatureBits().test(FeatureCuMode));
    return 4;
  }

  // For gfx10 in CU mode the functional block is the CU, which contains
  // two SIMDs.
  if (isGFX10Plus(*STI) && STI->getFeatureBits().test(FeatureCuMode))
    return 2;

  // Pre-gfx10 a CU contains four SIMDs. For gfx10 in WGP mode the WGP
  // contains two CUs, so a total of four SIMDs.
  return 4;
}

unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
                               unsigned FlatWorkGroupSize) {
  assert(FlatWorkGroupSize != 0);
  if (!STI->getTargetTriple().isAMDGCN())
    return 8;
  unsigned MaxWaves = getMaxWavesPerEU(STI) * getEUsPerCU(STI);
  unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize);
  if (N == 1) {
    // Single-wave workgroups don't consume barrier resources.
    return MaxWaves;
  }

  unsigned MaxBarriers = 16;
  if (isGFX10Plus(*STI) && !STI->getFeatureBits().test(FeatureCuMode))
    MaxBarriers = 32;

  return std::min(MaxWaves / N, MaxBarriers);
}

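// Worked example (illustrative, not from the original file): on a wave64
// pre-gfx10 target with 10 waves per EU and 4 EUs per CU, a 256-thread
// workgroup is 4 waves, so at most min(40 / 4, 16) = 10 such workgroups fit
// on a CU.
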
unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) { return 1; }

unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI) {
  // FIXME: Need to take scratch memory into account.
  if (isGFX90A(*STI))
    return 8;
  if (!isGFX10Plus(*STI))
    return 10;
  return hasGFX10_3Insts(*STI) ? 16 : 20;
}

unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI,
                                   unsigned FlatWorkGroupSize) {
  return divideCeil(getWavesPerWorkGroup(STI, FlatWorkGroupSize),
                    getEUsPerCU(STI));
}

unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) { return 1; }

unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  // Some subtargets allow encoding 2048, but this isn't tested or supported.
  return 1024;
}

unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
                              unsigned FlatWorkGroupSize) {
  return divideCeil(FlatWorkGroupSize, getWavefrontSize(STI));
}

unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return getAddressableNumSGPRs(STI);
  if (Version.Major >= 8)
    return 16;
  return 8;
}

unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) { return 8; }

unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 8)
    return 800;
  return 512;
}

unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureSGPRInitBug))
    return FIXED_NUM_SGPRS_FOR_INIT_BUG;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return 106;
  if (Version.Major >= 8)
    return 102;
  return 104;
}

unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return 0;

  if (WavesPerEU >= getMaxWavesPerEU(STI))
    return 0;

  unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1);
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1;
  return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI));
}

unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
                        bool Addressable) {
  assert(WavesPerEU != 0);

  unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI);
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return Addressable ? AddressableNumSGPRs : 108;
  if (Version.Major >= 8 && !Addressable)
    AddressableNumSGPRs = 112;
  unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU;
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI));
  return std::min(MaxNumSGPRs, AddressableNumSGPRs);
}

unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed, bool XNACKUsed) {
  unsigned ExtraSGPRs = 0;
  if (VCCUsed)
    ExtraSGPRs = 2;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return ExtraSGPRs;

  if (Version.Major < 8) {
    if (FlatScrUsed)
      ExtraSGPRs = 4;
  } else {
    if (XNACKUsed)
      ExtraSGPRs = 4;

    if (FlatScrUsed ||
        STI->getFeatureBits().test(AMDGPU::FeatureArchitectedFlatScratch))
      ExtraSGPRs = 6;
  }

  return ExtraSGPRs;
}

unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed) {
  return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed,
                          STI->getFeatureBits().test(AMDGPU::FeatureXNACK));
}

static unsigned getGranulatedNumRegisterBlocks(unsigned NumRegs,
                                               unsigned Granule) {
  return divideCeil(std::max(1u, NumRegs), Granule);
}

unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
  // SGPRBlocks is the actual number of SGPR blocks minus 1.
  return getGranulatedNumRegisterBlocks(NumSGPRs,
                                        getSGPREncodingGranule(STI)) -
         1;
}

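// Worked example (illustrative, not from the original file): with the
// 8-register SGPR encoding granule, 37 SGPRs occupy divideCeil(37, 8) = 5
// blocks, so getNumSGPRBlocks() returns 4 (the encoded value is biased by 1).
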
unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
                             unsigned DynamicVGPRBlockSize,
                             std::optional<bool> EnableWavefrontSize32) {
  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
    return 8;

  if (DynamicVGPRBlockSize != 0)
    return DynamicVGPRBlockSize;

  bool IsWave32 = EnableWavefrontSize32
                      ? *EnableWavefrontSize32
                      : STI->getFeatureBits().test(FeatureWavefrontSize32);

  if (STI->getFeatureBits().test(Feature1_5xVGPRs))
    return IsWave32 ? 24 : 12;

  if (hasGFX10_3Insts(*STI))
    return IsWave32 ? 16 : 8;

  return IsWave32 ? 8 : 4;
}

unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
                                std::optional<bool> EnableWavefrontSize32) {
  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
    return 8;

  bool IsWave32 = EnableWavefrontSize32
                      ? *EnableWavefrontSize32
                      : STI->getFeatureBits().test(FeatureWavefrontSize32);

  if (STI->getFeatureBits().test(Feature1024AddressableVGPRs))
    return IsWave32 ? 16 : 8;

  return IsWave32 ? 8 : 4;
}

unsigned getArchVGPRAllocGranule() { return 4; }

unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
    return 512;
  if (!isGFX10Plus(*STI))
    return 256;
  bool IsWave32 = STI->getFeatureBits().test(FeatureWavefrontSize32);
  if (STI->getFeatureBits().test(Feature1_5xVGPRs))
    return IsWave32 ? 1536 : 768;
  return IsWave32 ? 1024 : 512;
}

unsigned getAddressableNumArchVGPRs(const MCSubtargetInfo *STI) {
  const auto &Features = STI->getFeatureBits();
  if (Features.test(Feature1024AddressableVGPRs))
    return Features.test(FeatureWavefrontSize32) ? 1024 : 512;
  return 256;
}

unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI,
                                unsigned DynamicVGPRBlockSize) {
  const auto &Features = STI->getFeatureBits();
  if (Features.test(FeatureGFX90AInsts))
    return 512;

  if (DynamicVGPRBlockSize != 0)
    // On GFX12 we can allocate at most 8 blocks of VGPRs.
    return 8 * getVGPRAllocGranule(STI, DynamicVGPRBlockSize);
  return getAddressableNumArchVGPRs(STI);
}

unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI,
                                      unsigned NumVGPRs,
                                      unsigned DynamicVGPRBlockSize) {
  return getNumWavesPerEUWithNumVGPRs(
      NumVGPRs, getVGPRAllocGranule(STI, DynamicVGPRBlockSize),
      getMaxWavesPerEU(STI), getTotalNumVGPRs(STI));
}

unsigned getNumWavesPerEUWithNumVGPRs(unsigned NumVGPRs, unsigned Granule,
                                      unsigned MaxWaves,
                                      unsigned TotalNumVGPRs) {
  if (NumVGPRs < Granule)
    return MaxWaves;
  unsigned RoundedRegs = alignTo(NumVGPRs, Granule);
  return std::min(std::max(TotalNumVGPRs / RoundedRegs, 1u), MaxWaves);
}

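// Worked example (illustrative, not from the original file): with a granule
// of 8 and 512 total VGPRs, asking for 100 VGPRs rounds up to 104, so
// 512 / 104 = 4 waves can run per EU (clamped to [1, MaxWaves]).
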
unsigned getOccupancyWithNumSGPRs(unsigned SGPRs, unsigned MaxWaves,
                                  AMDGPUSubtarget::Generation Gen) {
  if (Gen >= AMDGPUSubtarget::GFX10)
    return MaxWaves;

  if (Gen >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    if (SGPRs <= 80)
      return 10;
    if (SGPRs <= 88)
      return 9;
    if (SGPRs <= 100)
      return 8;
    return 7;
  }
  if (SGPRs <= 48)
    return 10;
  if (SGPRs <= 56)
    return 9;
  if (SGPRs <= 64)
    return 8;
  if (SGPRs <= 72)
    return 7;
  if (SGPRs <= 80)
    return 6;
  return 5;
}

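// For example (illustrative, not from the original file), a gfx8 kernel using
// 96 SGPRs falls into the "<= 100" bucket above and is limited to 8 waves
// per EU.
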
unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
                        unsigned DynamicVGPRBlockSize) {
  assert(WavesPerEU != 0);

  unsigned MaxWavesPerEU = getMaxWavesPerEU(STI);
  if (WavesPerEU >= MaxWavesPerEU)
    return 0;

  unsigned TotNumVGPRs = getTotalNumVGPRs(STI);
  unsigned AddrsableNumVGPRs =
      getAddressableNumVGPRs(STI, DynamicVGPRBlockSize);
  unsigned Granule = getVGPRAllocGranule(STI, DynamicVGPRBlockSize);
  unsigned MaxNumVGPRs = alignDown(TotNumVGPRs / WavesPerEU, Granule);

  if (MaxNumVGPRs == alignDown(TotNumVGPRs / MaxWavesPerEU, Granule))
    return 0;

  unsigned MinWavesPerEU = getNumWavesPerEUWithNumVGPRs(STI, AddrsableNumVGPRs,
                                                        DynamicVGPRBlockSize);
  if (WavesPerEU < MinWavesPerEU)
    return getMinNumVGPRs(STI, MinWavesPerEU, DynamicVGPRBlockSize);

  unsigned MaxNumVGPRsNext = alignDown(TotNumVGPRs / (WavesPerEU + 1), Granule);
  unsigned MinNumVGPRs = 1 + std::min(MaxNumVGPRs - Granule, MaxNumVGPRsNext);
  return std::min(MinNumVGPRs, AddrsableNumVGPRs);
}

unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
                        unsigned DynamicVGPRBlockSize) {
  assert(WavesPerEU != 0);

  unsigned MaxNumVGPRs =
      alignDown(getTotalNumVGPRs(STI) / WavesPerEU,
                getVGPRAllocGranule(STI, DynamicVGPRBlockSize));
  unsigned AddressableNumVGPRs =
      getAddressableNumVGPRs(STI, DynamicVGPRBlockSize);
  return std::min(MaxNumVGPRs, AddressableNumVGPRs);
}

unsigned getEncodedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
                                 std::optional<bool> EnableWavefrontSize32) {
  return getGranulatedNumRegisterBlocks(
             NumVGPRs, getVGPREncodingGranule(STI, EnableWavefrontSize32)) -
         1;
}

unsigned getAllocatedNumVGPRBlocks(const MCSubtargetInfo *STI,
                                   unsigned NumVGPRs,
                                   unsigned DynamicVGPRBlockSize,
                                   std::optional<bool> EnableWavefrontSize32) {
  return getGranulatedNumRegisterBlocks(
      NumVGPRs,
      getVGPRAllocGranule(STI, DynamicVGPRBlockSize, EnableWavefrontSize32));
}
} // end namespace IsaInfo

void initDefaultAMDKernelCodeT(amd_kernel_code_t &KernelCode,
                               const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());

  KernelCode.amd_kernel_code_version_major = 1;
  KernelCode.amd_kernel_code_version_minor = 2;
  KernelCode.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
  KernelCode.amd_machine_version_major = Version.Major;
  KernelCode.amd_machine_version_minor = Version.Minor;
  KernelCode.amd_machine_version_stepping = Version.Stepping;

  if (STI->getFeatureBits().test(FeatureWavefrontSize32)) {
    KernelCode.wavefront_size = 5;
    KernelCode.code_properties |= AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32;
  } else {
    KernelCode.wavefront_size = 6;
  }

  // If the code object does not support indirect functions, then the value
  // must be 0xffffffff.
  KernelCode.call_convention = -1;

  // These alignment values are specified in powers of two, so alignment =
  // 2^n. The minimum alignment is 2^4 = 16.
  KernelCode.kernarg_segment_alignment = 4;
  KernelCode.group_segment_alignment = 4;
  KernelCode.private_segment_alignment = 4;

  if (Version.Major >= 10) {
    KernelCode.compute_pgm_resource_registers |=
        S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) |
        S_00B848_MEM_ORDERED(1);
  }
}

bool isGroupSegment(const GlobalValue *GV) {
  return GV->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}

bool isGlobalSegment(const GlobalValue *GV) {
  return GV->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}

bool isReadOnlySegment(const GlobalValue *GV) {
  unsigned AS = GV->getAddressSpace();
  return AS == AMDGPUAS::CONSTANT_ADDRESS ||
         AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
}

bool shouldEmitConstantsToTextSection(const Triple &TT) {
  return TT.getArch() == Triple::r600;
}

static bool isValidRegPrefix(char C) {
  return C == 'v' || C == 's' || C == 'a';
}

std::tuple<char, unsigned, unsigned> parseAsmPhysRegName(StringRef RegName) {
  char Kind = RegName.front();
  if (!isValidRegPrefix(Kind))
    return {};

  RegName = RegName.drop_front();
  if (RegName.consume_front("[")) {
    unsigned Idx, End;
    bool Failed = RegName.consumeInteger(10, Idx);
    Failed |= !RegName.consume_front(":");
    Failed |= RegName.consumeInteger(10, End);
    Failed |= !RegName.consume_back("]");
    if (!Failed) {
      unsigned NumRegs = End - Idx + 1;
      if (NumRegs > 1)
        return {Kind, Idx, NumRegs};
    }
  } else {
    unsigned Idx;
    bool Failed = RegName.getAsInteger(10, Idx);
    if (!Failed)
      return {Kind, Idx, 1};
  }

  return {};
}

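// Illustrative examples (not part of the original file):
//   parseAsmPhysRegName("v[8:11]") == {'v', 8, 4}
//   parseAsmPhysRegName("s15")     == {'s', 15, 1}
//   parseAsmPhysRegName("x0")      == {} (invalid prefix)
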
std::tuple<char, unsigned, unsigned>
parseAsmConstraintPhysReg(StringRef Constraint) {
  StringRef RegName = Constraint;
  if (!RegName.consume_front("{") || !RegName.consume_back("}"))
    return {};
  return parseAsmPhysRegName(RegName);
}

std::pair<unsigned, unsigned>
getIntegerPairAttribute(const Function &F, StringRef Name,
                        std::pair<unsigned, unsigned> Default,
                        bool OnlyFirstRequired) {
  if (auto Attr = getIntegerPairAttribute(F, Name, OnlyFirstRequired))
    return {Attr->first, Attr->second.value_or(Default.second)};
  return Default;
}

std::optional<std::pair<unsigned, std::optional<unsigned>>>
getIntegerPairAttribute(const Function &F, StringRef Name,
                        bool OnlyFirstRequired) {
  Attribute A = F.getFnAttribute(Name);
  if (!A.isStringAttribute())
    return std::nullopt;

  LLVMContext &Ctx = F.getContext();
  std::pair<unsigned, std::optional<unsigned>> Ints;
  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
    Ctx.emitError("can't parse first integer attribute " + Name);
    return std::nullopt;
  }
  unsigned Second = 0;
  if (Strs.second.trim().getAsInteger(0, Second)) {
    if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
      Ctx.emitError("can't parse second integer attribute " + Name);
      return std::nullopt;
    }
  } else {
    Ints.second = Second;
  }

  return Ints;
}

SmallVector<unsigned> getIntegerVecAttribute(const Function &F, StringRef Name,
                                             unsigned Size,
                                             unsigned DefaultVal) {
  std::optional<SmallVector<unsigned>> R =
      getIntegerVecAttribute(F, Name, Size);
  return R.has_value() ? *R : SmallVector<unsigned>(Size, DefaultVal);
}

std::optional<SmallVector<unsigned>>
getIntegerVecAttribute(const Function &F, StringRef Name, unsigned Size) {
  assert(Size > 2);
  LLVMContext &Ctx = F.getContext();

  Attribute A = F.getFnAttribute(Name);
  if (!A.isValid())
    return std::nullopt;
  if (!A.isStringAttribute()) {
    Ctx.emitError(Name + " is not a string attribute");
    return std::nullopt;
  }

  SmallVector<unsigned> Vals(Size);

  StringRef S = A.getValueAsString();
  unsigned i = 0;
  for (; !S.empty() && i < Size; i++) {
    std::pair<StringRef, StringRef> Strs = S.split(',');
    unsigned IntVal;
    if (Strs.first.trim().getAsInteger(0, IntVal)) {
      Ctx.emitError("can't parse integer attribute " + Strs.first + " in " +
                    Name);
      return std::nullopt;
    }
    Vals[i] = IntVal;
    S = Strs.second;
  }

  if (!S.empty() || i < Size) {
    Ctx.emitError("attribute " + Name +
                  " has incorrect number of integers; expected " +
                  llvm::utostr(Size));
    return std::nullopt;
  }
  return Vals;
}

bool hasValueInRangeLikeMetadata(const MDNode &MD, int64_t Val) {
  assert((MD.getNumOperands() % 2 == 0) && "invalid number of operands!");
  for (unsigned I = 0, E = MD.getNumOperands() / 2; I != E; ++I) {
    auto Low =
        mdconst::extract<ConstantInt>(MD.getOperand(2 * I + 0))->getValue();
    auto High =
        mdconst::extract<ConstantInt>(MD.getOperand(2 * I + 1))->getValue();
    // There are two types of [A; B) ranges:
    // A < B, e.g. [4; 5), a range that only includes 4.
    // A > B, e.g. [5; 4), a range that wraps around and includes
    // everything except 4.
    if (Low.ult(High)) {
      if (Low.ule(Val) && High.ugt(Val))
        return true;
    } else {
      if (Low.ule(Val) || High.ugt(Val))
        return true;
    }
  }

  return false;
}

raw_ostream &operator<<(raw_ostream &OS, const Waitcnt &Wait) {
  ListSeparator LS;
  if (Wait.LoadCnt != ~0u)
    OS << LS << "LoadCnt: " << Wait.LoadCnt;
  if (Wait.ExpCnt != ~0u)
    OS << LS << "ExpCnt: " << Wait.ExpCnt;
  if (Wait.DsCnt != ~0u)
    OS << LS << "DsCnt: " << Wait.DsCnt;
  if (Wait.StoreCnt != ~0u)
    OS << LS << "StoreCnt: " << Wait.StoreCnt;
  if (Wait.SampleCnt != ~0u)
    OS << LS << "SampleCnt: " << Wait.SampleCnt;
  if (Wait.BvhCnt != ~0u)
    OS << LS << "BvhCnt: " << Wait.BvhCnt;
  if (Wait.KmCnt != ~0u)
    OS << LS << "KmCnt: " << Wait.KmCnt;
  if (Wait.XCnt != ~0u)
    OS << LS << "XCnt: " << Wait.XCnt;
  if (LS.unused())
    OS << "none";
  OS << '\n';
  return OS;
}

unsigned getVmcntBitMask(const IsaVersion &Version) {
  return (1 << (getVmcntBitWidthLo(Version.Major) +
                getVmcntBitWidthHi(Version.Major))) -
         1;
}

unsigned getLoadcntBitMask(const IsaVersion &Version) {
  return (1 << getLoadcntBitWidth(Version.Major)) - 1;
}

unsigned getSamplecntBitMask(const IsaVersion &Version) {
  return (1 << getSamplecntBitWidth(Version.Major)) - 1;
}

unsigned getBvhcntBitMask(const IsaVersion &Version) {
  return (1 << getBvhcntBitWidth(Version.Major)) - 1;
}

unsigned getExpcntBitMask(const IsaVersion &Version) {
  return (1 << getExpcntBitWidth(Version.Major)) - 1;
}

unsigned getLgkmcntBitMask(const IsaVersion &Version) {
  return (1 << getLgkmcntBitWidth(Version.Major)) - 1;
}

unsigned getDscntBitMask(const IsaVersion &Version) {
  return (1 << getDscntBitWidth(Version.Major)) - 1;
}

unsigned getKmcntBitMask(const IsaVersion &Version) {
  return (1 << getKmcntBitWidth(Version.Major)) - 1;
}

unsigned getXcntBitMask(const IsaVersion &Version) {
  return (1 << getXcntBitWidth(Version.Major, Version.Minor)) - 1;
}

unsigned getStorecntBitMask(const IsaVersion &Version) {
  return (1 << getStorecntBitWidth(Version.Major)) - 1;
}

unsigned getWaitcntBitMask(const IsaVersion &Version) {
  unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(Version.Major),
                                getVmcntBitWidthLo(Version.Major));
  unsigned Expcnt = getBitMask(getExpcntBitShift(Version.Major),
                               getExpcntBitWidth(Version.Major));
  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(Version.Major),
                                getLgkmcntBitWidth(Version.Major));
  unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(Version.Major),
                                getVmcntBitWidthHi(Version.Major));
  return VmcntLo | Expcnt | Lgkmcnt | VmcntHi;
}

unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  unsigned VmcntLo = unpackBits(Waitcnt, getVmcntBitShiftLo(Version.Major),
                                getVmcntBitWidthLo(Version.Major));
  unsigned VmcntHi = unpackBits(Waitcnt, getVmcntBitShiftHi(Version.Major),
                                getVmcntBitWidthHi(Version.Major));
  return VmcntLo | VmcntHi << getVmcntBitWidthLo(Version.Major);
}

unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getExpcntBitShift(Version.Major),
                    getExpcntBitWidth(Version.Major));
}

unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getLgkmcntBitShift(Version.Major),
                    getLgkmcntBitWidth(Version.Major));
}

void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt,
                   unsigned &Expcnt, unsigned &Lgkmcnt) {
  Vmcnt = decodeVmcnt(Version, Waitcnt);
  Expcnt = decodeExpcnt(Version, Waitcnt);
  Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
}

Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) {
  Waitcnt Decoded;
  Decoded.LoadCnt = decodeVmcnt(Version, Encoded);
  Decoded.ExpCnt = decodeExpcnt(Version, Encoded);
  Decoded.DsCnt = decodeLgkmcnt(Version, Encoded);
  return Decoded;
}

unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
                     unsigned Vmcnt) {
  Waitcnt = packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(Version.Major),
                     getVmcntBitWidthLo(Version.Major));
  return packBits(Vmcnt >> getVmcntBitWidthLo(Version.Major), Waitcnt,
                  getVmcntBitShiftHi(Version.Major),
                  getVmcntBitWidthHi(Version.Major));
}

unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
                      unsigned Expcnt) {
  return packBits(Expcnt, Waitcnt, getExpcntBitShift(Version.Major),
                  getExpcntBitWidth(Version.Major));
}

unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
                       unsigned Lgkmcnt) {
  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(Version.Major),
                  getLgkmcntBitWidth(Version.Major));
}

unsigned encodeWaitcnt(const IsaVersion &Version, unsigned Vmcnt,
                       unsigned Expcnt, unsigned Lgkmcnt) {
  unsigned Waitcnt = getWaitcntBitMask(Version);
  Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
  Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
  Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
  return Waitcnt;
}

unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) {
  return encodeWaitcnt(Version, Decoded.LoadCnt, Decoded.ExpCnt, Decoded.DsCnt);
}

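// Worked example (illustrative, not from the original file), for gfx9
// (Version.Major == 9): vmcnt occupies bits [3:0] and [15:14], expcnt bits
// [6:4] and lgkmcnt bits [11:8], so
//   encodeWaitcnt(V, /*Vmcnt=*/1, /*Expcnt=*/2, /*Lgkmcnt=*/3) == 0x321
// and decodeVmcnt(V, 0x321) == 1, decodeExpcnt(V, 0x321) == 2,
// decodeLgkmcnt(V, 0x321) == 3.
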
static unsigned getCombinedCountBitMask(const IsaVersion &Version,
                                        bool IsStore) {
  unsigned Dscnt = getBitMask(getDscntBitShift(Version.Major),
                              getDscntBitWidth(Version.Major));
  if (IsStore) {
    unsigned Storecnt = getBitMask(getLoadcntStorecntBitShift(Version.Major),
                                   getStorecntBitWidth(Version.Major));
    return Dscnt | Storecnt;
  }
  unsigned Loadcnt = getBitMask(getLoadcntStorecntBitShift(Version.Major),
                                getLoadcntBitWidth(Version.Major));
  return Dscnt | Loadcnt;
}

Waitcnt decodeLoadcntDscnt(const IsaVersion &Version, unsigned LoadcntDscnt) {
  Waitcnt Decoded;
  Decoded.LoadCnt =
      unpackBits(LoadcntDscnt, getLoadcntStorecntBitShift(Version.Major),
                 getLoadcntBitWidth(Version.Major));
  Decoded.DsCnt = unpackBits(LoadcntDscnt, getDscntBitShift(Version.Major),
                             getDscntBitWidth(Version.Major));
  return Decoded;
}

Waitcnt decodeStorecntDscnt(const IsaVersion &Version, unsigned StorecntDscnt) {
  Waitcnt Decoded;
  Decoded.StoreCnt =
      unpackBits(StorecntDscnt, getLoadcntStorecntBitShift(Version.Major),
                 getStorecntBitWidth(Version.Major));
  Decoded.DsCnt = unpackBits(StorecntDscnt, getDscntBitShift(Version.Major),
                             getDscntBitWidth(Version.Major));
  return Decoded;
}

static unsigned encodeLoadcnt(const IsaVersion &Version, unsigned Waitcnt,
                              unsigned Loadcnt) {
  return packBits(Loadcnt, Waitcnt, getLoadcntStorecntBitShift(Version.Major),
                  getLoadcntBitWidth(Version.Major));
}

static unsigned encodeStorecnt(const IsaVersion &Version, unsigned Waitcnt,
                               unsigned Storecnt) {
  return packBits(Storecnt, Waitcnt, getLoadcntStorecntBitShift(Version.Major),
                  getStorecntBitWidth(Version.Major));
}

static unsigned encodeDscnt(const IsaVersion &Version, unsigned Waitcnt,
                            unsigned Dscnt) {
  return packBits(Dscnt, Waitcnt, getDscntBitShift(Version.Major),
                  getDscntBitWidth(Version.Major));
}

static unsigned encodeLoadcntDscnt(const IsaVersion &Version, unsigned Loadcnt,
                                   unsigned Dscnt) {
  unsigned Waitcnt = getCombinedCountBitMask(Version, false);
  Waitcnt = encodeLoadcnt(Version, Waitcnt, Loadcnt);
  Waitcnt = encodeDscnt(Version, Waitcnt, Dscnt);
  return Waitcnt;
}

unsigned encodeLoadcntDscnt(const IsaVersion &Version, const Waitcnt &Decoded) {
  return encodeLoadcntDscnt(Version, Decoded.LoadCnt, Decoded.DsCnt);
}

static unsigned encodeStorecntDscnt(const IsaVersion &Version,
                                    unsigned Storecnt, unsigned Dscnt) {
  unsigned Waitcnt = getCombinedCountBitMask(Version, true);
  Waitcnt = encodeStorecnt(Version, Waitcnt, Storecnt);
  Waitcnt = encodeDscnt(Version, Waitcnt, Dscnt);
  return Waitcnt;
}

unsigned encodeStorecntDscnt(const IsaVersion &Version,
                             const Waitcnt &Decoded) {
  return encodeStorecntDscnt(Version, Decoded.StoreCnt, Decoded.DsCnt);
}

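// Worked example (illustrative, not from the original file), for gfx12: with
// dscnt in bits [5:0] and loadcnt in bits [13:8],
//   encodeLoadcntDscnt(V, /*Loadcnt=*/2, /*Dscnt=*/1) == 0x201
// and decodeLoadcntDscnt(V, 0x201) yields LoadCnt == 2, DsCnt == 1.
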
1963//===----------------------------------------------------------------------===//
1964// Custom Operand Values
1965//===----------------------------------------------------------------------===//
1966
1967 static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr,
1968 int Size,
1969 const MCSubtargetInfo &STI) {
1970 unsigned Enc = 0;
1971 for (int Idx = 0; Idx < Size; ++Idx) {
1972 const auto &Op = Opr[Idx];
1973 if (Op.isSupported(STI))
1974 Enc |= Op.encode(Op.Default);
1975 }
1976 return Enc;
1977}
1978
1979 static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr,
1980 int Size, unsigned Code,
1981 bool &HasNonDefaultVal,
1982 const MCSubtargetInfo &STI) {
1983 unsigned UsedOprMask = 0;
1984 HasNonDefaultVal = false;
1985 for (int Idx = 0; Idx < Size; ++Idx) {
1986 const auto &Op = Opr[Idx];
1987 if (!Op.isSupported(STI))
1988 continue;
1989 UsedOprMask |= Op.getMask();
1990 unsigned Val = Op.decode(Code);
1991 if (!Op.isValid(Val))
1992 return false;
1993 HasNonDefaultVal |= (Val != Op.Default);
1994 }
1995 return (Code & ~UsedOprMask) == 0;
1996}
1997
1998static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size,
1999 unsigned Code, int &Idx, StringRef &Name,
2000 unsigned &Val, bool &IsDefault,
2001 const MCSubtargetInfo &STI) {
2002 while (Idx < Size) {
2003 const auto &Op = Opr[Idx++];
2004 if (Op.isSupported(STI)) {
2005 Name = Op.Name;
2006 Val = Op.decode(Code);
2007 IsDefault = (Val == Op.Default);
2008 return true;
2009 }
2010 }
2011
2012 return false;
2013}
2014
2015 static int encodeCustomOperandVal(const CustomOperandVal &Op,
2016 int64_t InputVal) {
2017 if (InputVal < 0 || InputVal > Op.Max)
2018 return OPR_VAL_INVALID;
2019 return Op.encode(InputVal);
2020}
2021
2022static int encodeCustomOperand(const CustomOperandVal *Opr, int Size,
2023 const StringRef Name, int64_t InputVal,
2024 unsigned &UsedOprMask,
2025 const MCSubtargetInfo &STI) {
2026 int InvalidId = OPR_ID_UNKNOWN;
2027 for (int Idx = 0; Idx < Size; ++Idx) {
2028 const auto &Op = Opr[Idx];
2029 if (Op.Name == Name) {
2030 if (!Op.isSupported(STI)) {
2031 InvalidId = OPR_ID_UNSUPPORTED;
2032 continue;
2033 }
2034 auto OprMask = Op.getMask();
2035 if (OprMask & UsedOprMask)
2036 return OPR_ID_DUPLICATE;
2037 UsedOprMask |= OprMask;
2038 return encodeCustomOperandVal(Op, InputVal);
2039 }
2040 }
2041 return InvalidId;
2042}
2043
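// Illustrative behavior of the encoder above (sketch, not from the original
// file; Opr/Size stand for any custom-operand table, e.g. the DepCtr table
// below, and "fieldname" is a hypothetical entry name):
//
//   unsigned UsedOprMask = 0;
//   int First = encodeCustomOperand(Opr, Size, "fieldname", 1, UsedOprMask, STI);
//   int Again = encodeCustomOperand(Opr, Size, "fieldname", 1, UsedOprMask, STI);
//   // First holds the packed value (or OPR_VAL_INVALID if 1 is out of range);
//   // Again is OPR_ID_DUPLICATE because the field's mask is already marked used.
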
2044//===----------------------------------------------------------------------===//
2045// DepCtr
2046//===----------------------------------------------------------------------===//
2047
2048namespace DepCtr {
2049
2050 int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI) {
2051 static int Default = -1;
2052 if (Default == -1)
2053 Default = getDefaultCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, STI);
2054 return Default;
2055}
2056
2057bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal,
2058 const MCSubtargetInfo &STI) {
2059 return isSymbolicCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, Code,
2060 HasNonDefaultVal, STI);
2061}
2062
2063bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val,
2064 bool &IsDefault, const MCSubtargetInfo &STI) {
2065 return decodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Code, Id, Name, Val,
2066 IsDefault, STI);
2067}
2068
2069int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask,
2070 const MCSubtargetInfo &STI) {
2071 return encodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Name, Val, UsedOprMask,
2072 STI);
2073}
2074
2075unsigned getVaVdstBitMask() { return (1 << getVaVdstBitWidth()) - 1; }
2076
2077unsigned getVmVsrcBitMask() { return (1 << getVmVsrcBitWidth()) - 1; }
2078
2079unsigned decodeFieldVmVsrc(unsigned Encoded) {
2080 return unpackBits(Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());
2081}
2082
2083unsigned decodeFieldVaVdst(unsigned Encoded) {
2084 return unpackBits(Encoded, getVaVdstBitShift(), getVaVdstBitWidth());
2085}
2086
2087unsigned decodeFieldSaSdst(unsigned Encoded) {
2088 return unpackBits(Encoded, getSaSdstBitShift(), getSaSdstBitWidth());
2089}
2090
2091unsigned decodeFieldVaSdst(unsigned Encoded) {
2092 return unpackBits(Encoded, getVaSdstBitShift(), getVaSdstBitWidth());
2093}
2094
2095unsigned decodeFieldVaVcc(unsigned Encoded) {
2096 return unpackBits(Encoded, getVaVccBitShift(), getVaVccBitWidth());
2097}
2098
2099unsigned decodeFieldVaSsrc(unsigned Encoded) {
2100 return unpackBits(Encoded, getVaSsrcBitShift(), getVaSsrcBitWidth());
2101}
2102
2103unsigned decodeFieldHoldCnt(unsigned Encoded) {
2104 return unpackBits(Encoded, getHoldCntBitShift(), getHoldCntWidth());
2105}
2106
2107unsigned encodeFieldVmVsrc(unsigned Encoded, unsigned VmVsrc) {
2108 return packBits(VmVsrc, Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());
2109}
2110
2111unsigned encodeFieldVmVsrc(unsigned VmVsrc, const MCSubtargetInfo &STI) {
2112 unsigned Encoded = getDefaultDepCtrEncoding(STI);
2113 return encodeFieldVmVsrc(Encoded, VmVsrc);
2114}
2115
2116unsigned encodeFieldVaVdst(unsigned Encoded, unsigned VaVdst) {
2117 return packBits(VaVdst, Encoded, getVaVdstBitShift(), getVaVdstBitWidth());
2118}
2119
2120unsigned encodeFieldVaVdst(unsigned VaVdst, const MCSubtargetInfo &STI) {
2121 unsigned Encoded = getDefaultDepCtrEncoding(STI);
2122 return encodeFieldVaVdst(Encoded, VaVdst);
2123}
2124
2125unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst) {
2126 return packBits(SaSdst, Encoded, getSaSdstBitShift(), getSaSdstBitWidth());
2127}
2128
2129unsigned encodeFieldSaSdst(unsigned SaSdst, const MCSubtargetInfo &STI) {
2130 unsigned Encoded = getDefaultDepCtrEncoding(STI);
2131 return encodeFieldSaSdst(Encoded, SaSdst);
2132}
2133
2134unsigned encodeFieldVaSdst(unsigned Encoded, unsigned VaSdst) {
2135 return packBits(VaSdst, Encoded, getVaSdstBitShift(), getVaSdstBitWidth());
2136}
2137
2138unsigned encodeFieldVaSdst(unsigned VaSdst, const MCSubtargetInfo &STI) {
2139 unsigned Encoded = getDefaultDepCtrEncoding(STI);
2140 return encodeFieldVaSdst(Encoded, VaSdst);
2141}
2142
2143unsigned encodeFieldVaVcc(unsigned Encoded, unsigned VaVcc) {
2144 return packBits(VaVcc, Encoded, getVaVccBitShift(), getVaVccBitWidth());
2145}
2146
2147unsigned encodeFieldVaVcc(unsigned VaVcc, const MCSubtargetInfo &STI) {
2148 unsigned Encoded = getDefaultDepCtrEncoding(STI);
2149 return encodeFieldVaVcc(Encoded, VaVcc);
2150}
2151
2152unsigned encodeFieldVaSsrc(unsigned Encoded, unsigned VaSsrc) {
2153 return packBits(VaSsrc, Encoded, getVaSsrcBitShift(), getVaSsrcBitWidth());
2154}
2155
2156unsigned encodeFieldVaSsrc(unsigned VaSsrc, const MCSubtargetInfo &STI) {
2157 unsigned Encoded = getDefaultDepCtrEncoding(STI);
2158 return encodeFieldVaSsrc(Encoded, VaSsrc);
2159}
2160
2161unsigned encodeFieldHoldCnt(unsigned Encoded, unsigned HoldCnt) {
2162 return packBits(HoldCnt, Encoded, getHoldCntBitShift(), getHoldCntWidth());
2163}
2164
2165unsigned encodeFieldHoldCnt(unsigned HoldCnt, const MCSubtargetInfo &STI) {
2166 unsigned Encoded = getDefaultDepCtrEncoding(STI);
2167 return encodeFieldHoldCnt(Encoded, HoldCnt);
2168}
2169
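// Illustrative round trip for one depctr field (sketch, not from the original
// file; STI stands for any GFX10+ subtarget, and all other fields keep their
// defaults from getDefaultDepCtrEncoding):
//
//   unsigned Enc = encodeFieldVmVsrc(/*VmVsrc=*/0, STI);
//   unsigned VmVsrc = decodeFieldVmVsrc(Enc); // == 0
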
2170} // namespace DepCtr
2171
2172//===----------------------------------------------------------------------===//
2173// exp tgt
2174//===----------------------------------------------------------------------===//
2175
2176namespace Exp {
2177
2178struct ExpTgt {
2179 StringLiteral Name;
2180 unsigned Tgt;
2181 unsigned MaxIndex;
2182};
2183
2184// clang-format off
2185static constexpr ExpTgt ExpTgtInfo[] = {
2186 {{"null"}, ET_NULL, ET_NULL_MAX_IDX},
2187 {{"mrtz"}, ET_MRTZ, ET_MRTZ_MAX_IDX},
2188 {{"prim"}, ET_PRIM, ET_PRIM_MAX_IDX},
2189 {{"mrt"}, ET_MRT0, ET_MRT_MAX_IDX},
2190 {{"pos"}, ET_POS0, ET_POS_MAX_IDX},
2191 {{"dual_src_blend"},ET_DUAL_SRC_BLEND0, ET_DUAL_SRC_BLEND_MAX_IDX},
2192 {{"param"}, ET_PARAM0, ET_PARAM_MAX_IDX},
2193};
2194// clang-format on
2195
2196bool getTgtName(unsigned Id, StringRef &Name, int &Index) {
2197 for (const ExpTgt &Val : ExpTgtInfo) {
2198 if (Val.Tgt <= Id && Id <= Val.Tgt + Val.MaxIndex) {
2199 Index = (Val.MaxIndex == 0) ? -1 : (Id - Val.Tgt);
2200 Name = Val.Name;
2201 return true;
2202 }
2203 }
2204 return false;
2205}
2206
2207unsigned getTgtId(const StringRef Name) {
2208
2209 for (const ExpTgt &Val : ExpTgtInfo) {
2210 if (Val.MaxIndex == 0 && Name == Val.Name)
2211 return Val.Tgt;
2212
2213 if (Val.MaxIndex > 0 && Name.starts_with(Val.Name)) {
2214 StringRef Suffix = Name.drop_front(Val.Name.size());
2215
2216 unsigned Id;
2217 if (Suffix.getAsInteger(10, Id) || Id > Val.MaxIndex)
2218 return ET_INVALID;
2219
2220 // Disallow leading zeroes.
2221 if (Suffix.size() > 1 && Suffix[0] == '0')
2222 return ET_INVALID;
2223
2224 return Val.Tgt + Id;
2225 }
2226 }
2227 return ET_INVALID;
2228}
2229
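// Examples of the parsing rules above (illustrative, derived from the
// ExpTgtInfo table):
//
//   getTgtId("prim")  == ET_PRIM       (MaxIndex == 0, exact match)
//   getTgtId("pos3")  == ET_POS0 + 3   (indexed target)
//   getTgtId("pos03") == ET_INVALID    (leading zero rejected)
//   getTgtId("pos5")  == ET_INVALID    (index above ET_POS_MAX_IDX)
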
2230bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI) {
2231 switch (Id) {
2232 case ET_NULL:
2233 return !isGFX11Plus(STI);
2234 case ET_POS4:
2235 case ET_PRIM:
2236 return isGFX10Plus(STI);
2237 case ET_DUAL_SRC_BLEND0:
2238 case ET_DUAL_SRC_BLEND1:
2239 return isGFX11Plus(STI);
2240 default:
2241 if (Id >= ET_PARAM0 && Id <= ET_PARAM31)
2242 return !isGFX11Plus(STI);
2243 return true;
2244 }
2245}
2246
2247} // namespace Exp
2248
2249//===----------------------------------------------------------------------===//
2250// MTBUF Format
2251//===----------------------------------------------------------------------===//
2252
2253namespace MTBUFFormat {
2254
2255int64_t getDfmt(const StringRef Name) {
2256 for (int Id = DFMT_MIN; Id <= DFMT_MAX; ++Id) {
2257 if (Name == DfmtSymbolic[Id])
2258 return Id;
2259 }
2260 return DFMT_UNDEF;
2261}
2262
2263 StringRef getDfmtName(unsigned Id) {
2264 assert(Id <= DFMT_MAX);
2265 return DfmtSymbolic[Id];
2266}
2267
2268 static StringLiteral const *getNfmtLookupTable(const MCSubtargetInfo &STI) {
2269 if (isSI(STI) || isCI(STI))
2270 return NfmtSymbolicSICI;
2271 if (isVI(STI) || isGFX9(STI))
2272 return NfmtSymbolicVI;
2273 return NfmtSymbolicGFX10;
2274}
2275
2276int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI) {
2277 const auto *lookupTable = getNfmtLookupTable(STI);
2278 for (int Id = NFMT_MIN; Id <= NFMT_MAX; ++Id) {
2279 if (Name == lookupTable[Id])
2280 return Id;
2281 }
2282 return NFMT_UNDEF;
2283}
2284
2285StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI) {
2286 assert(Id <= NFMT_MAX);
2287 return getNfmtLookupTable(STI)[Id];
2288}
2289
2290bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI) {
2291 unsigned Dfmt;
2292 unsigned Nfmt;
2293 decodeDfmtNfmt(Id, Dfmt, Nfmt);
2294 return isValidNfmt(Nfmt, STI);
2295}
2296
2297bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI) {
2298 return !getNfmtName(Id, STI).empty();
2299}
2300
2301int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt) {
2302 return (Dfmt << DFMT_SHIFT) | (Nfmt << NFMT_SHIFT);
2303}
2304
2305void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt) {
2306 Dfmt = (Format >> DFMT_SHIFT) & DFMT_MASK;
2307 Nfmt = (Format >> NFMT_SHIFT) & NFMT_MASK;
2308}
2309
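// Illustrative round trip (sketch, not from the original file; DFMT_32 and
// NFMT_FLOAT are assumed to be the MTBUFFormat enumerators for a 32-bit
// float format):
//
//   int64_t Fmt = encodeDfmtNfmt(DFMT_32, NFMT_FLOAT);
//   unsigned Dfmt, Nfmt;
//   decodeDfmtNfmt(Fmt, Dfmt, Nfmt); // Dfmt == DFMT_32, Nfmt == NFMT_FLOAT
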
2310int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI) {
2311 if (isGFX11Plus(STI)) {
2312 for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
2313 if (Name == UfmtSymbolicGFX11[Id])
2314 return Id;
2315 }
2316 } else {
2317 for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
2318 if (Name == UfmtSymbolicGFX10[Id])
2319 return Id;
2320 }
2321 }
2322 return UFMT_UNDEF;
2323}
2324
2325 StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI) {
2326 if (isValidUnifiedFormat(Id, STI))
2327 return isGFX10(STI) ? UfmtSymbolicGFX10[Id] : UfmtSymbolicGFX11[Id];
2328 return "";
2329}
2330
2331bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI) {
2332 return isGFX10(STI) ? Id <= UfmtGFX10::UFMT_LAST : Id <= UfmtGFX11::UFMT_LAST;
2333}
2334
2335int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt,
2336 const MCSubtargetInfo &STI) {
2337 int64_t Fmt = encodeDfmtNfmt(Dfmt, Nfmt);
2338 if (isGFX11Plus(STI)) {
2339 for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
2340 if (Fmt == DfmtNfmt2UFmtGFX11[Id])
2341 return Id;
2342 }
2343 } else {
2344 for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
2345 if (Fmt == DfmtNfmt2UFmtGFX10[Id])
2346 return Id;
2347 }
2348 }
2349 return UFMT_UNDEF;
2350}
2351
2352bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI) {
2353 return isGFX10Plus(STI) ? (Val <= UFMT_MAX) : (Val <= DFMT_NFMT_MAX);
2354}
2355
2356 unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI) {
2357 if (isGFX10Plus(STI))
2358 return UFMT_DEFAULT;
2359 return DFMT_NFMT_DEFAULT;
2360}
2361
2362} // namespace MTBUFFormat
2363
2364//===----------------------------------------------------------------------===//
2365// SendMsg
2366//===----------------------------------------------------------------------===//
2367
2368namespace SendMsg {
2369
2370 static uint64_t getMsgIdMask(const MCSubtargetInfo &STI) {
2371 return isGFX11Plus(STI) ? ID_MASK_GFX11Plus_ : ID_MASK_PreGFX11_;
2372 }
2373
2374bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI) {
2375 return (MsgId & ~(getMsgIdMask(STI))) == 0;
2376}
2377
2378bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI,
2379 bool Strict) {
2380 assert(isValidMsgId(MsgId, STI));
2381
2382 if (!Strict)
2383 return 0 <= OpId && isUInt<OP_WIDTH_>(OpId);
2384
2385 if (msgRequiresOp(MsgId, STI)) {
2386 if (MsgId == ID_GS_PreGFX11 && OpId == OP_GS_NOP)
2387 return false;
2388
2389 return !getMsgOpName(MsgId, OpId, STI).empty();
2390 }
2391
2392 return OpId == OP_NONE_;
2393}
2394
2395bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId,
2396 const MCSubtargetInfo &STI, bool Strict) {
2397 assert(isValidMsgOp(MsgId, OpId, STI, Strict));
2398
2399 if (!Strict)
2400 return 0 <= StreamId && isUInt<STREAM_ID_WIDTH_>(StreamId);
2401
2402 if (!isGFX11Plus(STI)) {
2403 switch (MsgId) {
2404 case ID_GS_PreGFX11:
2405 return STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_;
2406 case ID_GS_DONE_PreGFX11:
2407 return (OpId == OP_GS_NOP)
2408 ? (StreamId == STREAM_ID_NONE_)
2409 : (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_);
2410 }
2411 }
2412 return StreamId == STREAM_ID_NONE_;
2413}
2414
2415bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI) {
2416 return MsgId == ID_SYSMSG ||
2417 (!isGFX11Plus(STI) &&
2418 (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11));
2419}
2420
2421bool msgSupportsStream(int64_t MsgId, int64_t OpId,
2422 const MCSubtargetInfo &STI) {
2423 return !isGFX11Plus(STI) &&
2424 (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11) &&
2425 OpId != OP_GS_NOP;
2426}
2427
2428void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId,
2429 uint16_t &StreamId, const MCSubtargetInfo &STI) {
2430 MsgId = Val & getMsgIdMask(STI);
2431 if (isGFX11Plus(STI)) {
2432 OpId = 0;
2433 StreamId = 0;
2434 } else {
2435 OpId = (Val & OP_MASK_) >> OP_SHIFT_;
2436 StreamId = (Val & STREAM_ID_MASK_) >> STREAM_ID_SHIFT_;
2437 }
2438}
2439
2440 uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId) {
2441 return MsgId | (OpId << OP_SHIFT_) | (StreamId << STREAM_ID_SHIFT_);
2442}
2443
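// Illustrative pre-GFX11 round trip (sketch, not from the original file;
// OP_GS_EMIT and stream 1 are example values, STI stands for a pre-GFX11
// subtarget):
//
//   uint64_t Msg = encodeMsg(ID_GS_PreGFX11, OP_GS_EMIT, /*StreamId=*/1);
//   uint16_t MsgId, OpId, StreamId;
//   decodeMsg(Msg, MsgId, OpId, StreamId, STI); // recovers all three fields
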
2444} // namespace SendMsg
2445
2446//===----------------------------------------------------------------------===//
2447//
2448//===----------------------------------------------------------------------===//
2449
2451 return F.getFnAttributeAsParsedInteger("InitialPSInputAddr", 0);
2452}
2453
2454 bool getHasColorExport(const Function &F) {
2455 // As a safe default always respond as if PS has color exports.
2456 return F.getFnAttributeAsParsedInteger(
2457 "amdgpu-color-export",
2458 F.getCallingConv() == CallingConv::AMDGPU_PS ? 1 : 0) != 0;
2459}
2460
2462 return F.getFnAttributeAsParsedInteger("amdgpu-depth-export", 0) != 0;
2463}
2464
2465 unsigned getDynamicVGPRBlockSize(const Function &F) {
2466 unsigned BlockSize =
2467 F.getFnAttributeAsParsedInteger("amdgpu-dynamic-vgpr-block-size", 0);
2468
2469 if (BlockSize == 16 || BlockSize == 32)
2470 return BlockSize;
2471
2472 return 0;
2473}
2474
2475bool hasXNACK(const MCSubtargetInfo &STI) {
2476 return STI.hasFeature(AMDGPU::FeatureXNACK);
2477}
2478
2479bool hasSRAMECC(const MCSubtargetInfo &STI) {
2480 return STI.hasFeature(AMDGPU::FeatureSRAMECC);
2481}
2482
2483 bool hasMIMG_R128(const MCSubtargetInfo &STI) {
2484 return STI.hasFeature(AMDGPU::FeatureMIMG_R128) &&
2485 !STI.hasFeature(AMDGPU::FeatureR128A16);
2486}
2487
2488bool hasA16(const MCSubtargetInfo &STI) {
2489 return STI.hasFeature(AMDGPU::FeatureA16);
2490}
2491
2492bool hasG16(const MCSubtargetInfo &STI) {
2493 return STI.hasFeature(AMDGPU::FeatureG16);
2494}
2495
2496 bool hasPackedD16(const MCSubtargetInfo &STI) {
2497 return !STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem) && !isCI(STI) &&
2498 !isSI(STI);
2499}
2500
2501bool hasGDS(const MCSubtargetInfo &STI) {
2502 return STI.hasFeature(AMDGPU::FeatureGDS);
2503}
2504
2505unsigned getNSAMaxSize(const MCSubtargetInfo &STI, bool HasSampler) {
2506 auto Version = getIsaVersion(STI.getCPU());
2507 if (Version.Major == 10)
2508 return Version.Minor >= 3 ? 13 : 5;
2509 if (Version.Major == 11)
2510 return 5;
2511 if (Version.Major >= 12)
2512 return HasSampler ? 4 : 5;
2513 return 0;
2514}
2515
2516 unsigned getMaxNumUserSGPRs(const MCSubtargetInfo &STI) {
2517 if (isGFX1250(STI))
2518 return 32;
2519 return 16;
2520}
2521
2522bool isSI(const MCSubtargetInfo &STI) {
2523 return STI.hasFeature(AMDGPU::FeatureSouthernIslands);
2524}
2525
2526bool isCI(const MCSubtargetInfo &STI) {
2527 return STI.hasFeature(AMDGPU::FeatureSeaIslands);
2528}
2529
2530bool isVI(const MCSubtargetInfo &STI) {
2531 return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);
2532}
2533
2534bool isGFX9(const MCSubtargetInfo &STI) {
2535 return STI.hasFeature(AMDGPU::FeatureGFX9);
2536}
2537
2538 bool isGFX9_GFX10(const MCSubtargetInfo &STI) {
2539 return isGFX9(STI) || isGFX10(STI);
2540}
2541
2542 bool isGFX9_GFX10_GFX11(const MCSubtargetInfo &STI) {
2543 return isGFX9(STI) || isGFX10(STI) || isGFX11(STI);
2544}
2545
2546 bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI) {
2547 return isVI(STI) || isGFX9(STI) || isGFX10(STI);
2548}
2549
2550bool isGFX8Plus(const MCSubtargetInfo &STI) {
2551 return isVI(STI) || isGFX9Plus(STI);
2552}
2553
2554bool isGFX9Plus(const MCSubtargetInfo &STI) {
2555 return isGFX9(STI) || isGFX10Plus(STI);
2556}
2557
2558bool isNotGFX9Plus(const MCSubtargetInfo &STI) { return !isGFX9Plus(STI); }
2559
2560bool isGFX10(const MCSubtargetInfo &STI) {
2561 return STI.hasFeature(AMDGPU::FeatureGFX10);
2562}
2563
2564 bool isGFX10_GFX11(const MCSubtargetInfo &STI) {
2565 return isGFX10(STI) || isGFX11(STI);
2566}
2567
2568 bool isGFX10Plus(const MCSubtargetInfo &STI) {
2569 return isGFX10(STI) || isGFX11Plus(STI);
2570}
2571
2572bool isGFX11(const MCSubtargetInfo &STI) {
2573 return STI.hasFeature(AMDGPU::FeatureGFX11);
2574}
2575
2576 bool isGFX11Plus(const MCSubtargetInfo &STI) {
2577 return isGFX11(STI) || isGFX12Plus(STI);
2578}
2579
2580bool isGFX12(const MCSubtargetInfo &STI) {
2581 return STI.getFeatureBits()[AMDGPU::FeatureGFX12];
2582}
2583
2584bool isGFX12Plus(const MCSubtargetInfo &STI) { return isGFX12(STI); }
2585
2586bool isNotGFX12Plus(const MCSubtargetInfo &STI) { return !isGFX12Plus(STI); }
2587
2588bool isGFX1250(const MCSubtargetInfo &STI) {
2589 return STI.getFeatureBits()[AMDGPU::FeatureGFX1250Insts];
2590}
2591
2592 bool supportsWGP(const MCSubtargetInfo &STI) {
2593 if (isGFX1250(STI))
2594 return false;
2595 return isGFX10Plus(STI);
2596}
2597
2598bool isNotGFX11Plus(const MCSubtargetInfo &STI) { return !isGFX11Plus(STI); }
2599
2600 bool isNotGFX10Plus(const MCSubtargetInfo &STI) {
2601 return isSI(STI) || isCI(STI) || isVI(STI) || isGFX9(STI);
2602}
2603
2604 bool isGFX10Before1030(const MCSubtargetInfo &STI) {
2605 return isGFX10(STI) && !AMDGPU::isGFX10_BEncoding(STI);
2606}
2607
2608 bool isGCN3Encoding(const MCSubtargetInfo &STI) {
2609 return STI.hasFeature(AMDGPU::FeatureGCN3Encoding);
2610}
2611
2612 bool isGFX10_AEncoding(const MCSubtargetInfo &STI) {
2613 return STI.hasFeature(AMDGPU::FeatureGFX10_AEncoding);
2614}
2615
2616 bool isGFX10_BEncoding(const MCSubtargetInfo &STI) {
2617 return STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding);
2618}
2619
2620 bool hasGFX10_3Insts(const MCSubtargetInfo &STI) {
2621 return STI.hasFeature(AMDGPU::FeatureGFX10_3Insts);
2622}
2623
2624 bool isGFX10_3_GFX11(const MCSubtargetInfo &STI) {
2625 return isGFX10_BEncoding(STI) && !isGFX12Plus(STI);
2626}
2627
2628bool isGFX90A(const MCSubtargetInfo &STI) {
2629 return STI.hasFeature(AMDGPU::FeatureGFX90AInsts);
2630}
2631
2632bool isGFX940(const MCSubtargetInfo &STI) {
2633 return STI.hasFeature(AMDGPU::FeatureGFX940Insts);
2634}
2635
2636 bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI) {
2637 return STI.hasFeature(AMDGPU::FeatureArchitectedFlatScratch);
2638}
2639
2640 bool hasMAIInsts(const MCSubtargetInfo &STI) {
2641 return STI.hasFeature(AMDGPU::FeatureMAIInsts);
2642}
2643
2644bool hasVOPD(const MCSubtargetInfo &STI) {
2645 return STI.hasFeature(AMDGPU::FeatureVOPD);
2646}
2647
2648 bool hasDPPSrc1SGPR(const MCSubtargetInfo &STI) {
2649 return STI.hasFeature(AMDGPU::FeatureDPPSrc1SGPR);
2650}
2651
2652 bool hasKernargPreload(const MCSubtargetInfo &STI) {
2653 return STI.hasFeature(AMDGPU::FeatureKernargPreload);
2654}
2655
2656int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR,
2657 int32_t ArgNumVGPR) {
2658 if (has90AInsts && ArgNumAGPR)
2659 return alignTo(ArgNumVGPR, 4) + ArgNumAGPR;
2660 return std::max(ArgNumVGPR, ArgNumAGPR);
2661}
2662
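// Worked example (illustrative, not from the original file): on gfx90a+,
// AGPRs are allocated after the VGPR count is rounded up to a multiple of 4,
// so mixed usage costs more than the plain maximum:
//
//   getTotalNumVGPRs(/*has90AInsts=*/true,  /*ArgNumAGPR=*/3, /*ArgNumVGPR=*/5) == 11
//   getTotalNumVGPRs(/*has90AInsts=*/false, /*ArgNumAGPR=*/3, /*ArgNumVGPR=*/5) == 5
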
2663 bool isSGPR(MCRegister Reg, const MCRegisterInfo *TRI) {
2664 const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
2665 const MCRegister FirstSubReg = TRI->getSubReg(Reg, AMDGPU::sub0);
2666 return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
2667 Reg == AMDGPU::SCC;
2668}
2669
2670 bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI) {
2671 return MRI.getEncodingValue(Reg) & AMDGPU::HWEncoding::IS_HI16;
2672}
2673
2674#define MAP_REG2REG \
2675 using namespace AMDGPU; \
2676 switch (Reg.id()) { \
2677 default: \
2678 return Reg; \
2679 CASE_CI_VI(FLAT_SCR) \
2680 CASE_CI_VI(FLAT_SCR_LO) \
2681 CASE_CI_VI(FLAT_SCR_HI) \
2682 CASE_VI_GFX9PLUS(TTMP0) \
2683 CASE_VI_GFX9PLUS(TTMP1) \
2684 CASE_VI_GFX9PLUS(TTMP2) \
2685 CASE_VI_GFX9PLUS(TTMP3) \
2686 CASE_VI_GFX9PLUS(TTMP4) \
2687 CASE_VI_GFX9PLUS(TTMP5) \
2688 CASE_VI_GFX9PLUS(TTMP6) \
2689 CASE_VI_GFX9PLUS(TTMP7) \
2690 CASE_VI_GFX9PLUS(TTMP8) \
2691 CASE_VI_GFX9PLUS(TTMP9) \
2692 CASE_VI_GFX9PLUS(TTMP10) \
2693 CASE_VI_GFX9PLUS(TTMP11) \
2694 CASE_VI_GFX9PLUS(TTMP12) \
2695 CASE_VI_GFX9PLUS(TTMP13) \
2696 CASE_VI_GFX9PLUS(TTMP14) \
2697 CASE_VI_GFX9PLUS(TTMP15) \
2698 CASE_VI_GFX9PLUS(TTMP0_TTMP1) \
2699 CASE_VI_GFX9PLUS(TTMP2_TTMP3) \
2700 CASE_VI_GFX9PLUS(TTMP4_TTMP5) \
2701 CASE_VI_GFX9PLUS(TTMP6_TTMP7) \
2702 CASE_VI_GFX9PLUS(TTMP8_TTMP9) \
2703 CASE_VI_GFX9PLUS(TTMP10_TTMP11) \
2704 CASE_VI_GFX9PLUS(TTMP12_TTMP13) \
2705 CASE_VI_GFX9PLUS(TTMP14_TTMP15) \
2706 CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \
2707 CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \
2708 CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \
2709 CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \
2710 CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
2711 CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
2712 CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
2713 CASE_VI_GFX9PLUS( \
2714 TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
2715 CASE_GFXPRE11_GFX11PLUS(M0) \
2716 CASE_GFXPRE11_GFX11PLUS(SGPR_NULL) \
2717 CASE_GFXPRE11_GFX11PLUS_TO(SGPR_NULL64, SGPR_NULL) \
2718 }
2719
2720#define CASE_CI_VI(node) \
2721 assert(!isSI(STI)); \
2722 case node: \
2723 return isCI(STI) ? node##_ci : node##_vi;
2724
2725#define CASE_VI_GFX9PLUS(node) \
2726 case node: \
2727 return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi;
2728
2729#define CASE_GFXPRE11_GFX11PLUS(node) \
2730 case node: \
2731 return isGFX11Plus(STI) ? node##_gfx11plus : node##_gfxpre11;
2732
2733#define CASE_GFXPRE11_GFX11PLUS_TO(node, result) \
2734 case node: \
2735 return isGFX11Plus(STI) ? result##_gfx11plus : result##_gfxpre11;
2736
2737 MCRegister getMCReg(MCRegister Reg, const MCSubtargetInfo &STI) {
2738 if (STI.getTargetTriple().getArch() == Triple::r600)
2739 return Reg;
2740 MAP_REG2REG
2741 }
2742
2743#undef CASE_CI_VI
2744#undef CASE_VI_GFX9PLUS
2745#undef CASE_GFXPRE11_GFX11PLUS
2746#undef CASE_GFXPRE11_GFX11PLUS_TO
2747
2748#define CASE_CI_VI(node) \
2749 case node##_ci: \
2750 case node##_vi: \
2751 return node;
2752#define CASE_VI_GFX9PLUS(node) \
2753 case node##_vi: \
2754 case node##_gfx9plus: \
2755 return node;
2756#define CASE_GFXPRE11_GFX11PLUS(node) \
2757 case node##_gfx11plus: \
2758 case node##_gfxpre11: \
2759 return node;
2760#define CASE_GFXPRE11_GFX11PLUS_TO(node, result)
2761
2762 MCRegister mc2PseudoReg(MCRegister Reg) { MAP_REG2REG }
2763
2764 bool isInlineValue(MCRegister Reg) {
2765 switch (Reg.id()) {
2766 case AMDGPU::SRC_SHARED_BASE_LO:
2767 case AMDGPU::SRC_SHARED_BASE:
2768 case AMDGPU::SRC_SHARED_LIMIT_LO:
2769 case AMDGPU::SRC_SHARED_LIMIT:
2770 case AMDGPU::SRC_PRIVATE_BASE_LO:
2771 case AMDGPU::SRC_PRIVATE_BASE:
2772 case AMDGPU::SRC_PRIVATE_LIMIT_LO:
2773 case AMDGPU::SRC_PRIVATE_LIMIT:
2774 case AMDGPU::SRC_FLAT_SCRATCH_BASE_LO:
2775 case AMDGPU::SRC_FLAT_SCRATCH_BASE_HI:
2776 case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
2777 return true;
2778 case AMDGPU::SRC_VCCZ:
2779 case AMDGPU::SRC_EXECZ:
2780 case AMDGPU::SRC_SCC:
2781 return true;
2782 case AMDGPU::SGPR_NULL:
2783 return true;
2784 default:
2785 return false;
2786 }
2787}
2788
2789#undef CASE_CI_VI
2790#undef CASE_VI_GFX9PLUS
2791#undef CASE_GFXPRE11_GFX11PLUS
2792#undef CASE_GFXPRE11_GFX11PLUS_TO
2793#undef MAP_REG2REG
2794
2795bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2796 assert(OpNo < Desc.NumOperands);
2797 unsigned OpType = Desc.operands()[OpNo].OperandType;
2798 return OpType >= AMDGPU::OPERAND_KIMM_FIRST &&
2799 OpType <= AMDGPU::OPERAND_KIMM_LAST;
2800}
2801
2802bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2803 assert(OpNo < Desc.NumOperands);
2804 unsigned OpType = Desc.operands()[OpNo].OperandType;
2805 switch (OpType) {
2806 case AMDGPU::OPERAND_REG_IMM_FP32:
2807 case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
2808 case AMDGPU::OPERAND_REG_IMM_FP64:
2809 case AMDGPU::OPERAND_REG_IMM_FP16:
2810 case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
2811 case AMDGPU::OPERAND_REG_IMM_V2FP16:
2812 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
2813 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
2814 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
2815 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
2816 case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
2817 case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
2818 return true;
2819 default:
2820 return false;
2821 }
2822}
2823
2824bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2825 assert(OpNo < Desc.NumOperands);
2826 unsigned OpType = Desc.operands()[OpNo].OperandType;
2827 return (OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
2828 OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST) ||
2829 (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST &&
2830 OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST);
2831 }
2832
2833// Avoid using MCRegisterClass::getSize, since that function will go away
2834// (move from MC* level to Target* level). Return size in bits.
2835unsigned getRegBitWidth(unsigned RCID) {
2836 switch (RCID) {
2837 case AMDGPU::VGPR_16RegClassID:
2838 case AMDGPU::VGPR_16_Lo128RegClassID:
2839 case AMDGPU::SGPR_LO16RegClassID:
2840 case AMDGPU::AGPR_LO16RegClassID:
2841 return 16;
2842 case AMDGPU::SGPR_32RegClassID:
2843 case AMDGPU::VGPR_32RegClassID:
2844 case AMDGPU::VGPR_32_Lo256RegClassID:
2845 case AMDGPU::VRegOrLds_32RegClassID:
2846 case AMDGPU::AGPR_32RegClassID:
2847 case AMDGPU::VS_32RegClassID:
2848 case AMDGPU::AV_32RegClassID:
2849 case AMDGPU::SReg_32RegClassID:
2850 case AMDGPU::SReg_32_XM0RegClassID:
2851 case AMDGPU::SRegOrLds_32RegClassID:
2852 return 32;
2853 case AMDGPU::SGPR_64RegClassID:
2854 case AMDGPU::VS_64RegClassID:
2855 case AMDGPU::SReg_64RegClassID:
2856 case AMDGPU::VReg_64RegClassID:
2857 case AMDGPU::AReg_64RegClassID:
2858 case AMDGPU::SReg_64_XEXECRegClassID:
2859 case AMDGPU::VReg_64_Align2RegClassID:
2860 case AMDGPU::AReg_64_Align2RegClassID:
2861 case AMDGPU::AV_64RegClassID:
2862 case AMDGPU::AV_64_Align2RegClassID:
2863 case AMDGPU::VReg_64_Lo256_Align2RegClassID:
2864 case AMDGPU::VS_64_Lo256RegClassID:
2865 return 64;
2866 case AMDGPU::SGPR_96RegClassID:
2867 case AMDGPU::SReg_96RegClassID:
2868 case AMDGPU::VReg_96RegClassID:
2869 case AMDGPU::AReg_96RegClassID:
2870 case AMDGPU::VReg_96_Align2RegClassID:
2871 case AMDGPU::AReg_96_Align2RegClassID:
2872 case AMDGPU::AV_96RegClassID:
2873 case AMDGPU::AV_96_Align2RegClassID:
2874 case AMDGPU::VReg_96_Lo256_Align2RegClassID:
2875 return 96;
2876 case AMDGPU::SGPR_128RegClassID:
2877 case AMDGPU::SReg_128RegClassID:
2878 case AMDGPU::VReg_128RegClassID:
2879 case AMDGPU::AReg_128RegClassID:
2880 case AMDGPU::VReg_128_Align2RegClassID:
2881 case AMDGPU::AReg_128_Align2RegClassID:
2882 case AMDGPU::AV_128RegClassID:
2883 case AMDGPU::AV_128_Align2RegClassID:
2884 case AMDGPU::SReg_128_XNULLRegClassID:
2885 case AMDGPU::VReg_128_Lo256_Align2RegClassID:
2886 return 128;
2887 case AMDGPU::SGPR_160RegClassID:
2888 case AMDGPU::SReg_160RegClassID:
2889 case AMDGPU::VReg_160RegClassID:
2890 case AMDGPU::AReg_160RegClassID:
2891 case AMDGPU::VReg_160_Align2RegClassID:
2892 case AMDGPU::AReg_160_Align2RegClassID:
2893 case AMDGPU::AV_160RegClassID:
2894 case AMDGPU::AV_160_Align2RegClassID:
2895 case AMDGPU::VReg_160_Lo256_Align2RegClassID:
2896 return 160;
2897 case AMDGPU::SGPR_192RegClassID:
2898 case AMDGPU::SReg_192RegClassID:
2899 case AMDGPU::VReg_192RegClassID:
2900 case AMDGPU::AReg_192RegClassID:
2901 case AMDGPU::VReg_192_Align2RegClassID:
2902 case AMDGPU::AReg_192_Align2RegClassID:
2903 case AMDGPU::AV_192RegClassID:
2904 case AMDGPU::AV_192_Align2RegClassID:
2905 case AMDGPU::VReg_192_Lo256_Align2RegClassID:
2906 return 192;
2907 case AMDGPU::SGPR_224RegClassID:
2908 case AMDGPU::SReg_224RegClassID:
2909 case AMDGPU::VReg_224RegClassID:
2910 case AMDGPU::AReg_224RegClassID:
2911 case AMDGPU::VReg_224_Align2RegClassID:
2912 case AMDGPU::AReg_224_Align2RegClassID:
2913 case AMDGPU::AV_224RegClassID:
2914 case AMDGPU::AV_224_Align2RegClassID:
2915 case AMDGPU::VReg_224_Lo256_Align2RegClassID:
2916 return 224;
2917 case AMDGPU::SGPR_256RegClassID:
2918 case AMDGPU::SReg_256RegClassID:
2919 case AMDGPU::VReg_256RegClassID:
2920 case AMDGPU::AReg_256RegClassID:
2921 case AMDGPU::VReg_256_Align2RegClassID:
2922 case AMDGPU::AReg_256_Align2RegClassID:
2923 case AMDGPU::AV_256RegClassID:
2924 case AMDGPU::AV_256_Align2RegClassID:
2925 case AMDGPU::SReg_256_XNULLRegClassID:
2926 case AMDGPU::VReg_256_Lo256_Align2RegClassID:
2927 return 256;
2928 case AMDGPU::SGPR_288RegClassID:
2929 case AMDGPU::SReg_288RegClassID:
2930 case AMDGPU::VReg_288RegClassID:
2931 case AMDGPU::AReg_288RegClassID:
2932 case AMDGPU::VReg_288_Align2RegClassID:
2933 case AMDGPU::AReg_288_Align2RegClassID:
2934 case AMDGPU::AV_288RegClassID:
2935 case AMDGPU::AV_288_Align2RegClassID:
2936 case AMDGPU::VReg_288_Lo256_Align2RegClassID:
2937 return 288;
2938 case AMDGPU::SGPR_320RegClassID:
2939 case AMDGPU::SReg_320RegClassID:
2940 case AMDGPU::VReg_320RegClassID:
2941 case AMDGPU::AReg_320RegClassID:
2942 case AMDGPU::VReg_320_Align2RegClassID:
2943 case AMDGPU::AReg_320_Align2RegClassID:
2944 case AMDGPU::AV_320RegClassID:
2945 case AMDGPU::AV_320_Align2RegClassID:
2946 case AMDGPU::VReg_320_Lo256_Align2RegClassID:
2947 return 320;
2948 case AMDGPU::SGPR_352RegClassID:
2949 case AMDGPU::SReg_352RegClassID:
2950 case AMDGPU::VReg_352RegClassID:
2951 case AMDGPU::AReg_352RegClassID:
2952 case AMDGPU::VReg_352_Align2RegClassID:
2953 case AMDGPU::AReg_352_Align2RegClassID:
2954 case AMDGPU::AV_352RegClassID:
2955 case AMDGPU::AV_352_Align2RegClassID:
2956 case AMDGPU::VReg_352_Lo256_Align2RegClassID:
2957 return 352;
2958 case AMDGPU::SGPR_384RegClassID:
2959 case AMDGPU::SReg_384RegClassID:
2960 case AMDGPU::VReg_384RegClassID:
2961 case AMDGPU::AReg_384RegClassID:
2962 case AMDGPU::VReg_384_Align2RegClassID:
2963 case AMDGPU::AReg_384_Align2RegClassID:
2964 case AMDGPU::AV_384RegClassID:
2965 case AMDGPU::AV_384_Align2RegClassID:
2966 case AMDGPU::VReg_384_Lo256_Align2RegClassID:
2967 return 384;
2968 case AMDGPU::SGPR_512RegClassID:
2969 case AMDGPU::SReg_512RegClassID:
2970 case AMDGPU::VReg_512RegClassID:
2971 case AMDGPU::AReg_512RegClassID:
2972 case AMDGPU::VReg_512_Align2RegClassID:
2973 case AMDGPU::AReg_512_Align2RegClassID:
2974 case AMDGPU::AV_512RegClassID:
2975 case AMDGPU::AV_512_Align2RegClassID:
2976 case AMDGPU::VReg_512_Lo256_Align2RegClassID:
2977 return 512;
2978 case AMDGPU::SGPR_1024RegClassID:
2979 case AMDGPU::SReg_1024RegClassID:
2980 case AMDGPU::VReg_1024RegClassID:
2981 case AMDGPU::AReg_1024RegClassID:
2982 case AMDGPU::VReg_1024_Align2RegClassID:
2983 case AMDGPU::AReg_1024_Align2RegClassID:
2984 case AMDGPU::AV_1024RegClassID:
2985 case AMDGPU::AV_1024_Align2RegClassID:
2986 case AMDGPU::VReg_1024_Lo256_Align2RegClassID:
2987 return 1024;
2988 default:
2989 llvm_unreachable("Unexpected register class");
2990 }
2991}
2992
2993unsigned getRegBitWidth(const MCRegisterClass &RC) {
2994 return getRegBitWidth(RC.getID());
2995}
2996
2997bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
2998 if (isInlinableIntLiteral(Literal))
2999 return true;
3000
3001 uint64_t Val = static_cast<uint64_t>(Literal);
3002 return (Val == llvm::bit_cast<uint64_t>(0.0)) ||
3003 (Val == llvm::bit_cast<uint64_t>(1.0)) ||
3004 (Val == llvm::bit_cast<uint64_t>(-1.0)) ||
3005 (Val == llvm::bit_cast<uint64_t>(0.5)) ||
3006 (Val == llvm::bit_cast<uint64_t>(-0.5)) ||
3007 (Val == llvm::bit_cast<uint64_t>(2.0)) ||
3008 (Val == llvm::bit_cast<uint64_t>(-2.0)) ||
3009 (Val == llvm::bit_cast<uint64_t>(4.0)) ||
3010 (Val == llvm::bit_cast<uint64_t>(-4.0)) ||
3011 (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
3012}
3013
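// Examples (illustrative, not from the original file; bit patterns are
// IEEE-754 doubles):
//
//   isInlinableLiteral64(0x3FF0000000000000, false) == true   // +1.0
//   isInlinableLiteral64(0x3FC45F306DC9C882, false) == false  // 1/(2*pi) needs HasInv2Pi
//   isInlinableLiteral64(0x3FC45F306DC9C882, true)  == true
//   isInlinableLiteral64(64, false)                 == true   // small-int range
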
3014bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
3015 if (isInlinableIntLiteral(Literal))
3016 return true;
3017
3018 // The actual type of the operand does not seem to matter as long
3019 // as the bits match one of the inline immediate values. For example:
3020 //
3021 // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
3022 // so it is a legal inline immediate.
3023 //
3024 // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
3025 // floating-point, so it is a legal inline immediate.
3026
3027 uint32_t Val = static_cast<uint32_t>(Literal);
3028 return (Val == llvm::bit_cast<uint32_t>(0.0f)) ||
3029 (Val == llvm::bit_cast<uint32_t>(1.0f)) ||
3030 (Val == llvm::bit_cast<uint32_t>(-1.0f)) ||
3031 (Val == llvm::bit_cast<uint32_t>(0.5f)) ||
3032 (Val == llvm::bit_cast<uint32_t>(-0.5f)) ||
3033 (Val == llvm::bit_cast<uint32_t>(2.0f)) ||
3034 (Val == llvm::bit_cast<uint32_t>(-2.0f)) ||
3035 (Val == llvm::bit_cast<uint32_t>(4.0f)) ||
3036 (Val == llvm::bit_cast<uint32_t>(-4.0f)) ||
3037 (Val == 0x3e22f983 && HasInv2Pi);
3038}
3039
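// Examples (illustrative, not from the original file):
//
//   isInlinableLiteral32(0x3F800000, false) == true  // 1.0f
//   isInlinableLiteral32(-2, false)         == true  // also the bits of -nan
//   isInlinableLiteral32(0x3E22F983, true)  == true  // 1/(2*pi), gated on HasInv2Pi
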
3040bool isInlinableLiteralBF16(int16_t Literal, bool HasInv2Pi) {
3041 if (!HasInv2Pi)
3042 return false;
3043 if (isInlinableIntLiteral(Literal))
3044 return true;
3045 uint16_t Val = static_cast<uint16_t>(Literal);
3046 return Val == 0x3F00 || // 0.5
3047 Val == 0xBF00 || // -0.5
3048 Val == 0x3F80 || // 1.0
3049 Val == 0xBF80 || // -1.0
3050 Val == 0x4000 || // 2.0
3051 Val == 0xC000 || // -2.0
3052 Val == 0x4080 || // 4.0
3053 Val == 0xC080 || // -4.0
3054 Val == 0x3E22; // 1.0 / (2.0 * pi)
3055}
3056
3057bool isInlinableLiteralI16(int32_t Literal, bool HasInv2Pi) {
3058 return isInlinableLiteral32(Literal, HasInv2Pi);
3059}
3060
3061bool isInlinableLiteralFP16(int16_t Literal, bool HasInv2Pi) {
3062 if (!HasInv2Pi)
3063 return false;
3064 if (isInlinableIntLiteral(Literal))
3065 return true;
3066 uint16_t Val = static_cast<uint16_t>(Literal);
3067 return Val == 0x3C00 || // 1.0
3068 Val == 0xBC00 || // -1.0
3069 Val == 0x3800 || // 0.5
3070 Val == 0xB800 || // -0.5
3071 Val == 0x4000 || // 2.0
3072 Val == 0xC000 || // -2.0
3073 Val == 0x4400 || // 4.0
3074 Val == 0xC400 || // -4.0
3075 Val == 0x3118; // 1/2pi
3076}
3077
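// Examples (illustrative, not from the original file; values are IEEE-754
// half bit patterns):
//
//   isInlinableLiteralFP16(0x3C00, true)  == true   // +1.0
//   isInlinableLiteralFP16(0x3118, true)  == true   // 1/(2*pi)
//   isInlinableLiteralFP16(0x3118, false) == false  // everything gated on HasInv2Pi
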
3078std::optional<unsigned> getInlineEncodingV216(bool IsFloat, uint32_t Literal) {
3079 // Unfortunately, the Instruction Set Architecture Reference Guide is
3080 // misleading about how the inline operands work for (packed) 16-bit
3081 // instructions. In a nutshell, the actual HW behavior is:
3082 //
3083 // - integer encodings (-16 .. 64) are always produced as sign-extended
3084 // 32-bit values
3085 // - float encodings are produced as:
3086 // - for F16 instructions: corresponding half-precision float values in
3087 // the LSBs, 0 in the MSBs
3088 // - for UI16 instructions: corresponding single-precision float value
3089 int32_t Signed = static_cast<int32_t>(Literal);
3090 if (Signed >= 0 && Signed <= 64)
3091 return 128 + Signed;
3092
3093 if (Signed >= -16 && Signed <= -1)
3094 return 192 + std::abs(Signed);
3095
3096 if (IsFloat) {
3097 // clang-format off
3098 switch (Literal) {
3099 case 0x3800: return 240; // 0.5
3100 case 0xB800: return 241; // -0.5
3101 case 0x3C00: return 242; // 1.0
3102 case 0xBC00: return 243; // -1.0
3103 case 0x4000: return 244; // 2.0
3104 case 0xC000: return 245; // -2.0
3105 case 0x4400: return 246; // 4.0
3106 case 0xC400: return 247; // -4.0
3107 case 0x3118: return 248; // 1.0 / (2.0 * pi)
3108 default: break;
3109 }
3110 // clang-format on
3111 } else {
3112 // clang-format off
3113 switch (Literal) {
3114 case 0x3F000000: return 240; // 0.5
3115 case 0xBF000000: return 241; // -0.5
3116 case 0x3F800000: return 242; // 1.0
3117 case 0xBF800000: return 243; // -1.0
3118 case 0x40000000: return 244; // 2.0
3119 case 0xC0000000: return 245; // -2.0
3120 case 0x40800000: return 246; // 4.0
3121 case 0xC0800000: return 247; // -4.0
3122 case 0x3E22F983: return 248; // 1.0 / (2.0 * pi)
3123 default: break;
3124 }
3125 // clang-format on
3126 }
3127
3128 return {};
3129}
3130
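// Examples of the rules above (illustrative, not from the original file):
//
//   getInlineEncodingV2I16(5)          == 133      // 128 + 5
//   getInlineEncodingV2I16(0xFFFFFFFF) == 193      // -1 -> 192 + 1
//   getInlineEncodingV2F16(0x3C00)     == 242      // 1.0 in the low half
//   getInlineEncodingV2F16(0x3C003C00) == nullopt  // not an inline constant
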
3131// Encoding of the literal as an inline constant for a V_PK_*_IU16 instruction
3132// or nullopt.
3133std::optional<unsigned> getInlineEncodingV2I16(uint32_t Literal) {
3134 return getInlineEncodingV216(false, Literal);
3135}
3136
3137// Encoding of the literal as an inline constant for a V_PK_*_BF16 instruction
3138// or nullopt.
3139std::optional<unsigned> getInlineEncodingV2BF16(uint32_t Literal) {
3140 int32_t Signed = static_cast<int32_t>(Literal);
3141 if (Signed >= 0 && Signed <= 64)
3142 return 128 + Signed;
3143
3144 if (Signed >= -16 && Signed <= -1)
3145 return 192 + std::abs(Signed);
3146
3147 // clang-format off
3148 switch (Literal) {
3149 case 0x3F00: return 240; // 0.5
3150 case 0xBF00: return 241; // -0.5
3151 case 0x3F80: return 242; // 1.0
3152 case 0xBF80: return 243; // -1.0
3153 case 0x4000: return 244; // 2.0
3154 case 0xC000: return 245; // -2.0
3155 case 0x4080: return 246; // 4.0
3156 case 0xC080: return 247; // -4.0
3157 case 0x3E22: return 248; // 1.0 / (2.0 * pi)
3158 default: break;
3159 }
3160 // clang-format on
3161
3162 return std::nullopt;
3163}
3164
3165// Encoding of the literal as an inline constant for a V_PK_*_F16 instruction
3166// or nullopt.
3167std::optional<unsigned> getInlineEncodingV2F16(uint32_t Literal) {
3168 return getInlineEncodingV216(true, Literal);
3169}
3170
3171// Whether the given literal can be inlined for a V_PK_* instruction.
3172 bool isInlinableLiteralV216(uint32_t Literal, uint8_t OpType) {
3173 switch (OpType) {
3174 case AMDGPU::OPERAND_REG_IMM_V2INT16:
3175 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
3176 return getInlineEncodingV216(false, Literal).has_value();
3177 case AMDGPU::OPERAND_REG_IMM_V2FP16:
3178 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
3179 return getInlineEncodingV216(true, Literal).has_value();
3180 case AMDGPU::OPERAND_REG_IMM_V2FP32:
3181 case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
3182 case AMDGPU::OPERAND_REG_IMM_V2INT32:
3183 case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
3184 return false;
3185 default:
3186 llvm_unreachable("bad packed operand type");
3187 }
3188}
3189
3190// Whether the given literal can be inlined for a V_PK_*_IU16 instruction.
3191 bool isInlinableLiteralV2I16(uint32_t Literal) {
3192 return getInlineEncodingV2I16(Literal).has_value();
3193 }
3194
3195// Whether the given literal can be inlined for a V_PK_*_BF16 instruction.
3196 bool isInlinableLiteralV2BF16(uint32_t Literal) {
3197 return getInlineEncodingV2BF16(Literal).has_value();
3198 }
3199
3200// Whether the given literal can be inlined for a V_PK_*_F16 instruction.
3201 bool isInlinableLiteralV2F16(uint32_t Literal) {
3202 return getInlineEncodingV2F16(Literal).has_value();
3203 }
3204
3205bool isValid32BitLiteral(uint64_t Val, bool IsFP64) {
3206 if (IsFP64)
3207 return !Lo_32(Val);
3208
3209 return isUInt<32>(Val) || isInt<32>(Val);
3210}
3211
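// Examples (illustrative, not from the original file):
//
//   isValid32BitLiteral(0x3FF0000000000000, /*IsFP64=*/true)  == true   // low half zero
//   isValid32BitLiteral(0x3FF0000000000001, /*IsFP64=*/true)  == false
//   isValid32BitLiteral(0xFFFFFFFF80000000, /*IsFP64=*/false) == true   // isInt<32>
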
3212int64_t encode32BitLiteral(int64_t Imm, OperandType Type, bool IsLit) {
3213 switch (Type) {
3214 default:
3215 break;
3220 return Imm & 0xffff;
3233 return Lo_32(Imm);
3235 return IsLit ? Imm : Hi_32(Imm);
3236 }
3237 return Imm;
3238}
3239
3240 bool isArgPassedInSGPR(const Argument *A) {
3241 const Function *F = A->getParent();
3242
3243 // Arguments to compute shaders are never a source of divergence.
3244 CallingConv::ID CC = F->getCallingConv();
3245 switch (CC) {
3246 case CallingConv::AMDGPU_KERNEL:
3247 case CallingConv::SPIR_KERNEL:
3248 return true;
3249 case CallingConv::AMDGPU_VS:
3250 case CallingConv::AMDGPU_LS:
3251 case CallingConv::AMDGPU_HS:
3252 case CallingConv::AMDGPU_ES:
3253 case CallingConv::AMDGPU_GS:
3254 case CallingConv::AMDGPU_PS:
3255 case CallingConv::AMDGPU_CS:
3256 case CallingConv::AMDGPU_Gfx:
3257 case CallingConv::AMDGPU_CS_Chain:
3258 case CallingConv::AMDGPU_CS_ChainPreserve:
3259 // For non-compute shaders, SGPR inputs are marked with either inreg or
3260 // byval. Everything else is in VGPRs.
3261 return A->hasAttribute(Attribute::InReg) ||
3262 A->hasAttribute(Attribute::ByVal);
3263 default:
3264 // TODO: treat i1 as divergent?
3265 return A->hasAttribute(Attribute::InReg);
3266 }
3267}
3268
3269bool isArgPassedInSGPR(const CallBase *CB, unsigned ArgNo) {
3270 // Arguments to compute shaders are never a source of divergence.
3271 CallingConv::ID CC = CB->getCallingConv();
3272 switch (CC) {
3273 case CallingConv::AMDGPU_KERNEL:
3274 case CallingConv::SPIR_KERNEL:
3275 return true;
3276 case CallingConv::AMDGPU_VS:
3277 case CallingConv::AMDGPU_LS:
3278 case CallingConv::AMDGPU_HS:
3279 case CallingConv::AMDGPU_ES:
3280 case CallingConv::AMDGPU_GS:
3281 case CallingConv::AMDGPU_PS:
3282 case CallingConv::AMDGPU_CS:
3283 case CallingConv::AMDGPU_Gfx:
3284 case CallingConv::AMDGPU_CS_Chain:
3285 case CallingConv::AMDGPU_CS_ChainPreserve:
3286 // For non-compute shaders, SGPR inputs are marked with either inreg or
3287 // byval. Everything else is in VGPRs.
3288 return CB->paramHasAttr(ArgNo, Attribute::InReg) ||
3289 CB->paramHasAttr(ArgNo, Attribute::ByVal);
3290 default:
3291 return CB->paramHasAttr(ArgNo, Attribute::InReg);
3292 }
3293}
3294
3295static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) {
3296 return isGCN3Encoding(ST) || isGFX10Plus(ST);
3297}
3298
3299 bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST,
3300 int64_t EncodedOffset) {
3301 if (isGFX12Plus(ST))
3302 return isUInt<23>(EncodedOffset);
3303
3304 return hasSMEMByteOffset(ST) ? isUInt<20>(EncodedOffset)
3305 : isUInt<8>(EncodedOffset);
3306}
3307
3308 bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST,
3309 int64_t EncodedOffset, bool IsBuffer) {
3310 if (isGFX12Plus(ST)) {
3311 if (IsBuffer && EncodedOffset < 0)
3312 return false;
3313 return isInt<24>(EncodedOffset);
3314 }
3315
3316 return !IsBuffer && hasSMRDSignedImmOffset(ST) && isInt<21>(EncodedOffset);
3317}
3318
3319static bool isDwordAligned(uint64_t ByteOffset) {
3320 return (ByteOffset & 3) == 0;
3321}
3322
3323 uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST,
3324 uint64_t ByteOffset) {
3325 if (hasSMEMByteOffset(ST))
3326 return ByteOffset;
3327
3328 assert(isDwordAligned(ByteOffset));
3329 return ByteOffset >> 2;
3330}
3331
3332std::optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
3333 int64_t ByteOffset, bool IsBuffer,
3334 bool HasSOffset) {
3335 // For unbuffered smem loads, it is illegal for the Immediate Offset to be
3336 // negative if the resulting (Offset + (M0 or SOffset or zero)) is negative.
3337 // Handle case where SOffset is not present.
3338 if (!IsBuffer && !HasSOffset && ByteOffset < 0 && hasSMRDSignedImmOffset(ST))
3339 return std::nullopt;
3340
3341 if (isGFX12Plus(ST)) // 24 bit signed offsets
3342 return isInt<24>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
3343 : std::nullopt;
3344
3345 // The signed version is always a byte offset.
3346 if (!IsBuffer && hasSMRDSignedImmOffset(ST)) {
3347 assert(hasSMEMByteOffset(ST));
3348 return isInt<20>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
3349 : std::nullopt;
3350 }
3351
3352 if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST))
3353 return std::nullopt;
3354
3355 int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
3356 return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset)
3357 ? std::optional<int64_t>(EncodedOffset)
3358 : std::nullopt;
3359}
3360
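// Illustrative behavior on a target without SMEM byte offsets (sketch, not
// from the original file; ST stands for an SI-class subtarget):
//
//   getSMRDEncodedOffset(ST, /*ByteOffset=*/16, false, false) == 4            // in dwords
//   getSMRDEncodedOffset(ST, /*ByteOffset=*/18, false, false) == std::nullopt // misaligned
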
3361std::optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
3362 int64_t ByteOffset) {
3363 if (!isCI(ST) || !isDwordAligned(ByteOffset))
3364 return std::nullopt;
3365
3366 int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
3367 return isUInt<32>(EncodedOffset) ? std::optional<int64_t>(EncodedOffset)
3368 : std::nullopt;
3369}
3370
3371 unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST) {
3372 if (AMDGPU::isGFX10(ST))
3373 return 12;
3374
3375 if (AMDGPU::isGFX12(ST))
3376 return 24;
3377 return 13;
3378}
3379
3380namespace {
3381
3382struct SourceOfDivergence {
3383 unsigned Intr;
3384};
3385const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);
3386
3387struct AlwaysUniform {
3388 unsigned Intr;
3389};
3390const AlwaysUniform *lookupAlwaysUniform(unsigned Intr);
3391
3392#define GET_SourcesOfDivergence_IMPL
3393#define GET_UniformIntrinsics_IMPL
3394#define GET_Gfx9BufferFormat_IMPL
3395#define GET_Gfx10BufferFormat_IMPL
3396#define GET_Gfx11PlusBufferFormat_IMPL
3397
3398#include "AMDGPUGenSearchableTables.inc"
3399
3400} // end anonymous namespace
3401
3402bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
3403 return lookupSourceOfDivergence(IntrID);
3404}
3405
3406bool isIntrinsicAlwaysUniform(unsigned IntrID) {
3407 return lookupAlwaysUniform(IntrID);
3408}
3409
3410 const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t BitsPerComp,
3411 uint8_t NumComponents,
3412 uint8_t NumFormat,
3413 const MCSubtargetInfo &STI) {
3414 return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(
3415 BitsPerComp, NumComponents, NumFormat)
3416 : isGFX10(STI)
3417 ? getGfx10BufferFormatInfo(BitsPerComp, NumComponents, NumFormat)
3418 : getGfx9BufferFormatInfo(BitsPerComp, NumComponents, NumFormat);
3419}
3420
3421 const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t Format,
3422 const MCSubtargetInfo &STI) {
3423 return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(Format)
3424 : isGFX10(STI) ? getGfx10BufferFormatInfo(Format)
3425 : getGfx9BufferFormatInfo(Format);
3426}
3427
3428 static const MCRegisterClass *getVGPRClass(MCRegister Reg,
3429 const MCRegisterInfo &MRI) {
3430 const unsigned VGPRClasses[] = {
3431 AMDGPU::VGPR_16RegClassID, AMDGPU::VGPR_32RegClassID,
3432 AMDGPU::VReg_64RegClassID, AMDGPU::VReg_96RegClassID,
3433 AMDGPU::VReg_128RegClassID, AMDGPU::VReg_160RegClassID,
3434 AMDGPU::VReg_192RegClassID, AMDGPU::VReg_224RegClassID,
3435 AMDGPU::VReg_256RegClassID, AMDGPU::VReg_288RegClassID,
3436 AMDGPU::VReg_320RegClassID, AMDGPU::VReg_352RegClassID,
3437 AMDGPU::VReg_384RegClassID, AMDGPU::VReg_512RegClassID,
3438 AMDGPU::VReg_1024RegClassID};
3439
3440 for (unsigned RCID : VGPRClasses) {
3441 const MCRegisterClass &RC = MRI.getRegClass(RCID);
3442 if (RC.contains(Reg))
3443 return &RC;
3444 }
3445
3446 return nullptr;
3447}
3448
3449 unsigned getVGPREncodingMSBs(MCRegister Reg, const MCRegisterInfo &MRI) {
3450 unsigned Enc = MRI.getEncodingValue(Reg);
3451 unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
3452 return Idx >> 8;
3453}
3454
3455 MCRegister getVGPRWithMSBs(MCRegister Reg, unsigned MSBs,
3456 const MCRegisterInfo &MRI) {
3457 unsigned Enc = MRI.getEncodingValue(Reg);
3458 unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
3459 if (Idx >= 0x100)
3460 return MCRegister();
3461
3462 const MCRegisterClass *RC = getVGPRClass(Reg, MRI);
3463 if (!RC)
3464 return MCRegister();
3465
3466 Idx |= MSBs << 8;
3467 if (RC->getID() == AMDGPU::VGPR_16RegClassID) {
3468 // This class has 2048 registers with interleaved lo16 and hi16.
3469 Idx *= 2;
3470 if (Enc & AMDGPU::HWEncoding::IS_HI16)
3471 ++Idx;
3472 }
3473
3474 return RC->getRegister(Idx);
3475}
3476
3477std::pair<const AMDGPU::OpName *, const AMDGPU::OpName *>
3478 getVGPRLoweringOperandTables(const MCInstrDesc &Desc) {
3479 static const AMDGPU::OpName VOPOps[4] = {
3480 AMDGPU::OpName::src0, AMDGPU::OpName::src1, AMDGPU::OpName::src2,
3481 AMDGPU::OpName::vdst};
3482 static const AMDGPU::OpName VDSOps[4] = {
3483 AMDGPU::OpName::addr, AMDGPU::OpName::data0, AMDGPU::OpName::data1,
3484 AMDGPU::OpName::vdst};
3485 static const AMDGPU::OpName FLATOps[4] = {
3486 AMDGPU::OpName::vaddr, AMDGPU::OpName::vdata,
3487 AMDGPU::OpName::NUM_OPERAND_NAMES, AMDGPU::OpName::vdst};
3488 static const AMDGPU::OpName BUFOps[4] = {
3489 AMDGPU::OpName::vaddr, AMDGPU::OpName::NUM_OPERAND_NAMES,
3490 AMDGPU::OpName::NUM_OPERAND_NAMES, AMDGPU::OpName::vdata};
3491 static const AMDGPU::OpName VIMGOps[4] = {
3492 AMDGPU::OpName::vaddr0, AMDGPU::OpName::vaddr1, AMDGPU::OpName::vaddr2,
3493 AMDGPU::OpName::vdata};
3494
3495 // For VOPD instructions, the MSB of a Y component operand's VGPR address
3496 // must match that of the corresponding X operand; otherwise the two halves
3497 // cannot be combined into a VOPD instruction.
3498 static const AMDGPU::OpName VOPDOpsX[4] = {
3499 AMDGPU::OpName::src0X, AMDGPU::OpName::vsrc1X, AMDGPU::OpName::vsrc2X,
3500 AMDGPU::OpName::vdstX};
3501 static const AMDGPU::OpName VOPDOpsY[4] = {
3502 AMDGPU::OpName::src0Y, AMDGPU::OpName::vsrc1Y, AMDGPU::OpName::vsrc2Y,
3503 AMDGPU::OpName::vdstY};
3504
3505 // VOP2 MADMK instructions use src0, imm, src1 scheme.
3506 static const AMDGPU::OpName VOP2MADMKOps[4] = {
3507 AMDGPU::OpName::src0, AMDGPU::OpName::NUM_OPERAND_NAMES,
3508 AMDGPU::OpName::src1, AMDGPU::OpName::vdst};
3509 static const AMDGPU::OpName VOPDFMAMKOpsX[4] = {
3510 AMDGPU::OpName::src0X, AMDGPU::OpName::NUM_OPERAND_NAMES,
3511 AMDGPU::OpName::vsrc1X, AMDGPU::OpName::vdstX};
3512 static const AMDGPU::OpName VOPDFMAMKOpsY[4] = {
3513 AMDGPU::OpName::src0Y, AMDGPU::OpName::NUM_OPERAND_NAMES,
3514 AMDGPU::OpName::vsrc1Y, AMDGPU::OpName::vdstY};
3515
3516 unsigned TSFlags = Desc.TSFlags;
3517
3518 if (TSFlags &
3521 switch (Desc.getOpcode()) {
3522 // LD_SCALE operands ignore MSB.
3523 case AMDGPU::V_WMMA_LD_SCALE_PAIRED_B32:
3524 case AMDGPU::V_WMMA_LD_SCALE_PAIRED_B32_gfx1250:
3525 case AMDGPU::V_WMMA_LD_SCALE16_PAIRED_B64:
3526 case AMDGPU::V_WMMA_LD_SCALE16_PAIRED_B64_gfx1250:
3527 return {};
3528 case AMDGPU::V_FMAMK_F16:
3529 case AMDGPU::V_FMAMK_F16_t16:
3530 case AMDGPU::V_FMAMK_F16_t16_gfx12:
3531 case AMDGPU::V_FMAMK_F16_fake16:
3532 case AMDGPU::V_FMAMK_F16_fake16_gfx12:
3533 case AMDGPU::V_FMAMK_F32:
3534 case AMDGPU::V_FMAMK_F32_gfx12:
3535 case AMDGPU::V_FMAMK_F64:
3536 case AMDGPU::V_FMAMK_F64_gfx1250:
3537 return {VOP2MADMKOps, nullptr};
3538 default:
3539 break;
3540 }
3541 return {VOPOps, nullptr};
3542 }
3543
3544 if (TSFlags & SIInstrFlags::DS)
3545 return {VDSOps, nullptr};
3546
3547 if (TSFlags & SIInstrFlags::FLAT)
3548 return {FLATOps, nullptr};
3549
3550 if (TSFlags & (SIInstrFlags::MUBUF | SIInstrFlags::MTBUF))
3551 return {BUFOps, nullptr};
3552
3553 if (TSFlags & SIInstrFlags::VIMAGE)
3554 return {VIMGOps, nullptr};
3555
3556 if (AMDGPU::isVOPD(Desc.getOpcode())) {
3557 auto [OpX, OpY] = getVOPDComponents(Desc.getOpcode());
3558 return {(OpX == AMDGPU::V_FMAMK_F32) ? VOPDFMAMKOpsX : VOPDOpsX,
3559 (OpY == AMDGPU::V_FMAMK_F32) ? VOPDFMAMKOpsY : VOPDOpsY};
3560 }
3561
3562 assert(!(TSFlags & SIInstrFlags::MIMG));
3563
3564 if (TSFlags & (SIInstrFlags::VSAMPLE | SIInstrFlags::EXP))
3565 llvm_unreachable("Sample and export VGPR lowering is not implemented and"
3566 " these instructions are not expected on gfx1250");
3567
3568 return {};
3569}
3570
3571bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode) {
3572 uint64_t TSFlags = MII.get(Opcode).TSFlags;
3573
3574 if (TSFlags & SIInstrFlags::SMRD)
3575 return !getSMEMIsBuffer(Opcode);
3576 if (!(TSFlags & SIInstrFlags::FLAT))
3577 return false;
3578
3579 // Only SV and SVS modes are supported.
3580 if (TSFlags & SIInstrFlags::FlatScratch)
3581 return hasNamedOperand(Opcode, OpName::vaddr);
3582
3583 // Only GVS mode is supported.
3584 return hasNamedOperand(Opcode, OpName::vaddr) &&
3585 hasNamedOperand(Opcode, OpName::saddr);
3586
3588}
3589
3590bool hasAny64BitVGPROperands(const MCInstrDesc &OpDesc, const MCInstrInfo &MII,
3591 const MCSubtargetInfo &ST) {
3592 for (auto OpName : {OpName::vdst, OpName::src0, OpName::src1, OpName::src2}) {
3593 int Idx = getNamedOperandIdx(OpDesc.getOpcode(), OpName);
3594 if (Idx == -1)
3595 continue;
3596
3597 const MCOperandInfo &OpInfo = OpDesc.operands()[Idx];
3598 int16_t RegClass = MII.getOpRegClassID(
3599 OpInfo, ST.getHwMode(MCSubtargetInfo::HwMode_RegInfo));
3600 if (RegClass == AMDGPU::VReg_64RegClassID ||
3601 RegClass == AMDGPU::VReg_64_Align2RegClassID)
3602 return true;
3603 }
3604
3605 return false;
3606}
3607
3608bool isDPALU_DPP32BitOpc(unsigned Opc) {
3609 switch (Opc) {
3610 case AMDGPU::V_MUL_LO_U32_e64:
3611 case AMDGPU::V_MUL_LO_U32_e64_dpp:
3612 case AMDGPU::V_MUL_LO_U32_e64_dpp_gfx1250:
3613 case AMDGPU::V_MUL_HI_U32_e64:
3614 case AMDGPU::V_MUL_HI_U32_e64_dpp:
3615 case AMDGPU::V_MUL_HI_U32_e64_dpp_gfx1250:
3616 case AMDGPU::V_MUL_HI_I32_e64:
3617 case AMDGPU::V_MUL_HI_I32_e64_dpp:
3618 case AMDGPU::V_MUL_HI_I32_e64_dpp_gfx1250:
3619 case AMDGPU::V_MAD_U32_e64:
3620 case AMDGPU::V_MAD_U32_e64_dpp:
3621 case AMDGPU::V_MAD_U32_e64_dpp_gfx1250:
3622 return true;
3623 default:
3624 return false;
3625 }
3626}
3627
3628bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCInstrInfo &MII,
3629 const MCSubtargetInfo &ST) {
3630 if (!ST.hasFeature(AMDGPU::FeatureDPALU_DPP))
3631 return false;
3632
3633 if (isDPALU_DPP32BitOpc(OpDesc.getOpcode()))
3634 return ST.hasFeature(AMDGPU::FeatureGFX1250Insts);
3635
3636 return hasAny64BitVGPROperands(OpDesc, MII, ST);
3637}
3638
3640 if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize32768))
3641 return 64;
3642 if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize65536))
3643 return 128;
3644 if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize163840))
3645 return 320;
3646 if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize327680))
3647 return 512;
3648 return 64; // In sync with getAddressableLocalMemorySize
3649}
3650
3651bool isPackedFP32Inst(unsigned Opc) {
3652 switch (Opc) {
3653 case AMDGPU::V_PK_ADD_F32:
3654 case AMDGPU::V_PK_ADD_F32_gfx12:
3655 case AMDGPU::V_PK_MUL_F32:
3656 case AMDGPU::V_PK_MUL_F32_gfx12:
3657 case AMDGPU::V_PK_FMA_F32:
3658 case AMDGPU::V_PK_FMA_F32_gfx12:
3659 return true;
3660 default:
3661 return false;
3662 }
3663}
3664
3665const std::array<unsigned, 3> &ClusterDimsAttr::getDims() const {
3666 assert(isFixedDims() && "expect kind to be FixedDims");
3667 return Dims;
3668}
3669
3670std::string ClusterDimsAttr::to_string() const {
3671 SmallString<10> Buffer;
3672 raw_svector_ostream OS(Buffer);
3673
3674 switch (getKind()) {
3675 case Kind::Unknown:
3676 return "";
3677 case Kind::NoCluster: {
3678 OS << EncoNoCluster << ',' << EncoNoCluster << ',' << EncoNoCluster;
3679 return Buffer.c_str();
3680 }
3681 case Kind::VariableDims: {
3682 OS << EncoVariableDims << ',' << EncoVariableDims << ','
3683 << EncoVariableDims;
3684 return Buffer.c_str();
3685 }
3686 case Kind::FixedDims: {
3687 OS << Dims[0] << ',' << Dims[1] << ',' << Dims[2];
3688 return Buffer.c_str();
3689 }
3690 }
3691 llvm_unreachable("Unknown ClusterDimsAttr kind");
3692}
3693
3694 ClusterDimsAttr ClusterDimsAttr::get(const Function &F) {
3695 std::optional<SmallVector<unsigned>> Attr =
3696 getIntegerVecAttribute(F, "amdgpu-cluster-dims", /*Size=*/3);
3697 Kind AttrKind = Kind::FixedDims;
3698
3699 if (!Attr.has_value())
3700 AttrKind = Kind::Unknown;
3701 else if (all_of(*Attr, [](unsigned V) { return V == EncoNoCluster; }))
3702 AttrKind = Kind::NoCluster;
3703 else if (all_of(*Attr, [](unsigned V) { return V == EncoVariableDims; }))
3704 AttrKind = Kind::VariableDims;
3705
3706 ClusterDimsAttr A(AttrKind);
3707 if (AttrKind == Kind::FixedDims)
3708 A.Dims = {(*Attr)[0], (*Attr)[1], (*Attr)[2]};
3709
3710 return A;
3711}
3712
3713} // namespace AMDGPU
3714
3715 raw_ostream &operator<<(raw_ostream &OS,
3716 const AMDGPU::IsaInfo::TargetIDSetting S) {
3717 switch (S) {
3718 case AMDGPU::IsaInfo::TargetIDSetting::Unsupported:
3719 OS << "Unsupported";
3720 break;
3721 case AMDGPU::IsaInfo::TargetIDSetting::Any:
3722 OS << "Any";
3723 break;
3724 case AMDGPU::IsaInfo::TargetIDSetting::Off:
3725 OS << "Off";
3726 break;
3727 case AMDGPU::IsaInfo::TargetIDSetting::On:
3728 OS << "On";
3729 break;
3730 }
3731 return OS;
3732}
3733
3734} // namespace llvm
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static llvm::cl::opt< unsigned > DefaultAMDHSACodeObjectVersion("amdhsa-code-object-version", llvm::cl::Hidden, llvm::cl::init(llvm::AMDGPU::AMDHSA_COV6), llvm::cl::desc("Set default AMDHSA Code Object Version (module flag " "or asm directive still take priority if present)"))
#define MAP_REG2REG
Provides AMDGPU specific target descriptions.
MC layer struct for AMDGPUMCKernelCodeT, provides MCExpr functionality where required.
@ AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
#define RegName(no)
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
Register const TargetRegisterInfo * TRI
This file contains the declarations for metadata subclasses.
#define T
uint64_t High
if(PassOpts->AAPipeline)
#define S_00B848_MEM_ORDERED(x)
Definition SIDefines.h:1237
#define S_00B848_WGP_MODE(x)
Definition SIDefines.h:1234
#define S_00B848_FWD_PROGRESS(x)
Definition SIDefines.h:1240
unsigned unsigned DefaultVal
This file contains some functions that are useful when dealing with strings.
static const int BlockSize
Definition TarWriter.cpp:33
static const uint32_t IV[8]
Definition blake3_impl.h:83
static ClusterDimsAttr get(const Function &F)
const std::array< unsigned, 3 > & getDims() const
TargetIDSetting getXnackSetting() const
AMDGPUTargetID(const MCSubtargetInfo &STI)
void setTargetIDFromTargetIDStream(StringRef TargetID)
TargetIDSetting getSramEccSetting() const
unsigned getIndexInParsedOperands(unsigned CompOprIdx) const
unsigned getIndexOfSrcInParsedOperands(unsigned CompSrcIdx) const
std::optional< unsigned > getInvalidCompOperandIndex(std::function< MCRegister(unsigned, unsigned)> GetRegIdx, const MCRegisterInfo &MRI, bool SkipSrc=false, bool AllowSameVGPR=false, bool VOPD3=false) const
std::array< MCRegister, Component::MAX_OPR_NUM > RegIndices
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
constexpr bool test(unsigned I) const
unsigned getAddressSpace() const
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
A helper class to return the specified delimiter string after the first invocation of operator String...
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool mayStore() const
Return true if this instruction could possibly modify memory.
bool mayLoad() const
Return true if this instruction could possibly read memory.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
unsigned getOpcode() const
Return the opcode number for this descriptor.
Interface to description of machine instruction set.
Definition MCInstrInfo.h:27
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition MCInstrInfo.h:90
int16_t getOpRegClassID(const MCOperandInfo &OpInfo, unsigned HwModeId) const
Return the ID of the register class to use for OpInfo, for the active HwMode HwModeId.
Definition MCInstrInfo.h:80
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition MCInstrDesc.h:86
MCRegisterClass - Base class of TargetRegisterClass.
unsigned getID() const
getID() - Return the register class ID number.
MCRegister getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
constexpr unsigned id() const
Definition MCRegister.h:82
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const Triple & getTargetTriple() const
const FeatureBitset & getFeatureBits() const
StringRef getCPU() const
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better as a string (e.g. operator+ etc).
Definition SmallString.h:26
const char * c_str()
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
A wrapper around a string literal that serves as a proxy for constructing global tables of StringRefs with the length computed at compile time.
Definition StringRef.h:864
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:712
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
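A minimal usage sketch of the two StringRef helpers above (the input strings are illustrative; note that getAsInteger returns true on failure):
  StringRef S("gfx90a:xnack+");
  auto [GPU, Features] = S.split(':');             // GPU == "gfx90a", Features == "xnack+"
  unsigned N = 0;
  bool Err = StringRef("42").getAsInteger(10, N);  // Err == false, N == 42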
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:146
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition StringRef.h:273
Manages the enabling and disabling of subtarget specific features.
const std::vector< std::string > & getFeatures() const
Returns the vector of individual subtarget features.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
OSType getOS() const
Get the parsed operating system type of this triple.
Definition Triple.h:426
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition Triple.h:417
bool isAMDGCN() const
Tests whether the target is AMDGCN.
Definition Triple.h:932
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
std::string & str()
Returns the string's reference.
A raw_ostream that writes to an SmallVector or SmallString.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ CONSTANT_ADDRESS_32BIT
Address space for 32-bit constant memory.
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
unsigned decodeFieldVaVcc(unsigned Encoded)
unsigned encodeFieldVaVcc(unsigned Encoded, unsigned VaVcc)
unsigned encodeFieldHoldCnt(unsigned Encoded, unsigned HoldCnt)
bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
unsigned encodeFieldVaSsrc(unsigned Encoded, unsigned VaSsrc)
unsigned encodeFieldVaVdst(unsigned Encoded, unsigned VaVdst)
unsigned decodeFieldSaSdst(unsigned Encoded)
unsigned decodeFieldVaSdst(unsigned Encoded)
unsigned encodeFieldVmVsrc(unsigned Encoded, unsigned VmVsrc)
unsigned decodeFieldVaSsrc(unsigned Encoded)
int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst)
const CustomOperandVal DepCtrInfo[]
bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
unsigned decodeFieldVaVdst(unsigned Encoded)
unsigned decodeFieldHoldCnt(unsigned Encoded)
int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI)
unsigned decodeFieldVmVsrc(unsigned Encoded)
unsigned encodeFieldVaSdst(unsigned Encoded, unsigned VaSdst)
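A hedged sketch of how the depctr field helpers above compose, assuming an MCSubtargetInfo STI for the chosen subtarget and that these helpers live in the AMDGPU::DepCtr namespace; the field value is illustrative:
  int Enc = AMDGPU::DepCtr::getDefaultDepCtrEncoding(STI);
  Enc = AMDGPU::DepCtr::encodeFieldVmVsrc(Enc, /*VmVsrc=*/2);  // set one field, keep the rest
  unsigned VmVsrc = AMDGPU::DepCtr::decodeFieldVmVsrc(Enc);    // 2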
bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI)
static constexpr ExpTgt ExpTgtInfo[]
bool getTgtName(unsigned Id, StringRef &Name, int &Index)
unsigned getTgtId(const StringRef Name)
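A hedged sketch of the export-target lookups above, assuming they live in the AMDGPU::Exp namespace; the target name is illustrative:
  unsigned Id = AMDGPU::Exp::getTgtId("mrt0");  // symbolic name -> hardware target id
  StringRef Name;
  int Index;
  if (AMDGPU::Exp::getTgtName(Id, Name, Index)) {
    // Name holds the base name and Index the per-target index, e.g. "mrt" and 0.
  }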
constexpr uint32_t VersionMinor
HSA metadata minor version.
constexpr uint32_t VersionMajor
HSA metadata major version.
unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI, std::optional< bool > EnableWavefrontSize32)
unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI)
unsigned getArchVGPRAllocGranule()
For subtargets with a unified VGPR file and mixed ArchVGPR/AGPR usage, returns the allocation granule for ArchVGPRs.
unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getWavefrontSize(const MCSubtargetInfo *STI)
unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI, unsigned NumVGPRs, unsigned DynamicVGPRBlockSize)
unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI)
unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI)
unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed, bool FlatScrUsed, bool XNACKUsed)
unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI)
unsigned getLocalMemorySize(const MCSubtargetInfo *STI)
unsigned getAddressableLocalMemorySize(const MCSubtargetInfo *STI)
unsigned getEUsPerCU(const MCSubtargetInfo *STI)
unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI)
unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU)
static TargetIDSetting getTargetIDSettingFromFeatureString(StringRef FeatureString)
unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI)
unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI, unsigned DynamicVGPRBlockSize, std::optional< bool > EnableWavefrontSize32)
unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, bool Addressable)
unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs)
unsigned getMinWavesPerEU(const MCSubtargetInfo *STI)
unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, unsigned DynamicVGPRBlockSize)
unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI)
unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, unsigned DynamicVGPRBlockSize)
unsigned getAllocatedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, unsigned DynamicVGPRBlockSize, std::optional< bool > EnableWavefrontSize32)
unsigned getEncodedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, std::optional< bool > EnableWavefrontSize32)
unsigned getOccupancyWithNumSGPRs(unsigned SGPRs, unsigned MaxWaves, AMDGPUSubtarget::Generation Gen)
static unsigned getGranulatedNumRegisterBlocks(unsigned NumRegs, unsigned Granule)
unsigned getAddressableNumArchVGPRs(const MCSubtargetInfo *STI)
unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI)
unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI, unsigned DynamicVGPRBlockSize)
StringLiteral const UfmtSymbolicGFX11[]
bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI)
unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI)
StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI)
unsigned const DfmtNfmt2UFmtGFX10[]
StringLiteral const DfmtSymbolic[]
static StringLiteral const * getNfmtLookupTable(const MCSubtargetInfo &STI)
bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI)
StringLiteral const NfmtSymbolicGFX10[]
bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI)
int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt, const MCSubtargetInfo &STI)
StringRef getDfmtName(unsigned Id)
int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt)
int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI)
bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI)
StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI)
unsigned const DfmtNfmt2UFmtGFX11[]
StringLiteral const NfmtSymbolicVI[]
StringLiteral const NfmtSymbolicSICI[]
int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI)
int64_t getDfmt(const StringRef Name)
StringLiteral const UfmtSymbolicGFX10[]
void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt)
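A hedged round-trip sketch for the buffer-format helpers above, assuming they live in the AMDGPU::MTBUFFormat namespace; the format values are illustrative:
  int64_t Fmt = AMDGPU::MTBUFFormat::encodeDfmtNfmt(/*Dfmt=*/4, /*Nfmt=*/1);
  unsigned Dfmt = 0, Nfmt = 0;
  AMDGPU::MTBUFFormat::decodeDfmtNfmt(Fmt, Dfmt, Nfmt);  // Dfmt == 4, Nfmt == 1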
uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId)
bool msgSupportsStream(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI)
void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId, uint16_t &StreamId, const MCSubtargetInfo &STI)
bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI)
bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId, const MCSubtargetInfo &STI, bool Strict)
StringRef getMsgOpName(int64_t MsgId, uint64_t Encoding, const MCSubtargetInfo &STI)
Map from an encoding to the symbolic name for a sendmsg operation.
static uint64_t getMsgIdMask(const MCSubtargetInfo &STI)
bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI)
bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI, bool Strict)
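A hedged round-trip sketch for the sendmsg helpers above, assuming they live in the AMDGPU::SendMsg namespace and that STI is an MCSubtargetInfo for the chosen subtarget; the ID values are illustrative:
  uint64_t Imm = AMDGPU::SendMsg::encodeMsg(/*MsgId=*/2, /*OpId=*/1, /*StreamId=*/0);
  uint16_t MsgId, OpId, StreamId;
  AMDGPU::SendMsg::decodeMsg(Imm, MsgId, OpId, StreamId, STI);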
constexpr unsigned VOPD_VGPR_BANK_MASKS[]
constexpr unsigned COMPONENTS_NUM
constexpr unsigned VOPD3_VGPR_BANK_MASKS[]
bool isPackedFP32Inst(unsigned Opc)
bool isGCN3Encoding(const MCSubtargetInfo &STI)
bool isInlinableLiteralBF16(int16_t Literal, bool HasInv2Pi)
bool isGFX10_BEncoding(const MCSubtargetInfo &STI)
bool isInlineValue(MCRegister Reg)
bool isGFX10_GFX11(const MCSubtargetInfo &STI)
bool isInlinableLiteralV216(uint32_t Literal, uint8_t OpType)
LLVM_READONLY const MIMGInfo * getMIMGInfo(unsigned Opc)
void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt)
Decodes Vmcnt, Expcnt and Lgkmcnt from given Waitcnt for given isa Version, and writes decoded values into Vmcnt, Expcnt and Lgkmcnt respectively.
bool isInlinableLiteralFP16(int16_t Literal, bool HasInv2Pi)
bool isSGPR(MCRegister Reg, const MCRegisterInfo *TRI)
Returns true if Reg is a scalar register.
uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST, uint64_t ByteOffset)
Convert ByteOffset to dwords if the subtarget uses dword SMRD immediate offsets.
static unsigned encodeStorecnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Storecnt)
MCRegister getMCReg(MCRegister Reg, const MCSubtargetInfo &STI)
If Reg is a pseudo reg, return the correct hardware register given STI otherwise return Reg.
static bool hasSMEMByteOffset(const MCSubtargetInfo &ST)
bool isVOPCAsmOnly(unsigned Opc)
int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding, unsigned VDataDwords, unsigned VAddrDwords)
bool getMTBUFHasSrsrc(unsigned Opc)
std::optional< int64_t > getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST, int64_t ByteOffset)
bool getWMMAIsXDL(unsigned Opc)
uint8_t wmmaScaleF8F6F4FormatToNumRegs(unsigned Fmt)
static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr, int Size, unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
bool isGFX10Before1030(const MCSubtargetInfo &STI)
bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo)
Does this operand support only inlinable literals?
unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc)
const int OPR_ID_UNSUPPORTED
bool shouldEmitConstantsToTextSection(const Triple &TT)
bool isInlinableLiteralV2I16(uint32_t Literal)
bool isDPMACCInstruction(unsigned Opc)
int getMTBUFElements(unsigned Opc)
bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI)
static int encodeCustomOperandVal(const CustomOperandVal &Op, int64_t InputVal)
unsigned getTemporalHintType(const MCInstrDesc TID)
int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR, int32_t ArgNumVGPR)
bool isGFX10(const MCSubtargetInfo &STI)
bool isInlinableLiteralV2BF16(uint32_t Literal)
unsigned getMaxNumUserSGPRs(const MCSubtargetInfo &STI)
std::optional< unsigned > getInlineEncodingV216(bool IsFloat, uint32_t Literal)
FPType getFPDstSelType(unsigned Opc)
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST)
For pre-GFX12 FLAT instructions the offset must be positive; MSB is ignored and forced to zero.
bool hasA16(const MCSubtargetInfo &STI)
bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset, bool IsBuffer)
bool isGFX12Plus(const MCSubtargetInfo &STI)
unsigned getNSAMaxSize(const MCSubtargetInfo &STI, bool HasSampler)
const MCRegisterClass * getVGPRPhysRegClass(MCRegister Reg, const MCRegisterInfo &MRI)
bool hasPackedD16(const MCSubtargetInfo &STI)
unsigned getStorecntBitMask(const IsaVersion &Version)
unsigned getLdsDwGranularity(const MCSubtargetInfo &ST)
bool isGFX940(const MCSubtargetInfo &STI)
bool isInlinableLiteralV2F16(uint32_t Literal)
bool isHsaAbi(const MCSubtargetInfo &STI)
bool isGFX11(const MCSubtargetInfo &STI)
const int OPR_VAL_INVALID
bool getSMEMIsBuffer(unsigned Opc)
bool isGFX10_3_GFX11(const MCSubtargetInfo &STI)
bool hasValueInRangeLikeMetadata(const MDNode &MD, int64_t Val)
Checks if Val is inside MD, a !range-like metadata.
uint8_t mfmaScaleF8F6F4FormatToNumRegs(unsigned EncodingVal)
unsigned getVOPDOpcode(unsigned Opc, bool VOPD3)
bool isGroupSegment(const GlobalValue *GV)
LLVM_ABI IsaVersion getIsaVersion(StringRef GPU)
bool getMTBUFHasSoffset(unsigned Opc)
bool hasXNACK(const MCSubtargetInfo &STI)
bool isValid32BitLiteral(uint64_t Val, bool IsFP64)
static unsigned getCombinedCountBitMask(const IsaVersion &Version, bool IsStore)
CanBeVOPD getCanBeVOPD(unsigned Opc, unsigned EncodingFamily, bool VOPD3)
unsigned encodeWaitcnt(const IsaVersion &Version, unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt)
Encodes Vmcnt, Expcnt and Lgkmcnt into Waitcnt for given isa Version.
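A minimal sketch tying encodeWaitcnt/decodeWaitcnt to getIsaVersion, assuming STI is an MCSubtargetInfo for the chosen subtarget; the counter values are illustrative:
  AMDGPU::IsaVersion IV = AMDGPU::getIsaVersion(STI.getCPU());
  unsigned Enc = AMDGPU::encodeWaitcnt(IV, /*Vmcnt=*/0, /*Expcnt=*/7, /*Lgkmcnt=*/0);
  unsigned Vmcnt, Expcnt, Lgkmcnt;
  AMDGPU::decodeWaitcnt(IV, Enc, Vmcnt, Expcnt, Lgkmcnt);  // recovers 0, 7, 0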
bool isVOPC64DPP(unsigned Opc)
int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements)
bool getMAIIsGFX940XDL(unsigned Opc)
bool isSI(const MCSubtargetInfo &STI)
unsigned getDefaultAMDHSACodeObjectVersion()
bool isReadOnlySegment(const GlobalValue *GV)
bool isArgPassedInSGPR(const Argument *A)
bool isIntrinsicAlwaysUniform(unsigned IntrID)
int getMUBUFBaseOpcode(unsigned Opc)
unsigned getAMDHSACodeObjectVersion(const Module &M)
unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt)
unsigned getWaitcntBitMask(const IsaVersion &Version)
LLVM_READONLY bool hasNamedOperand(uint64_t Opcode, OpName NamedIdx)
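A one-line sketch of hasNamedOperand, assuming Opcode is a target opcode and that the OpName enumerator used here (clamp) exists for it:
  if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::clamp)) {
    // the instruction has a 'clamp' operand
  }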
bool getVOP3IsSingle(unsigned Opc)
bool isGFX9(const MCSubtargetInfo &STI)
bool isDPALU_DPP32BitOpc(unsigned Opc)
bool getVOP1IsSingle(unsigned Opc)
static bool isDwordAligned(uint64_t ByteOffset)
unsigned getVOPDEncodingFamily(const MCSubtargetInfo &ST)
bool isGFX10_AEncoding(const MCSubtargetInfo &STI)
bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this a KImm operand?
bool getHasColorExport(const Function &F)
int getMTBUFBaseOpcode(unsigned Opc)
bool isGFX90A(const MCSubtargetInfo &STI)
unsigned getSamplecntBitMask(const IsaVersion &Version)
unsigned getDefaultQueueImplicitArgPosition(unsigned CodeObjectVersion)
std::tuple< char, unsigned, unsigned > parseAsmPhysRegName(StringRef RegName)
Returns a valid char code in the first entry if this is a valid physical register name, or 0 otherwise.
bool hasSRAMECC(const MCSubtargetInfo &STI)
bool getHasDepthExport(const Function &F)
bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI)
bool getMUBUFHasVAddr(unsigned Opc)
bool isTrue16Inst(unsigned Opc)
unsigned getVGPREncodingMSBs(MCRegister Reg, const MCRegisterInfo &MRI)
std::pair< unsigned, unsigned > getVOPDComponents(unsigned VOPDOpcode)
bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi)
bool isGFX12(const MCSubtargetInfo &STI)
unsigned getInitialPSInputAddr(const Function &F)
unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Expcnt)
bool isAsyncStore(unsigned Opc)
unsigned getDynamicVGPRBlockSize(const Function &F)
unsigned getKmcntBitMask(const IsaVersion &Version)
MCRegister getVGPRWithMSBs(MCRegister Reg, unsigned MSBs, const MCRegisterInfo &MRI)
If Reg is a low VGPR return a corresponding high VGPR with MSBs set.
unsigned getVmcntBitMask(const IsaVersion &Version)
bool isNotGFX10Plus(const MCSubtargetInfo &STI)
bool hasMAIInsts(const MCSubtargetInfo &STI)
unsigned getBitOp2(unsigned Opc)
bool isIntrinsicSourceOfDivergence(unsigned IntrID)
unsigned getXcntBitMask(const IsaVersion &Version)
bool isGenericAtomic(unsigned Opc)
const MFMA_F8F6F4_Info * getWMMA_F8F6F4_WithFormatArgs(unsigned FmtA, unsigned FmtB, unsigned F8F8Opcode)
Waitcnt decodeStorecntDscnt(const IsaVersion &Version, unsigned StorecntDscnt)
bool isGFX8Plus(const MCSubtargetInfo &STI)
LLVM_READNONE bool isInlinableIntLiteral(int64_t Literal)
Is this literal inlinable, and not one of the values intended for floating-point values?
unsigned getLgkmcntBitMask(const IsaVersion &Version)
bool getMUBUFTfe(unsigned Opc)
unsigned getBvhcntBitMask(const IsaVersion &Version)
bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST)
bool hasMIMG_R128(const MCSubtargetInfo &STI)
bool hasGFX10_3Insts(const MCSubtargetInfo &STI)
std::pair< const AMDGPU::OpName *, const AMDGPU::OpName * > getVGPRLoweringOperandTables(const MCInstrDesc &Desc)
bool hasG16(const MCSubtargetInfo &STI)
unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode, const MIMGDimInfo *Dim, bool IsA16, bool IsG16Supported)
int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements)
unsigned getExpcntBitMask(const IsaVersion &Version)
bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI)
bool getMUBUFHasSoffset(unsigned Opc)
bool isNotGFX11Plus(const MCSubtargetInfo &STI)
bool isGFX11Plus(const MCSubtargetInfo &STI)
std::optional< unsigned > getInlineEncodingV2F16(uint32_t Literal)
bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this a floating-point operand?
std::tuple< char, unsigned, unsigned > parseAsmConstraintPhysReg(StringRef Constraint)
Returns a valid char code in the first entry if this is a valid physical register constraint, or 0 otherwise.
unsigned getHostcallImplicitArgPosition(unsigned CodeObjectVersion)
static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr, int Size, const MCSubtargetInfo &STI)
static unsigned encodeLoadcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Loadcnt)
bool isGFX10Plus(const MCSubtargetInfo &STI)
static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size, unsigned Code, int &Idx, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
static bool isValidRegPrefix(char C)
std::optional< int64_t > getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset, bool IsBuffer, bool HasSOffset)
bool isGlobalSegment(const GlobalValue *GV)
int64_t encode32BitLiteral(int64_t Imm, OperandType Type, bool IsLit)
@ OPERAND_KIMM32
Operand with 32-bit immediate that uses the constant bus.
Definition SIDefines.h:231
@ OPERAND_REG_INLINE_C_LAST
Definition SIDefines.h:254
@ OPERAND_REG_IMM_V2FP16
Definition SIDefines.h:209
@ OPERAND_REG_INLINE_C_FP64
Definition SIDefines.h:222
@ OPERAND_REG_INLINE_C_BF16
Definition SIDefines.h:219
@ OPERAND_REG_INLINE_C_V2BF16
Definition SIDefines.h:224
@ OPERAND_REG_IMM_V2INT16
Definition SIDefines.h:210
@ OPERAND_REG_IMM_BF16
Definition SIDefines.h:206
@ OPERAND_REG_IMM_INT32
Operands with register, 32-bit, or 64-bit immediate.
Definition SIDefines.h:201
@ OPERAND_REG_IMM_V2BF16
Definition SIDefines.h:208
@ OPERAND_REG_INLINE_AC_FIRST
Definition SIDefines.h:256
@ OPERAND_REG_IMM_FP16
Definition SIDefines.h:207
@ OPERAND_REG_IMM_NOINLINE_V2FP16
Definition SIDefines.h:211
@ OPERAND_REG_IMM_FP64
Definition SIDefines.h:205
@ OPERAND_REG_INLINE_C_V2FP16
Definition SIDefines.h:225
@ OPERAND_REG_INLINE_AC_INT32
Operands with an AccVGPR register or inline constant.
Definition SIDefines.h:236
@ OPERAND_REG_INLINE_AC_FP32
Definition SIDefines.h:237
@ OPERAND_REG_IMM_V2INT32
Definition SIDefines.h:212
@ OPERAND_REG_IMM_FP32
Definition SIDefines.h:204
@ OPERAND_REG_INLINE_C_FIRST
Definition SIDefines.h:253
@ OPERAND_REG_INLINE_C_FP32
Definition SIDefines.h:221
@ OPERAND_REG_INLINE_AC_LAST
Definition SIDefines.h:257
@ OPERAND_REG_INLINE_C_INT32
Definition SIDefines.h:217
@ OPERAND_REG_INLINE_C_V2INT16
Definition SIDefines.h:223
@ OPERAND_REG_IMM_V2FP32
Definition SIDefines.h:213
@ OPERAND_REG_INLINE_AC_FP64
Definition SIDefines.h:238
@ OPERAND_REG_INLINE_C_FP16
Definition SIDefines.h:220
@ OPERAND_INLINE_SPLIT_BARRIER_INT32
Definition SIDefines.h:228
raw_ostream & operator<<(raw_ostream &OS, const AMDGPU::Waitcnt &Wait)
void initDefaultAMDKernelCodeT(AMDGPUMCKernelCodeT &KernelCode, const MCSubtargetInfo *STI)
bool isNotGFX9Plus(const MCSubtargetInfo &STI)
bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
bool hasGDS(const MCSubtargetInfo &STI)
bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset)
bool isGFX9Plus(const MCSubtargetInfo &STI)
bool hasDPPSrc1SGPR(const MCSubtargetInfo &STI)
const int OPR_ID_DUPLICATE
bool isVOPD(unsigned Opc)
VOPD::InstInfo getVOPDInstInfo(const MCInstrDesc &OpX, const MCInstrDesc &OpY)
unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Vmcnt)
unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt)
bool isCvt_F32_Fp8_Bf8_e64(unsigned Opc)
Waitcnt decodeLoadcntDscnt(const IsaVersion &Version, unsigned LoadcntDscnt)
std::optional< unsigned > getInlineEncodingV2I16(uint32_t Literal)
unsigned getRegBitWidth(const TargetRegisterClass &RC)
Get the size in bits of a register from the register class RC.
static unsigned encodeStorecntDscnt(const IsaVersion &Version, unsigned Storecnt, unsigned Dscnt)
bool isGFX1250(const MCSubtargetInfo &STI)
int getMCOpcode(uint16_t Opcode, unsigned Gen)
const MIMGBaseOpcodeInfo * getMIMGBaseOpcode(unsigned Opc)
bool isVI(const MCSubtargetInfo &STI)
bool isTensorStore(unsigned Opc)
bool getMUBUFIsBufferInv(unsigned Opc)
bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode)
MCRegister mc2PseudoReg(MCRegister Reg)
Convert hardware register Reg to a pseudo register.
std::optional< unsigned > getInlineEncodingV2BF16(uint32_t Literal)
static int encodeCustomOperand(const CustomOperandVal *Opr, int Size, const StringRef Name, int64_t InputVal, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
unsigned hasKernargPreload(const MCSubtargetInfo &STI)
bool supportsWGP(const MCSubtargetInfo &STI)
bool isMAC(unsigned Opc)
bool isCI(const MCSubtargetInfo &STI)
unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Lgkmcnt)
bool getVOP2IsSingle(unsigned Opc)
bool getMAIIsDGEMM(unsigned Opc)
Returns true if the MAI operation is a double-precision GEMM.
LLVM_READONLY const MIMGBaseOpcodeInfo * getMIMGBaseOpcodeInfo(unsigned BaseOpcode)
const int OPR_ID_UNKNOWN
unsigned getCompletionActionImplicitArgPosition(unsigned CodeObjectVersion)
SmallVector< unsigned > getIntegerVecAttribute(const Function &F, StringRef Name, unsigned Size, unsigned DefaultVal)
int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels)
bool isNotGFX12Plus(const MCSubtargetInfo &STI)
bool getMTBUFHasVAddr(unsigned Opc)
unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt)
uint8_t getELFABIVersion(const Triple &T, unsigned CodeObjectVersion)
std::pair< unsigned, unsigned > getIntegerPairAttribute(const Function &F, StringRef Name, std::pair< unsigned, unsigned > Default, bool OnlyFirstRequired)
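A hedged sketch of getIntegerPairAttribute; F is an assumed llvm::Function, and the attribute name and default are illustrative:
  auto [MinWG, MaxWG] = AMDGPU::getIntegerPairAttribute(
      F, "amdgpu-flat-work-group-size", /*Default=*/{1, 1024},
      /*OnlyFirstRequired=*/false);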
unsigned getLoadcntBitMask(const IsaVersion &Version)
bool isInlinableLiteralI16(int32_t Literal, bool HasInv2Pi)
bool hasVOPD(const MCSubtargetInfo &STI)
int getVOPDFull(unsigned OpX, unsigned OpY, unsigned EncodingFamily, bool VOPD3)
static unsigned encodeDscnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Dscnt)
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi)
Is this literal inlinable?
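A small sketch of the inline-literal checks; the literal values are illustrative:
  bool A = AMDGPU::isInlinableLiteral32(64, /*HasInv2Pi=*/true);          // true: integers -16..64 are inline
  bool B = AMDGPU::isInlinableLiteral32(0x3F800000, /*HasInv2Pi=*/true);  // true: bit pattern of 1.0f
  bool C = AMDGPU::isInlinableLiteral32(100, /*HasInv2Pi=*/true);         // false: needs a 32-bit literal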
const MFMA_F8F6F4_Info * getMFMA_F8F6F4_WithFormatArgs(unsigned CBSZ, unsigned BLGP, unsigned F8F8Opcode)
unsigned getMultigridSyncArgImplicitArgPosition(unsigned CodeObjectVersion)
bool isGFX9_GFX10_GFX11(const MCSubtargetInfo &STI)
bool isGFX9_GFX10(const MCSubtargetInfo &STI)
int getMUBUFElements(unsigned Opc)
static unsigned encodeLoadcntDscnt(const IsaVersion &Version, unsigned Loadcnt, unsigned Dscnt)
const GcnBufferFormatInfo * getGcnBufferFormatInfo(uint8_t BitsPerComp, uint8_t NumComponents, uint8_t NumFormat, const MCSubtargetInfo &STI)
unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc)
bool isPermlane16(unsigned Opc)
bool getMUBUFHasSrsrc(unsigned Opc)
unsigned getDscntBitMask(const IsaVersion &Version)
bool hasAny64BitVGPROperands(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
@ AMDGPU_VS
Used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (vertex shader if tessellation and geometry are not in use, or otherwise copy shader if one is needed).
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
@ AMDGPU_Gfx
Used for AMD graphics targets.
@ AMDGPU_CS_ChainPreserve
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ AMDGPU_ES
Used for AMDPAL shader stage before geometry shader if geometry is in use.
@ AMDGPU_LS
Used for AMDPAL vertex shader if tessellation is in use.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ ELFABIVERSION_AMDGPU_HSA_V4
Definition ELF.h:384
@ ELFABIVERSION_AMDGPU_HSA_V5
Definition ELF.h:385
@ ELFABIVERSION_AMDGPU_HSA_V6
Definition ELF.h:386
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
Definition Metadata.h:682
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
@ Wait
Definition Threading.h:60
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer that is less than or equal to Value and is Skew mod Align.
Definition MathExtras.h:546
std::string utostr(uint64_t X, bool isNeg=false)
Op::Description Desc
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
Definition MathExtras.h:150
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64 bit value.
Definition MathExtras.h:155
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:394
To bit_cast(const From &from) noexcept
Definition bit.h:90
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
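A small sketch of the alignment helpers above; the values are illustrative:
  unsigned Blocks = llvm::divideCeil(100, 64);            // 2
  uint64_t Down   = llvm::alignDown(100, 64);             // 64
  uint64_t Up     = llvm::alignTo(100, llvm::Align(64));  // 128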
DWARFExpression::Operation Op
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
@ AlwaysUniform
The result values are always uniform.
Definition Uniformity.h:23
@ Default
The result values are uniform if and only if all operands are uniform.
Definition Uniformity.h:20
#define N
AMD Kernel Code Object (amd_kernel_code_t).
Instruction set architecture version.
Represents the counter values to wait for in an s_waitcnt instruction.