1 //===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
#include "AMDGPUBaseInfo.h"
#include "AMDGPU.h"
#include "AMDGPUAsmUtils.h"
#include "AMDKernelCodeT.h"
#include "GCNSubtarget.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/AMDHSAKernelDescriptor.h"
#include "llvm/Support/CommandLine.h"

#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

static llvm::cl::opt<unsigned>
    AmdhsaCodeObjectVersion("amdhsa-code-object-version", llvm::cl::Hidden,
                            llvm::cl::desc("AMDHSA Code Object Version"),
                            llvm::cl::init(4));

// TODO-GFX11: Remove this when full 16-bit codegen is implemented.
static llvm::cl::opt<bool>
    LimitTo128VGPRs("amdgpu-limit-to-128-vgprs", llvm::cl::Hidden,
                    llvm::cl::desc("Never use more than 128 VGPRs"));
40 
41 namespace {
42 
43 /// \returns Bit mask for given bit \p Shift and bit \p Width.
44 unsigned getBitMask(unsigned Shift, unsigned Width) {
45  return ((1 << Width) - 1) << Shift;
46 }
47 
48 /// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
49 ///
50 /// \returns Packed \p Dst.
51 unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
52  unsigned Mask = getBitMask(Shift, Width);
53  return ((Src << Shift) & Mask) | (Dst & ~Mask);
54 }
55 
56 /// Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
57 ///
58 /// \returns Unpacked bits.
59 unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
60  return (Src & getBitMask(Shift, Width)) >> Shift;
61 }
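// A worked example of the helpers above (values are illustrative, not from
// any table): getBitMask(4, 3) == 0x70, packBits(5, 0, 4, 3) == 0x50, and
// unpackBits(0x50, 4, 3) == 5, i.e. pack/unpack round-trip a bit field. The
// same round trip is used below for the vmcnt/expcnt/lgkmcnt fields of
// s_waitcnt.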
62 
63 /// \returns Vmcnt bit shift (lower bits).
64 unsigned getVmcntBitShiftLo(unsigned VersionMajor) {
65  return VersionMajor >= 11 ? 10 : 0;
66 }
67 
68 /// \returns Vmcnt bit width (lower bits).
69 unsigned getVmcntBitWidthLo(unsigned VersionMajor) {
70  return VersionMajor >= 11 ? 6 : 4;
71 }
72 
73 /// \returns Expcnt bit shift.
74 unsigned getExpcntBitShift(unsigned VersionMajor) {
75  return VersionMajor >= 11 ? 0 : 4;
76 }
77 
78 /// \returns Expcnt bit width.
79 unsigned getExpcntBitWidth(unsigned VersionMajor) { return 3; }
80 
81 /// \returns Lgkmcnt bit shift.
82 unsigned getLgkmcntBitShift(unsigned VersionMajor) {
83  return VersionMajor >= 11 ? 4 : 8;
84 }
85 
86 /// \returns Lgkmcnt bit width.
87 unsigned getLgkmcntBitWidth(unsigned VersionMajor) {
88  return VersionMajor >= 10 ? 6 : 4;
89 }
90 
91 /// \returns Vmcnt bit shift (higher bits).
92 unsigned getVmcntBitShiftHi(unsigned VersionMajor) { return 14; }
93 
94 /// \returns Vmcnt bit width (higher bits).
95 unsigned getVmcntBitWidthHi(unsigned VersionMajor) {
96  return (VersionMajor == 9 || VersionMajor == 10) ? 2 : 0;
97 }
98 
99 } // end namespace anonymous
100 
101 namespace llvm {
102 
103 namespace AMDGPU {
104 
Optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) {
  if (STI && STI->getTargetTriple().getOS() != Triple::AMDHSA)
    return None;

  switch (AmdhsaCodeObjectVersion) {
  case 2:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V2;
  case 3:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V3;
  case 4:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V4;
  case 5:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V5;
  default:
    report_fatal_error(Twine("Unsupported AMDHSA Code Object Version ") +
                       Twine(AmdhsaCodeObjectVersion));
  }
}
123 
bool isHsaAbiVersion2(const MCSubtargetInfo *STI) {
  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
    return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V2;
  return false;
}

bool isHsaAbiVersion3(const MCSubtargetInfo *STI) {
  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
    return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V3;
  return false;
}

bool isHsaAbiVersion4(const MCSubtargetInfo *STI) {
  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
    return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V4;
  return false;
}

bool isHsaAbiVersion5(const MCSubtargetInfo *STI) {
  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
    return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V5;
  return false;
}

bool isHsaAbiVersion3AndAbove(const MCSubtargetInfo *STI) {
  return isHsaAbiVersion3(STI) || isHsaAbiVersion4(STI) ||
         isHsaAbiVersion5(STI);
}

unsigned getAmdhsaCodeObjectVersion() {
  return AmdhsaCodeObjectVersion;
}
156 
unsigned getMultigridSyncArgImplicitArgPosition() {
  switch (AmdhsaCodeObjectVersion) {
  case 2:
  case 3:
  case 4:
    return 48;
  case 5:
    return AMDGPU::ImplicitArg::MULTIGRID_SYNC_ARG_OFFSET;
  default:
    llvm_unreachable("Unexpected code object version");
    return 0;
  }
}
170 
171 
172 // FIXME: All such magic numbers about the ABI should be in a
173 // central TD file.
unsigned getHostcallImplicitArgPosition() {
  switch (AmdhsaCodeObjectVersion) {
  case 2:
  case 3:
  case 4:
    return 24;
  case 5:
    return AMDGPU::ImplicitArg::HOSTCALL_PTR_OFFSET;
  default:
    llvm_unreachable("Unexpected code object version");
    return 0;
  }
}
187 
188 #define GET_MIMGBaseOpcodesTable_IMPL
189 #define GET_MIMGDimInfoTable_IMPL
190 #define GET_MIMGInfoTable_IMPL
191 #define GET_MIMGLZMappingTable_IMPL
192 #define GET_MIMGMIPMappingTable_IMPL
193 #define GET_MIMGBiasMappingTable_IMPL
194 #define GET_MIMGOffsetMappingTable_IMPL
195 #define GET_MIMGG16MappingTable_IMPL
196 #define GET_MAIInstInfoTable_IMPL
197 #include "AMDGPUGenSearchableTables.inc"
198 
199 int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
200  unsigned VDataDwords, unsigned VAddrDwords) {
201  const MIMGInfo *Info = getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding,
202  VDataDwords, VAddrDwords);
203  return Info ? Info->Opcode : -1;
204 }
205 
206 const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) {
207  const MIMGInfo *Info = getMIMGInfo(Opc);
208  return Info ? getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr;
209 }
210 
211 int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
212  const MIMGInfo *OrigInfo = getMIMGInfo(Opc);
213  const MIMGInfo *NewInfo =
214  getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding,
215  NewChannels, OrigInfo->VAddrDwords);
216  return NewInfo ? NewInfo->Opcode : -1;
217 }
218 
219 unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode,
220  const MIMGDimInfo *Dim, bool IsA16,
221  bool IsG16Supported) {
222  unsigned AddrWords = BaseOpcode->NumExtraArgs;
223  unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
224  (BaseOpcode->LodOrClampOrMip ? 1 : 0);
225  if (IsA16)
226  AddrWords += divideCeil(AddrComponents, 2);
227  else
228  AddrWords += AddrComponents;
229 
230  // Note: For subtargets that support A16 but not G16, enabling A16 also
231  // enables 16 bit gradients.
232  // For subtargets that support A16 (operand) and G16 (done with a different
233  // instruction encoding), they are independent.
234 
235  if (BaseOpcode->Gradients) {
236  if ((IsA16 && !IsG16Supported) || BaseOpcode->G16)
237  // There are two gradients per coordinate, we pack them separately.
238  // For the 3d case,
239  // we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv)
240  AddrWords += alignTo<2>(Dim->NumGradients / 2);
241  else
242  AddrWords += Dim->NumGradients;
243  }
244  return AddrWords;
245 }
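// Worked example (coordinate and gradient counts come from MIMGDimInfo; the
// numbers here are illustrative): a 2D sample with no extra arguments and no
// LOD/clamp/mip has AddrComponents == 2, so it needs 2 address dwords with
// 32-bit addresses or divideCeil(2, 2) == 1 packed dword with A16. If the 2D
// op also has gradients (NumGradients == 4) and they are 16 bit, the
// alignTo<2>(4 / 2) term adds 2 dwords instead of 4.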
246 
struct MUBUFInfo {
  uint16_t Opcode;
  uint16_t BaseOpcode;
  uint8_t elements;
  bool has_vaddr;
  bool has_srsrc;
  bool has_soffset;
  bool IsBufferInv;
};

struct MTBUFInfo {
  uint16_t Opcode;
  uint16_t BaseOpcode;
  uint8_t elements;
  bool has_vaddr;
  bool has_srsrc;
  bool has_soffset;
};

struct SMInfo {
  uint16_t Opcode;
  bool IsBuffer;
};

struct VOPInfo {
  uint16_t Opcode;
  bool IsSingle;
};

struct VOPC64DPPInfo {
  uint16_t Opcode;
};
279 
280 #define GET_MTBUFInfoTable_DECL
281 #define GET_MTBUFInfoTable_IMPL
282 #define GET_MUBUFInfoTable_DECL
283 #define GET_MUBUFInfoTable_IMPL
284 #define GET_SMInfoTable_DECL
285 #define GET_SMInfoTable_IMPL
286 #define GET_VOP1InfoTable_DECL
287 #define GET_VOP1InfoTable_IMPL
288 #define GET_VOP2InfoTable_DECL
289 #define GET_VOP2InfoTable_IMPL
290 #define GET_VOP3InfoTable_DECL
291 #define GET_VOP3InfoTable_IMPL
292 #define GET_VOPC64DPPTable_DECL
293 #define GET_VOPC64DPPTable_IMPL
294 #define GET_VOPC64DPP8Table_DECL
295 #define GET_VOPC64DPP8Table_IMPL
296 #include "AMDGPUGenSearchableTables.inc"
297 
298 int getMTBUFBaseOpcode(unsigned Opc) {
299  const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc);
300  return Info ? Info->BaseOpcode : -1;
301 }
302 
303 int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements) {
304  const MTBUFInfo *Info = getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
305  return Info ? Info->Opcode : -1;
306 }
307 
308 int getMTBUFElements(unsigned Opc) {
309  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
310  return Info ? Info->elements : 0;
311 }
312 
313 bool getMTBUFHasVAddr(unsigned Opc) {
314  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
315  return Info ? Info->has_vaddr : false;
316 }
317 
318 bool getMTBUFHasSrsrc(unsigned Opc) {
319  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
320  return Info ? Info->has_srsrc : false;
321 }
322 
323 bool getMTBUFHasSoffset(unsigned Opc) {
324  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
325  return Info ? Info->has_soffset : false;
326 }
327 
328 int getMUBUFBaseOpcode(unsigned Opc) {
329  const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
330  return Info ? Info->BaseOpcode : -1;
331 }
332 
333 int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements) {
334  const MUBUFInfo *Info = getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
335  return Info ? Info->Opcode : -1;
336 }
337 
338 int getMUBUFElements(unsigned Opc) {
339  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
340  return Info ? Info->elements : 0;
341 }
342 
343 bool getMUBUFHasVAddr(unsigned Opc) {
344  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
345  return Info ? Info->has_vaddr : false;
346 }
347 
348 bool getMUBUFHasSrsrc(unsigned Opc) {
349  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
350  return Info ? Info->has_srsrc : false;
351 }
352 
353 bool getMUBUFHasSoffset(unsigned Opc) {
354  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
355  return Info ? Info->has_soffset : false;
356 }
357 
358 bool getMUBUFIsBufferInv(unsigned Opc) {
359  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
360  return Info ? Info->IsBufferInv : false;
361 }
362 
363 bool getSMEMIsBuffer(unsigned Opc) {
364  const SMInfo *Info = getSMEMOpcodeHelper(Opc);
365  return Info ? Info->IsBuffer : false;
366 }
367 
368 bool getVOP1IsSingle(unsigned Opc) {
369  const VOPInfo *Info = getVOP1OpcodeHelper(Opc);
370  return Info ? Info->IsSingle : false;
371 }
372 
373 bool getVOP2IsSingle(unsigned Opc) {
374  const VOPInfo *Info = getVOP2OpcodeHelper(Opc);
375  return Info ? Info->IsSingle : false;
376 }
377 
378 bool getVOP3IsSingle(unsigned Opc) {
379  const VOPInfo *Info = getVOP3OpcodeHelper(Opc);
380  return Info ? Info->IsSingle : false;
381 }
382 
383 bool isVOPC64DPP(unsigned Opc) {
384  return isVOPC64DPPOpcodeHelper(Opc) || isVOPC64DPP8OpcodeHelper(Opc);
385 }
386 
387 bool getMAIIsDGEMM(unsigned Opc) {
388  const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
389  return Info ? Info->is_dgemm : false;
390 }
391 
392 bool getMAIIsGFX940XDL(unsigned Opc) {
393  const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
394  return Info ? Info->is_gfx940_xdl : false;
395 }
396 
397 // Wrapper for Tablegen'd function. enum Subtarget is not defined in any
398 // header files, so we need to wrap it in a function that takes unsigned
399 // instead.
400 int getMCOpcode(uint16_t Opcode, unsigned Gen) {
401  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
402 }
403 
404 namespace IsaInfo {
405 
AMDGPUTargetID::AMDGPUTargetID(const MCSubtargetInfo &STI)
    : STI(STI), XnackSetting(TargetIDSetting::Any),
      SramEccSetting(TargetIDSetting::Any) {
409  if (!STI.getFeatureBits().test(FeatureSupportsXNACK))
410  XnackSetting = TargetIDSetting::Unsupported;
411  if (!STI.getFeatureBits().test(FeatureSupportsSRAMECC))
412  SramEccSetting = TargetIDSetting::Unsupported;
413 }
414 
void AMDGPUTargetID::setTargetIDFromFeaturesString(StringRef FS) {
  // Check if xnack or sramecc is explicitly enabled or disabled. In the
417  // absence of the target features we assume we must generate code that can run
418  // in any environment.
419  SubtargetFeatures Features(FS);
420  Optional<bool> XnackRequested;
421  Optional<bool> SramEccRequested;
422 
423  for (const std::string &Feature : Features.getFeatures()) {
424  if (Feature == "+xnack")
425  XnackRequested = true;
426  else if (Feature == "-xnack")
427  XnackRequested = false;
428  else if (Feature == "+sramecc")
429  SramEccRequested = true;
430  else if (Feature == "-sramecc")
431  SramEccRequested = false;
432  }
433 
434  bool XnackSupported = isXnackSupported();
435  bool SramEccSupported = isSramEccSupported();
436 
437  if (XnackRequested) {
438  if (XnackSupported) {
439  XnackSetting =
440  *XnackRequested ? TargetIDSetting::On : TargetIDSetting::Off;
441  } else {
442  // If a specific xnack setting was requested and this GPU does not support
443  // xnack emit a warning. Setting will remain set to "Unsupported".
444  if (*XnackRequested) {
445  errs() << "warning: xnack 'On' was requested for a processor that does "
446  "not support it!\n";
447  } else {
448  errs() << "warning: xnack 'Off' was requested for a processor that "
449  "does not support it!\n";
450  }
451  }
452  }
453 
454  if (SramEccRequested) {
455  if (SramEccSupported) {
456  SramEccSetting =
457  *SramEccRequested ? TargetIDSetting::On : TargetIDSetting::Off;
458  } else {
459  // If a specific sramecc setting was requested and this GPU does not
460  // support sramecc emit a warning. Setting will remain set to
461  // "Unsupported".
462  if (*SramEccRequested) {
463  errs() << "warning: sramecc 'On' was requested for a processor that "
464  "does not support it!\n";
465  } else {
466  errs() << "warning: sramecc 'Off' was requested for a processor that "
467  "does not support it!\n";
468  }
469  }
470  }
471 }
472 
473 static TargetIDSetting
getTargetIDSettingFromFeatureString(StringRef FeatureString) {
  if (FeatureString.endswith("-"))
476  return TargetIDSetting::Off;
477  if (FeatureString.endswith("+"))
478  return TargetIDSetting::On;
479 
480  llvm_unreachable("Malformed feature string");
481 }
482 
void AMDGPUTargetID::setTargetIDFromTargetIDStream(StringRef TargetID) {
  SmallVector<StringRef, 3> TargetIDSplit;
485  TargetID.split(TargetIDSplit, ':');
486 
487  for (const auto &FeatureString : TargetIDSplit) {
488  if (FeatureString.startswith("xnack"))
489  XnackSetting = getTargetIDSettingFromFeatureString(FeatureString);
490  if (FeatureString.startswith("sramecc"))
491  SramEccSetting = getTargetIDSettingFromFeatureString(FeatureString);
492  }
493 }
494 
495 std::string AMDGPUTargetID::toString() const {
496  std::string StringRep;
497  raw_string_ostream StreamRep(StringRep);
498 
499  auto TargetTriple = STI.getTargetTriple();
500  auto Version = getIsaVersion(STI.getCPU());
501 
502  StreamRep << TargetTriple.getArchName() << '-'
503  << TargetTriple.getVendorName() << '-'
504  << TargetTriple.getOSName() << '-'
505  << TargetTriple.getEnvironmentName() << '-';
506 
507  std::string Processor;
508  // TODO: Following else statement is present here because we used various
509  // alias names for GPUs up until GFX9 (e.g. 'fiji' is same as 'gfx803').
510  // Remove once all aliases are removed from GCNProcessors.td.
511  if (Version.Major >= 9)
512  Processor = STI.getCPU().str();
513  else
514  Processor = (Twine("gfx") + Twine(Version.Major) + Twine(Version.Minor) +
515  Twine(Version.Stepping))
516  .str();
517 
518  std::string Features;
519  if (Optional<uint8_t> HsaAbiVersion = getHsaAbiVersion(&STI)) {
    switch (*HsaAbiVersion) {
    case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
      // Code object V2 only supported specific processors and had fixed
      // settings for the XNACK.
      if (Processor == "gfx600") {
      } else if (Processor == "gfx601") {
      } else if (Processor == "gfx602") {
      } else if (Processor == "gfx700") {
      } else if (Processor == "gfx701") {
      } else if (Processor == "gfx702") {
      } else if (Processor == "gfx703") {
      } else if (Processor == "gfx704") {
      } else if (Processor == "gfx705") {
      } else if (Processor == "gfx801") {
        if (!isXnackOnOrAny())
          report_fatal_error(
              "AMD GPU code object V2 does not support processor " +
              Twine(Processor) + " without XNACK");
      } else if (Processor == "gfx802") {
      } else if (Processor == "gfx803") {
      } else if (Processor == "gfx805") {
      } else if (Processor == "gfx810") {
        if (!isXnackOnOrAny())
          report_fatal_error(
              "AMD GPU code object V2 does not support processor " +
              Twine(Processor) + " without XNACK");
      } else if (Processor == "gfx900") {
        if (isXnackOnOrAny())
          Processor = "gfx901";
      } else if (Processor == "gfx902") {
        if (isXnackOnOrAny())
          Processor = "gfx903";
      } else if (Processor == "gfx904") {
        if (isXnackOnOrAny())
          Processor = "gfx905";
      } else if (Processor == "gfx906") {
        if (isXnackOnOrAny())
          Processor = "gfx907";
      } else if (Processor == "gfx90c") {
        if (isXnackOnOrAny())
          report_fatal_error(
              "AMD GPU code object V2 does not support processor " +
              Twine(Processor) + " with XNACK being ON or ANY");
      } else {
        report_fatal_error(
            "AMD GPU code object V2 does not support processor " +
            Twine(Processor));
      }
      break;
    case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
      // xnack.
      if (isXnackOnOrAny())
        Features += "+xnack";
      // In code object v2 and v3, "sramecc" feature was spelled with a
      // hyphen ("sram-ecc").
      if (isSramEccOnOrAny())
        Features += "+sram-ecc";
      break;
    case ELF::ELFABIVERSION_AMDGPU_HSA_V4:
    case ELF::ELFABIVERSION_AMDGPU_HSA_V5:
      // sramecc.
      if (getSramEccSetting() == TargetIDSetting::Off)
        Features += ":sramecc-";
      else if (getSramEccSetting() == TargetIDSetting::On)
        Features += ":sramecc+";
      // xnack.
      if (getXnackSetting() == TargetIDSetting::Off)
        Features += ":xnack-";
      else if (getXnackSetting() == TargetIDSetting::On)
        Features += ":xnack+";
      break;
    default:
      break;
    }
  }
595 
596  StreamRep << Processor << Features;
597 
598  StreamRep.flush();
599  return StringRep;
600 }
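// For instance, a V4 code object for gfx90a with SRAMECC enabled and XNACK
// disabled yields a target ID of the form
//   amdgcn-amd-amdhsa--gfx90a:sramecc+:xnack-
// (the triple components are whatever STI.getTargetTriple() reports; the
// example string is illustrative).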
601 
602 unsigned getWavefrontSize(const MCSubtargetInfo *STI) {
603  if (STI->getFeatureBits().test(FeatureWavefrontSize16))
604  return 16;
605  if (STI->getFeatureBits().test(FeatureWavefrontSize32))
606  return 32;
607 
608  return 64;
609 }
610 
611 unsigned getLocalMemorySize(const MCSubtargetInfo *STI) {
612  if (STI->getFeatureBits().test(FeatureLocalMemorySize32768))
613  return 32768;
614  if (STI->getFeatureBits().test(FeatureLocalMemorySize65536))
615  return 65536;
616 
617  return 0;
618 }
619 
620 unsigned getEUsPerCU(const MCSubtargetInfo *STI) {
621  // "Per CU" really means "per whatever functional block the waves of a
622  // workgroup must share". For gfx10 in CU mode this is the CU, which contains
623  // two SIMDs.
624  if (isGFX10Plus(*STI) && STI->getFeatureBits().test(FeatureCuMode))
625  return 2;
626  // Pre-gfx10 a CU contains four SIMDs. For gfx10 in WGP mode the WGP contains
627  // two CUs, so a total of four SIMDs.
628  return 4;
629 }
630 
unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
                               unsigned FlatWorkGroupSize) {
633  assert(FlatWorkGroupSize != 0);
634  if (STI->getTargetTriple().getArch() != Triple::amdgcn)
635  return 8;
636  unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize);
637  if (N == 1)
638  return 40;
639  N = 40 / N;
640  return std::min(N, 16u);
641 }
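// Worked example for a wave64 target (numbers illustrative): a flat workgroup
// size of 256 gives getWavesPerWorkGroup == 4, so N = 40 / 4 = 10 workgroups
// per CU; a size of 1024 gives 40 / 16 = 2; a single-wave workgroup returns
// 40 directly, bypassing the min(N, 16) cap.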
642 
643 unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) {
644  return 1;
645 }
646 
647 unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI) {
648  // FIXME: Need to take scratch memory into account.
649  if (isGFX90A(*STI))
650  return 8;
651  if (!isGFX10Plus(*STI))
652  return 10;
653  return hasGFX10_3Insts(*STI) ? 16 : 20;
654 }
655 
unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI,
                                   unsigned FlatWorkGroupSize) {
658  return divideCeil(getWavesPerWorkGroup(STI, FlatWorkGroupSize),
659  getEUsPerCU(STI));
660 }
661 
unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  return 1;
}

unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  // Some subtargets allow encoding 2048, but this isn't tested or supported.
  return 1024;
}
670 
unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
                              unsigned FlatWorkGroupSize) {
673  return divideCeil(FlatWorkGroupSize, getWavefrontSize(STI));
674 }
675 
676 unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
679  return getAddressableNumSGPRs(STI);
680  if (Version.Major >= 8)
681  return 16;
682  return 8;
683 }
684 
unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) {
  return 8;
687 }
688 
689 unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 8)
692  return 800;
693  return 512;
694 }
695 
unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureSGPRInitBug))
    return FIXED_NUM_SGPRS_FOR_INIT_BUG;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
702  return 106;
703  if (Version.Major >= 8)
704  return 102;
705  return 104;
706 }
707 
708 unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
709  assert(WavesPerEU != 0);
710 
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
713  return 0;
714 
715  if (WavesPerEU >= getMaxWavesPerEU(STI))
716  return 0;
717 
718  unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1);
719  if (STI->getFeatureBits().test(FeatureTrapHandler))
720  MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
721  MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1;
722  return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI));
723 }
724 
725 unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
726  bool Addressable) {
727  assert(WavesPerEU != 0);
728 
729  unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI);
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
732  return Addressable ? AddressableNumSGPRs : 108;
733  if (Version.Major >= 8 && !Addressable)
734  AddressableNumSGPRs = 112;
735  unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU;
736  if (STI->getFeatureBits().test(FeatureTrapHandler))
737  MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
738  MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI));
739  return std::min(MaxNumSGPRs, AddressableNumSGPRs);
740 }
741 
742 unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
743  bool FlatScrUsed, bool XNACKUsed) {
744  unsigned ExtraSGPRs = 0;
745  if (VCCUsed)
746  ExtraSGPRs = 2;
747 
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
750  return ExtraSGPRs;
751 
752  if (Version.Major < 8) {
753  if (FlatScrUsed)
754  ExtraSGPRs = 4;
755  } else {
756  if (XNACKUsed)
757  ExtraSGPRs = 4;
758 
759  if (FlatScrUsed ||
760  STI->getFeatureBits().test(AMDGPU::FeatureArchitectedFlatScratch))
761  ExtraSGPRs = 6;
762  }
763 
764  return ExtraSGPRs;
765 }
766 
767 unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
768  bool FlatScrUsed) {
769  return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed,
770  STI->getFeatureBits().test(AMDGPU::FeatureXNACK));
771 }
772 
773 unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
  NumSGPRs = alignTo(std::max(1u, NumSGPRs), getSGPREncodingGranule(STI));
  // SGPRBlocks is actual number of SGPR blocks minus 1.
776  return NumSGPRs / getSGPREncodingGranule(STI) - 1;
777 }
778 
unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
                             Optional<bool> EnableWavefrontSize32) {
781  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
782  return 8;
783 
784  bool IsWave32 = EnableWavefrontSize32 ?
785  *EnableWavefrontSize32 :
786  STI->getFeatureBits().test(FeatureWavefrontSize32);
787 
788  if (hasGFX10_3Insts(*STI))
789  return IsWave32 ? 16 : 8;
790 
791  return IsWave32 ? 8 : 4;
792 }
793 
unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
                                Optional<bool> EnableWavefrontSize32) {
796  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
797  return 8;
798 
799  bool IsWave32 = EnableWavefrontSize32 ?
800  *EnableWavefrontSize32 :
801  STI->getFeatureBits().test(FeatureWavefrontSize32);
802 
803  return IsWave32 ? 8 : 4;
804 }
805 
806 unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) {
807  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
808  return 512;
809  if (!isGFX10Plus(*STI))
810  return 256;
811  return STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1024 : 512;
812 }
813 
unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI) {
  if (LimitTo128VGPRs.getNumOccurrences() ? LimitTo128VGPRs
                                          : isGFX11Plus(*STI)) {
817  // GFX11 changes the encoding of 16-bit operands in VOP1/2/C instructions
818  // such that values 128..255 no longer mean v128..v255, they mean
819  // v0.hi..v127.hi instead. Until the compiler understands this, it is not
820  // safe to use v128..v255.
821  // TODO-GFX11: Remove this when full 16-bit codegen is implemented.
822  return 128;
823  }
824  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
825  return 512;
826  return 256;
827 }
828 
829 unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
830  assert(WavesPerEU != 0);
831 
832  if (WavesPerEU >= getMaxWavesPerEU(STI))
833  return 0;
834  unsigned MinNumVGPRs =
835  alignDown(getTotalNumVGPRs(STI) / (WavesPerEU + 1),
836  getVGPRAllocGranule(STI)) + 1;
837  return std::min(MinNumVGPRs, getAddressableNumVGPRs(STI));
838 }
839 
840 unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
841  assert(WavesPerEU != 0);
842 
843  unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(STI) / WavesPerEU,
844  getVGPRAllocGranule(STI));
845  unsigned AddressableNumVGPRs = getAddressableNumVGPRs(STI);
846  return std::min(MaxNumVGPRs, AddressableNumVGPRs);
847 }
848 
849 unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
850  Optional<bool> EnableWavefrontSize32) {
  NumVGPRs = alignTo(std::max(1u, NumVGPRs),
                     getVGPREncodingGranule(STI, EnableWavefrontSize32));
853  // VGPRBlocks is actual number of VGPR blocks minus 1.
854  return NumVGPRs / getVGPREncodingGranule(STI, EnableWavefrontSize32) - 1;
855 }
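// Worked example (wave64, pre-gfx90a, so the encoding granule is 4; numbers
// illustrative): NumVGPRs == 37 is first rounded up to 40, giving an encoded
// block count of 40 / 4 - 1 == 9.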
856 
857 } // end namespace IsaInfo
858 
void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
                               const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());

  memset(&Header, 0, sizeof(Header));

  Header.amd_kernel_code_version_major = 1;
  Header.amd_kernel_code_version_minor = 2;
  Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
  Header.amd_machine_version_major = Version.Major;
  Header.amd_machine_version_minor = Version.Minor;
  Header.amd_machine_version_stepping = Version.Stepping;
  Header.kernel_code_entry_byte_offset = sizeof(Header);
  Header.wavefront_size = 6;

  // If the code object does not support indirect functions, then the value must
  // be 0xffffffff.
  Header.call_convention = -1;

  // These alignment values are specified in powers of two, so alignment =
  // 2^n. The minimum alignment is 2^4 = 16.
  Header.kernarg_segment_alignment = 4;
  Header.group_segment_alignment = 4;
  Header.private_segment_alignment = 4;

  if (Version.Major >= 10) {
    if (STI->getFeatureBits().test(FeatureWavefrontSize32)) {
      Header.wavefront_size = 5;
      Header.code_properties |= AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32;
    }
    Header.compute_pgm_resource_registers |=
        S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) |
        S_00B848_MEM_ORDERED(1);
  }
}
894 
amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor(
    const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());

  amdhsa::kernel_descriptor_t KD;
  memset(&KD, 0, sizeof(KD));

  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64,
                  amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc2,
                  amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, 1);
  if (Version.Major >= 10) {
    AMDHSA_BITS_SET(KD.kernel_code_properties,
                    amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
                    STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1 : 0);
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                    amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE,
                    STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1);
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                    amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED, 1);
  }
  if (AMDGPU::isGFX90A(*STI)) {
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc3,
                    amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT,
                    STI->getFeatureBits().test(FeatureTgSplit) ? 1 : 0);
  }
  return KD;
}
928 
bool isGroupSegment(const GlobalValue *GV) {
  return GV->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}

bool isGlobalSegment(const GlobalValue *GV) {
  return GV->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}

bool isReadOnlySegment(const GlobalValue *GV) {
  unsigned AS = GV->getAddressSpace();
  return AS == AMDGPUAS::CONSTANT_ADDRESS ||
         AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
}

bool shouldEmitConstantsToTextSection(const Triple &TT) {
  return TT.getArch() == Triple::r600;
}
946 
947 int getIntegerAttribute(const Function &F, StringRef Name, int Default) {
948  Attribute A = F.getFnAttribute(Name);
949  int Result = Default;
950 
951  if (A.isStringAttribute()) {
952  StringRef Str = A.getValueAsString();
953  if (Str.getAsInteger(0, Result)) {
954  LLVMContext &Ctx = F.getContext();
955  Ctx.emitError("can't parse integer attribute " + Name);
956  }
957  }
958 
959  return Result;
960 }
961 
962 std::pair<int, int> getIntegerPairAttribute(const Function &F,
963  StringRef Name,
964  std::pair<int, int> Default,
965  bool OnlyFirstRequired) {
966  Attribute A = F.getFnAttribute(Name);
967  if (!A.isStringAttribute())
968  return Default;
969 
970  LLVMContext &Ctx = F.getContext();
971  std::pair<int, int> Ints = Default;
972  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
973  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
974  Ctx.emitError("can't parse first integer attribute " + Name);
975  return Default;
976  }
977  if (Strs.second.trim().getAsInteger(0, Ints.second)) {
978  if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
979  Ctx.emitError("can't parse second integer attribute " + Name);
980  return Default;
981  }
982  }
983 
984  return Ints;
985 }
986 
unsigned getVmcntBitMask(const IsaVersion &Version) {
  return (1 << (getVmcntBitWidthLo(Version.Major) +
989  getVmcntBitWidthHi(Version.Major))) -
990  1;
991 }
992 
unsigned getExpcntBitMask(const IsaVersion &Version) {
  return (1 << getExpcntBitWidth(Version.Major)) - 1;
995 }
996 
unsigned getLgkmcntBitMask(const IsaVersion &Version) {
  return (1 << getLgkmcntBitWidth(Version.Major)) - 1;
999 }
1000 
unsigned getWaitcntBitMask(const IsaVersion &Version) {
  unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(Version.Major),
1003  getVmcntBitWidthLo(Version.Major));
1004  unsigned Expcnt = getBitMask(getExpcntBitShift(Version.Major),
1005  getExpcntBitWidth(Version.Major));
1006  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(Version.Major),
1007  getLgkmcntBitWidth(Version.Major));
1008  unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(Version.Major),
1009  getVmcntBitWidthHi(Version.Major));
1010  return VmcntLo | Expcnt | Lgkmcnt | VmcntHi;
1011 }
1012 
1013 unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) {
1014  unsigned VmcntLo = unpackBits(Waitcnt, getVmcntBitShiftLo(Version.Major),
1015  getVmcntBitWidthLo(Version.Major));
1016  unsigned VmcntHi = unpackBits(Waitcnt, getVmcntBitShiftHi(Version.Major),
1017  getVmcntBitWidthHi(Version.Major));
1018  return VmcntLo | VmcntHi << getVmcntBitWidthLo(Version.Major);
1019 }
1020 
1021 unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) {
1022  return unpackBits(Waitcnt, getExpcntBitShift(Version.Major),
1023  getExpcntBitWidth(Version.Major));
1024 }
1025 
1026 unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
1027  return unpackBits(Waitcnt, getLgkmcntBitShift(Version.Major),
1028  getLgkmcntBitWidth(Version.Major));
1029 }
1030 
1031 void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt,
1032  unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
1033  Vmcnt = decodeVmcnt(Version, Waitcnt);
1034  Expcnt = decodeExpcnt(Version, Waitcnt);
1035  Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
1036 }
1037 
1038 Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) {
1039  Waitcnt Decoded;
1040  Decoded.VmCnt = decodeVmcnt(Version, Encoded);
1041  Decoded.ExpCnt = decodeExpcnt(Version, Encoded);
1042  Decoded.LgkmCnt = decodeLgkmcnt(Version, Encoded);
1043  return Decoded;
1044 }
1045 
1046 unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
1047  unsigned Vmcnt) {
1048  Waitcnt = packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(Version.Major),
1049  getVmcntBitWidthLo(Version.Major));
1050  return packBits(Vmcnt >> getVmcntBitWidthLo(Version.Major), Waitcnt,
1051  getVmcntBitShiftHi(Version.Major),
1052  getVmcntBitWidthHi(Version.Major));
1053 }
1054 
1055 unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
1056  unsigned Expcnt) {
1057  return packBits(Expcnt, Waitcnt, getExpcntBitShift(Version.Major),
1058  getExpcntBitWidth(Version.Major));
1059 }
1060 
1061 unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
1062  unsigned Lgkmcnt) {
1063  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(Version.Major),
1064  getLgkmcntBitWidth(Version.Major));
1065 }
1066 
unsigned encodeWaitcnt(const IsaVersion &Version,
                       unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
1069  unsigned Waitcnt = getWaitcntBitMask(Version);
1070  Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
1071  Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
1072  Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
1073  return Waitcnt;
1074 }
1075 
1076 unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) {
1077  return encodeWaitcnt(Version, Decoded.VmCnt, Decoded.ExpCnt, Decoded.LgkmCnt);
1078 }
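// Usage sketch for the gfx9 layout (vmcnt in bits [3:0] plus [15:14], expcnt
// in [6:4], lgkmcnt in [11:8]); the CPU name is only an example:
//   IsaVersion V = getIsaVersion("gfx900");                  // V.Major == 9
//   unsigned W = encodeWaitcnt(V, /*Vmcnt=*/0, /*Expcnt=*/0, /*Lgkmcnt=*/0);
//   unsigned Vm = decodeVmcnt(V, W);                         // == 0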
1079 
1080 //===----------------------------------------------------------------------===//
1081 // Custom Operands.
1082 //
1083 // A table of custom operands shall describe "primary" operand names
1084 // first followed by aliases if any. It is not required but recommended
1085 // to arrange operands so that operand encoding match operand position
1086 // in the table. This will make disassembly a bit more efficient.
1087 // Unused slots in the table shall have an empty name.
1088 //
1089 //===----------------------------------------------------------------------===//
1090 
1091 template <class T>
1092 static bool isValidOpr(int Idx, const CustomOperand<T> OpInfo[], int OpInfoSize,
1093  T Context) {
1094  return 0 <= Idx && Idx < OpInfoSize && !OpInfo[Idx].Name.empty() &&
1095  (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context));
1096 }
1097 
1098 template <class T>
1099 static int getOprIdx(std::function<bool(const CustomOperand<T> &)> Test,
1100  const CustomOperand<T> OpInfo[], int OpInfoSize,
1101  T Context) {
1102  int InvalidIdx = OPR_ID_UNKNOWN;
1103  for (int Idx = 0; Idx < OpInfoSize; ++Idx) {
1104  if (Test(OpInfo[Idx])) {
1105  if (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context))
1106  return Idx;
      InvalidIdx = OPR_ID_UNSUPPORTED;
    }
1109  }
1110  return InvalidIdx;
1111 }
1112 
1113 template <class T>
1114 static int getOprIdx(const StringRef Name, const CustomOperand<T> OpInfo[],
1115  int OpInfoSize, T Context) {
1116  auto Test = [=](const CustomOperand<T> &Op) { return Op.Name == Name; };
1117  return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context);
1118 }
1119 
1120 template <class T>
1121 static int getOprIdx(int Id, const CustomOperand<T> OpInfo[], int OpInfoSize,
1122  T Context, bool QuickCheck = true) {
1123  auto Test = [=](const CustomOperand<T> &Op) {
1124  return Op.Encoding == Id && !Op.Name.empty();
1125  };
1126  // This is an optimization that should work in most cases.
1127  // As a side effect, it may cause selection of an alias
1128  // instead of a primary operand name in case of sparse tables.
1129  if (QuickCheck && isValidOpr<T>(Id, OpInfo, OpInfoSize, Context) &&
1130  OpInfo[Id].Encoding == Id) {
1131  return Id;
1132  }
1133  return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context);
1134 }
1135 
1136 //===----------------------------------------------------------------------===//
1137 // Custom Operand Values
1138 //===----------------------------------------------------------------------===//
1139 
static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr,
                                                int Size,
                                                const MCSubtargetInfo &STI) {
1143  unsigned Enc = 0;
1144  for (int Idx = 0; Idx < Size; ++Idx) {
1145  const auto &Op = Opr[Idx];
1146  if (Op.isSupported(STI))
1147  Enc |= Op.encode(Op.Default);
1148  }
1149  return Enc;
1150 }
1151 
static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr,
                                            int Size, unsigned Code,
1154  bool &HasNonDefaultVal,
1155  const MCSubtargetInfo &STI) {
1156  unsigned UsedOprMask = 0;
1157  HasNonDefaultVal = false;
1158  for (int Idx = 0; Idx < Size; ++Idx) {
1159  const auto &Op = Opr[Idx];
1160  if (!Op.isSupported(STI))
1161  continue;
1162  UsedOprMask |= Op.getMask();
1163  unsigned Val = Op.decode(Code);
1164  if (!Op.isValid(Val))
1165  return false;
1166  HasNonDefaultVal |= (Val != Op.Default);
1167  }
1168  return (Code & ~UsedOprMask) == 0;
1169 }
1170 
1171 static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size,
1172  unsigned Code, int &Idx, StringRef &Name,
1173  unsigned &Val, bool &IsDefault,
1174  const MCSubtargetInfo &STI) {
1175  while (Idx < Size) {
1176  const auto &Op = Opr[Idx++];
1177  if (Op.isSupported(STI)) {
1178  Name = Op.Name;
1179  Val = Op.decode(Code);
1180  IsDefault = (Val == Op.Default);
1181  return true;
1182  }
1183  }
1184 
1185  return false;
1186 }
1187 
static int encodeCustomOperandVal(const CustomOperandVal &Op,
                                  int64_t InputVal) {
1190  if (InputVal < 0 || InputVal > Op.Max)
1191  return OPR_VAL_INVALID;
1192  return Op.encode(InputVal);
1193 }
1194 
1195 static int encodeCustomOperand(const CustomOperandVal *Opr, int Size,
1196  const StringRef Name, int64_t InputVal,
1197  unsigned &UsedOprMask,
1198  const MCSubtargetInfo &STI) {
1199  int InvalidId = OPR_ID_UNKNOWN;
1200  for (int Idx = 0; Idx < Size; ++Idx) {
1201  const auto &Op = Opr[Idx];
1202  if (Op.Name == Name) {
1203  if (!Op.isSupported(STI)) {
1204  InvalidId = OPR_ID_UNSUPPORTED;
1205  continue;
1206  }
1207  auto OprMask = Op.getMask();
1208  if (OprMask & UsedOprMask)
1209  return OPR_ID_DUPLICATE;
1210  UsedOprMask |= OprMask;
1211  return encodeCustomOperandVal(Op, InputVal);
1212  }
1213  }
1214  return InvalidId;
1215 }
1216 
1217 //===----------------------------------------------------------------------===//
1218 // DepCtr
1219 //===----------------------------------------------------------------------===//
1220 
1221 namespace DepCtr {
1222 
int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI) {
  static int Default = -1;
  if (Default == -1)
    Default = getDefaultCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, STI);
  return Default;
}
1229 
1230 bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal,
1231  const MCSubtargetInfo &STI) {
  return isSymbolicCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, Code,
                                         HasNonDefaultVal, STI);
1234 }
1235 
1236 bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val,
1237  bool &IsDefault, const MCSubtargetInfo &STI) {
1238  return decodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Code, Id, Name, Val,
1239  IsDefault, STI);
1240 }
1241 
1242 int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask,
1243  const MCSubtargetInfo &STI) {
1244  return encodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Name, Val, UsedOprMask,
1245  STI);
1246 }
1247 
1248 } // namespace DepCtr
1249 
1250 //===----------------------------------------------------------------------===//
1251 // hwreg
1252 //===----------------------------------------------------------------------===//
1253 
1254 namespace Hwreg {
1255 
1256 int64_t getHwregId(const StringRef Name, const MCSubtargetInfo &STI) {
1257  int Idx = getOprIdx<const MCSubtargetInfo &>(Name, Opr, OPR_SIZE, STI);
1258  return (Idx < 0) ? Idx : Opr[Idx].Encoding;
1259 }
1260 
1261 bool isValidHwreg(int64_t Id) {
1262  return 0 <= Id && isUInt<ID_WIDTH_>(Id);
1263 }
1264 
bool isValidHwregOffset(int64_t Offset) {
  return 0 <= Offset && isUInt<OFFSET_WIDTH_>(Offset);
1267 }
1268 
1269 bool isValidHwregWidth(int64_t Width) {
1270  return 0 <= (Width - 1) && isUInt<WIDTH_M1_WIDTH_>(Width - 1);
1271 }
1272 
uint64_t encodeHwreg(uint64_t Id, uint64_t Offset, uint64_t Width) {
  return (Id << ID_SHIFT_) |
1275  (Offset << OFFSET_SHIFT_) |
1276  ((Width - 1) << WIDTH_M1_SHIFT_);
1277 }
1278 
1279 StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI) {
1280  int Idx = getOprIdx<const MCSubtargetInfo &>(Id, Opr, OPR_SIZE, STI);
1281  return (Idx < 0) ? "" : Opr[Idx].Name;
1282 }
1283 
1284 void decodeHwreg(unsigned Val, unsigned &Id, unsigned &Offset, unsigned &Width) {
1285  Id = (Val & ID_MASK_) >> ID_SHIFT_;
1286  Offset = (Val & OFFSET_MASK_) >> OFFSET_SHIFT_;
1287  Width = ((Val & WIDTH_M1_MASK_) >> WIDTH_M1_SHIFT_) + 1;
1288 }
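// Example round trip (field values illustrative): encodeHwreg(/*Id=*/6,
// /*Offset=*/0, /*Width=*/32) stores Id in the ID field, Offset above it and
// Width - 1 in the WIDTH_M1 field; decodeHwreg() on the result gives back
// (6, 0, 32).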
1289 
1290 } // namespace Hwreg
1291 
1292 //===----------------------------------------------------------------------===//
1293 // exp tgt
1294 //===----------------------------------------------------------------------===//
1295 
1296 namespace Exp {
1297 
struct ExpTgt {
  StringLiteral Name;
  unsigned Tgt;
  unsigned MaxIndex;
};
1303 
1304 static constexpr ExpTgt ExpTgtInfo[] = {
1305  {{"null"}, ET_NULL, ET_NULL_MAX_IDX},
1306  {{"mrtz"}, ET_MRTZ, ET_MRTZ_MAX_IDX},
1307  {{"prim"}, ET_PRIM, ET_PRIM_MAX_IDX},
1308  {{"mrt"}, ET_MRT0, ET_MRT_MAX_IDX},
1309  {{"pos"}, ET_POS0, ET_POS_MAX_IDX},
1310  {{"dual_src_blend"}, ET_DUAL_SRC_BLEND0, ET_DUAL_SRC_BLEND_MAX_IDX},
1311  {{"param"}, ET_PARAM0, ET_PARAM_MAX_IDX},
1312 };
1313 
1314 bool getTgtName(unsigned Id, StringRef &Name, int &Index) {
1315  for (const ExpTgt &Val : ExpTgtInfo) {
1316  if (Val.Tgt <= Id && Id <= Val.Tgt + Val.MaxIndex) {
1317  Index = (Val.MaxIndex == 0) ? -1 : (Id - Val.Tgt);
1318  Name = Val.Name;
1319  return true;
1320  }
1321  }
1322  return false;
1323 }
1324 
1325 unsigned getTgtId(const StringRef Name) {
1326 
1327  for (const ExpTgt &Val : ExpTgtInfo) {
1328  if (Val.MaxIndex == 0 && Name == Val.Name)
1329  return Val.Tgt;
1330 
1331  if (Val.MaxIndex > 0 && Name.startswith(Val.Name)) {
1332  StringRef Suffix = Name.drop_front(Val.Name.size());
1333 
1334  unsigned Id;
1335  if (Suffix.getAsInteger(10, Id) || Id > Val.MaxIndex)
1336  return ET_INVALID;
1337 
1338  // Disable leading zeroes
1339  if (Suffix.size() > 1 && Suffix[0] == '0')
1340  return ET_INVALID;
1341 
1342  return Val.Tgt + Id;
1343  }
1344  }
1345  return ET_INVALID;
1346 }
1347 
1348 bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI) {
1349  switch (Id) {
1350  case ET_NULL:
1351  return !isGFX11Plus(STI);
1352  case ET_POS4:
1353  case ET_PRIM:
1354  return isGFX10Plus(STI);
1355  case ET_DUAL_SRC_BLEND0:
1356  case ET_DUAL_SRC_BLEND1:
1357  return isGFX11Plus(STI);
1358  default:
1359  if (Id >= ET_PARAM0 && Id <= ET_PARAM31)
1360  return !isGFX11Plus(STI);
1361  return true;
1362  }
1363 }
1364 
1365 } // namespace Exp
1366 
1367 //===----------------------------------------------------------------------===//
1368 // MTBUF Format
1369 //===----------------------------------------------------------------------===//
1370 
1371 namespace MTBUFFormat {
1372 
1373 int64_t getDfmt(const StringRef Name) {
1374  for (int Id = DFMT_MIN; Id <= DFMT_MAX; ++Id) {
1375  if (Name == DfmtSymbolic[Id])
1376  return Id;
1377  }
1378  return DFMT_UNDEF;
1379 }
1380 
StringRef getDfmtName(unsigned Id) {
  assert(Id <= DFMT_MAX);
1383  return DfmtSymbolic[Id];
1384 }
1385 
static StringLiteral const *getNfmtLookupTable(const MCSubtargetInfo &STI) {
  if (isSI(STI) || isCI(STI))
1388  return NfmtSymbolicSICI;
1389  if (isVI(STI) || isGFX9(STI))
1390  return NfmtSymbolicVI;
1391  return NfmtSymbolicGFX10;
1392 }
1393 
1394 int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI) {
1395  auto lookupTable = getNfmtLookupTable(STI);
1396  for (int Id = NFMT_MIN; Id <= NFMT_MAX; ++Id) {
1397  if (Name == lookupTable[Id])
1398  return Id;
1399  }
1400  return NFMT_UNDEF;
1401 }
1402 
1403 StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI) {
1404  assert(Id <= NFMT_MAX);
1405  return getNfmtLookupTable(STI)[Id];
1406 }
1407 
1408 bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI) {
1409  unsigned Dfmt;
1410  unsigned Nfmt;
1411  decodeDfmtNfmt(Id, Dfmt, Nfmt);
1412  return isValidNfmt(Nfmt, STI);
1413 }
1414 
1415 bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI) {
1416  return !getNfmtName(Id, STI).empty();
1417 }
1418 
1419 int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt) {
1420  return (Dfmt << DFMT_SHIFT) | (Nfmt << NFMT_SHIFT);
1421 }
1422 
1423 void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt) {
1424  Dfmt = (Format >> DFMT_SHIFT) & DFMT_MASK;
1425  Nfmt = (Format >> NFMT_SHIFT) & NFMT_MASK;
1426 }
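// Example round trip (values illustrative): encodeDfmtNfmt(4, 7) places dfmt
// at DFMT_SHIFT and nfmt at NFMT_SHIFT in the MTBUF format operand, and
// decodeDfmtNfmt() on the result recovers (Dfmt, Nfmt) == (4, 7), provided
// both values fit within their DFMT_MASK/NFMT_MASK widths.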
1427 
1428 int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI) {
1429  if (isGFX11Plus(STI)) {
1430  for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
1431  if (Name == UfmtSymbolicGFX11[Id])
1432  return Id;
1433  }
1434  } else {
1435  for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
1436  if (Name == UfmtSymbolicGFX10[Id])
1437  return Id;
1438  }
1439  }
1440  return UFMT_UNDEF;
1441 }
1442 
StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI) {
  if (isValidUnifiedFormat(Id, STI))
1445  return isGFX10(STI) ? UfmtSymbolicGFX10[Id] : UfmtSymbolicGFX11[Id];
1446  return "";
1447 }
1448 
1449 bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI) {
1450  return isGFX10(STI) ? Id <= UfmtGFX10::UFMT_LAST : Id <= UfmtGFX11::UFMT_LAST;
1451 }
1452 
1453 int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt,
1454  const MCSubtargetInfo &STI) {
1455  int64_t Fmt = encodeDfmtNfmt(Dfmt, Nfmt);
1456  if (isGFX11Plus(STI)) {
1457  for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
1458  if (Fmt == DfmtNfmt2UFmtGFX11[Id])
1459  return Id;
1460  }
1461  } else {
1462  for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
1463  if (Fmt == DfmtNfmt2UFmtGFX10[Id])
1464  return Id;
1465  }
1466  }
1467  return UFMT_UNDEF;
1468 }
1469 
1470 bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI) {
1471  return isGFX10Plus(STI) ? (Val <= UFMT_MAX) : (Val <= DFMT_NFMT_MAX);
1472 }
1473 
unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI) {
  if (isGFX10Plus(STI))
1476  return UFMT_DEFAULT;
1477  return DFMT_NFMT_DEFAULT;
1478 }
1479 
1480 } // namespace MTBUFFormat
1481 
1482 //===----------------------------------------------------------------------===//
1483 // SendMsg
1484 //===----------------------------------------------------------------------===//
1485 
1486 namespace SendMsg {
1487 
static uint64_t getMsgIdMask(const MCSubtargetInfo &STI) {
  return isGFX11Plus(STI) ? ID_MASK_GFX11Plus_ : ID_MASK_PreGFX11_;
}
1491 
1492 int64_t getMsgId(const StringRef Name, const MCSubtargetInfo &STI) {
1493  int Idx = getOprIdx<const MCSubtargetInfo &>(Name, Msg, MSG_SIZE, STI);
1494  return (Idx < 0) ? Idx : Msg[Idx].Encoding;
1495 }
1496 
1497 bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI) {
1498  return (MsgId & ~(getMsgIdMask(STI))) == 0;
1499 }
1500 
1501 StringRef getMsgName(int64_t MsgId, const MCSubtargetInfo &STI) {
1502  int Idx = getOprIdx<const MCSubtargetInfo &>(MsgId, Msg, MSG_SIZE, STI);
1503  return (Idx < 0) ? "" : Msg[Idx].Name;
1504 }
1505 
1506 int64_t getMsgOpId(int64_t MsgId, const StringRef Name) {
1507  const char* const *S = (MsgId == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
1508  const int F = (MsgId == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
1509  const int L = (MsgId == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
1510  for (int i = F; i < L; ++i) {
1511  if (Name == S[i]) {
1512  return i;
1513  }
1514  }
1515  return OP_UNKNOWN_;
1516 }
1517 
1518 bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI,
1519  bool Strict) {
1520  assert(isValidMsgId(MsgId, STI));
1521 
1522  if (!Strict)
1523  return 0 <= OpId && isUInt<OP_WIDTH_>(OpId);
1524 
1525  if (MsgId == ID_SYSMSG)
1526  return OP_SYS_FIRST_ <= OpId && OpId < OP_SYS_LAST_;
1527  if (!isGFX11Plus(STI)) {
1528  switch (MsgId) {
1529  case ID_GS_PreGFX11:
1530  return (OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_) && OpId != OP_GS_NOP;
1531  case ID_GS_DONE_PreGFX11:
1532  return OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_;
1533  }
1534  }
1535  return OpId == OP_NONE_;
1536 }
1537 
1538 StringRef getMsgOpName(int64_t MsgId, int64_t OpId,
1539  const MCSubtargetInfo &STI) {
1540  assert(msgRequiresOp(MsgId, STI));
1541  return (MsgId == ID_SYSMSG)? OpSysSymbolic[OpId] : OpGsSymbolic[OpId];
1542 }
1543 
1544 bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId,
1545  const MCSubtargetInfo &STI, bool Strict) {
1546  assert(isValidMsgOp(MsgId, OpId, STI, Strict));
1547 
1548  if (!Strict)
1549  return 0 <= StreamId && isUInt<STREAM_ID_WIDTH_>(StreamId);
1550 
1551  if (!isGFX11Plus(STI)) {
1552  switch (MsgId) {
1553  case ID_GS_PreGFX11:
1555  case ID_GS_DONE_PreGFX11:
      return (OpId == OP_GS_NOP) ?
             (StreamId == STREAM_ID_NONE_) :
             (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_);
    }
1560  }
1561  return StreamId == STREAM_ID_NONE_;
1562 }
1563 
1564 bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI) {
1565  return MsgId == ID_SYSMSG ||
1566  (!isGFX11Plus(STI) &&
1567  (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11));
1568 }
1569 
1570 bool msgSupportsStream(int64_t MsgId, int64_t OpId,
1571  const MCSubtargetInfo &STI) {
1572  return !isGFX11Plus(STI) &&
1573  (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11) &&
1574  OpId != OP_GS_NOP;
1575 }
1576 
1577 void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId,
1578  uint16_t &StreamId, const MCSubtargetInfo &STI) {
1579  MsgId = Val & getMsgIdMask(STI);
1580  if (isGFX11Plus(STI)) {
1581  OpId = 0;
1582  StreamId = 0;
1583  } else {
1584  OpId = (Val & OP_MASK_) >> OP_SHIFT_;
    StreamId = (Val & STREAM_ID_MASK_) >> STREAM_ID_SHIFT_;
  }
1587 }
1588 
uint64_t encodeMsg(uint64_t MsgId,
                   uint64_t OpId,
1591  uint64_t StreamId) {
1592  return MsgId | (OpId << OP_SHIFT_) | (StreamId << STREAM_ID_SHIFT_);
1593 }
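// Usage sketch for a pre-GFX11 target (the MsgId/OpId values are illustrative
// rather than taken from the symbolic tables):
//   uint64_t Imm = encodeMsg(/*MsgId=*/2, /*OpId=*/2, /*StreamId=*/0);
//   // MsgId occupies the low bits, OpId starts at OP_SHIFT_ and StreamId at
//   // STREAM_ID_SHIFT_; decodeMsg() reverses this packing.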
1594 
1595 } // namespace SendMsg
1596 
1597 //===----------------------------------------------------------------------===//
1598 //
1599 //===----------------------------------------------------------------------===//
1600 
unsigned getInitialPSInputAddr(const Function &F) {
  return getIntegerAttribute(F, "InitialPSInputAddr", 0);
}

bool getHasColorExport(const Function &F) {
  // As a safe default always respond as if PS has color exports.
  return getIntegerAttribute(
             F, "amdgpu-color-export",
             F.getCallingConv() == CallingConv::AMDGPU_PS ? 1 : 0) != 0;
}

bool getHasDepthExport(const Function &F) {
  return getIntegerAttribute(F, "amdgpu-depth-export", 0) != 0;
}
1615 
bool isShader(CallingConv::ID cc) {
  switch(cc) {
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    return true;
  default:
    return false;
  }
}

bool isGraphics(CallingConv::ID cc) {
  return isShader(cc) || cc == CallingConv::AMDGPU_Gfx;
}

bool isCompute(CallingConv::ID cc) {
  return !isGraphics(cc) || cc == CallingConv::AMDGPU_CS;
}

bool isEntryFunctionCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_LS:
    return true;
  default:
    return false;
  }
}

bool isModuleEntryFunctionCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::AMDGPU_Gfx:
    return true;
  default:
    return isEntryFunctionCC(CC);
  }
}
1664 
1665 bool isKernelCC(const Function *Func) {
1666  return AMDGPU::isModuleEntryFunctionCC(Func->getCallingConv());
1667 }
1668 
1669 bool hasXNACK(const MCSubtargetInfo &STI) {
1670  return STI.getFeatureBits()[AMDGPU::FeatureXNACK];
1671 }
1672 
1673 bool hasSRAMECC(const MCSubtargetInfo &STI) {
1674  return STI.getFeatureBits()[AMDGPU::FeatureSRAMECC];
1675 }
1676 
1677 bool hasMIMG_R128(const MCSubtargetInfo &STI) {
1678  return STI.getFeatureBits()[AMDGPU::FeatureMIMG_R128] && !STI.getFeatureBits()[AMDGPU::FeatureR128A16];
1679 }
1680 
1681 bool hasGFX10A16(const MCSubtargetInfo &STI) {
1682  return STI.getFeatureBits()[AMDGPU::FeatureGFX10A16];
1683 }
1684 
1685 bool hasG16(const MCSubtargetInfo &STI) {
1686  return STI.getFeatureBits()[AMDGPU::FeatureG16];
1687 }
1688 
1689 bool hasPackedD16(const MCSubtargetInfo &STI) {
1690  return !STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem] && !isCI(STI) &&
1691  !isSI(STI);
1692 }
1693 
1694 bool isSI(const MCSubtargetInfo &STI) {
1695  return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
1696 }
1697 
1698 bool isCI(const MCSubtargetInfo &STI) {
1699  return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
1700 }
1701 
1702 bool isVI(const MCSubtargetInfo &STI) {
1703  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
1704 }
1705 
1706 bool isGFX9(const MCSubtargetInfo &STI) {
1707  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
1708 }
1709 
1710 bool isGFX9_GFX10(const MCSubtargetInfo &STI) {
1711  return isGFX9(STI) || isGFX10(STI);
1712 }
1713 
bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI) {
  return isVI(STI) || isGFX9(STI) || isGFX10(STI);
1716 }
1717 
1718 bool isGFX8Plus(const MCSubtargetInfo &STI) {
1719  return isVI(STI) || isGFX9Plus(STI);
1720 }
1721 
1722 bool isGFX9Plus(const MCSubtargetInfo &STI) {
1723  return isGFX9(STI) || isGFX10Plus(STI);
1724 }
1725 
1726 bool isGFX10(const MCSubtargetInfo &STI) {
1727  return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
1728 }
1729 
1730 bool isGFX10Plus(const MCSubtargetInfo &STI) {
1731  return isGFX10(STI) || isGFX11Plus(STI);
1732 }
1733 
1734 bool isGFX11(const MCSubtargetInfo &STI) {
1735  return STI.getFeatureBits()[AMDGPU::FeatureGFX11];
1736 }
1737 
1738 bool isGFX11Plus(const MCSubtargetInfo &STI) {
1739  return isGFX11(STI);
1740 }
1741 
bool isNotGFX11Plus(const MCSubtargetInfo &STI) {
  return !isGFX11Plus(STI);
1744 }
1745 
bool isNotGFX10Plus(const MCSubtargetInfo &STI) {
  return isSI(STI) || isCI(STI) || isVI(STI) || isGFX9(STI);
1748 }
1749 
bool isGFX10Before1030(const MCSubtargetInfo &STI) {
  return isGFX10(STI) && !AMDGPU::isGFX10_BEncoding(STI);
1752 }
1753 
bool isGCN3Encoding(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding];
1756 }
1757 
bool isGFX10_AEncoding(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10_AEncoding];
1760 }
1761 
bool isGFX10_BEncoding(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding];
1764 }
1765 
bool hasGFX10_3Insts(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10_3Insts];
1768 }
1769 
1770 bool isGFX90A(const MCSubtargetInfo &STI) {
1771  return STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts];
1772 }
1773 
1774 bool isGFX940(const MCSubtargetInfo &STI) {
1775  return STI.getFeatureBits()[AMDGPU::FeatureGFX940Insts];
1776 }
1777 
bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureArchitectedFlatScratch];
1780 }
1781 
1782 bool hasMAIInsts(const MCSubtargetInfo &STI) {
1783  return STI.getFeatureBits()[AMDGPU::FeatureMAIInsts];
1784 }
1785 
1786 bool hasVOPD(const MCSubtargetInfo &STI) {
1787  return STI.getFeatureBits()[AMDGPU::FeatureVOPD];
1788 }
1789 
1790 int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR,
1791  int32_t ArgNumVGPR) {
1792  if (has90AInsts && ArgNumAGPR)
1793  return alignTo(ArgNumVGPR, 4) + ArgNumAGPR;
1794  return std::max(ArgNumVGPR, ArgNumAGPR);
1795 }
1796 
1797 bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI) {
1798  const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
1799  const unsigned FirstSubReg = TRI->getSubReg(Reg, AMDGPU::sub0);
1800  return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
1801  Reg == AMDGPU::SCC;
1802 }
1803 
1804 #define MAP_REG2REG \
1805  using namespace AMDGPU; \
1806  switch(Reg) { \
1807  default: return Reg; \
1808  CASE_CI_VI(FLAT_SCR) \
1809  CASE_CI_VI(FLAT_SCR_LO) \
1810  CASE_CI_VI(FLAT_SCR_HI) \
1811  CASE_VI_GFX9PLUS(TTMP0) \
1812  CASE_VI_GFX9PLUS(TTMP1) \
1813  CASE_VI_GFX9PLUS(TTMP2) \
1814  CASE_VI_GFX9PLUS(TTMP3) \
1815  CASE_VI_GFX9PLUS(TTMP4) \
1816  CASE_VI_GFX9PLUS(TTMP5) \
1817  CASE_VI_GFX9PLUS(TTMP6) \
1818  CASE_VI_GFX9PLUS(TTMP7) \
1819  CASE_VI_GFX9PLUS(TTMP8) \
1820  CASE_VI_GFX9PLUS(TTMP9) \
1821  CASE_VI_GFX9PLUS(TTMP10) \
1822  CASE_VI_GFX9PLUS(TTMP11) \
1823  CASE_VI_GFX9PLUS(TTMP12) \
1824  CASE_VI_GFX9PLUS(TTMP13) \
1825  CASE_VI_GFX9PLUS(TTMP14) \
1826  CASE_VI_GFX9PLUS(TTMP15) \
1827  CASE_VI_GFX9PLUS(TTMP0_TTMP1) \
1828  CASE_VI_GFX9PLUS(TTMP2_TTMP3) \
1829  CASE_VI_GFX9PLUS(TTMP4_TTMP5) \
1830  CASE_VI_GFX9PLUS(TTMP6_TTMP7) \
1831  CASE_VI_GFX9PLUS(TTMP8_TTMP9) \
1832  CASE_VI_GFX9PLUS(TTMP10_TTMP11) \
1833  CASE_VI_GFX9PLUS(TTMP12_TTMP13) \
1834  CASE_VI_GFX9PLUS(TTMP14_TTMP15) \
1835  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \
1836  CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \
1837  CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \
1838  CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \
1839  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
1840  CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
1841  CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
1842  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
1843  CASE_GFXPRE11_GFX11PLUS(M0) \
1844  CASE_GFXPRE11_GFX11PLUS(SGPR_NULL) \
1845  CASE_GFXPRE11_GFX11PLUS_TO(SGPR_NULL64, SGPR_NULL) \
1846  }
1847 
1848 #define CASE_CI_VI(node) \
1849  assert(!isSI(STI)); \
1850  case node: return isCI(STI) ? node##_ci : node##_vi;
1851 
1852 #define CASE_VI_GFX9PLUS(node) \
1853  case node: return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi;
1854 
1855 #define CASE_GFXPRE11_GFX11PLUS(node) \
1856  case node: return isGFX11Plus(STI) ? node##_gfx11plus : node##_gfxpre11;
1857 
1858 #define CASE_GFXPRE11_GFX11PLUS_TO(node, result) \
1859  case node: return isGFX11Plus(STI) ? result##_gfx11plus : result##_gfxpre11;
1860 
1861 unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
1862  if (STI.getTargetTriple().getArch() == Triple::r600)
1863  return Reg;
1864  MAP_REG2REG
1865 }
1866 
1867 #undef CASE_CI_VI
1868 #undef CASE_VI_GFX9PLUS
1869 #undef CASE_GFXPRE11_GFX11PLUS
1870 #undef CASE_GFXPRE11_GFX11PLUS_TO
1871 
1872 #define CASE_CI_VI(node) case node##_ci: case node##_vi: return node;
1873 #define CASE_VI_GFX9PLUS(node) case node##_vi: case node##_gfx9plus: return node;
1874 #define CASE_GFXPRE11_GFX11PLUS(node) case node##_gfx11plus: case node##_gfxpre11: return node;
1875 #define CASE_GFXPRE11_GFX11PLUS_TO(node, result)
1876 
1877 unsigned mc2PseudoReg(unsigned Reg) {
1878  MAP_REG2REG
1879 }
1880 
1881 #undef CASE_CI_VI
1882 #undef CASE_VI_GFX9PLUS
1883 #undef CASE_GFXPRE11_GFX11PLUS
1884 #undef CASE_GFXPRE11_GFX11PLUS_TO
1885 #undef MAP_REG2REG
1886 
1887 bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
1888  assert(OpNo < Desc.NumOperands);
1889  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
1890  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
1891  OpType <= AMDGPU::OPERAND_SRC_LAST;
1892 }
1893 
1894 bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
1895  assert(OpNo < Desc.NumOperands);
1896  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
1897  switch (OpType) {
1917  return true;
1918  default:
1919  return false;
1920  }
1921 }
1922 
1923 bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
1924  assert(OpNo < Desc.NumOperands);
1925  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
         OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
}
1929 
1930 // Avoid using MCRegisterClass::getSize, since that function will go away
1931 // (move from MC* level to Target* level). Return size in bits.
1932 unsigned getRegBitWidth(unsigned RCID) {
1933  switch (RCID) {
1934  case AMDGPU::VGPR_LO16RegClassID:
1935  case AMDGPU::VGPR_HI16RegClassID:
1936  case AMDGPU::SGPR_LO16RegClassID:
1937  case AMDGPU::AGPR_LO16RegClassID:
1938  return 16;
1939  case AMDGPU::SGPR_32RegClassID:
1940  case AMDGPU::VGPR_32RegClassID:
1941  case AMDGPU::VRegOrLds_32RegClassID:
1942  case AMDGPU::AGPR_32RegClassID:
1943  case AMDGPU::VS_32RegClassID:
1944  case AMDGPU::AV_32RegClassID:
1945  case AMDGPU::SReg_32RegClassID:
1946  case AMDGPU::SReg_32_XM0RegClassID:
1947  case AMDGPU::SRegOrLds_32RegClassID:
1948  return 32;
1949  case AMDGPU::SGPR_64RegClassID:
1950  case AMDGPU::VS_64RegClassID:
1951  case AMDGPU::SReg_64RegClassID:
1952  case AMDGPU::VReg_64RegClassID:
1953  case AMDGPU::AReg_64RegClassID:
1954  case AMDGPU::SReg_64_XEXECRegClassID:
1955  case AMDGPU::VReg_64_Align2RegClassID:
1956  case AMDGPU::AReg_64_Align2RegClassID:
1957  case AMDGPU::AV_64RegClassID:
1958  case AMDGPU::AV_64_Align2RegClassID:
1959  return 64;
1960  case AMDGPU::SGPR_96RegClassID:
1961  case AMDGPU::SReg_96RegClassID:
1962  case AMDGPU::VReg_96RegClassID:
1963  case AMDGPU::AReg_96RegClassID:
1964  case AMDGPU::VReg_96_Align2RegClassID:
1965  case AMDGPU::AReg_96_Align2RegClassID:
1966  case AMDGPU::AV_96RegClassID:
1967  case AMDGPU::AV_96_Align2RegClassID:
1968  return 96;
1969  case AMDGPU::SGPR_128RegClassID:
1970  case AMDGPU::SReg_128RegClassID:
1971  case AMDGPU::VReg_128RegClassID:
1972  case AMDGPU::AReg_128RegClassID:
1973  case AMDGPU::VReg_128_Align2RegClassID:
1974  case AMDGPU::AReg_128_Align2RegClassID:
1975  case AMDGPU::AV_128RegClassID:
1976  case AMDGPU::AV_128_Align2RegClassID:
1977  return 128;
1978  case AMDGPU::SGPR_160RegClassID:
1979  case AMDGPU::SReg_160RegClassID:
1980  case AMDGPU::VReg_160RegClassID:
1981  case AMDGPU::AReg_160RegClassID:
1982  case AMDGPU::VReg_160_Align2RegClassID:
1983  case AMDGPU::AReg_160_Align2RegClassID:
1984  case AMDGPU::AV_160RegClassID:
1985  case AMDGPU::AV_160_Align2RegClassID:
1986  return 160;
1987  case AMDGPU::SGPR_192RegClassID:
1988  case AMDGPU::SReg_192RegClassID:
1989  case AMDGPU::VReg_192RegClassID:
1990  case AMDGPU::AReg_192RegClassID:
1991  case AMDGPU::VReg_192_Align2RegClassID:
1992  case AMDGPU::AReg_192_Align2RegClassID:
1993  case AMDGPU::AV_192RegClassID:
1994  case AMDGPU::AV_192_Align2RegClassID:
1995  return 192;
1996  case AMDGPU::SGPR_224RegClassID:
1997  case AMDGPU::SReg_224RegClassID:
1998  case AMDGPU::VReg_224RegClassID:
1999  case AMDGPU::AReg_224RegClassID:
2000  case AMDGPU::VReg_224_Align2RegClassID:
2001  case AMDGPU::AReg_224_Align2RegClassID:
2002  case AMDGPU::AV_224RegClassID:
2003  case AMDGPU::AV_224_Align2RegClassID:
2004  return 224;
2005  case AMDGPU::SGPR_256RegClassID:
2006  case AMDGPU::SReg_256RegClassID:
2007  case AMDGPU::VReg_256RegClassID:
2008  case AMDGPU::AReg_256RegClassID:
2009  case AMDGPU::VReg_256_Align2RegClassID:
2010  case AMDGPU::AReg_256_Align2RegClassID:
2011  case AMDGPU::AV_256RegClassID:
2012  case AMDGPU::AV_256_Align2RegClassID:
2013  return 256;
2014  case AMDGPU::SGPR_512RegClassID:
2015  case AMDGPU::SReg_512RegClassID:
2016  case AMDGPU::VReg_512RegClassID:
2017  case AMDGPU::AReg_512RegClassID:
2018  case AMDGPU::VReg_512_Align2RegClassID:
2019  case AMDGPU::AReg_512_Align2RegClassID:
2020  case AMDGPU::AV_512RegClassID:
2021  case AMDGPU::AV_512_Align2RegClassID:
2022  return 512;
2023  case AMDGPU::SGPR_1024RegClassID:
2024  case AMDGPU::SReg_1024RegClassID:
2025  case AMDGPU::VReg_1024RegClassID:
2026  case AMDGPU::AReg_1024RegClassID:
2027  case AMDGPU::VReg_1024_Align2RegClassID:
2028  case AMDGPU::AReg_1024_Align2RegClassID:
2029  case AMDGPU::AV_1024RegClassID:
2030  case AMDGPU::AV_1024_Align2RegClassID:
2031  return 1024;
2032  default:
2033  llvm_unreachable("Unexpected register class");
2034  }
2035 }
2036 
2037 unsigned getRegBitWidth(const MCRegisterClass &RC) {
2038  return getRegBitWidth(RC.getID());
2039 }
2040 
2041 unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
2042  unsigned OpNo) {
2043  assert(OpNo < Desc.NumOperands);
2044  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
2045  return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
2046 }
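// --- Illustrative usage sketch (not part of the original source) ---
// getRegOperandSize is simply getRegBitWidth of the operand's register class
// divided by 8, so a VReg_128-constrained operand reports 16 bytes:
//
//   getRegBitWidth(AMDGPU::VReg_128RegClassID);   // 128 (bits)
//   getRegOperandSize(MRI, Desc, OpNo);           // 16 (bytes) for such an operand
//
// MRI, Desc and OpNo stand for a caller's MCRegisterInfo, instruction
// descriptor and operand index; they are placeholders, not values defined here.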
2047 
2048 bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
2049  if (isInlinableIntLiteral(Literal))
2050  return true;
2051 
2052  uint64_t Val = static_cast<uint64_t>(Literal);
2053  return (Val == DoubleToBits(0.0)) ||
2054  (Val == DoubleToBits(1.0)) ||
2055  (Val == DoubleToBits(-1.0)) ||
2056  (Val == DoubleToBits(0.5)) ||
2057  (Val == DoubleToBits(-0.5)) ||
2058  (Val == DoubleToBits(2.0)) ||
2059  (Val == DoubleToBits(-2.0)) ||
2060  (Val == DoubleToBits(4.0)) ||
2061  (Val == DoubleToBits(-4.0)) ||
2062  (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
2063 }
2064 
2065 bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
2066  if (isInlinableIntLiteral(Literal))
2067  return true;
2068 
2069  // The actual type of the operand does not seem to matter as long
2070  // as the bits match one of the inline immediate values. For example:
2071  //
2072  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
2073  // so it is a legal inline immediate.
2074  //
2075  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
2076  // floating-point, so it is a legal inline immediate.
2077 
2078  uint32_t Val = static_cast<uint32_t>(Literal);
2079  return (Val == FloatToBits(0.0f)) ||
2080  (Val == FloatToBits(1.0f)) ||
2081  (Val == FloatToBits(-1.0f)) ||
2082  (Val == FloatToBits(0.5f)) ||
2083  (Val == FloatToBits(-0.5f)) ||
2084  (Val == FloatToBits(2.0f)) ||
2085  (Val == FloatToBits(-2.0f)) ||
2086  (Val == FloatToBits(4.0f)) ||
2087  (Val == FloatToBits(-4.0f)) ||
2088  (Val == 0x3e22f983 && HasInv2Pi);
2089 }
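// --- Illustrative examples (not part of the original source) ---
// Some bit patterns and how isInlinableLiteral32 classifies them:
//
//   isInlinableLiteral32(-2, true);          // true: integer in [-16, 64]
//   isInlinableLiteral32(0x3f800000, true);  // true: bit pattern of 1.0f
//   isInlinableLiteral32(0x3e22f983, true);  // true only when HasInv2Pi (1/2pi)
//   isInlinableLiteral32(0x40490fdb, true);  // false: pi is not an inline constant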
2090 
2091 bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
2092  if (!HasInv2Pi)
2093  return false;
2094 
2095  if (isInlinableIntLiteral(Literal))
2096  return true;
2097 
2098  uint16_t Val = static_cast<uint16_t>(Literal);
2099  return Val == 0x3C00 || // 1.0
2100  Val == 0xBC00 || // -1.0
2101  Val == 0x3800 || // 0.5
2102  Val == 0xB800 || // -0.5
2103  Val == 0x4000 || // 2.0
2104  Val == 0xC000 || // -2.0
2105  Val == 0x4400 || // 4.0
2106  Val == 0xC400 || // -4.0
2107  Val == 0x3118; // 1/2pi
2108 }
2109 
2110 bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
2111  assert(HasInv2Pi);
2112 
2113  if (isInt<16>(Literal) || isUInt<16>(Literal)) {
2114  int16_t Trunc = static_cast<int16_t>(Literal);
2115  return AMDGPU::isInlinableLiteral16(Trunc, HasInv2Pi);
2116  }
2117  if (!(Literal & 0xffff))
2118  return AMDGPU::isInlinableLiteral16(Literal >> 16, HasInv2Pi);
2119 
2120  int16_t Lo16 = static_cast<int16_t>(Literal);
2121  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
2122  return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
2123 }
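// --- Illustrative examples (not part of the original source) ---
// isInlinableLiteralV216 views the 32-bit literal as two packed 16-bit halves:
//
//   isInlinableLiteralV216(0x3C00, true);      // true: fits in 16 bits, 1.0 (fp16)
//   isInlinableLiteralV216(0x3C003C00, true);  // true: both halves are 1.0 (fp16)
//   isInlinableLiteralV216(0x40003C00, true);  // false: halves differ (2.0 vs 1.0)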
2124 
2125 bool isInlinableIntLiteralV216(int32_t Literal) {
2126  int16_t Lo16 = static_cast<int16_t>(Literal);
2127  if (isInt<16>(Literal) || isUInt<16>(Literal))
2128  return isInlinableIntLiteral(Lo16);
2129 
2130  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
2131  if (!(Literal & 0xffff))
2132  return isInlinableIntLiteral(Hi16);
2133  return Lo16 == Hi16 && isInlinableIntLiteral(Lo16);
2134 }
2135 
2136 bool isFoldableLiteralV216(int32_t Literal, bool HasInv2Pi) {
2137  assert(HasInv2Pi);
2138 
2139  int16_t Lo16 = static_cast<int16_t>(Literal);
2140  if (isInt<16>(Literal) || isUInt<16>(Literal))
2141  return true;
2142 
2143  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
2144  if (!(Literal & 0xffff))
2145  return true;
2146  return Lo16 == Hi16;
2147 }
2148 
2149 bool isArgPassedInSGPR(const Argument *A) {
2150  const Function *F = A->getParent();
2151 
2152  // Arguments to compute shaders are never a source of divergence.
2153  CallingConv::ID CC = F->getCallingConv();
2154  switch (CC) {
2155  case CallingConv::AMDGPU_KERNEL:
2156  case CallingConv::SPIR_KERNEL:
2157  return true;
2158  case CallingConv::AMDGPU_VS:
2159  case CallingConv::AMDGPU_LS:
2160  case CallingConv::AMDGPU_HS:
2161  case CallingConv::AMDGPU_ES:
2162  case CallingConv::AMDGPU_GS:
2163  case CallingConv::AMDGPU_PS:
2164  case CallingConv::AMDGPU_CS:
2165  case CallingConv::AMDGPU_Gfx:
2166  // For non-compute shaders, SGPR inputs are marked with either inreg or byval.
2167  // Everything else is in VGPRs.
2168  return F->getAttributes().hasParamAttr(A->getArgNo(), Attribute::InReg) ||
2169  F->getAttributes().hasParamAttr(A->getArgNo(), Attribute::ByVal);
2170  default:
2171  // TODO: Should calls support inreg for SGPR inputs?
2172  return false;
2173  }
2174 }
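// --- Illustrative example (not part of the original source) ---
// Every argument of an amdgpu_kernel function is reported as SGPR-passed,
// while for the graphics calling conventions only 'inreg' (or 'byval')
// arguments are. For instance, in an amdgpu_gfx function taking
// (i32 inreg %s, i32 %v), isArgPassedInSGPR returns true for %s and false
// for %v.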
2175 
2176 static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) {
2177  return isGCN3Encoding(ST) || isGFX10Plus(ST);
2178 }
2179 
2180 static bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST) {
2181  return isGFX9Plus(ST);
2182 }
2183 
2184 bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST,
2185  int64_t EncodedOffset) {
2186  return hasSMEMByteOffset(ST) ? isUInt<20>(EncodedOffset)
2187  : isUInt<8>(EncodedOffset);
2188 }
2189 
2190 bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST,
2191  int64_t EncodedOffset,
2192  bool IsBuffer) {
2193  return !IsBuffer &&
2194  hasSMRDSignedImmOffset(ST) &&
2195  isInt<21>(EncodedOffset);
2196 }
2197 
2198 static bool isDwordAligned(uint64_t ByteOffset) {
2199  return (ByteOffset & 3) == 0;
2200 }
2201 
2202 uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST,
2203  uint64_t ByteOffset) {
2204  if (hasSMEMByteOffset(ST))
2205  return ByteOffset;
2206 
2207  assert(isDwordAligned(ByteOffset));
2208  return ByteOffset >> 2;
2209 }
2210 
2211 Optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
2212  int64_t ByteOffset, bool IsBuffer) {
2213  // The signed version is always a byte offset.
2214  if (!IsBuffer && hasSMRDSignedImmOffset(ST)) {
2215  assert(hasSMEMByteOffset(ST));
2216  return isInt<20>(ByteOffset) ? Optional<int64_t>(ByteOffset) : None;
2217  }
2218 
2219  if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST))
2220  return None;
2221 
2222  int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
2223  return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset)
2224  ? Optional<int64_t>(EncodedOffset)
2225  : None;
2226 }
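// --- Illustrative example (not part of the original source) ---
// On subtargets that encode SMRD offsets in dwords (e.g. SI), a byte offset of
// 16 becomes an encoded offset of 4; on subtargets with byte offsets and signed
// immediates (e.g. GFX9) the same call returns 16 unchanged:
//
//   getSMRDEncodedOffset(ST, /*ByteOffset=*/16, /*IsBuffer=*/false);
//   // -> Optional<int64_t>(4) on SI, Optional<int64_t>(16) on GFX9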
2227 
2228 Optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
2229  int64_t ByteOffset) {
2230  if (!isCI(ST) || !isDwordAligned(ByteOffset))
2231  return None;
2232 
2233  int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
2234  return isUInt<32>(EncodedOffset) ? Optional<int64_t>(EncodedOffset) : None;
2235 }
2236 
2237 unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST, bool Signed) {
2238  // Address offset is 12-bit signed for GFX10, 13-bit for GFX9 and GFX11+.
2239  if (AMDGPU::isGFX10(ST))
2240  return Signed ? 12 : 11;
2241 
2242  return Signed ? 13 : 12;
2243 }
2244 
2245 // Given Imm, split it into the values to put into the SOffset and ImmOffset
2246 // fields in an MUBUF instruction. Return false if it is not possible (due to a
2247 // hardware bug needing a workaround).
2248 //
2249 // The required alignment ensures that individual address components remain
2250 // aligned if they are aligned to begin with. It also ensures that additional
2251 // offsets within the given alignment can be added to the resulting ImmOffset.
2252 bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset,
2253  const GCNSubtarget *Subtarget, Align Alignment) {
2254  const uint32_t MaxImm = alignDown(4095, Alignment.value());
2255  uint32_t Overflow = 0;
2256 
2257  if (Imm > MaxImm) {
2258  if (Imm <= MaxImm + 64) {
2259  // Use an SOffset inline constant for 4..64
2260  Overflow = Imm - MaxImm;
2261  Imm = MaxImm;
2262  } else {
2263  // Try to keep the same value in SOffset for adjacent loads, so that
2264  // the corresponding register contents can be re-used.
2265  //
2266  // Load values with all low-bits (except for alignment bits) set into
2267  // SOffset, so that a larger range of values can be covered using
2268  // s_movk_i32.
2269  //
2270  // Atomic operations fail to work correctly when individual address
2271  // components are unaligned, even if their sum is aligned.
2272  uint32_t High = (Imm + Alignment.value()) & ~4095;
2273  uint32_t Low = (Imm + Alignment.value()) & 4095;
2274  Imm = Low;
2275  Overflow = High - Alignment.value();
2276  }
2277  }
2278 
2279  // There is a hardware bug in SI and CI which prevents address clamping in
2280  // MUBUF instructions from working correctly with SOffsets. The immediate
2281  // offset is unaffected.
2282  if (Overflow > 0 &&
2283  Subtarget->getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS)
2284  return false;
2285 
2286  ImmOffset = Imm;
2287  SOffset = Overflow;
2288  return true;
2289 }
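// --- Worked example (not part of the original source) ---
// Splitting a 5000-byte offset with 4-byte alignment: MaxImm = alignDown(4095, 4)
// = 4092 and 5000 > 4092 + 64, so the else-branch computes
// High = (5000 + 4) & ~4095 = 4096 and Low = (5000 + 4) & 4095 = 908,
// giving ImmOffset = 908 and SOffset = 4096 - 4 = 4092 (908 + 4092 == 5000).
// On SI/CI this particular split is rejected because SOffset is non-zero.
//
//   uint32_t SOffset, ImmOffset;
//   splitMUBUFOffset(/*Imm=*/5000, SOffset, ImmOffset, Subtarget, Align(4));
//   // Subtarget stands for a caller's GCNSubtarget pointer.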
2290 
2291 SIModeRegisterDefaults::SIModeRegisterDefaults(const Function &F) {
2292  *this = getDefaultForCallingConv(F.getCallingConv());
2293 
2294  StringRef IEEEAttr = F.getFnAttribute("amdgpu-ieee").getValueAsString();
2295  if (!IEEEAttr.empty())
2296  IEEE = IEEEAttr == "true";
2297 
2298  StringRef DX10ClampAttr
2299  = F.getFnAttribute("amdgpu-dx10-clamp").getValueAsString();
2300  if (!DX10ClampAttr.empty())
2301  DX10Clamp = DX10ClampAttr == "true";
2302 
2303  StringRef DenormF32Attr = F.getFnAttribute("denormal-fp-math-f32").getValueAsString();
2304  if (!DenormF32Attr.empty()) {
2305  DenormalMode DenormMode = parseDenormalFPAttribute(DenormF32Attr);
2306  FP32InputDenormals = DenormMode.Input == DenormalMode::IEEE;
2307  FP32OutputDenormals = DenormMode.Output == DenormalMode::IEEE;
2308  }
2309 
2310  StringRef DenormAttr = F.getFnAttribute("denormal-fp-math").getValueAsString();
2311  if (!DenormAttr.empty()) {
2312  DenormalMode DenormMode = parseDenormalFPAttribute(DenormAttr);
2313 
2314  if (DenormF32Attr.empty()) {
2315  FP32InputDenormals = DenormMode.Input == DenormalMode::IEEE;
2316  FP32OutputDenormals = DenormMode.Output == DenormalMode::IEEE;
2317  }
2318 
2319  FP64FP16InputDenormals = DenormMode.Input == DenormalMode::IEEE;
2320  FP64FP16OutputDenormals = DenormMode.Output == DenormalMode::IEEE;
2321  }
2322 }
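// --- Illustrative example (not part of the original source) ---
// A function carrying the IR attributes "amdgpu-ieee"="false" and
// "denormal-fp-math"="preserve-sign,preserve-sign" (with no separate f32
// override) ends up with IEEE == false and all four denormal flags == false,
// because "preserve-sign" parses to a non-IEEE DenormalMode for both inputs
// and outputs.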
2323 
2324 namespace {
2325 
2326 struct SourceOfDivergence {
2327  unsigned Intr;
2328 };
2329 const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);
2330 
2331 #define GET_SourcesOfDivergence_IMPL
2332 #define GET_Gfx9BufferFormat_IMPL
2333 #define GET_Gfx10BufferFormat_IMPL
2334 #define GET_Gfx11PlusBufferFormat_IMPL
2335 #include "AMDGPUGenSearchableTables.inc"
2336 
2337 } // end anonymous namespace
2338 
2339 bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
2340  return lookupSourceOfDivergence(IntrID);
2341 }
2342 
2343 const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t BitsPerComp,
2344  uint8_t NumComponents,
2345  uint8_t NumFormat,
2346  const MCSubtargetInfo &STI) {
2347  return isGFX11Plus(STI)
2348  ? getGfx11PlusBufferFormatInfo(BitsPerComp, NumComponents,
2349  NumFormat)
2350  : isGFX10(STI) ? getGfx10BufferFormatInfo(BitsPerComp,
2351  NumComponents, NumFormat)
2352  : getGfx9BufferFormatInfo(BitsPerComp,
2353  NumComponents, NumFormat);
2354 }
2355 
2356 const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t Format,
2357  const MCSubtargetInfo &STI) {
2358  return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(Format)
2359  : isGFX10(STI) ? getGfx10BufferFormatInfo(Format)
2360  : getGfx9BufferFormatInfo(Format);
2361 }
2362 
2363 } // namespace AMDGPU
2364 
2365 raw_ostream &operator<<(raw_ostream &OS,
2366  const AMDGPU::IsaInfo::TargetIDSetting S) {
2367  switch (S) {
2368  case (AMDGPU::IsaInfo::TargetIDSetting::Unsupported):
2369  OS << "Unsupported";
2370  break;
2371  case (AMDGPU::IsaInfo::TargetIDSetting::Any):
2372  OS << "Any";
2373  break;
2374  case (AMDGPU::IsaInfo::TargetIDSetting::Off):
2375  OS << "Off";
2376  break;
2377  case (AMDGPU::IsaInfo::TargetIDSetting::On):
2378  OS << "On";
2379  break;
2380  }
2381  return OS;
2382 }
2383 
2384 } // namespace llvm
llvm::AMDGPU::OPERAND_REG_INLINE_C_FP64
@ OPERAND_REG_INLINE_C_FP64
Definition: SIDefines.h:170
llvm::AMDGPU::MTBUFFormat::isValidUnifiedFormat
bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1449
llvm::AMDGPU::Hwreg::encodeHwreg
uint64_t encodeHwreg(uint64_t Id, uint64_t Offset, uint64_t Width)
Definition: AMDGPUBaseInfo.cpp:1273
i
i
Definition: README.txt:29
llvm::AMDGPU::MUBUFInfo::elements
uint8_t elements
Definition: AMDGPUBaseInfo.cpp:250
llvm::AMDGPU::Hwreg::OPR_SIZE
const int OPR_SIZE
Definition: AMDGPUAsmUtils.cpp:134
llvm::AMDGPU::OPR_ID_UNSUPPORTED
const int OPR_ID_UNSUPPORTED
Definition: AMDGPUAsmUtils.h:24
llvm::AMDGPU::getMUBUFIsBufferInv
bool getMUBUFIsBufferInv(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:358
llvm::alignTo
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:156
llvm::AMDGPU::getMCReg
unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI)
If Reg is a pseudo reg, return the correct hardware register given STI otherwise return Reg.
Definition: AMDGPUBaseInfo.cpp:1861
llvm::AMDGPU::isHsaAbiVersion3
bool isHsaAbiVersion3(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:130
llvm::AMDGPU::UfmtGFX10::UFMT_FIRST
@ UFMT_FIRST
Definition: SIDefines.h:628
llvm::Argument
This class represents an incoming formal argument to a Function.
Definition: Argument.h:28
llvm::AMDGPU::IsaInfo::AMDGPUTargetID::AMDGPUTargetID
AMDGPUTargetID(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:406
llvm::AMDGPU::mc2PseudoReg
unsigned mc2PseudoReg(unsigned Reg)
Convert hardware register Reg to a pseudo register.
Definition: AMDGPUBaseInfo.cpp:1877
llvm::AMDGPU::VOPInfo::IsSingle
bool IsSingle
Definition: AMDGPUBaseInfo.cpp:273
Signed
@ Signed
Definition: NVPTXISelLowering.cpp:4637
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:17
llvm::AMDGPU::getMAIIsDGEMM
bool getMAIIsDGEMM(unsigned Opc)
Returns true if MAI operation is a double precision GEMM.
Definition: AMDGPUBaseInfo.cpp:387
llvm::AMDGPU::IsaInfo::getSGPRAllocGranule
unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:676
llvm::AMDGPU::getMUBUFHasSoffset
bool getMUBUFHasSoffset(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:353
llvm::LLVMContext::emitError
void emitError(uint64_t LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
Definition: LLVMContext.cpp:266
llvm::AMDGPU::MTBUFFormat::NumFormat
NumFormat
Definition: SIDefines.h:494
llvm::CallingConv::AMDGPU_HS
@ AMDGPU_HS
Calling convention used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
Definition: CallingConv.h:223
llvm::AMDGPU::Hwreg::getHwreg
StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1279
llvm::AMDGPU::MTBUFFormat::getDefaultFormatEncoding
unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1474
llvm::AMDGPU::hasVOPD
bool hasVOPD(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1786
llvm::AMDGPU::MIMGBaseOpcodeInfo::Gradients
bool Gradients
Definition: AMDGPUBaseInfo.h:309
llvm::AMDGPU::HSAMD::Kernel::CodeProps::Key::NumSGPRs
constexpr char NumSGPRs[]
Key for Kernel::CodeProps::Metadata::mNumSGPRs.
Definition: AMDGPUMetadata.h:258
llvm::AMDGPU::Hwreg::getHwregId
int64_t getHwregId(const StringRef Name, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1256
llvm::AMDGPU::SendMsg::msgRequiresOp
bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1564
llvm::CallingConv::AMDGPU_VS
@ AMDGPU_VS
Calling convention used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (ve...
Definition: CallingConv.h:204
llvm::AMDGPU::MIMGBaseOpcodeInfo::LodOrClampOrMip
bool LodOrClampOrMip
Definition: AMDGPUBaseInfo.h:312
llvm::AMDGPU::MTBUFFormat::getDfmtName
StringRef getDfmtName(unsigned Id)
Definition: AMDGPUBaseInfo.cpp:1381
llvm::AMDGPU::SendMsg::encodeMsg
uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId)
Definition: AMDGPUBaseInfo.cpp:1589
llvm::AMDGPU::isGFX11Plus
bool isGFX11Plus(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1738
llvm::AMDGPU::getIsaVersion
IsaVersion getIsaVersion(StringRef GPU)
Definition: TargetParser.cpp:193
llvm::AMDGPU::decodeLgkmcnt
unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt)
Definition: AMDGPUBaseInfo.cpp:1026
llvm::AMDGPU::Hwreg::ID_SHIFT_
@ ID_SHIFT_
Definition: SIDefines.h:408
llvm::StringRef::endswith
LLVM_NODISCARD bool endswith(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition: StringRef.h:301
llvm::AMDGPU::SendMsg::OpGsSymbolic
const char *const OpGsSymbolic[OP_GS_LAST_]
Definition: AMDGPUAsmUtils.cpp:77
llvm::Function
Definition: Function.h:60
llvm::AMDGPU::getMUBUFBaseOpcode
int getMUBUFBaseOpcode(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:328
llvm::Attribute
Definition: Attributes.h:65
llvm::AMDGPU::SIModeRegisterDefaults::FP32OutputDenormals
bool FP32OutputDenormals
Definition: AMDGPUBaseInfo.h:1001
llvm::AMDGPU::getSMRDEncodedOffset
Optional< int64_t > getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset, bool IsBuffer)
Definition: AMDGPUBaseInfo.cpp:2211
llvm::AMDGPU::MTBUFFormat::DFMT_MASK
@ DFMT_MASK
Definition: SIDefines.h:491
llvm::AMDGPU::IsaInfo::AMDGPUTargetID::isSramEccOnOrAny
bool isSramEccOnOrAny() const
Definition: AMDGPUBaseInfo.h:150
llvm::AMDGPU::getMCOpcode
int getMCOpcode(uint16_t Opcode, unsigned Gen)
Definition: AMDGPUBaseInfo.cpp:400
llvm::AMDGPU::decodeVmcnt
unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt)
Definition: AMDGPUBaseInfo.cpp:1013
llvm::raw_string_ostream
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:632
llvm::AMDGPU::hasSRAMECC
bool hasSRAMECC(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1673
llvm::AMDGPU::SIModeRegisterDefaults::IEEE
bool IEEE
Floating point opcodes that support exception flag gathering quiet and propagate signaling NaN inputs...
Definition: AMDGPUBaseInfo.h:992
llvm::AMDGPU::hasXNACK
bool hasXNACK(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1669
llvm::AMDGPU::IsaInfo::TargetIDSetting::Unsupported
@ Unsupported
llvm::AMDGPU::OPERAND_REG_IMM_V2FP16
@ OPERAND_REG_IMM_V2FP16
Definition: SIDefines.h:159
High
uint64_t High
Definition: NVVMIntrRange.cpp:61
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1185
llvm::AMDGPU::IsaInfo::TargetIDSetting::On
@ On
llvm::AMDGPU::isGFX10_BEncoding
bool isGFX10_BEncoding(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1762
llvm::Triple::amdgcn
@ amdgcn
Definition: Triple.h:74
llvm::AMDGPU::CustomOperand::Name
StringLiteral Name
Definition: AMDGPUAsmUtils.h:29
llvm::AMDGPU::isGFX10_AEncoding
bool isGFX10_AEncoding(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1758
amd_kernel_code_t::compute_pgm_resource_registers
uint64_t compute_pgm_resource_registers
Shader program settings for CS.
Definition: AMDKernelCodeT.h:558
llvm::AMDGPU::getVOP2IsSingle
bool getVOP2IsSingle(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:373
llvm::AMDGPU::OPR_VAL_INVALID
const int OPR_VAL_INVALID
Definition: AMDGPUAsmUtils.h:26
llvm::AMDGPU::MIMGDimInfo
Definition: AMDGPUBaseInfo.h:324
llvm::X86Disassembler::Reg
Reg
All possible values of the reg field in the ModR/M byte.
Definition: X86DisassemblerDecoder.h:462
llvm::AMDGPU::hasArchitectedFlatScratch
bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1778
llvm::AMDGPU::MTBUFFormat::encodeDfmtNfmt
int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt)
Definition: AMDGPUBaseInfo.cpp:1419
llvm::AMDGPU::getHsaAbiVersion
Optional< uint8_t > getHsaAbiVersion(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:105
llvm::AMDGPU::MTBUFFormat::NFMT_UNDEF
@ NFMT_UNDEF
Definition: SIDefines.h:508
llvm::AMDGPUSubtarget::SEA_ISLANDS
@ SEA_ISLANDS
Definition: AMDGPUSubtarget.h:38
llvm::AMDGPU::Exp::ET_NULL
@ ET_NULL
Definition: SIDefines.h:859
llvm::AMDGPU::SendMsg::STREAM_ID_MASK_
@ STREAM_ID_MASK_
Definition: SIDefines.h:375
llvm::Triple
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
llvm::AMDGPU::SendMsg::OpSysSymbolic
const char *const OpSysSymbolic[OP_SYS_LAST_]
Definition: AMDGPUAsmUtils.cpp:69
llvm::AMDGPU::getSMRDEncodedLiteralOffset32
Optional< int64_t > getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST, int64_t ByteOffset)
Definition: AMDGPUBaseInfo.cpp:2228
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:139
llvm::AMDGPU::getAmdhsaCodeObjectVersion
unsigned getAmdhsaCodeObjectVersion()
Definition: AMDGPUBaseInfo.cpp:153
llvm::AMDGPU::MTBUFInfo::has_vaddr
bool has_vaddr
Definition: AMDGPUBaseInfo.cpp:261
llvm::AMDGPU::SendMsg::OP_GS_LAST_
@ OP_GS_LAST_
Definition: SIDefines.h:357
llvm::AMDGPU::MTBUFFormat::isValidNfmt
bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1415
llvm::AMDGPU::DepCtr::encodeDepCtr
int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1242
llvm::AMDGPU::isGFX11
bool isGFX11(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1734
Shift
bool Shift
Definition: README.txt:468
llvm::AMDGPU::Hwreg::ID_MASK_
@ ID_MASK_
Definition: SIDefines.h:410
llvm::AMDGPU::Exp::ET_POS0
@ ET_POS0
Definition: SIDefines.h:860
llvm::AMDGPU::CustomOperand::Encoding
int Encoding
Definition: AMDGPUAsmUtils.h:30
llvm::AMDGPU::MTBUFFormat::DFMT_NFMT_MAX
@ DFMT_NFMT_MAX
Definition: SIDefines.h:523
llvm::FloatToBits
uint32_t FloatToBits(float Float)
This function takes a float and returns the bit equivalent 32-bit integer.
Definition: MathExtras.h:690
llvm::AMDGPU::IsaInfo::getNumExtraSGPRs
unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed, bool FlatScrUsed, bool XNACKUsed)
Definition: AMDGPUBaseInfo.cpp:742
llvm::AMDGPU::isGFX10
bool isGFX10(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1726
llvm::AMDGPU::IsaInfo::getTotalNumVGPRs
unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:806
LimitTo128VGPRs
static llvm::cl::opt< bool > LimitTo128VGPRs("amdgpu-limit-to-128-vgprs", llvm::cl::Hidden, llvm::cl::desc("Never use more than 128 VGPRs"))
llvm::AMDGPU::IsaInfo::AMDGPUTargetID::setTargetIDFromTargetIDStream
void setTargetIDFromTargetIDStream(StringRef TargetID)
Definition: AMDGPUBaseInfo.cpp:483
llvm::AMDGPU::IsaInfo::getMinWavesPerEU
unsigned getMinWavesPerEU(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:643
llvm::Optional< uint8_t >
llvm::AMDGPU::SIModeRegisterDefaults::getDefaultForCallingConv
static SIModeRegisterDefaults getDefaultForCallingConv(CallingConv::ID CC)
Definition: AMDGPUBaseInfo.h:1018
llvm::AMDGPU::MUBUFInfo::has_soffset
bool has_soffset
Definition: AMDGPUBaseInfo.cpp:253
llvm::MCRegisterClass::contains
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
Definition: MCRegisterInfo.h:68
llvm::AMDGPU::SIModeRegisterDefaults::FP32InputDenormals
bool FP32InputDenormals
If this is set, neither input or output denormals are flushed for most f32 instructions.
Definition: AMDGPUBaseInfo.h:1000
llvm::AMDGPU::IsaInfo::TargetIDSetting::Any
@ Any
llvm::GCNSubtarget
Definition: GCNSubtarget.h:31
AMDGPUAsmUtils.h
llvm::max
Expected< ExpressionValue > max(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:337
llvm::errs
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
Definition: raw_ostream.cpp:893
llvm::AMDGPU::getVmcntBitMask
unsigned getVmcntBitMask(const IsaVersion &Version)
Definition: AMDGPUBaseInfo.cpp:987
llvm::AMDGPU::isGlobalSegment
bool isGlobalSegment(const GlobalValue *GV)
Definition: AMDGPUBaseInfo.cpp:933
llvm::SPIRV::Dim
Dim
Definition: SPIRVBaseInfo.h:279
llvm::AMDGPU::hasGFX10_3Insts
bool hasGFX10_3Insts(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1766
TargetParser.h
llvm::AMDGPU::IsaInfo::getMaxNumVGPRs
unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU)
Definition: AMDGPUBaseInfo.cpp:840
llvm::AMDGPU::Exp::ET_PARAM0
@ ET_PARAM0
Definition: SIDefines.h:867
llvm::AMDGPU::getWaitcntBitMask
unsigned getWaitcntBitMask(const IsaVersion &Version)
Definition: AMDGPUBaseInfo.cpp:1001
llvm::AMDGPU::isIntrinsicSourceOfDivergence
bool isIntrinsicSourceOfDivergence(unsigned IntrID)
Definition: AMDGPUBaseInfo.cpp:2339
llvm::amdhsa::kernel_descriptor_t::compute_pgm_rsrc2
uint32_t compute_pgm_rsrc2
Definition: AMDHSAKernelDescriptor.h:178
llvm::AMDGPU::SendMsg::MSG_SIZE
const int MSG_SIZE
Definition: AMDGPUAsmUtils.cpp:65
llvm::CallingConv::AMDGPU_Gfx
@ AMDGPU_Gfx
Calling convention used for AMD graphics targets.
Definition: CallingConv.h:250
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1628
llvm::AMDGPU::getMTBUFOpcode
int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements)
Definition: AMDGPUBaseInfo.cpp:303
llvm::AMDGPU::MTBUFFormat::getNfmtLookupTable
static const StringLiteral * getNfmtLookupTable(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1386
AmdhsaCodeObjectVersion
static llvm::cl::opt< unsigned > AmdhsaCodeObjectVersion("amdhsa-code-object-version", llvm::cl::Hidden, llvm::cl::desc("AMDHSA Code Object Version"), llvm::cl::init(4))
llvm::AMDGPU::MUBUFInfo::Opcode
uint16_t Opcode
Definition: AMDGPUBaseInfo.cpp:248
llvm::AMDGPU::MIMGInfo::VAddrDwords
uint8_t VAddrDwords
Definition: AMDGPUBaseInfo.h:400
llvm::AMDGPU::IsaInfo::getMaxWorkGroupsPerCU
unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
Definition: AMDGPUBaseInfo.cpp:631
F
#define F(x, y, z)
Definition: MD5.cpp:55
llvm::AMDGPU::MTBUFFormat::isValidDfmtNfmt
bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1408
llvm::AMDGPU::SendMsg::ID_MASK_PreGFX11_
@ ID_MASK_PreGFX11_
Definition: SIDefines.h:341
llvm::AMDGPU::DepCtr::decodeDepCtr
bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1236
AMDHSAKernelDescriptor.h
llvm::AMDGPU::HSAMD::V3::VersionMajor
constexpr uint32_t VersionMajor
HSA metadata major version.
Definition: AMDGPUMetadata.h:459
llvm::AMDGPU::IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG
@ FIXED_NUM_SGPRS_FOR_INIT_BUG
Definition: AMDGPUBaseInfo.h:94
llvm::MCRegisterClass
MCRegisterClass - Base class of TargetRegisterClass.
Definition: MCRegisterInfo.h:31
llvm::AMDGPUAS::CONSTANT_ADDRESS_32BIT
@ CONSTANT_ADDRESS_32BIT
Address space for 32-bit constant memory.
Definition: AMDGPU.h:366
llvm::AMDGPU::getDefaultCustomOperandEncoding
static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr, int Size, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1140
llvm::AMDGPU::IsaInfo::TargetIDSetting
TargetIDSetting
Definition: AMDGPUBaseInfo.h:98
llvm::parseDenormalFPAttribute
DenormalMode parseDenormalFPAttribute(StringRef Str)
Returns the denormal mode to use for inputs and outputs.
Definition: FloatingPointMode.h:176
Context
LLVMContext & Context
Definition: NVVMIntrRange.cpp:66
llvm::AMDGPU::Hwreg::decodeHwreg
void decodeHwreg(unsigned Val, unsigned &Id, unsigned &Offset, unsigned &Width)
Definition: AMDGPUBaseInfo.cpp:1284
llvm::AMDGPU::IsaVersion
Instruction set architecture version.
Definition: TargetParser.h:113
llvm::BitmaskEnumDetail::Mask
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
llvm::AMDGPU::IsaInfo::getSGPREncodingGranule
unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:685
CommandLine.h
llvm::AMDGPU::MTBUFFormat::NfmtSymbolicVI
const StringLiteral NfmtSymbolicVI[]
Definition: AMDGPUAsmUtils.cpp:182
llvm::AMDGPU::isGFX90A
bool isGFX90A(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1770
llvm::AMDGPU::OPR_ID_DUPLICATE
const int OPR_ID_DUPLICATE
Definition: AMDGPUAsmUtils.h:25
llvm::AMDGPU::SendMsg::OP_GS_FIRST_
@ OP_GS_FIRST_
Definition: SIDefines.h:358
llvm::AMDGPU::OPERAND_REG_IMM_FP32
@ OPERAND_REG_IMM_FP32
Definition: SIDefines.h:154
llvm::AMDGPU::SendMsg::msgSupportsStream
bool msgSupportsStream(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1570
llvm::AMDGPU::MTBUFFormat::UfmtSymbolicGFX11
const StringLiteral UfmtSymbolicGFX11[]
Definition: AMDGPUAsmUtils.cpp:381
llvm::AMDGPU::MUBUFInfo::has_srsrc
bool has_srsrc
Definition: AMDGPUBaseInfo.cpp:252
llvm::StringLiteral
A wrapper around a string literal that serves as a proxy for constructing global tables of StringRefs...
Definition: StringRef.h:914
llvm::AMDGPU::getMTBUFHasSrsrc
bool getMTBUFHasSrsrc(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:318
GlobalValue.h
ELF.h
llvm::DenormalMode::Input
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
Definition: FloatingPointMode.h:92
llvm::AMDGPU::isShader
bool isShader(CallingConv::ID cc)
Definition: AMDGPUBaseInfo.cpp:1616
llvm::PGSOQueryType::Test
@ Test
llvm::AMDGPU::IsaInfo::getWavesPerWorkGroup
unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
Definition: AMDGPUBaseInfo.cpp:671
llvm::ARM::InvalidIdx
@ InvalidIdx
Definition: ARMRegisterBankInfo.cpp:69
llvm::AMDGPU::MTBUFInfo::has_soffset
bool has_soffset
Definition: AMDGPUBaseInfo.cpp:263
amd_kernel_code_t::amd_kernel_code_version_major
uint32_t amd_kernel_code_version_major
Definition: AMDKernelCodeT.h:527
llvm::AMDGPU::Exp::ET_INVALID
@ ET_INVALID
Definition: SIDefines.h:878
llvm::AMDGPU::Exp::ET_MRTZ_MAX_IDX
@ ET_MRTZ_MAX_IDX
Definition: SIDefines.h:871
GCNSubtarget.h
llvm::AMDGPU::hasSMEMByteOffset
static bool hasSMEMByteOffset(const MCSubtargetInfo &ST)
Definition: AMDGPUBaseInfo.cpp:2176
f
Itanium Name Demangler i e convert the string _Z1fv into f()". You can also use the CRTP base ManglingParser to perform some simple analysis on the mangled name
llvm::AMDGPU::getRegOperandSize
unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc, unsigned OpNo)
Get size of register operand.
Definition: AMDGPUBaseInfo.cpp:2041
llvm::AMDGPU::SendMsg::OP_SYS_LAST_
@ OP_SYS_LAST_
Definition: SIDefines.h:364
llvm::AMDGPU::getMIMGBaseOpcode
const MIMGBaseOpcodeInfo * getMIMGBaseOpcode(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:206
llvm::StringRef::split
LLVM_NODISCARD std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:753
llvm::AMDGPU::MIMGBaseOpcodeInfo
Definition: AMDGPUBaseInfo.h:300
llvm::AMDGPU::isInlinableIntLiteralV216
bool isInlinableIntLiteralV216(int32_t Literal)
Definition: AMDGPUBaseInfo.cpp:2125
Intr
unsigned Intr
Definition: AMDGPUBaseInfo.cpp:2327
llvm::AMDGPU::OPERAND_REG_INLINE_AC_FP16
@ OPERAND_REG_INLINE_AC_FP16
Definition: SIDefines.h:183
llvm::AMDGPU::MTBUFFormat::DfmtSymbolic
const StringLiteral DfmtSymbolic[]
Definition: AMDGPUAsmUtils.cpp:141
llvm::AMDGPU::getMUBUFOpcode
int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements)
Definition: AMDGPUBaseInfo.cpp:333
llvm::MCSubtargetInfo::getTargetTriple
const Triple & getTargetTriple() const
Definition: MCSubtargetInfo.h:108
llvm::AMDGPU::getSMEMIsBuffer
bool getSMEMIsBuffer(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:363
llvm::AMDGPU::isGFX940
bool isGFX940(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1774
llvm::AMDGPU::IsaInfo::getMaxNumSGPRs
unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, bool Addressable)
Definition: AMDGPUBaseInfo.cpp:725
llvm::AMDGPU::isSGPR
bool isSGPR(unsigned Reg, const MCRegisterInfo *TRI)
Is Reg - scalar register.
Definition: AMDGPUBaseInfo.cpp:1797
llvm::AMDGPU::hasMAIInsts
bool hasMAIInsts(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1782
llvm::AMDGPU::VOPC64DPPInfo
Definition: AMDGPUBaseInfo.cpp:276
llvm::AMDGPU::IsaInfo::getTargetIDSettingFromFeatureString
static TargetIDSetting getTargetIDSettingFromFeatureString(StringRef FeatureString)
Definition: AMDGPUBaseInfo.cpp:474
llvm::AMDGPU::SendMsg::STREAM_ID_LAST_
@ STREAM_ID_LAST_
Definition: SIDefines.h:371
llvm::SubtargetFeatures::getFeatures
const std::vector< std::string > & getFeatures() const
Returns the vector of individual subtarget features.
Definition: SubtargetFeature.h:196
llvm::DoubleToBits
uint64_t DoubleToBits(double Double)
This function takes a double and returns the bit equivalent 64-bit integer.
Definition: MathExtras.h:680
llvm::SubtargetFeatures
Manages the enabling and disabling of subtarget specific features.
Definition: SubtargetFeature.h:183
llvm::AMDGPU::SendMsg::ID_SYSMSG
@ ID_SYSMSG
Definition: SIDefines.h:332
llvm::AMDGPU::Hwreg::Id
Id
Definition: SIDefines.h:382
llvm::AMDGPU::IsaInfo::AMDGPUTargetID::isXnackOnOrAny
bool isXnackOnOrAny() const
Definition: AMDGPUBaseInfo.h:121
llvm::AMDGPU::decodeExpcnt
unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt)
Definition: AMDGPUBaseInfo.cpp:1021
llvm::AMDGPU::getMAIIsGFX940XDL
bool getMAIIsGFX940XDL(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:392
llvm::ELF::ELFABIVERSION_AMDGPU_HSA_V5
@ ELFABIVERSION_AMDGPU_HSA_V5
Definition: ELF.h:377
llvm::CallingConv::AMDGPU_ES
@ AMDGPU_ES
Calling convention used for AMDPAL shader stage before geometry shader if geometry is in use.
Definition: CallingConv.h:236
llvm::CallingConv::AMDGPU_GS
@ AMDGPU_GS
Calling convention used for Mesa/AMDPAL geometry shaders.
Definition: CallingConv.h:207
llvm::AMDGPU::MTBUFInfo::BaseOpcode
uint16_t BaseOpcode
Definition: AMDGPUBaseInfo.cpp:259
llvm::dwarf::Index
Index
Definition: Dwarf.h:472
llvm::alignDown
uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the largest uint64_t less than or equal to Value and is Skew mod Align.
Definition: MathExtras.h:787
llvm::MCInstrDesc
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:197
S_00B848_MEM_ORDERED
#define S_00B848_MEM_ORDERED(x)
Definition: SIDefines.h:1008
llvm::AMDGPU::hasGFX10A16
bool hasGFX10A16(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1681
MCSubtargetInfo.h
llvm::MCSubtargetInfo::getFeatureBits
const FeatureBitset & getFeatureBits() const
Definition: MCSubtargetInfo.h:112
llvm::AMDGPU::Exp::ET_PRIM
@ ET_PRIM
Definition: SIDefines.h:864
AMDGPU
Definition: AMDGPUReplaceLDSUseWithPointer.cpp:114
llvm::AMDGPUAS::LOCAL_ADDRESS
@ LOCAL_ADDRESS
Address space for local memory.
Definition: AMDGPU.h:363
llvm::AMDGPU::encodeCustomOperand
static int encodeCustomOperand(const CustomOperandVal *Opr, int Size, const StringRef Name, int64_t InputVal, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1195
llvm::AMDGPU::SendMsg::OP_GS_NOP
@ OP_GS_NOP
Definition: SIDefines.h:353
llvm::AMDGPU::OPERAND_REG_IMM_FP64
@ OPERAND_REG_IMM_FP64
Definition: SIDefines.h:155
llvm::AMDGPU::getMTBUFBaseOpcode
int getMTBUFBaseOpcode(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:298
llvm::AMDGPU::getInitialPSInputAddr
unsigned getInitialPSInputAddr(const Function &F)
Definition: AMDGPUBaseInfo.cpp:1601
llvm::AMDGPU::SendMsg::OP_SHIFT_
@ OP_SHIFT_
Definition: SIDefines.h:347
llvm::Triple::r600
@ r600
Definition: Triple.h:73
llvm::report_fatal_error
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:143
llvm::AMDGPU::Exp::ExpTgt::Name
StringLiteral Name
Definition: AMDGPUBaseInfo.cpp:1299
llvm::AMDGPU::IsaInfo::TRAP_NUM_SGPRS
@ TRAP_NUM_SGPRS
Definition: AMDGPUBaseInfo.h:95
llvm::raw_ostream
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:54
llvm::AMDGPU::getMTBUFHasSoffset
bool getMTBUFHasSoffset(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:323
llvm::raw_ostream::flush
void flush()
Definition: raw_ostream.h:187
llvm::operator<<
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
Definition: APFixedPoint.h:230
llvm::AMDGPU::MTBUFFormat::DFMT_UNDEF
@ DFMT_UNDEF
Definition: SIDefines.h:487
llvm::cl::Option::getNumOccurrences
int getNumOccurrences() const
Definition: CommandLine.h:395
llvm::ThreadPriority::Low
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
llvm::AMDGPU::decodeWaitcnt
void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt)
Decodes Vmcnt, Expcnt and Lgkmcnt from given Waitcnt for given isa Version, and writes decoded values...
Definition: AMDGPUBaseInfo.cpp:1031
amd_kernel_code_t::wavefront_size
uint8_t wavefront_size
Wavefront size expressed as a power of two.
Definition: AMDKernelCodeT.h:643
llvm::AMDGPU::MTBUFInfo
Definition: AMDGPUBaseInfo.cpp:257
llvm::AMDGPU::OPERAND_REG_IMM_V2FP32
@ OPERAND_REG_IMM_V2FP32
Definition: SIDefines.h:162
llvm::AMDGPU::IsaInfo::getMinFlatWorkGroupSize
unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:662
llvm::AMDGPU::IsaInfo::AMDGPUTargetID::getSramEccSetting
TargetIDSetting getSramEccSetting() const
Definition: AMDGPUBaseInfo.h:164
llvm::amdhsa::kernel_descriptor_t::kernel_code_properties
uint16_t kernel_code_properties
Definition: AMDHSAKernelDescriptor.h:179
llvm::IndexedInstrProf::Version
const uint64_t Version
Definition: InstrProf.h:1027
llvm::AMDGPU::getMUBUFHasSrsrc
bool getMUBUFHasSrsrc(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:348
Info
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
llvm::AMDGPU::convertSMRDOffsetUnits
uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST, uint64_t ByteOffset)
Convert ByteOffset to dwords if the subtarget uses dword SMRD immediate offsets.
Definition: AMDGPUBaseInfo.cpp:2202
llvm::AMDGPU::IsaInfo::getEUsPerCU
unsigned getEUsPerCU(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:620
llvm::AMDGPU::MIMGInfo::MIMGEncoding
uint8_t MIMGEncoding
Definition: AMDGPUBaseInfo.h:398
llvm::MCOperandInfo::RegClass
int16_t RegClass
This specifies the register class enumeration of the operand if the operand is a register.
Definition: MCInstrDesc.h:90
llvm::AMDGPU::MTBUFInfo::elements
uint8_t elements
Definition: AMDGPUBaseInfo.cpp:260
llvm::AMDGPU::IsaInfo::getLocalMemorySize
unsigned getLocalMemorySize(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:611
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::AMDGPU::getMIMGOpcode
int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding, unsigned VDataDwords, unsigned VAddrDwords)
Definition: AMDGPUBaseInfo.cpp:199
llvm::Triple::getArch
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:345
llvm::StringRef::str
LLVM_NODISCARD std::string str() const
str - Get the contents as an std::string.
Definition: StringRef.h:249
llvm::AMDGPU::isCI
bool isCI(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1698
llvm::StringRef::getAsInteger
std::enable_if_t< std::numeric_limits< T >::is_signed, bool > getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition: StringRef.h:514
llvm::None
const NoneType None
Definition: None.h:24
llvm::AMDGPU::SendMsg::StreamId
StreamId
Definition: SIDefines.h:368
llvm::CallingConv::ID
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
llvm::AMDGPU::VOPC64DPPInfo::Opcode
uint16_t Opcode
Definition: AMDGPUBaseInfo.cpp:277
llvm::AMDGPU::isGFX10Plus
bool isGFX10Plus(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1730
llvm::AMDGPU::isSymbolicCustomOperandEncoding
static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr, int Size, unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1152
llvm::amdhsa::kernel_descriptor_t::compute_pgm_rsrc3
uint32_t compute_pgm_rsrc3
Definition: AMDHSAKernelDescriptor.h:176
llvm::AMDGPU::Exp::ET_DUAL_SRC_BLEND0
@ ET_DUAL_SRC_BLEND0
Definition: SIDefines.h:865
llvm::AMDGPU::isEntryFunctionCC
bool isEntryFunctionCC(CallingConv::ID CC)
Definition: AMDGPUBaseInfo.cpp:1639
amd_kernel_code_t::amd_machine_version_minor
uint16_t amd_machine_version_minor
Definition: AMDKernelCodeT.h:531
llvm::AMDGPU::MTBUFFormat::isValidFormatEncoding
bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1470
llvm::AMDGPU::isHsaAbiVersion2
bool isHsaAbiVersion2(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:124
llvm::AMDGPU::hasPackedD16
bool hasPackedD16(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1689
llvm::AMDGPU::MTBUFFormat::NFMT_MIN
@ NFMT_MIN
Definition: SIDefines.h:505
llvm::AMDGPU::SMInfo::IsBuffer
bool IsBuffer
Definition: AMDGPUBaseInfo.cpp:268
llvm::AMDGPU::Hwreg::OFFSET_MASK_
@ OFFSET_MASK_
Definition: SIDefines.h:417
llvm::AMDGPU::Hwreg::isValidHwregWidth
bool isValidHwregWidth(int64_t Width)
Definition: AMDGPUBaseInfo.cpp:1269
llvm::AMDGPU::DepCtr::getDefaultDepCtrEncoding
int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1223
llvm::AMDGPU::shouldEmitConstantsToTextSection
bool shouldEmitConstantsToTextSection(const Triple &TT)
Definition: AMDGPUBaseInfo.cpp:943
llvm::AMDGPU::SendMsg::getMsgId
int64_t getMsgId(const StringRef Name, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1492
llvm::AMDGPU::Hwreg::WIDTH_M1_SHIFT_
@ WIDTH_M1_SHIFT_
Definition: SIDefines.h:427
llvm::AMDGPU::DepCtr::DepCtrInfo
const CustomOperandVal DepCtrInfo[]
Definition: AMDGPUAsmUtils.cpp:18
llvm::Triple::AMDHSA
@ AMDHSA
Definition: Triple.h:207
llvm::AMDGPU::Hwreg::isValidHwreg
bool isValidHwreg(int64_t Id)
Definition: AMDGPUBaseInfo.cpp:1261
llvm::AMDGPU::UfmtGFX11::UFMT_FIRST
@ UFMT_FIRST
Definition: SIDefines.h:714
llvm::AMDGPU::isVI
bool isVI(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1702
llvm::AMDGPU::MUBUFInfo::BaseOpcode
uint16_t BaseOpcode
Definition: AMDGPUBaseInfo.cpp:249
llvm::AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED
@ OPERAND_REG_IMM_FP16_DEFERRED
Definition: SIDefines.h:157
llvm::AMDGPU::isInlinableLiteralV216
bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi)
Definition: AMDGPUBaseInfo.cpp:2110
llvm::AMDGPU::ImplicitArg::HOSTCALL_PTR_OFFSET
@ HOSTCALL_PTR_OFFSET
Definition: SIDefines.h:896
llvm::cl::opt
Definition: CommandLine.h:1392
llvm::AMDGPU::Exp::ET_PARAM_MAX_IDX
@ ET_PARAM_MAX_IDX
Definition: SIDefines.h:876
llvm::AMDGPU::getRegBitWidth
unsigned getRegBitWidth(unsigned RCID)
Get the size in bits of a register from the register class RC.
Definition: AMDGPUBaseInfo.cpp:1932
llvm::AMDGPU::MTBUFFormat::NFMT_MAX
@ NFMT_MAX
Definition: SIDefines.h:506
llvm::AMDGPU::getExpcntBitMask
unsigned getExpcntBitMask(const IsaVersion &Version)
Definition: AMDGPUBaseInfo.cpp:993
llvm::MCInstrDesc::NumOperands
unsigned short NumOperands
Definition: MCInstrDesc.h:200
llvm::GlobalValue
Definition: GlobalValue.h:44
llvm::AMDGPU::getMultigridSyncArgImplicitArgPosition
unsigned getMultigridSyncArgImplicitArgPosition()
Definition: AMDGPUBaseInfo.cpp:157
llvm::divideCeil
uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition: MathExtras.h:776
llvm::AMDGPU::isHsaAbiVersion4
bool isHsaAbiVersion4(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:136
llvm::AMDGPU::IsaInfo::getMinNumSGPRs
unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU)
Definition: AMDGPUBaseInfo.cpp:708
llvm::AMDGPU::isInlinableIntLiteral
LLVM_READNONE bool isInlinableIntLiteral(int64_t Literal)
Is this literal inlinable, and not one of the values intended for floating point values.
Definition: AMDGPUBaseInfo.h:912
llvm::AMDGPU::hasG16
bool hasG16(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1685
AMDGPUMCTargetDesc.h
llvm::AMDGPU::Hwreg::Offset
Offset
Definition: SIDefines.h:413
llvm::isUInt< 16 >
constexpr bool isUInt< 16 >(uint64_t x)
Definition: MathExtras.h:408
llvm::StringRef::empty
constexpr LLVM_NODISCARD bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:153
llvm::AMDGPU::SMInfo::Opcode
uint16_t Opcode
Definition: AMDGPUBaseInfo.cpp:267
uint64_t
llvm::AMDGPU::MTBUFFormat::NFMT_MASK
@ NFMT_MASK
Definition: SIDefines.h:512
llvm::Triple::getOS
OSType getOS() const
Get the parsed operating system type of this triple.
Definition: Triple.h:354
llvm::AMDGPU::isVOPC64DPP
bool isVOPC64DPP(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:383
llvm::ARM_MB::ST
@ ST
Definition: ARMBaseInfo.h:73
llvm::AMDGPU::OPERAND_REG_INLINE_C_FP32
@ OPERAND_REG_INLINE_C_FP32
Definition: SIDefines.h:169
llvm::AMDGPU::isGFX9
bool isGFX9(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1706
llvm::AMDGPU::MTBUFFormat::DFMT_SHIFT
@ DFMT_SHIFT
Definition: SIDefines.h:490
llvm::AMDGPU::SendMsg::ID_MASK_GFX11Plus_
@ ID_MASK_GFX11Plus_
Definition: SIDefines.h:342
llvm::LLVMContext
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
llvm::MCOperandInfo::OperandType
uint8_t OperandType
Information about the type of the operand.
Definition: MCInstrDesc.h:96
llvm::AMDGPU::initDefaultAMDKernelCodeT
void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header, const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:859
llvm::AMDGPU::getIntegerAttribute
int getIntegerAttribute(const Function &F, StringRef Name, int Default)
Definition: AMDGPUBaseInfo.cpp:947
llvm::AMDGPU::Exp::ET_DUAL_SRC_BLEND1
@ ET_DUAL_SRC_BLEND1
Definition: SIDefines.h:866
llvm::AMDGPU::VOPInfo::Opcode
uint16_t Opcode
Definition: AMDGPUBaseInfo.cpp:272
llvm::AMDGPU::OPERAND_SRC_FIRST
@ OPERAND_SRC_FIRST
Definition: SIDefines.h:200
amd_kernel_code_t::call_convention
int32_t call_convention
Definition: AMDKernelCodeT.h:645
llvm::AMDGPUAS::CONSTANT_ADDRESS
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
Definition: AMDGPU.h:362
llvm::MCSubtargetInfo::getCPU
StringRef getCPU() const
Definition: MCSubtargetInfo.h:109
llvm::AMDGPU::MUBUFInfo::IsBufferInv
bool IsBufferInv
Definition: AMDGPUBaseInfo.cpp:254
llvm::AMDGPU::OPR_ID_UNKNOWN
const int OPR_ID_UNKNOWN
Definition: AMDGPUAsmUtils.h:23
llvm::DenormalMode
Represent subnormal handling kind for floating point instruction inputs and outputs.
Definition: FloatingPointMode.h:69
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:432
llvm::AMDGPU::getGcnBufferFormatInfo
const GcnBufferFormatInfo * getGcnBufferFormatInfo(uint8_t BitsPerComp, uint8_t NumComponents, uint8_t NumFormat, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:2343
llvm::AMDGPU::SendMsg::OP_UNKNOWN_
@ OP_UNKNOWN_
Definition: SIDefines.h:346
llvm::AMDGPU::IsaInfo::getWavefrontSize
unsigned getWavefrontSize(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:602
llvm::ELF::ELFABIVERSION_AMDGPU_HSA_V2
@ ELFABIVERSION_AMDGPU_HSA_V2
Definition: ELF.h:374
llvm::isUInt< 32 >
constexpr bool isUInt< 32 >(uint64_t x)
Definition: MathExtras.h:411
llvm::AMDGPU::isSISrcInlinableOperand
bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo)
Does this operand support only inlinable literals?
Definition: AMDGPUBaseInfo.cpp:1923
llvm::AMDGPU::Hwreg::isValidHwregOffset
bool isValidHwregOffset(int64_t Offset)
Definition: AMDGPUBaseInfo.cpp:1265
llvm::AMDGPU::SIModeRegisterDefaults::DX10Clamp
bool DX10Clamp
Used by the vector ALU to force DX10-style treatment of NaNs: when set, clamp NaN to zero; otherwise,...
Definition: AMDGPUBaseInfo.h:996
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::AMDGPU::encodeVmcnt
unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Vmcnt)
Definition: AMDGPUBaseInfo.cpp:1046
llvm::X86AS::FS
@ FS
Definition: X86.h:192
llvm::isUInt< 8 >
constexpr bool isUInt< 8 >(uint64_t x)
Definition: MathExtras.h:405
llvm::AMDGPU::getAddrSizeMIMGOp
unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode, const MIMGDimInfo *Dim, bool IsA16, bool IsG16Supported)
Definition: AMDGPUBaseInfo.cpp:219
llvm::AMDGPU::IsaInfo::AMDGPUTargetID::getXnackSetting
TargetIDSetting getXnackSetting() const
Definition: AMDGPUBaseInfo.h:135
llvm::AMDGPU::SIModeRegisterDefaults::FP64FP16InputDenormals
bool FP64FP16InputDenormals
If this is set, neither input or output denormals are flushed for both f64 and f16/v2f16 instructions...
Definition: AMDGPUBaseInfo.h:1005
amd_kernel_code_t::amd_machine_version_stepping
uint16_t amd_machine_version_stepping
Definition: AMDKernelCodeT.h:532
llvm::AMDGPU::isGFX9_GFX10
bool isGFX9_GFX10(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1710
amd_kernel_code_t::group_segment_alignment
uint8_t group_segment_alignment
Definition: AMDKernelCodeT.h:635
llvm::MCInstrDesc::OpInfo
const MCOperandInfo * OpInfo
Definition: MCInstrDesc.h:208
function
print Print MemDeps of function
Definition: MemDepPrinter.cpp:82
llvm::AMDGPU::Waitcnt::LgkmCnt
unsigned LgkmCnt
Definition: AMDGPUBaseInfo.h:523
llvm::AMDGPU::Exp::isSupportedTgtId
bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1348
llvm::AMDGPU::IsaInfo::getTotalNumSGPRs
unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:689
llvm::AMDGPU::getMIMGInfo
const LLVM_READONLY MIMGInfo * getMIMGInfo(unsigned Opc)
llvm::AMDGPU::isGFX10Before1030
bool isGFX10Before1030(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1750
llvm::AMDGPU::VOPInfo
Definition: AMDGPUBaseInfo.cpp:271
llvm::AMDGPU::OPERAND_REG_INLINE_C_V2INT16
@ OPERAND_REG_INLINE_C_V2INT16
Definition: SIDefines.h:171
llvm::AMDGPU::Exp::ET_MRT0
@ ET_MRT0
Definition: SIDefines.h:856
llvm::AMDGPU::IsaInfo::getAddressableNumSGPRs
unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:696
llvm::AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED
@ OPERAND_REG_IMM_FP32_DEFERRED
Definition: SIDefines.h:158
llvm::DenormalMode::IEEE
@ IEEE
IEEE-754 denormal numbers preserved.
Definition: FloatingPointMode.h:76
llvm::AMDGPU::MTBUFFormat::convertDfmtNfmt2Ufmt
int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1453
llvm::AMDGPU::IsaInfo::getNumSGPRBlocks
unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs)
Definition: AMDGPUBaseInfo.cpp:773
amd_kernel_code_t::kernarg_segment_alignment
uint8_t kernarg_segment_alignment
The maximum byte alignment of variables used by the kernel in the specified memory segment.
Definition: AMDKernelCodeT.h:634
amd_kernel_code_t::amd_kernel_code_version_minor
uint32_t amd_kernel_code_version_minor
Definition: AMDKernelCodeT.h:528
llvm::AMDGPU::Exp::ExpTgtInfo
static constexpr ExpTgt ExpTgtInfo[]
Definition: AMDGPUBaseInfo.cpp:1304
llvm::AMDGPU::OPERAND_REG_INLINE_AC_FP32
@ OPERAND_REG_INLINE_AC_FP32
Definition: SIDefines.h:184
llvm::AMDGPU::IsaInfo::TargetIDSetting::Off
@ Off
llvm::AMDGPU::MTBUFFormat::DFMT_MIN
@ DFMT_MIN
Definition: SIDefines.h:484
llvm::AMDGPU::MIMGInfo::BaseOpcode
uint16_t BaseOpcode
Definition: AMDGPUBaseInfo.h:397
llvm::AMDGPU::CustomOperand
Definition: AMDGPUAsmUtils.h:28
llvm::AMDGPU::isInlinableLiteral16
bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi)
Definition: AMDGPUBaseInfo.cpp:2091
llvm::AMDGPU::getMIMGBaseOpcodeInfo
const LLVM_READONLY MIMGBaseOpcodeInfo * getMIMGBaseOpcodeInfo(unsigned BaseOpcode)
llvm::AMDGPU::CPol::SCC
@ SCC
Definition: SIDefines.h:304
llvm::AMDGPU::MTBUFFormat::NfmtSymbolicGFX10
const StringLiteral NfmtSymbolicGFX10[]
Definition: AMDGPUAsmUtils.cpp:160
llvm::AMDGPU::isDwordAligned
static bool isDwordAligned(uint64_t ByteOffset)
Definition: AMDGPUBaseInfo.cpp:2198
llvm::AMDGPU::Exp::ExpTgt::Tgt
unsigned Tgt
Definition: AMDGPUBaseInfo.cpp:1300
llvm::AMDGPU::isHsaAbiVersion5
bool isHsaAbiVersion5(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:142
llvm::AMDGPU::DepCtr::DEP_CTR_SIZE
const int DEP_CTR_SIZE
Definition: AMDGPUAsmUtils.cpp:30
llvm::AMDGPU::hasSMRDSignedImmOffset
static bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST)
Definition: AMDGPUBaseInfo.cpp:2180
llvm::AMDGPU::Exp::ET_MRTZ
@ ET_MRTZ
Definition: SIDefines.h:858
llvm::AMDGPU::MTBUFFormat::UFMT_UNDEF
@ UFMT_UNDEF
Definition: SIDefines.h:528
llvm::AMDGPU::Exp::ExpTgt::MaxIndex
unsigned MaxIndex
Definition: AMDGPUBaseInfo.cpp:1301
llvm::min
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:357
llvm::AMDGPU::isNotGFX10Plus
bool isNotGFX10Plus(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1746
llvm::AMDGPU::SendMsg::isValidMsgId
bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1497
llvm::CallingConv::AMDGPU_PS
@ AMDGPU_PS
Calling convention used for Mesa/AMDPAL pixel shaders.
Definition: CallingConv.h:210
Cond
SmallVector< MachineOperand, 4 > Cond
Definition: BasicBlockSections.cpp:137
llvm::AMDGPU::DepCtr::isSymbolicDepCtrEncoding
bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1230
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:58
llvm::AMDGPUAS::GLOBAL_ADDRESS
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
Definition: AMDGPU.h:359
AMDGPU.h
MAP_REG2REG
#define MAP_REG2REG
Definition: AMDGPUBaseInfo.cpp:1804
llvm::AMDGPU::getVOP3IsSingle
bool getVOP3IsSingle(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:378
llvm::AMDGPU::Hwreg::OFFSET_SHIFT_
@ OFFSET_SHIFT_
Definition: SIDefines.h:415
llvm::AMDGPU::isModuleEntryFunctionCC
bool isModuleEntryFunctionCC(CallingConv::ID CC)
Definition: AMDGPUBaseInfo.cpp:1656
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:143
llvm::AMDGPU::isCompute
bool isCompute(CallingConv::ID cc)
Definition: AMDGPUBaseInfo.cpp:1635
llvm::AMDGPU::IsaInfo::getVGPREncodingGranule
unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI, Optional< bool > EnableWavefrontSize32)
Definition: AMDGPUBaseInfo.cpp:794
llvm::MCRegisterClass::getID
unsigned getID() const
getID() - Return the register class ID number.
Definition: MCRegisterInfo.h:48
uint32_t
llvm::AMDGPU::IsaInfo::getNumVGPRBlocks
unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, Optional< bool > EnableWavefrontSize32)
Definition: AMDGPUBaseInfo.cpp:849
S
add sub stmia L5 ldr r0 bl L_printf $stub Instead of a and a wouldn t it be better to do three moves *Return an aggregate type is even return S
Definition: README.txt:210
llvm::AMDGPU::MIMGBaseOpcodeInfo::Coordinates
bool Coordinates
Definition: AMDGPUBaseInfo.h:311
llvm::AMDGPU::isLegalSMRDEncodedUnsignedOffset
bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset)
Definition: AMDGPUBaseInfo.cpp:2184
llvm::AMDGPU::isSISrcOperand
bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo)
Can this operand also contain immediate values?
Definition: AMDGPUBaseInfo.cpp:1887
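A hedged sketch built only from the signature above, assuming it is compiled inside the AMDGPU target where AMDGPUBaseInfo.h is on the include path; countImmCapableSrcs is an illustrative name.
#include "AMDGPUBaseInfo.h"
#include "llvm/MC/MCInstrDesc.h"
// Count how many operands of an instruction may also encode an immediate.
static unsigned countImmCapableSrcs(const llvm::MCInstrDesc &Desc) {
  unsigned Count = 0;
  for (unsigned OpNo = 0, E = Desc.getNumOperands(); OpNo != E; ++OpNo)
    if (llvm::AMDGPU::isSISrcOperand(Desc, OpNo))
      ++Count;
  return Count;
}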
llvm::AMDGPU::isGraphics
bool isGraphics(CallingConv::ID cc)
Definition: AMDGPUBaseInfo.cpp:1631
amd_kernel_code_t
AMD Kernel Code Object (amd_kernel_code_t).
Definition: AMDKernelCodeT.h:526
llvm::AMDGPU::IsaInfo::AMDGPUTargetID::isXnackSupported
bool isXnackSupported() const
Definition: AMDGPUBaseInfo.h:116
llvm::AMDGPU::IsaInfo::getMaxFlatWorkGroupSize
unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:666
llvm::AMDGPU::Exp::getTgtName
bool getTgtName(unsigned Id, StringRef &Name, int &Index)
Definition: AMDGPUBaseInfo.cpp:1314
llvm::AMDGPU::Waitcnt::VmCnt
unsigned VmCnt
Definition: AMDGPUBaseInfo.h:521
llvm::AMDGPU::getMTBUFHasVAddr
bool getMTBUFHasVAddr(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:313
llvm::MCRegisterInfo
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc objects...
Definition: MCRegisterInfo.h:135
AMDHSA_BITS_SET
#define AMDHSA_BITS_SET(DST, MSK, VAL)
Definition: AMDHSAKernelDescriptor.h:42
llvm::amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE
@ FLOAT_DENORM_MODE_FLUSH_NONE
Definition: AMDHSAKernelDescriptor.h:63
amd_kernel_code_t::kernel_code_entry_byte_offset
int64_t kernel_code_entry_byte_offset
Byte offset (possibly negative) from start of amd_kernel_code_t object to kernel's entry point instruction...
Definition: AMDKernelCodeT.h:544
llvm::AMDGPU::MTBUFFormat::UfmtSymbolicGFX10
const StringLiteral UfmtSymbolicGFX10[]
Definition: AMDGPUAsmUtils.cpp:193
llvm::AMDGPU::GcnBufferFormatInfo
Definition: AMDGPUBaseInfo.h:66
llvm::AMDGPU::isGFX8_GFX9_GFX10
bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1714
llvm::AMDGPU::MTBUFFormat::getUnifiedFormat
int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1428
llvm::AMDGPU::isGFX9Plus
bool isGFX9Plus(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1722
llvm::AMDGPU::getDefaultAmdhsaKernelDescriptor
amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:895
MRI
unsigned const MachineRegisterInfo * MRI
Definition: AArch64AdvSIMDScalarPass.cpp:105
llvm::AMDGPU::MTBUFFormat::UFMT_DEFAULT
@ UFMT_DEFAULT
Definition: SIDefines.h:529
llvm::AMDGPU::SendMsg::Msg
const CustomOperand< const MCSubtargetInfo & > Msg[]
Definition: AMDGPUAsmUtils.cpp:39
amd_kernel_code_t::amd_machine_kind
uint16_t amd_machine_kind
Definition: AMDKernelCodeT.h:529
llvm::AMDGPU::SIModeRegisterDefaults::FP64FP16OutputDenormals
bool FP64FP16OutputDenormals
Definition: AMDGPUBaseInfo.h:1006
llvm::AMDGPU::isGroupSegment
bool isGroupSegment(const GlobalValue *GV)
Definition: AMDGPUBaseInfo.cpp:929
llvm::AMDGPU::SendMsg::STREAM_ID_FIRST_
@ STREAM_ID_FIRST_
Definition: SIDefines.h:372
llvm::AMDGPU::SMInfo
Definition: AMDGPUBaseInfo.cpp:266
llvm::CallingConv::AMDGPU_KERNEL
@ AMDGPU_KERNEL
Calling convention for AMDGPU code object kernels.
Definition: CallingConv.h:216
llvm::AMDGPU::encodeWaitcnt
unsigned encodeWaitcnt(const IsaVersion &Version, unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt)
Encodes Vmcnt, Expcnt and Lgkmcnt into Waitcnt for given isa Version.
Definition: AMDGPUBaseInfo.cpp:1067
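A hedged sketch of how this encoder can be used, assuming an IsaVersion obtained elsewhere in the AMDGPU support headers; waitAllCountsZero is an illustrative wrapper, not part of the API.
#include "AMDGPUBaseInfo.h"
// Build the s_waitcnt immediate that waits for all outstanding vector-memory,
// export and LGKM operations (Vmcnt = Expcnt = Lgkmcnt = 0). The per-field
// encode helpers listed in this file clamp each count to its per-version width.
static unsigned waitAllCountsZero(const llvm::AMDGPU::IsaVersion &Version) {
  return llvm::AMDGPU::encodeWaitcnt(Version, /*Vmcnt=*/0, /*Expcnt=*/0,
                                     /*Lgkmcnt=*/0);
}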
llvm::AMDGPU::SendMsg::isValidMsgOp
bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI, bool Strict)
Definition: AMDGPUBaseInfo.cpp:1518
llvm::AMDGPU::SendMsg::OP_SYS_FIRST_
@ OP_SYS_FIRST_
Definition: SIDefines.h:365
Attributes.h
llvm::isInt< 16 >
constexpr bool isInt< 16 >(int64_t x)
Definition: MathExtras.h:370
llvm::StringRef::size
constexpr LLVM_NODISCARD size_t size() const
size - Get the string size.
Definition: StringRef.h:157
llvm::AMDGPU::isSI
bool isSI(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1694
llvm::ELF::ELFABIVERSION_AMDGPU_HSA_V4
@ ELFABIVERSION_AMDGPU_HSA_V4
Definition: ELF.h:376
llvm::AMDGPU::OPERAND_REG_INLINE_AC_FP64
@ OPERAND_REG_INLINE_AC_FP64
Definition: SIDefines.h:185
llvm::AMDGPU::IsaInfo::AMDGPUTargetID::toString
std::string toString() const
Definition: AMDGPUBaseInfo.cpp:495
amd_kernel_code_t::amd_machine_version_major
uint16_t amd_machine_version_major
Definition: AMDKernelCodeT.h:530
llvm::Twine
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values...
Definition: Twine.h:83
llvm::Any
Definition: Any.h:28
llvm::AMDGPU::OPERAND_SRC_LAST
@ OPERAND_SRC_LAST
Definition: SIDefines.h:201
llvm::AMDGPU::isArgPassedInSGPR
bool isArgPassedInSGPR(const Argument *A)
Definition: AMDGPUBaseInfo.cpp:2149
llvm::AMDGPU::UfmtGFX11::UFMT_LAST
@ UFMT_LAST
Definition: SIDefines.h:715
llvm::AMDGPU::ImplicitArg::MULTIGRID_SYNC_ARG_OFFSET
@ MULTIGRID_SYNC_ARG_OFFSET
Definition: SIDefines.h:897
llvm::AMDGPU::Waitcnt
Represents the counter values to wait for in an s_waitcnt instruction.
Definition: AMDGPUBaseInfo.h:520
llvm::AMDGPU::Exp::ET_DUAL_SRC_BLEND_MAX_IDX
@ ET_DUAL_SRC_BLEND_MAX_IDX
Definition: SIDefines.h:875
llvm::AMDGPU::Hwreg::Opr
const CustomOperand< const MCSubtargetInfo & > Opr[]
Definition: AMDGPUAsmUtils.cpp:90
llvm::GraphProgram::Name
Name
Definition: GraphWriter.h:50
llvm::AMDGPU::SendMsg::decodeMsg
void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId, uint16_t &StreamId, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1577
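A hedged sketch combining decodeMsg with the isValidMsg* predicates listed in this index; isWellFormedSendMsgImm is an illustrative name and the non-strict flags are an assumption for the example.
#include "AMDGPUBaseInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include <cstdint>
// Split a raw s_sendmsg immediate into its fields and validate each part.
static bool isWellFormedSendMsgImm(unsigned Imm,
                                   const llvm::MCSubtargetInfo &STI) {
  uint16_t MsgId, OpId, StreamId;
  llvm::AMDGPU::SendMsg::decodeMsg(Imm, MsgId, OpId, StreamId, STI);
  return llvm::AMDGPU::SendMsg::isValidMsgId(MsgId, STI) &&
         llvm::AMDGPU::SendMsg::isValidMsgOp(MsgId, OpId, STI,
                                             /*Strict=*/false) &&
         llvm::AMDGPU::SendMsg::isValidMsgStream(MsgId, OpId, StreamId, STI,
                                                 /*Strict=*/false);
}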
amd_kernel_code_t::private_segment_alignment
uint8_t private_segment_alignment
Definition: AMDKernelCodeT.h:636
llvm::AMDGPU::SIModeRegisterDefaults::SIModeRegisterDefaults
SIModeRegisterDefaults()
Definition: AMDGPUBaseInfo.h:1008
llvm::AMDGPU::decodeCustomOperand
static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size, unsigned Code, int &Idx, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1171
llvm::AMDGPU::getMUBUFHasVAddr
bool getMUBUFHasVAddr(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:343
llvm::AMDGPU::isInlinableLiteral64
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi)
Is this literal inlinable.
Definition: AMDGPUBaseInfo.cpp:2048
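A hedged sketch of the inlinable-literal predicates (16/32/64-bit variants are all listed in this index); fitsAsInline32 is an illustrative wrapper.
#include "AMDGPUBaseInfo.h"
#include <cstdint>
// Decide whether a 32-bit constant can be encoded as an inline constant
// instead of consuming a literal slot. HasInv2Pi reflects whether the
// subtarget additionally accepts 1/(2*pi) as an inline value.
static bool fitsAsInline32(int32_t Imm, bool HasInv2Pi) {
  return llvm::AMDGPU::isInlinableLiteral32(Imm, HasInv2Pi);
}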
llvm::AMDGPU::OPERAND_REG_IMM_V2INT16
@ OPERAND_REG_IMM_V2INT16
Definition: SIDefines.h:160
uint16_t
llvm::AMDGPU::getMUBUFElements
int getMUBUFElements(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:338
llvm::AMDGPU::encodeCustomOperandVal
static int encodeCustomOperandVal(const CustomOperandVal &Op, int64_t InputVal)
Definition: AMDGPUBaseInfo.cpp:1188
llvm::AMDGPU::MTBUFFormat::DFMT_NFMT_DEFAULT
@ DFMT_NFMT_DEFAULT
Definition: SIDefines.h:517
llvm::AMDGPU::SendMsg::getMsgOpId
int64_t getMsgOpId(int64_t MsgId, const StringRef Name)
Definition: AMDGPUBaseInfo.cpp:1506
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:345
llvm::AMDGPU::Exp::ET_POS4
@ ET_POS4
Definition: SIDefines.h:862
llvm::FPOpFusion::Strict
@ Strict
Definition: TargetOptions.h:39
amd_kernel_code_t::code_properties
uint32_t code_properties
Code properties.
Definition: AMDKernelCodeT.h:562
llvm::AMDGPU::isFoldableLiteralV216
bool isFoldableLiteralV216(int32_t Literal, bool HasInv2Pi)
Definition: AMDGPUBaseInfo.cpp:2136
llvm::AMDGPU::OPERAND_REG_INLINE_C_V2FP16
@ OPERAND_REG_INLINE_C_V2FP16
Definition: SIDefines.h:172
llvm::AMDGPU::CustomOperand::Cond
bool(* Cond)(T Context)
Definition: AMDGPUAsmUtils.h:31
llvm::AMDGPU::MIMGInfo::Opcode
uint16_t Opcode
Definition: AMDGPUBaseInfo.h:396
Function.h
llvm::AMDGPU::MUBUFInfo::has_vaddr
bool has_vaddr
Definition: AMDGPUBaseInfo.cpp:251
llvm::AMDGPU::OPERAND_REG_INLINE_C_FIRST
@ OPERAND_REG_INLINE_C_FIRST
Definition: SIDefines.h:194
llvm::amdhsa::kernel_descriptor_t
Definition: AMDHSAKernelDescriptor.h:169
llvm::AMDGPU::getVOP1IsSingle
bool getVOP1IsSingle(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:368
llvm::AMDGPU::splitMUBUFOffset
bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset, const GCNSubtarget *Subtarget, Align Alignment)
Definition: AMDGPUBaseInfo.cpp:2252
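A hedged sketch based only on the signature above: the wrapper name, the reference-to-pointer adaptation and the Align(4) argument are assumptions for the example, not a statement about the helper's defaults.
#include "AMDGPUBaseInfo.h"
#include "GCNSubtarget.h"
#include "llvm/Support/Alignment.h"
#include <cstdint>
// Try to split a byte offset that is too large for the MUBUF immediate field
// into an SOffset part plus a small immediate part; returns false on failure.
static bool trySplitMUBUFOffset(uint32_t ByteOffset, const llvm::GCNSubtarget &ST,
                                uint32_t &SOffset, uint32_t &ImmOffset) {
  return llvm::AMDGPU::splitMUBUFOffset(ByteOffset, SOffset, ImmOffset, &ST,
                                        llvm::Align(4));
}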
llvm::AMDGPU::SendMsg::STREAM_ID_NONE_
@ STREAM_ID_NONE_
Definition: SIDefines.h:369
llvm::AMDGPU::MIMGInfo
Definition: AMDGPUBaseInfo.h:395
llvm::AMDGPU::MTBUFFormat::decodeDfmtNfmt
void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt)
Definition: AMDGPUBaseInfo.cpp:1423
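A hedged sketch pairing this decoder with getNfmtName from the same namespace; dumpFormat is an illustrative name.
#include "AMDGPUBaseInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/raw_ostream.h"
// Pull the data-format and numeric-format fields out of a combined MTBUF
// format value and print the numeric format's symbolic name.
static void dumpFormat(unsigned Format, const llvm::MCSubtargetInfo &STI) {
  unsigned Dfmt = 0, Nfmt = 0;
  llvm::AMDGPU::MTBUFFormat::decodeDfmtNfmt(Format, Dfmt, Nfmt);
  llvm::errs() << "dfmt=" << Dfmt << " nfmt="
               << llvm::AMDGPU::MTBUFFormat::getNfmtName(Nfmt, STI) << '\n';
}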
llvm::AMDGPU::Exp::getTgtId
unsigned getTgtId(const StringRef Name)
Definition: AMDGPUBaseInfo.cpp:1325
llvm::AMDGPU::MTBUFFormat::NfmtSymbolicSICI
const StringLiteral NfmtSymbolicSICI[]
Definition: AMDGPUAsmUtils.cpp:171
llvm::AMDGPU::MTBUFFormat::NFMT_SHIFT
@ NFMT_SHIFT
Definition: SIDefines.h:511
llvm::AMDGPU::getTotalNumVGPRs
int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR, int32_t ArgNumVGPR)
Definition: AMDGPUBaseInfo.cpp:1790
llvm::AMDGPU::OPERAND_REG_INLINE_C_V2FP32
@ OPERAND_REG_INLINE_C_V2FP32
Definition: SIDefines.h:174
llvm::AMDGPU::IsaInfo::getAddressableNumVGPRs
unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:814
llvm::AMDGPU::SendMsg::isValidMsgStream
bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId, const MCSubtargetInfo &STI, bool Strict)
Definition: AMDGPUBaseInfo.cpp:1544
llvm::AMDGPU::MTBUFFormat::getNfmtName
StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1403
AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32
@ AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32
Definition: AMDKernelCodeT.h:127
llvm::amdhsa::kernel_descriptor_t::compute_pgm_rsrc1
uint32_t compute_pgm_rsrc1
Definition: AMDHSAKernelDescriptor.h:177
llvm::AMDGPU::Hwreg::Width
Width
Definition: SIDefines.h:436
llvm::AMDGPU::SendMsg::STREAM_ID_SHIFT_
@ STREAM_ID_SHIFT_
Definition: SIDefines.h:373
llvm::AMDGPU::getLgkmcntBitMask
unsigned getLgkmcntBitMask(const IsaVersion &Version)
Definition: AMDGPUBaseInfo.cpp:997
llvm::GlobalValue::getAddressSpace
unsigned getAddressSpace() const
Definition: Globals.cpp:121
llvm::GCNSubtarget::getGeneration
Generation getGeneration() const
Definition: GCNSubtarget.h:262
llvm::RISCVMatInt::Imm
@ Imm
Definition: RISCVMatInt.h:23
llvm::AMDGPU::Hwreg::WIDTH_M1_MASK_
@ WIDTH_M1_MASK_
Definition: SIDefines.h:429
llvm::AMDGPU::MIMGBaseOpcodeInfo::NumExtraArgs
uint8_t NumExtraArgs
Definition: AMDGPUBaseInfo.h:308
llvm::AMDGPU::isInlinableLiteral32
bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi)
Definition: AMDGPUBaseInfo.cpp:2065
llvm::AMDGPU::isSISrcFPOperand
bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this floating-point operand?
Definition: AMDGPUBaseInfo.cpp:1894
llvm::AMDGPU::isGFX8Plus
bool isGFX8Plus(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1718
llvm::FeatureBitset::test
constexpr bool test(unsigned I) const
Definition: SubtargetFeature.h:90
llvm::AMDGPU::isNotGFX11Plus
bool isNotGFX11Plus(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1742
llvm::AMDGPU::encodeExpcnt
unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Expcnt)
Definition: AMDGPUBaseInfo.cpp:1055
llvm::AMDGPU::Exp::ET_POS_MAX_IDX
@ ET_POS_MAX_IDX
Definition: SIDefines.h:874
llvm::AMDGPU::isGCN3Encoding
bool isGCN3Encoding(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1754
llvm::AMDGPU::MTBUFFormat::getNfmt
int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1394
llvm::AMDGPU::Exp::ExpTgt
Definition: AMDGPUBaseInfo.cpp:1298
llvm::AMDGPU::isValidOpr
static bool isValidOpr(int Idx, const CustomOperand< T > OpInfo[], int OpInfoSize, T Context)
Definition: AMDGPUBaseInfo.cpp:1092
llvm::AMDGPU::SendMsg::getMsgOpName
StringRef getMsgOpName(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1538
llvm::AMDGPU::Exp::ET_PRIM_MAX_IDX
@ ET_PRIM_MAX_IDX
Definition: SIDefines.h:872
llvm::AMDGPU::getNumFlatOffsetBits
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST, bool Signed)
For FLAT segment the offset must be positive; MSB is ignored and forced to zero.
Definition: AMDGPUBaseInfo.cpp:2237
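A hedged sketch of a range check built on top of this query; isEncodableFlatOffset is an illustrative name and the use of isIntN/isUIntN is an assumption about how a caller might consume the bit count.
#include "AMDGPUBaseInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/MathExtras.h"
#include <cstdint>
// Check whether a FLAT instruction offset fits in the subtarget's offset
// field, interpreted as signed or unsigned as requested by the caller.
static bool isEncodableFlatOffset(int64_t Offset,
                                  const llvm::MCSubtargetInfo &ST, bool Signed) {
  unsigned Bits = llvm::AMDGPU::getNumFlatOffsetBits(ST, Signed);
  return Signed ? llvm::isIntN(Bits, Offset) : llvm::isUIntN(Bits, Offset);
}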
llvm::AMDGPU::SendMsg::ID_GS_DONE_PreGFX11
@ ID_GS_DONE_PreGFX11
Definition: SIDefines.h:319
N
#define N
AMDKernelCodeT.h
llvm::CallingConv::AMDGPU_LS
@ AMDGPU_LS
Calling convention used for AMDPAL vertex shader if tessellation is in use.
Definition: CallingConv.h:231
llvm::AMDGPU::IsaInfo::getWavesPerEUForWorkGroup
unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
Definition: AMDGPUBaseInfo.cpp:656
llvm::AMDGPU::IsaInfo::getMaxWavesPerEU
unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:647
llvm::AMDGPU::isHsaAbiVersion3AndAbove
bool isHsaAbiVersion3AndAbove(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:148
llvm::AMDGPU::MTBUFFormat::DfmtNfmt2UFmtGFX10
const unsigned DfmtNfmt2UFmtGFX10[]
Definition: AMDGPUAsmUtils.cpp:287
llvm::AMDGPU::IsaInfo::getMinNumVGPRs
unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU)
Definition: AMDGPUBaseInfo.cpp:829
llvm::AMDGPU::MTBUFFormat::UFMT_MAX
@ UFMT_MAX
Definition: SIDefines.h:527
llvm::AMDGPU::hasMIMG_R128
bool hasMIMG_R128(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1677
llvm::AMDGPU::MIMGBaseOpcodeInfo::G16
bool G16
Definition: AMDGPUBaseInfo.h:310
llvm::CallingConv::SPIR_KERNEL
@ SPIR_KERNEL
SPIR_KERNEL - Calling convention for SPIR kernel functions.
Definition: CallingConv.h:152
llvm::AMDGPU::Exp::ET_NULL_MAX_IDX
@ ET_NULL_MAX_IDX
Definition: SIDefines.h:870
llvm::DenormalMode::Output
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environment...
Definition: FloatingPointMode.h:87
llvm::AMDGPU::OPERAND_REG_INLINE_C_FP16
@ OPERAND_REG_INLINE_C_FP16
Definition: SIDefines.h:168
llvm::CallingConv::AMDGPU_CS
@ AMDGPU_CS
Calling convention used for Mesa/AMDPAL compute shaders.
Definition: CallingConv.h:213
llvm::AMDGPU::HSAMD::Kernel::CodeProps::Key::NumVGPRs
constexpr char NumVGPRs[]
Key for Kernel::CodeProps::Metadata::mNumVGPRs.
Definition: AMDGPUMetadata.h:260
llvm::AMDGPU::IsaInfo::AMDGPUTargetID::setTargetIDFromFeaturesString
void setTargetIDFromFeaturesString(StringRef FS)
Definition: AMDGPUBaseInfo.cpp:415
llvm::AMDGPU::isReadOnlySegment
bool isReadOnlySegment(const GlobalValue *GV)
Definition: AMDGPUBaseInfo.cpp:937
llvm::AMDGPU::SendMsg::OP_NONE_
@ OP_NONE_
Definition: SIDefines.h:348
llvm::AMDGPU::isLegalSMRDEncodedSignedOffset
bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset, bool IsBuffer)
Definition: AMDGPUBaseInfo.cpp:2190
llvm::AMDGPU::OPERAND_REG_IMM_FP16
@ OPERAND_REG_IMM_FP16
Definition: SIDefines.h:156
llvm::AMDGPU::Exp::ET_PARAM31
@ ET_PARAM31
Definition: SIDefines.h:868
S_00B848_WGP_MODE
#define S_00B848_WGP_MODE(x)
Definition: SIDefines.h:1005
LLVMContext.h
llvm::AMDGPU::encodeLgkmcnt
unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Lgkmcnt)
Definition: AMDGPUBaseInfo.cpp:1061
llvm::AMDGPU::MAIInstInfo
Definition: AMDGPUBaseInfo.h:74
llvm::cl::desc
Definition: CommandLine.h:405
llvm::AMDGPU::OPERAND_REG_INLINE_AC_V2INT16
@ OPERAND_REG_INLINE_AC_V2INT16
Definition: SIDefines.h:186
llvm::AMDGPU::MTBUFFormat::getDfmt
int64_t getDfmt(const StringRef Name)
Definition: AMDGPUBaseInfo.cpp:1373
llvm::AMDGPU::MTBUFFormat::DfmtNfmt2UFmtGFX11
const unsigned DfmtNfmt2UFmtGFX11[]
Definition: AMDGPUAsmUtils.cpp:461
llvm::AMDGPU::VGPRIndexMode::Id
Id
Definition: SIDefines.h:238
llvm::AMDGPU::CustomOperandVal
Definition: AMDGPUAsmUtils.h:34
llvm::AMDGPU::Waitcnt::ExpCnt
unsigned ExpCnt
Definition: AMDGPUBaseInfo.h:522
llvm::AMDGPU::getIntegerPairAttribute
std::pair< int, int > getIntegerPairAttribute(const Function &F, StringRef Name, std::pair< int, int > Default, bool OnlyFirstRequired)
Definition: AMDGPUBaseInfo.cpp:962
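A hedged sketch reading a two-integer function attribute; the wrapper name, the "amdgpu-flat-work-group-size" attribute string and the {1, 1024} fallback are shown for illustration.
#include "AMDGPUBaseInfo.h"
#include "llvm/IR/Function.h"
#include <utility>
// Parse a "min,max" style attribute, falling back to the supplied default
// when the attribute is absent or malformed.
static std::pair<int, int> getFlatWorkGroupSizeRange(const llvm::Function &F) {
  return llvm::AMDGPU::getIntegerPairAttribute(
      F, "amdgpu-flat-work-group-size", /*Default=*/{1, 1024},
      /*OnlyFirstRequired=*/false);
}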
llvm::AMDGPU::IsaInfo::AMDGPUTargetID::isSramEccSupported
bool isSramEccSupported() const
Definition: AMDGPUBaseInfo.h:145
llvm::AMDGPU::SendMsg::ID_GS_PreGFX11
@ ID_GS_PreGFX11
Definition: SIDefines.h:318
llvm::AMDGPU::MTBUFInfo::Opcode
uint16_t Opcode
Definition: AMDGPUBaseInfo.cpp:258
llvm::AMDGPU::isKernelCC
bool isKernelCC(const Function *Func)
Definition: AMDGPUBaseInfo.cpp:1665
llvm::AMDGPU::MTBUFInfo::has_srsrc
bool has_srsrc
Definition: AMDGPUBaseInfo.cpp:262
llvm::AMDGPU::getHostcallImplicitArgPosition
unsigned getHostcallImplicitArgPosition()
Definition: AMDGPUBaseInfo.cpp:174
llvm::AMDGPU::SendMsg::getMsgIdMask
static uint64_t getMsgIdMask(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1488
llvm::AMDGPU::SendMsg::OP_MASK_
@ OP_MASK_
Definition: SIDefines.h:351
llvm::AMDGPU::MTBUFFormat::getUnifiedFormatName
StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1443
llvm::MCSubtargetInfo
Generic base class for all target subtargets.
Definition: MCSubtargetInfo.h:76
llvm::AMDGPU::MUBUFInfo
Definition: AMDGPUBaseInfo.cpp:247
llvm::AMDGPU::Exp::ET_MRT_MAX_IDX
@ ET_MRT_MAX_IDX
Definition: SIDefines.h:873
llvm::AMDGPU::getHasColorExport
bool getHasColorExport(const Function &F)
Definition: AMDGPUBaseInfo.cpp:1605
llvm::AMDGPU::getHasDepthExport
bool getHasDepthExport(const Function &F)
Definition: AMDGPUBaseInfo.cpp:1612
llvm::ELF::ELFABIVERSION_AMDGPU_HSA_V3
@ ELFABIVERSION_AMDGPU_HSA_V3
Definition: ELF.h:375
llvm::AMDGPU::IsaInfo::getVGPRAllocGranule
unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI, Optional< bool > EnableWavefrontSize32)
Definition: AMDGPUBaseInfo.cpp:779
llvm::AMDGPU::SendMsg::getMsgName
StringRef getMsgName(int64_t MsgId, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1501
llvm::AMDGPU::getMaskedMIMGOp
int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels)
Definition: AMDGPUBaseInfo.cpp:211
llvm::AMDGPU::getMTBUFElements
int getMTBUFElements(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:308
llvm::AMDGPU::OPERAND_REG_INLINE_C_LAST
@ OPERAND_REG_INLINE_C_LAST
Definition: SIDefines.h:195
llvm::AMDGPU::UfmtGFX10::UFMT_LAST
@ UFMT_LAST
Definition: SIDefines.h:629
AMDGPUBaseInfo.h
llvm::AMDGPU::OPERAND_REG_INLINE_AC_V2FP16
@ OPERAND_REG_INLINE_AC_V2FP16
Definition: SIDefines.h:187
llvm::AMDGPU::getOprIdx
static int getOprIdx(std::function< bool(const CustomOperand< T > &)> Test, const CustomOperand< T > OpInfo[], int OpInfoSize, T Context)
Definition: AMDGPUBaseInfo.cpp:1099
llvm::AMDGPU::MTBUFFormat::DFMT_MAX
@ DFMT_MAX
Definition: SIDefines.h:485