//===-- AMDGPUTargetStreamer.cpp - AMDGPU Target Streamer Methods ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides AMDGPU specific target streamer methods.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetStreamer.h"
#include "AMDGPUPTNote.h"
#include "AMDKernelCodeT.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"

using namespace llvm;
using namespace llvm::AMDGPU;

//===----------------------------------------------------------------------===//
// AMDGPUTargetStreamer
//===----------------------------------------------------------------------===//

static cl::opt<unsigned>
    ForceGenericVersion("amdgpu-force-generic-version",
                        cl::desc("Force a specific generic_v<N> flag to be "
                                 "added. For testing purposes only."),
                        cl::ReallyHidden, cl::init(0));
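
// Parse the YAML metadata string into a msgpack document and emit it through
// the msgpack-based EmitHSAMetadata hook (with non-strict verification).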
bool AMDGPUTargetStreamer::EmitHSAMetadataV3(StringRef HSAMetadataString) {
  msgpack::Document HSAMetadataDoc;
  if (!HSAMetadataDoc.fromYAML(HSAMetadataString))
    return false;
  return EmitHSAMetadata(HSAMetadataDoc, false);
}

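// Map an ELF e_flags EF_AMDGPU_MACH_* value back to the canonical GPU name,
// preferring the AMDGCN name and falling back to the R600 name.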
StringRef AMDGPUTargetStreamer::getArchNameFromElfMach(unsigned ElfMach) {
  AMDGPU::GPUKind AK;

  // clang-format off
  switch (ElfMach) {
  case ELF::EF_AMDGPU_MACH_R600_R600: AK = GK_R600; break;
  case ELF::EF_AMDGPU_MACH_R600_R630: AK = GK_R630; break;
  // ... (remaining EF_AMDGPU_MACH_R600_* cases) ...
  case ELF::EF_AMDGPU_MACH_R600_SUMO: AK = GK_SUMO; break;
  // ... (EF_AMDGPU_MACH_AMDGCN_* cases) ...
  case ELF::EF_AMDGPU_MACH_NONE:      AK = GK_NONE; break;
  default:                            AK = GK_NONE; break;
  }
  // clang-format on

  StringRef GPUName = getArchNameAMDGCN(AK);
  if (GPUName != "")
    return GPUName;
  return getArchNameR600(AK);
}

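// Map a GPU name to its EF_AMDGPU_MACH_* e_flags value; an unknown GPU aborts
// via llvm_unreachable, since every supported processor has an ELF encoding.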
unsigned AMDGPUTargetStreamer::getElfMach(StringRef GPU) {
  AMDGPU::GPUKind AK = parseArchAMDGCN(GPU);
  if (AK == AMDGPU::GPUKind::GK_NONE)
    AK = parseArchR600(GPU);

  // clang-format off
  switch (AK) {
  // ... (one case per GPUKind, returning the matching EF_AMDGPU_MACH_* value) ...
  }
  // clang-format on

  llvm_unreachable("unknown GPU");
}

//===----------------------------------------------------------------------===//
// AMDGPUTargetAsmStreamer
//===----------------------------------------------------------------------===//

AMDGPUTargetAsmStreamer::AMDGPUTargetAsmStreamer(MCStreamer &S,
                                                 formatted_raw_ostream &OS)
    : AMDGPUTargetStreamer(S), OS(OS) { }

// A hook for emitting stuff at the end.
// We use it for emitting the accumulated PAL metadata as directives.
// The PAL metadata is reset after it is emitted.
void AMDGPUTargetAsmStreamer::finish() {
  std::string S;
  getPALMetadata()->toString(S);
  OS << S;

  // Reset the pal metadata so its data will not affect a compilation that
  // reuses this object.
  getPALMetadata()->reset();
}

void AMDGPUTargetAsmStreamer::EmitDirectiveAMDGCNTarget() {
  OS << "\t.amdgcn_target \"" << getTargetID()->toString() << "\"\n";
}

void AMDGPUTargetAsmStreamer::EmitDirectiveAMDHSACodeObjectVersion(
    unsigned COV) {
  AMDGPUTargetStreamer::EmitDirectiveAMDHSACodeObjectVersion(COV);
  OS << "\t.amdhsa_code_object_version " << COV << '\n';
}

void
AMDGPUTargetAsmStreamer::EmitAMDKernelCodeT(const amd_kernel_code_t &Header) {
  OS << "\t.amd_kernel_code_t\n";
  dumpAmdKernelCode(&Header, OS, "\t\t");
  OS << "\t.end_amd_kernel_code_t\n";
}

void AMDGPUTargetAsmStreamer::EmitAMDGPUSymbolType(StringRef SymbolName,
                                                   unsigned Type) {
  switch (Type) {
  default: llvm_unreachable("Invalid AMDGPU symbol type");
  case ELF::STT_AMDGPU_HSA_KERNEL:
    OS << "\t.amdgpu_hsa_kernel " << SymbolName << '\n';
    break;
  }
}

void AMDGPUTargetAsmStreamer::emitAMDGPULDS(MCSymbol *Symbol, unsigned Size,
                                            Align Alignment) {
  OS << "\t.amdgpu_lds " << Symbol->getName() << ", " << Size << ", "
     << Alignment.value() << '\n';
}

bool AMDGPUTargetAsmStreamer::EmitISAVersion() {
  OS << "\t.amd_amdgpu_isa \"" << getTargetID()->toString() << "\"\n";
  return true;
}

bool AMDGPUTargetAsmStreamer::EmitHSAMetadata(
    msgpack::Document &HSAMetadataDoc, bool Strict) {
  HSAMD::V3::MetadataVerifier Verifier(Strict);
  if (!Verifier.verify(HSAMetadataDoc.getRoot()))
    return false;

  std::string HSAMetadataString;
  raw_string_ostream StrOS(HSAMetadataString);
  HSAMetadataDoc.toYAML(StrOS);

  OS << '\t' << HSAMD::V3::AssemblerDirectiveBegin << '\n';
  OS << StrOS.str() << '\n';
  OS << '\t' << HSAMD::V3::AssemblerDirectiveEnd << '\n';
  return true;
}

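// The kernarg preload header is a 64-dword (256-byte) block placed ahead of
// kernels that preload kernel arguments: one s_trap/s_endpgm followed by 63
// s_nop instructions.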
bool AMDGPUTargetAsmStreamer::EmitKernargPreloadHeader(
    const MCSubtargetInfo &STI, bool TrapEnabled) {
  OS << (TrapEnabled ? "\ts_trap 2" : "\ts_endpgm")
     << " ; Kernarg preload header. Trap with incompatible firmware that "
        "doesn't support preloading kernel arguments.\n";
  OS << "\t.fill 63, 4, 0xbf800000 ; s_nop 0\n";
  return true;
}

bool AMDGPUTargetAsmStreamer::EmitCodeEnd(const MCSubtargetInfo &STI) {
  const uint32_t Encoded_s_code_end = 0xbf9f0000;
  const uint32_t Encoded_s_nop = 0xbf800000;
  uint32_t Encoded_pad = Encoded_s_code_end;

  // Instruction cache line size in bytes.
  const unsigned Log2CacheLineSize = AMDGPU::isGFX11Plus(STI) ? 7 : 6;
  const unsigned CacheLineSize = 1u << Log2CacheLineSize;

  // Extra padding amount in bytes to support prefetch mode 3.
  unsigned FillSize = 3 * CacheLineSize;

  if (AMDGPU::isGFX90A(STI)) {
    Encoded_pad = Encoded_s_nop;
    FillSize = 16 * CacheLineSize;
  }

  OS << "\t.p2alignl " << Log2CacheLineSize << ", " << Encoded_pad << '\n';
  OS << "\t.fill " << (FillSize / 4) << ", 4, " << Encoded_pad << '\n';
  return true;
}

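// Print the kernel descriptor as .amdhsa_* directives. Each field is emitted
// as a plain integer when its MCExpr folds to a constant, and as the
// expression itself otherwise.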
void AMDGPUTargetAsmStreamer::EmitAmdhsaKernelDescriptor(
    const MCSubtargetInfo &STI, StringRef KernelName,
    const MCKernelDescriptor &KD, uint64_t NextVGPR, uint64_t NextSGPR,
    bool ReserveVCC, bool ReserveFlatScr) {
  IsaVersion IVersion = getIsaVersion(STI.getCPU());
  const MCAsmInfo *MAI = getContext().getAsmInfo();

  OS << "\t.amdhsa_kernel " << KernelName << '\n';

  auto PrintField = [&](const MCExpr *Expr, uint32_t Shift, uint32_t Mask,
                        StringRef Directive) {
    int64_t IVal;
    OS << "\t\t" << Directive << ' ';
    const MCExpr *pgm_rsrc1_bits =
        MCKernelDescriptor::bits_get(Expr, Shift, Mask, getContext());
    if (pgm_rsrc1_bits->evaluateAsAbsolute(IVal))
      OS << static_cast<uint64_t>(IVal);
    else
      pgm_rsrc1_bits->print(OS, MAI);
    OS << '\n';
  };

  OS << "\t\t.amdhsa_group_segment_fixed_size ";
  KD.group_segment_fixed_size->print(OS, MAI);
  OS << '\n';

  OS << "\t\t.amdhsa_private_segment_fixed_size ";
  KD.private_segment_fixed_size->print(OS, MAI);
  OS << '\n';

  OS << "\t\t.amdhsa_kernarg_size ";
  KD.kernarg_size->print(OS, MAI);
  OS << '\n';

  PrintField(
      KD.compute_pgm_rsrc2, amdhsa::COMPUTE_PGM_RSRC2_USER_SGPR_COUNT_SHIFT,
      amdhsa::COMPUTE_PGM_RSRC2_USER_SGPR_COUNT, ".amdhsa_user_sgpr_count");

  if (!hasArchitectedFlatScratch(STI))
    PrintField(
        KD.kernel_code_properties,
        amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_SHIFT,
        amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER,
        ".amdhsa_user_sgpr_private_segment_buffer");
  PrintField(KD.kernel_code_properties,
             amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_SHIFT,
             amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR,
             ".amdhsa_user_sgpr_dispatch_ptr");
  PrintField(KD.kernel_code_properties,
             amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_SHIFT,
             amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR,
             ".amdhsa_user_sgpr_queue_ptr");
  PrintField(KD.kernel_code_properties,
             amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_SHIFT,
             amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR,
             ".amdhsa_user_sgpr_kernarg_segment_ptr");
  PrintField(KD.kernel_code_properties,
             amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_SHIFT,
             amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID,
             ".amdhsa_user_sgpr_dispatch_id");
  if (!hasArchitectedFlatScratch(STI))
    PrintField(KD.kernel_code_properties,
               amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_SHIFT,
               amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT,
               ".amdhsa_user_sgpr_flat_scratch_init");
  if (hasKernargPreload(STI)) {
    PrintField(KD.kernarg_preload, amdhsa::KERNARG_PRELOAD_SPEC_LENGTH_SHIFT,
               amdhsa::KERNARG_PRELOAD_SPEC_LENGTH,
               ".amdhsa_user_sgpr_kernarg_preload_length");
    PrintField(KD.kernarg_preload, amdhsa::KERNARG_PRELOAD_SPEC_OFFSET_SHIFT,
               amdhsa::KERNARG_PRELOAD_SPEC_OFFSET,
               ".amdhsa_user_sgpr_kernarg_preload_offset");
  }
  PrintField(
      KD.kernel_code_properties,
      amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_SHIFT,
      amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE,
      ".amdhsa_user_sgpr_private_segment_size");
  if (IVersion.Major >= 10)
    PrintField(KD.kernel_code_properties,
               amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32_SHIFT,
               amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
               ".amdhsa_wavefront_size32");
  if (CodeObjectVersion >= AMDGPU::AMDHSA_COV5)
    PrintField(KD.kernel_code_properties,
               amdhsa::KERNEL_CODE_PROPERTY_USES_DYNAMIC_STACK_SHIFT,
               amdhsa::KERNEL_CODE_PROPERTY_USES_DYNAMIC_STACK,
               ".amdhsa_uses_dynamic_stack");
  PrintField(KD.compute_pgm_rsrc2,
             amdhsa::COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT_SHIFT,
             amdhsa::COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT,
             (hasArchitectedFlatScratch(STI)
                  ? ".amdhsa_enable_private_segment"
                  : ".amdhsa_system_sgpr_private_segment_wavefront_offset"));
  PrintField(KD.compute_pgm_rsrc2,
             amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X_SHIFT,
             amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X,
             ".amdhsa_system_sgpr_workgroup_id_x");
  PrintField(KD.compute_pgm_rsrc2,
             amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y_SHIFT,
             amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y,
             ".amdhsa_system_sgpr_workgroup_id_y");
  PrintField(KD.compute_pgm_rsrc2,
             amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z_SHIFT,
             amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z,
             ".amdhsa_system_sgpr_workgroup_id_z");
  PrintField(KD.compute_pgm_rsrc2,
             amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO_SHIFT,
             amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO,
             ".amdhsa_system_sgpr_workgroup_info");
  PrintField(KD.compute_pgm_rsrc2,
             amdhsa::COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID_SHIFT,
             amdhsa::COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID,
             ".amdhsa_system_vgpr_workitem_id");

  // These directives are required.
  OS << "\t\t.amdhsa_next_free_vgpr " << NextVGPR << '\n';
  OS << "\t\t.amdhsa_next_free_sgpr " << NextSGPR << '\n';

  if (AMDGPU::isGFX90A(STI)) {
    // MCExpr equivalent of (accum_offset + 1) * 4.
    const MCExpr *accum_bits = MCKernelDescriptor::bits_get(
        KD.compute_pgm_rsrc3,
        amdhsa::COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET_SHIFT,
        amdhsa::COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET, getContext());
    accum_bits = MCBinaryExpr::createAdd(
        accum_bits, MCConstantExpr::create(1, getContext()), getContext());
    accum_bits = MCBinaryExpr::createMul(
        accum_bits, MCConstantExpr::create(4, getContext()), getContext());
    OS << "\t\t.amdhsa_accum_offset ";
    int64_t IVal;
    if (accum_bits->evaluateAsAbsolute(IVal)) {
      OS << static_cast<uint64_t>(IVal);
    } else {
      accum_bits->print(OS, MAI);
    }
    OS << '\n';
  }

  if (!ReserveVCC)
    OS << "\t\t.amdhsa_reserve_vcc " << ReserveVCC << '\n';
  if (IVersion.Major >= 7 && !ReserveFlatScr && !hasArchitectedFlatScratch(STI))
    OS << "\t\t.amdhsa_reserve_flat_scratch " << ReserveFlatScr << '\n';

  switch (CodeObjectVersion) {
  default:
    break;
  case AMDGPU::AMDHSA_COV4:
  case AMDGPU::AMDHSA_COV5:
    if (getTargetID()->isXnackSupported())
      OS << "\t\t.amdhsa_reserve_xnack_mask " << getTargetID()->isXnackOnOrAny()
         << '\n';
    break;
  }

  PrintField(KD.compute_pgm_rsrc1,
             amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32_SHIFT,
             amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32,
             ".amdhsa_float_round_mode_32");
  PrintField(KD.compute_pgm_rsrc1,
             amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64_SHIFT,
             amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64,
             ".amdhsa_float_round_mode_16_64");
  PrintField(KD.compute_pgm_rsrc1,
             amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32_SHIFT,
             amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32,
             ".amdhsa_float_denorm_mode_32");
  PrintField(KD.compute_pgm_rsrc1,
             amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64_SHIFT,
             amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64,
             ".amdhsa_float_denorm_mode_16_64");
  if (IVersion.Major < 12) {
    PrintField(KD.compute_pgm_rsrc1,
               amdhsa::COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_DX10_CLAMP_SHIFT,
               amdhsa::COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_DX10_CLAMP,
               ".amdhsa_dx10_clamp");
    PrintField(KD.compute_pgm_rsrc1,
               amdhsa::COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_IEEE_MODE_SHIFT,
               amdhsa::COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_IEEE_MODE,
               ".amdhsa_ieee_mode");
  }
  if (IVersion.Major >= 9) {
    PrintField(KD.compute_pgm_rsrc1,
               amdhsa::COMPUTE_PGM_RSRC1_GFX9_PLUS_FP16_OVFL_SHIFT,
               amdhsa::COMPUTE_PGM_RSRC1_GFX9_PLUS_FP16_OVFL,
               ".amdhsa_fp16_overflow");
  }
  if (AMDGPU::isGFX90A(STI))
    PrintField(KD.compute_pgm_rsrc3,
               amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT_SHIFT,
               amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT, ".amdhsa_tg_split");
  if (IVersion.Major >= 10) {
    PrintField(KD.compute_pgm_rsrc1,
               amdhsa::COMPUTE_PGM_RSRC1_GFX10_PLUS_WGP_MODE_SHIFT,
               amdhsa::COMPUTE_PGM_RSRC1_GFX10_PLUS_WGP_MODE,
               ".amdhsa_workgroup_processor_mode");
    PrintField(KD.compute_pgm_rsrc1,
               amdhsa::COMPUTE_PGM_RSRC1_GFX10_PLUS_MEM_ORDERED_SHIFT,
               amdhsa::COMPUTE_PGM_RSRC1_GFX10_PLUS_MEM_ORDERED,
               ".amdhsa_memory_ordered");
    PrintField(KD.compute_pgm_rsrc1,
               amdhsa::COMPUTE_PGM_RSRC1_GFX10_PLUS_FWD_PROGRESS_SHIFT,
               amdhsa::COMPUTE_PGM_RSRC1_GFX10_PLUS_FWD_PROGRESS,
               ".amdhsa_forward_progress");
  }
  if (IVersion.Major >= 10 && IVersion.Major < 12) {
    PrintField(KD.compute_pgm_rsrc3,
               amdhsa::COMPUTE_PGM_RSRC3_GFX10_GFX11_SHARED_VGPR_COUNT_SHIFT,
               amdhsa::COMPUTE_PGM_RSRC3_GFX10_GFX11_SHARED_VGPR_COUNT,
               ".amdhsa_shared_vgpr_count");
  }
  if (IVersion.Major >= 12) {
    PrintField(KD.compute_pgm_rsrc1,
               amdhsa::COMPUTE_PGM_RSRC1_GFX12_PLUS_ENABLE_WG_RR_EN_SHIFT,
               amdhsa::COMPUTE_PGM_RSRC1_GFX12_PLUS_ENABLE_WG_RR_EN,
               ".amdhsa_round_robin_scheduling");
  }
  PrintField(
      KD.compute_pgm_rsrc2,
      amdhsa::
          COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION_SHIFT,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION,
      ".amdhsa_exception_fp_ieee_invalid_op");
  PrintField(
      KD.compute_pgm_rsrc2,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE_SHIFT,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE,
      ".amdhsa_exception_fp_denorm_src");
  PrintField(
      KD.compute_pgm_rsrc2,
      amdhsa::
          COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO_SHIFT,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO,
      ".amdhsa_exception_fp_ieee_div_zero");
  PrintField(
      KD.compute_pgm_rsrc2,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW_SHIFT,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW,
      ".amdhsa_exception_fp_ieee_overflow");
  PrintField(
      KD.compute_pgm_rsrc2,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW_SHIFT,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW,
      ".amdhsa_exception_fp_ieee_underflow");
  PrintField(
      KD.compute_pgm_rsrc2,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT_SHIFT,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT,
      ".amdhsa_exception_fp_ieee_inexact");
  PrintField(
      KD.compute_pgm_rsrc2,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO_SHIFT,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO,
      ".amdhsa_exception_int_div_zero");

  OS << "\t.end_amdhsa_kernel\n";
}

//===----------------------------------------------------------------------===//
// AMDGPUTargetELFStreamer
//===----------------------------------------------------------------------===//

AMDGPUTargetELFStreamer::AMDGPUTargetELFStreamer(MCStreamer &S,
                                                 const MCSubtargetInfo &STI)
    : AMDGPUTargetStreamer(S), STI(STI), Streamer(S) {}

MCELFStreamer &AMDGPUTargetELFStreamer::getStreamer() {
  return static_cast<MCELFStreamer &>(Streamer);
}

// A hook for emitting stuff at the end.
// We use it for emitting the accumulated PAL metadata as a .note record.
// The PAL metadata is reset after it is emitted.
void AMDGPUTargetELFStreamer::finish() {
  MCAssembler &MCA = getStreamer().getAssembler();
  MCA.setELFHeaderEFlags(getEFlags());
  MCA.getWriter().setOverrideABIVersion(
      getELFABIVersion(STI.getTargetTriple(), CodeObjectVersion));

  std::string Blob;
  const char *Vendor = getPALMetadata()->getVendor();
  unsigned Type = getPALMetadata()->getType();
  getPALMetadata()->toBlob(Type, Blob);
  if (Blob.empty())
    return;
  EmitNote(Vendor, MCConstantExpr::create(Blob.size(), getContext()), Type,
           [&](MCELFStreamer &OS) { OS.emitBytes(Blob); });

  // Reset the pal metadata so its data will not affect a compilation that
  // reuses this object.
  getPALMetadata()->reset();
}

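// Emit one record in the AMDGPU .note section: namesz, descsz, type, the name
// string, and the desc payload, with name and desc padded to 4-byte alignment
// as required by the ELF note format.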
void AMDGPUTargetELFStreamer::EmitNote(
    StringRef Name, const MCExpr *DescSZ, unsigned NoteType,
    function_ref<void(MCELFStreamer &)> EmitDesc) {
  auto &S = getStreamer();
  auto &Context = S.getContext();

  auto NameSZ = Name.size() + 1;

  unsigned NoteFlags = 0;
  // TODO Apparently, this is currently needed for OpenCL as mentioned in
  // https://reviews.llvm.org/D74995
  if (isHsaAbi(STI))
    NoteFlags = ELF::SHF_ALLOC;

  S.pushSection();
  S.switchSection(
      Context.getELFSection(ElfNote::SectionName, ELF::SHT_NOTE, NoteFlags));
  S.emitInt32(NameSZ);                        // namesz
  S.emitValue(DescSZ, 4);                     // descsz
  S.emitInt32(NoteType);                      // type
  S.emitBytes(Name);                          // name
  S.emitValueToAlignment(Align(4), 0, 1, 0);  // padding 0
  EmitDesc(S);                                // desc
  S.emitValueToAlignment(Align(4), 0, 1, 0);  // padding 0
  S.popSection();
}

unsigned AMDGPUTargetELFStreamer::getEFlags() {
  switch (STI.getTargetTriple().getArch()) {
  default:
    llvm_unreachable("Unsupported Arch");
  case Triple::r600:
    return getEFlagsR600();
  case Triple::amdgcn:
    return getEFlagsAMDGCN();
  }
}

unsigned AMDGPUTargetELFStreamer::getEFlagsR600() {
  assert(STI.getTargetTriple().getArch() == Triple::r600);

  return getElfMach(STI.getCPU());
}

unsigned AMDGPUTargetELFStreamer::getEFlagsAMDGCN() {
  assert(STI.getTargetTriple().getArch() == Triple::amdgcn);

  switch (STI.getTargetTriple().getOS()) {
  default:
    // TODO: Why do some tests have "mingw" listed as the OS?
    // llvm_unreachable("Unsupported OS");
  case Triple::UnknownOS:
    return getEFlagsUnknownOS();
  case Triple::AMDHSA:
    return getEFlagsAMDHSA();
  case Triple::AMDPAL:
    return getEFlagsAMDPAL();
  case Triple::Mesa3D:
    return getEFlagsMesa3D();
  }
}

unsigned AMDGPUTargetELFStreamer::getEFlagsUnknownOS() {
  // TODO: Why do some tests have "mingw" listed as the OS?
  // assert(STI.getTargetTriple().getOS() == Triple::UnknownOS);

  return getEFlagsV3();
}

unsigned AMDGPUTargetELFStreamer::getEFlagsAMDHSA() {
  assert(isHsaAbi(STI));

  if (CodeObjectVersion >= 6)
    return getEFlagsV6();
  return getEFlagsV4();
}

unsigned AMDGPUTargetELFStreamer::getEFlagsAMDPAL() {
  assert(STI.getTargetTriple().getOS() == Triple::AMDPAL);

  return getEFlagsV3();
}

unsigned AMDGPUTargetELFStreamer::getEFlagsMesa3D() {
  assert(STI.getTargetTriple().getOS() == Triple::Mesa3D);

  return getEFlagsV3();
}

unsigned AMDGPUTargetELFStreamer::getEFlagsV3() {
  unsigned EFlagsV3 = 0;

  // mach.
  EFlagsV3 |= getElfMach(STI.getCPU());

  // xnack.
  if (getTargetID()->isXnackOnOrAny())
    EFlagsV3 |= ELF::EF_AMDGPU_FEATURE_XNACK_V3;
  // sramecc.
  if (getTargetID()->isSramEccOnOrAny())
    EFlagsV3 |= ELF::EF_AMDGPU_FEATURE_SRAMECC_V3;

  return EFlagsV3;
}

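// Code object V4 e_flags encode the mach value plus the XNACK and SRAMECC
// target-feature settings (unsupported/any/off/on).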
unsigned AMDGPUTargetELFStreamer::getEFlagsV4() {
  unsigned EFlagsV4 = 0;

  // mach.
  EFlagsV4 |= getElfMach(STI.getCPU());

  // xnack.
  switch (getTargetID()->getXnackSetting()) {
  case AMDGPU::IsaInfo::TargetIDSetting::Unsupported:
    EFlagsV4 |= ELF::EF_AMDGPU_FEATURE_XNACK_UNSUPPORTED_V4;
    break;
  case AMDGPU::IsaInfo::TargetIDSetting::Any:
    EFlagsV4 |= ELF::EF_AMDGPU_FEATURE_XNACK_ANY_V4;
    break;
  case AMDGPU::IsaInfo::TargetIDSetting::Off:
    EFlagsV4 |= ELF::EF_AMDGPU_FEATURE_XNACK_OFF_V4;
    break;
  case AMDGPU::IsaInfo::TargetIDSetting::On:
    EFlagsV4 |= ELF::EF_AMDGPU_FEATURE_XNACK_ON_V4;
    break;
  }
  // sramecc.
  switch (getTargetID()->getSramEccSetting()) {
  case AMDGPU::IsaInfo::TargetIDSetting::Unsupported:
    EFlagsV4 |= ELF::EF_AMDGPU_FEATURE_SRAMECC_UNSUPPORTED_V4;
    break;
  case AMDGPU::IsaInfo::TargetIDSetting::Any:
    EFlagsV4 |= ELF::EF_AMDGPU_FEATURE_SRAMECC_ANY_V4;
    break;
  case AMDGPU::IsaInfo::TargetIDSetting::Off:
    EFlagsV4 |= ELF::EF_AMDGPU_FEATURE_SRAMECC_OFF_V4;
    break;
  case AMDGPU::IsaInfo::TargetIDSetting::On:
    EFlagsV4 |= ELF::EF_AMDGPU_FEATURE_SRAMECC_ON_V4;
    break;
  }

  return EFlagsV4;
}

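// Code object V6 extends the V4 flags with the generic code object version
// (generic_v<N>) for generic targets, encoded at
// ELF::EF_AMDGPU_GENERIC_VERSION_OFFSET.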
unsigned AMDGPUTargetELFStreamer::getEFlagsV6() {
  unsigned Flags = getEFlagsV4();

  unsigned Version = ForceGenericVersion;
  if (!Version) {
    switch (parseArchAMDGCN(STI.getCPU())) {
    case AMDGPU::GK_GFX9_GENERIC:
      Version = GenericVersion::GFX9;
      break;
    case AMDGPU::GK_GFX10_1_GENERIC:
      Version = GenericVersion::GFX10_1;
      break;
    case AMDGPU::GK_GFX10_3_GENERIC:
      Version = GenericVersion::GFX10_3;
      break;
    case AMDGPU::GK_GFX11_GENERIC:
      Version = GenericVersion::GFX11;
      break;
    default:
      break;
    }
  }

  // Versions start at 1.
  if (Version) {
    if (Version > ELF::EF_AMDGPU_GENERIC_VERSION_MAX)
      report_fatal_error("Cannot encode generic code object version " +
                         Twine(Version) +
                         " - no ELF flag can represent this version!");
    Flags |= (Version << ELF::EF_AMDGPU_GENERIC_VERSION_OFFSET);
  }

  return Flags;
}

void AMDGPUTargetELFStreamer::EmitDirectiveAMDGCNTarget() {}

void
AMDGPUTargetELFStreamer::EmitAMDKernelCodeT(const amd_kernel_code_t &Header) {

  MCStreamer &OS = getStreamer();
  OS.pushSection();
  OS.emitBytes(StringRef((const char*)&Header, sizeof(Header)));
  OS.popSection();
}

void AMDGPUTargetELFStreamer::EmitAMDGPUSymbolType(StringRef SymbolName,
                                                   unsigned Type) {
  MCSymbolELF *Symbol = cast<MCSymbolELF>(
      getStreamer().getContext().getOrCreateSymbol(SymbolName));
  Symbol->setType(Type);
}

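// Emit an LDS variable as a common symbol tagged with the special
// SHN_AMDGPU_LDS section index.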
void AMDGPUTargetELFStreamer::emitAMDGPULDS(MCSymbol *Symbol, unsigned Size,
                                            Align Alignment) {
  MCSymbolELF *SymbolELF = cast<MCSymbolELF>(Symbol);
  SymbolELF->setType(ELF::STT_OBJECT);

  if (!SymbolELF->isBindingSet()) {
    SymbolELF->setBinding(ELF::STB_GLOBAL);
    SymbolELF->setExternal(true);
  }

  if (SymbolELF->declareCommon(Size, Alignment, true)) {
    report_fatal_error("Symbol: " + Symbol->getName() +
                       " redeclared as different type");
  }

  SymbolELF->setIndex(ELF::SHN_AMDGPU_LDS);
  SymbolELF->setSize(MCConstantExpr::create(Size, getContext()));
}

bool AMDGPUTargetELFStreamer::EmitISAVersion() {
  // Create two labels to mark the beginning and end of the desc field
  // and a MCExpr to calculate the size of the desc field.
  auto &Context = getContext();
  auto *DescBegin = Context.createTempSymbol();
  auto *DescEnd = Context.createTempSymbol();
  auto *DescSZ = MCBinaryExpr::createSub(
      MCSymbolRefExpr::create(DescEnd, Context),
      MCSymbolRefExpr::create(DescBegin, Context), Context);

  EmitNote(ElfNote::NoteNameV2, DescSZ, ELF::NT_AMD_HSA_ISA_NAME,
           [&](MCELFStreamer &OS) {
             OS.emitLabel(DescBegin);
             OS.emitBytes(getTargetID()->toString());
             OS.emitLabel(DescEnd);
           });
  return true;
}

bool AMDGPUTargetELFStreamer::EmitHSAMetadata(msgpack::Document &HSAMetadataDoc,
                                              bool Strict) {
  HSAMD::V3::MetadataVerifier Verifier(Strict);
  if (!Verifier.verify(HSAMetadataDoc.getRoot()))
    return false;

  std::string HSAMetadataString;
  HSAMetadataDoc.writeToBlob(HSAMetadataString);

  // Create two labels to mark the beginning and end of the desc field
  // and a MCExpr to calculate the size of the desc field.
  auto &Context = getContext();
  auto *DescBegin = Context.createTempSymbol();
  auto *DescEnd = Context.createTempSymbol();
  auto *DescSZ = MCBinaryExpr::createSub(
      MCSymbolRefExpr::create(DescEnd, Context),
      MCSymbolRefExpr::create(DescBegin, Context), Context);

  EmitNote(ElfNote::NoteNameV3, DescSZ, ELF::NT_AMDGPU_METADATA,
           [&](MCELFStreamer &OS) {
             OS.emitLabel(DescBegin);
             OS.emitBytes(HSAMetadataString);
             OS.emitLabel(DescEnd);
           });
  return true;
}

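// Binary form of the kernarg preload header: one trap/endpgm dword followed
// by 63 s_nop dwords, matching the directive-based version above.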
bool AMDGPUTargetELFStreamer::EmitKernargPreloadHeader(
    const MCSubtargetInfo &STI, bool TrapEnabled) {
  const uint32_t Encoded_s_nop = 0xbf800000;
  const uint32_t Encoded_s_trap = 0xbf920002;
  const uint32_t Encoded_s_endpgm = 0xbf810000;
  const uint32_t TrapInstr = TrapEnabled ? Encoded_s_trap : Encoded_s_endpgm;
  MCStreamer &OS = getStreamer();
  OS.emitInt32(TrapInstr);
  for (int i = 0; i < 63; ++i) {
    OS.emitInt32(Encoded_s_nop);
  }
  return true;
}

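// Binary form of the end-of-code padding: align to the instruction cache line
// and pad with s_code_end (s_nop on gfx90a); the extra cache lines cover
// instruction prefetch past the end of the program.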
bool AMDGPUTargetELFStreamer::EmitCodeEnd(const MCSubtargetInfo &STI) {
  const uint32_t Encoded_s_code_end = 0xbf9f0000;
  const uint32_t Encoded_s_nop = 0xbf800000;
  uint32_t Encoded_pad = Encoded_s_code_end;

  // Instruction cache line size in bytes.
  const unsigned Log2CacheLineSize = AMDGPU::isGFX11Plus(STI) ? 7 : 6;
  const unsigned CacheLineSize = 1u << Log2CacheLineSize;

  // Extra padding amount in bytes to support prefetch mode 3.
  unsigned FillSize = 3 * CacheLineSize;

  if (AMDGPU::isGFX90A(STI)) {
    Encoded_pad = Encoded_s_nop;
    FillSize = 16 * CacheLineSize;
  }

  MCStreamer &OS = getStreamer();
  OS.pushSection();
  OS.emitValueToAlignment(Align(CacheLineSize), Encoded_pad, 4);
  for (unsigned I = 0; I < FillSize; I += 4)
    OS.emitInt32(Encoded_pad);
  OS.popSection();
  return true;
}

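// Emit the binary kernel descriptor at the <KernelName>.kd symbol. The
// descriptor symbol mirrors the code symbol's binding, other and visibility,
// and the code entry field is written as (kernel code) - (kernel descriptor).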
void AMDGPUTargetELFStreamer::EmitAmdhsaKernelDescriptor(
    const MCSubtargetInfo &STI, StringRef KernelName,
    const MCKernelDescriptor &KernelDescriptor, uint64_t NextVGPR,
    uint64_t NextSGPR, bool ReserveVCC, bool ReserveFlatScr) {
  auto &Streamer = getStreamer();
  auto &Context = Streamer.getContext();

  MCSymbolELF *KernelCodeSymbol = cast<MCSymbolELF>(
      Context.getOrCreateSymbol(Twine(KernelName)));
  MCSymbolELF *KernelDescriptorSymbol = cast<MCSymbolELF>(
      Context.getOrCreateSymbol(Twine(KernelName) + Twine(".kd")));

  // Copy kernel descriptor symbol's binding, other and visibility from the
  // kernel code symbol.
  KernelDescriptorSymbol->setBinding(KernelCodeSymbol->getBinding());
  KernelDescriptorSymbol->setOther(KernelCodeSymbol->getOther());
  KernelDescriptorSymbol->setVisibility(KernelCodeSymbol->getVisibility());
  // Kernel descriptor symbol's type and size are fixed.
  KernelDescriptorSymbol->setType(ELF::STT_OBJECT);
  KernelDescriptorSymbol->setSize(
      MCConstantExpr::create(sizeof(amdhsa::kernel_descriptor_t), Context));

  // The visibility of the kernel code symbol must be protected or less to
  // allow static relocations from the kernel descriptor to be used.
  if (KernelCodeSymbol->getVisibility() == ELF::STV_DEFAULT)
    KernelCodeSymbol->setVisibility(ELF::STV_PROTECTED);

  Streamer.emitLabel(KernelDescriptorSymbol);
  Streamer.emitValue(
      KernelDescriptor.group_segment_fixed_size,
      sizeof(amdhsa::kernel_descriptor_t::group_segment_fixed_size));
  Streamer.emitValue(
      KernelDescriptor.private_segment_fixed_size,
      sizeof(amdhsa::kernel_descriptor_t::private_segment_fixed_size));
  Streamer.emitValue(KernelDescriptor.kernarg_size,
                     sizeof(amdhsa::kernel_descriptor_t::kernarg_size));

  for (uint32_t i = 0; i < sizeof(amdhsa::kernel_descriptor_t::reserved0); ++i)
    Streamer.emitInt8(0u);

  // FIXME: Remove the use of VK_AMDGPU_REL64 in the expression below. The
  // expression being created is:
  //   (start of kernel code) - (start of kernel descriptor)
  // It implies R_AMDGPU_REL64, but ends up being R_AMDGPU_ABS64.
  Streamer.emitValue(
      MCBinaryExpr::createSub(
          MCSymbolRefExpr::create(KernelCodeSymbol,
                                  MCSymbolRefExpr::VK_AMDGPU_REL64, Context),
          MCSymbolRefExpr::create(KernelDescriptorSymbol,
                                  MCSymbolRefExpr::VK_None, Context),
          Context),
      sizeof(amdhsa::kernel_descriptor_t::kernel_code_entry_byte_offset));
  for (uint32_t i = 0; i < sizeof(amdhsa::kernel_descriptor_t::reserved1); ++i)
    Streamer.emitInt8(0u);
  Streamer.emitValue(KernelDescriptor.compute_pgm_rsrc3,
                     sizeof(amdhsa::kernel_descriptor_t::compute_pgm_rsrc3));
  Streamer.emitValue(KernelDescriptor.compute_pgm_rsrc1,
                     sizeof(amdhsa::kernel_descriptor_t::compute_pgm_rsrc1));
  Streamer.emitValue(KernelDescriptor.compute_pgm_rsrc2,
                     sizeof(amdhsa::kernel_descriptor_t::compute_pgm_rsrc2));
  Streamer.emitValue(
      KernelDescriptor.kernel_code_properties,
      sizeof(amdhsa::kernel_descriptor_t::kernel_code_properties));
  Streamer.emitValue(KernelDescriptor.kernarg_preload,
                     sizeof(amdhsa::kernel_descriptor_t::kernarg_preload));
  for (uint32_t i = 0; i < sizeof(amdhsa::kernel_descriptor_t::reserved3); ++i)
    Streamer.emitInt8(0u);
}