AMDHSAKernelDescriptor.h (LLVM 18.0.0git)
//===--- AMDHSAKernelDescriptor.h -----------------------------*- C++ -*---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// AMDHSA kernel descriptor definitions. For more information, visit
/// https://llvm.org/docs/AMDGPUUsage.html#kernel-descriptor
///
/// \warning
/// Any changes to this file should also be audited for corresponding changes
/// needed in both the assembler and disassembler, namely:
/// * AMDGPUAsmPrinter.{cpp,h}
/// * AMDGPUTargetStreamer.{cpp,h}
/// * AMDGPUDisassembler.{cpp,h}
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_AMDHSAKERNELDESCRIPTOR_H
#define LLVM_SUPPORT_AMDHSAKERNELDESCRIPTOR_H

#include <cstddef>
#include <cstdint>

// Gets offset of specified member in specified type.
#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE*)0)->MEMBER)
#endif // offsetof

// Creates enumeration entries used for packing bits into integers. Enumeration
// entries include bit shift amount, bit width, and bit mask.
#ifndef AMDHSA_BITS_ENUM_ENTRY
#define AMDHSA_BITS_ENUM_ENTRY(NAME, SHIFT, WIDTH) \
  NAME ## _SHIFT = (SHIFT),                        \
  NAME ## _WIDTH = (WIDTH),                        \
  NAME = (((1 << (WIDTH)) - 1) << (SHIFT))
#endif // AMDHSA_BITS_ENUM_ENTRY

// Gets bits for specified bit mask from specified source.
#ifndef AMDHSA_BITS_GET
#define AMDHSA_BITS_GET(SRC, MSK) ((SRC & MSK) >> MSK ## _SHIFT)
#endif // AMDHSA_BITS_GET

// Sets bits for specified bit mask in specified destination.
#ifndef AMDHSA_BITS_SET
#define AMDHSA_BITS_SET(DST, MSK, VAL) \
  DST &= ~MSK;                         \
  DST |= ((VAL << MSK ## _SHIFT) & MSK)
#endif // AMDHSA_BITS_SET

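// Example (illustrative sketch, not part of the upstream header): how the
// helper macros above fit together. All names in `amdhsa_bits_example` are
// hypothetical and exist only for this illustration.
namespace amdhsa_bits_example {
enum : uint32_t {
  // Expands to EXAMPLE_FIELD_SHIFT = 4, EXAMPLE_FIELD_WIDTH = 3, and
  // EXAMPLE_FIELD = 0x70 (a 3-bit mask covering bits [6:4]).
  AMDHSA_BITS_ENUM_ENTRY(EXAMPLE_FIELD, 4, 3)
};

inline uint32_t setExampleField(uint32_t Dst, uint32_t Val) {
  AMDHSA_BITS_SET(Dst, EXAMPLE_FIELD, Val); // clear bits [6:4], then insert Val
  return Dst;
}

inline uint32_t getExampleField(uint32_t Src) {
  return AMDHSA_BITS_GET(Src, EXAMPLE_FIELD); // extract bits [6:4]
}
} // namespace amdhsa_bits_example
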
namespace llvm {
namespace amdhsa {

// Floating point rounding modes. Must match hardware definition.
enum : uint8_t {
  FLOAT_ROUND_MODE_NEAR_EVEN = 0,
  FLOAT_ROUND_MODE_PLUS_INFINITY = 1,
  FLOAT_ROUND_MODE_MINUS_INFINITY = 2,
  FLOAT_ROUND_MODE_ZERO = 3,
};

// Floating point denorm modes. Must match hardware definition.
enum : uint8_t {
  FLOAT_DENORM_MODE_FLUSH_SRC_DST = 0,
  FLOAT_DENORM_MODE_FLUSH_DST = 1,
  FLOAT_DENORM_MODE_FLUSH_SRC = 2,
  FLOAT_DENORM_MODE_FLUSH_NONE = 3,
};

// System VGPR workitem IDs. Must match hardware definition.
enum : uint8_t {
  SYSTEM_VGPR_WORKITEM_ID_X = 0,
  SYSTEM_VGPR_WORKITEM_ID_X_Y = 1,
  SYSTEM_VGPR_WORKITEM_ID_X_Y_Z = 2,
  SYSTEM_VGPR_WORKITEM_ID_UNDEFINED = 3,
};

// Compute program resource register 1. Must match hardware definition.
// GFX6+.
#define COMPUTE_PGM_RSRC1(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_ ## NAME, SHIFT, WIDTH)
// [GFX6-GFX8].
#define COMPUTE_PGM_RSRC1_GFX6_GFX8(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX6_GFX8_ ## NAME, SHIFT, WIDTH)
// [GFX6-GFX9].
#define COMPUTE_PGM_RSRC1_GFX6_GFX9(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX6_GFX9_ ## NAME, SHIFT, WIDTH)
// GFX9+.
#define COMPUTE_PGM_RSRC1_GFX9_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX9_PLUS_ ## NAME, SHIFT, WIDTH)
// GFX10+.
#define COMPUTE_PGM_RSRC1_GFX10_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX10_PLUS_ ## NAME, SHIFT, WIDTH)
enum : int32_t {
  COMPUTE_PGM_RSRC1(GRANULATED_WORKITEM_VGPR_COUNT, 0, 6),
  COMPUTE_PGM_RSRC1(GRANULATED_WAVEFRONT_SGPR_COUNT, 6, 4),
  COMPUTE_PGM_RSRC1(PRIORITY, 10, 2),
  COMPUTE_PGM_RSRC1(FLOAT_ROUND_MODE_32, 12, 2),
  COMPUTE_PGM_RSRC1(FLOAT_ROUND_MODE_16_64, 14, 2),
  COMPUTE_PGM_RSRC1(FLOAT_DENORM_MODE_32, 16, 2),
  COMPUTE_PGM_RSRC1(FLOAT_DENORM_MODE_16_64, 18, 2),
  COMPUTE_PGM_RSRC1(PRIV, 20, 1),
  COMPUTE_PGM_RSRC1(ENABLE_DX10_CLAMP, 21, 1),
  COMPUTE_PGM_RSRC1(DEBUG_MODE, 22, 1),
  COMPUTE_PGM_RSRC1(ENABLE_IEEE_MODE, 23, 1),
  COMPUTE_PGM_RSRC1(BULKY, 24, 1),
  COMPUTE_PGM_RSRC1(CDBG_USER, 25, 1),
  COMPUTE_PGM_RSRC1_GFX6_GFX8(RESERVED0, 26, 1),
  COMPUTE_PGM_RSRC1_GFX9_PLUS(FP16_OVFL, 26, 1),
  COMPUTE_PGM_RSRC1(RESERVED1, 27, 2),
  COMPUTE_PGM_RSRC1_GFX6_GFX9(RESERVED2, 29, 1),
  COMPUTE_PGM_RSRC1_GFX10_PLUS(WGP_MODE, 29, 1),
  COMPUTE_PGM_RSRC1_GFX10_PLUS(MEM_ORDERED, 30, 1),
  COMPUTE_PGM_RSRC1_GFX10_PLUS(FWD_PROGRESS, 31, 1),
};
#undef COMPUTE_PGM_RSRC1

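// Example (illustrative sketch, not part of the upstream header): encoding a
// few compute_pgm_rsrc1 fields with AMDHSA_BITS_SET. `buildRsrc1Example` and
// its parameters are hypothetical names used only here; the granulated counts
// are the hardware-encoded (granule) values, not raw register counts.
inline uint32_t buildRsrc1Example(uint32_t GranulatedVgprCount,
                                  uint32_t GranulatedSgprCount) {
  uint32_t Rsrc1 = 0;
  AMDHSA_BITS_SET(Rsrc1, COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT,
                  GranulatedVgprCount);
  AMDHSA_BITS_SET(Rsrc1, COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT,
                  GranulatedSgprCount);
  AMDHSA_BITS_SET(Rsrc1, COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE, 1);
  return Rsrc1;
}
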
// Compute program resource register 2. Must match hardware definition.
#define COMPUTE_PGM_RSRC2(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_ ## NAME, SHIFT, WIDTH)
enum : int32_t {
  COMPUTE_PGM_RSRC2(ENABLE_PRIVATE_SEGMENT, 0, 1),
  COMPUTE_PGM_RSRC2(USER_SGPR_COUNT, 1, 5),
  COMPUTE_PGM_RSRC2(ENABLE_TRAP_HANDLER, 6, 1),
  COMPUTE_PGM_RSRC2(ENABLE_SGPR_WORKGROUP_ID_X, 7, 1),
  COMPUTE_PGM_RSRC2(ENABLE_SGPR_WORKGROUP_ID_Y, 8, 1),
  COMPUTE_PGM_RSRC2(ENABLE_SGPR_WORKGROUP_ID_Z, 9, 1),
  COMPUTE_PGM_RSRC2(ENABLE_SGPR_WORKGROUP_INFO, 10, 1),
  COMPUTE_PGM_RSRC2(ENABLE_VGPR_WORKITEM_ID, 11, 2),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_ADDRESS_WATCH, 13, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_MEMORY, 14, 1),
  COMPUTE_PGM_RSRC2(GRANULATED_LDS_SIZE, 15, 9),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION, 24, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_FP_DENORMAL_SOURCE, 25, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO, 26, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW, 27, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW, 28, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_INEXACT, 29, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO, 30, 1),
  COMPUTE_PGM_RSRC2(RESERVED0, 31, 1),
};
#undef COMPUTE_PGM_RSRC2

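// Example (illustrative sketch, not part of the upstream header): decoding a
// compute_pgm_rsrc2 field with AMDHSA_BITS_GET. `decodeUserSgprCount` is a
// hypothetical helper name.
inline uint32_t decodeUserSgprCount(uint32_t Rsrc2) {
  // Extracts bits [5:1], the number of user SGPRs requested by the kernel.
  return AMDHSA_BITS_GET(Rsrc2, COMPUTE_PGM_RSRC2_USER_SGPR_COUNT);
}
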
// Compute program resource register 3 for GFX90A+. Must match hardware
// definition.
#define COMPUTE_PGM_RSRC3_GFX90A(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX90A_ ## NAME, SHIFT, WIDTH)
enum : int32_t {
  COMPUTE_PGM_RSRC3_GFX90A(ACCUM_OFFSET, 0, 6),
  COMPUTE_PGM_RSRC3_GFX90A(RESERVED0, 6, 10),
  COMPUTE_PGM_RSRC3_GFX90A(TG_SPLIT, 16, 1),
  COMPUTE_PGM_RSRC3_GFX90A(RESERVED1, 17, 15),
};
#undef COMPUTE_PGM_RSRC3_GFX90A

// Compute program resource register 3 for GFX10+. Must match hardware
// definition.
// [GFX10].
#define COMPUTE_PGM_RSRC3_GFX10(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX10_ ## NAME, SHIFT, WIDTH)
// GFX10+.
#define COMPUTE_PGM_RSRC3_GFX10_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX10_PLUS_ ## NAME, SHIFT, WIDTH)
// GFX11+.
#define COMPUTE_PGM_RSRC3_GFX11_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX11_PLUS_ ## NAME, SHIFT, WIDTH)
enum : int32_t {
  COMPUTE_PGM_RSRC3_GFX10_PLUS(SHARED_VGPR_COUNT, 0, 4),
  COMPUTE_PGM_RSRC3_GFX10(RESERVED0, 4, 8),
  COMPUTE_PGM_RSRC3_GFX11_PLUS(INST_PREF_SIZE, 4, 6),
  COMPUTE_PGM_RSRC3_GFX11_PLUS(TRAP_ON_START, 10, 1),
  COMPUTE_PGM_RSRC3_GFX11_PLUS(TRAP_ON_END, 11, 1),
  COMPUTE_PGM_RSRC3_GFX10_PLUS(RESERVED1, 12, 19),
  COMPUTE_PGM_RSRC3_GFX10(RESERVED2, 31, 1),
  COMPUTE_PGM_RSRC3_GFX11_PLUS(IMAGE_OP, 31, 1),
};
#undef COMPUTE_PGM_RSRC3_GFX10_PLUS

// Kernel code properties. Must be kept backwards compatible.
#define KERNEL_CODE_PROPERTY(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(KERNEL_CODE_PROPERTY_ ## NAME, SHIFT, WIDTH)
enum : int32_t {
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER, 0, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_DISPATCH_PTR, 1, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_QUEUE_PTR, 2, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_KERNARG_SEGMENT_PTR, 3, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_DISPATCH_ID, 4, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_FLAT_SCRATCH_INIT, 5, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_PRIVATE_SEGMENT_SIZE, 6, 1),
  KERNEL_CODE_PROPERTY(RESERVED0, 7, 3),
  KERNEL_CODE_PROPERTY(ENABLE_WAVEFRONT_SIZE32, 10, 1), // GFX10+
  KERNEL_CODE_PROPERTY(USES_DYNAMIC_STACK, 11, 1),
  KERNEL_CODE_PROPERTY(RESERVED1, 12, 4),
};
#undef KERNEL_CODE_PROPERTY

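// Example (illustrative sketch, not part of the upstream header): testing a
// kernel code property bit. `usesWave32` is a hypothetical helper name.
inline bool usesWave32(uint16_t KernelCodeProperties) {
  // Non-zero when the kernel was built for wavefront size 32 (GFX10+).
  return AMDHSA_BITS_GET(KernelCodeProperties,
                         KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32) != 0;
}
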
// Kernarg preload specification.
#define KERNARG_PRELOAD_SPEC(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(KERNARG_PRELOAD_SPEC_##NAME, SHIFT, WIDTH)
enum : int32_t {
  KERNARG_PRELOAD_SPEC(LENGTH, 0, 7),
  KERNARG_PRELOAD_SPEC(OFFSET, 7, 9),
};
#undef KERNARG_PRELOAD_SPEC

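// Example (illustrative sketch, not part of the upstream header): packing the
// two kernarg preload fields defined above. `encodeKernargPreload` is a
// hypothetical helper name.
inline uint16_t encodeKernargPreload(uint16_t Length, uint16_t Offset) {
  uint16_t KernargPreload = 0;
  AMDHSA_BITS_SET(KernargPreload, KERNARG_PRELOAD_SPEC_LENGTH, Length);
  AMDHSA_BITS_SET(KernargPreload, KERNARG_PRELOAD_SPEC_OFFSET, Offset);
  return KernargPreload;
}
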
// Kernel descriptor. Must be kept backwards compatible.
struct kernel_descriptor_t {
  uint32_t group_segment_fixed_size;
  uint32_t private_segment_fixed_size;
  uint32_t kernarg_size;
  uint8_t reserved0[4];
  int64_t kernel_code_entry_byte_offset;
  uint8_t reserved1[20];
  uint32_t compute_pgm_rsrc3; // GFX10+ and GFX90A+
  uint32_t compute_pgm_rsrc1;
  uint32_t compute_pgm_rsrc2;
  uint16_t kernel_code_properties;
  uint16_t kernarg_preload;
  uint8_t reserved3[4];
};

enum : uint32_t {
  GROUP_SEGMENT_FIXED_SIZE_OFFSET = 0,
  PRIVATE_SEGMENT_FIXED_SIZE_OFFSET = 4,
  KERNARG_SIZE_OFFSET = 8,
  RESERVED0_OFFSET = 12,
  KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET = 16,
  RESERVED1_OFFSET = 24,
  COMPUTE_PGM_RSRC3_OFFSET = 44,
  COMPUTE_PGM_RSRC1_OFFSET = 48,
  COMPUTE_PGM_RSRC2_OFFSET = 52,
  KERNEL_CODE_PROPERTIES_OFFSET = 56,
  KERNARG_PRELOAD_OFFSET = 58,
  RESERVED3_OFFSET = 60,
};

static_assert(
    sizeof(kernel_descriptor_t) == 64,
    "invalid size for kernel_descriptor_t");
static_assert(offsetof(kernel_descriptor_t, group_segment_fixed_size) ==
                  GROUP_SEGMENT_FIXED_SIZE_OFFSET,
              "invalid offset for group_segment_fixed_size");
static_assert(offsetof(kernel_descriptor_t, private_segment_fixed_size) ==
                  PRIVATE_SEGMENT_FIXED_SIZE_OFFSET,
              "invalid offset for private_segment_fixed_size");
static_assert(offsetof(kernel_descriptor_t, kernarg_size) ==
                  KERNARG_SIZE_OFFSET,
              "invalid offset for kernarg_size");
static_assert(offsetof(kernel_descriptor_t, reserved0) == RESERVED0_OFFSET,
              "invalid offset for reserved0");
static_assert(offsetof(kernel_descriptor_t, kernel_code_entry_byte_offset) ==
                  KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET,
              "invalid offset for kernel_code_entry_byte_offset");
static_assert(offsetof(kernel_descriptor_t, reserved1) == RESERVED1_OFFSET,
              "invalid offset for reserved1");
static_assert(offsetof(kernel_descriptor_t, compute_pgm_rsrc3) ==
                  COMPUTE_PGM_RSRC3_OFFSET,
              "invalid offset for compute_pgm_rsrc3");
static_assert(offsetof(kernel_descriptor_t, compute_pgm_rsrc1) ==
                  COMPUTE_PGM_RSRC1_OFFSET,
              "invalid offset for compute_pgm_rsrc1");
static_assert(offsetof(kernel_descriptor_t, compute_pgm_rsrc2) ==
                  COMPUTE_PGM_RSRC2_OFFSET,
              "invalid offset for compute_pgm_rsrc2");
static_assert(offsetof(kernel_descriptor_t, kernel_code_properties) ==
                  KERNEL_CODE_PROPERTIES_OFFSET,
              "invalid offset for kernel_code_properties");
static_assert(offsetof(kernel_descriptor_t, kernarg_preload) ==
                  KERNARG_PRELOAD_OFFSET,
              "invalid offset for kernarg_preload");
static_assert(offsetof(kernel_descriptor_t, reserved3) == RESERVED3_OFFSET,
              "invalid offset for reserved3");
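
// Example (illustrative sketch, not part of the upstream header): building a
// zero-initialized descriptor and filling in a few fields.
// `makeKernelDescriptorExample` and the sample kernarg_size value are
// hypothetical; reserved fields are documented as required to be zero.
inline kernel_descriptor_t makeKernelDescriptorExample(uint32_t Rsrc1,
                                                       uint32_t Rsrc2) {
  kernel_descriptor_t KD = {}; // zero-initialize, keeping reserved bytes 0
  KD.group_segment_fixed_size = 0;   // no statically allocated LDS
  KD.private_segment_fixed_size = 0; // no fixed scratch per work-item
  KD.kernarg_size = 16;              // e.g. two 8-byte kernel arguments
  KD.compute_pgm_rsrc1 = Rsrc1;
  KD.compute_pgm_rsrc2 = Rsrc2;
  return KD;
}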

} // end namespace amdhsa
} // end namespace llvm

#endif // LLVM_SUPPORT_AMDHSAKERNELDESCRIPTOR_H