#ifndef LLVM_SUPPORT_AMDHSAKERNELDESCRIPTOR_H
#define LLVM_SUPPORT_AMDHSAKERNELDESCRIPTOR_H

#include <cstddef>
#include <cstdint>

// Gets offset of specified member in specified type.
#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE*)0)->MEMBER)
#endif // offsetof
// Creates enumeration entries used for packing bits into integers. Enumeration
// entries include bit shift amount, bit width, and bit mask.
#ifndef AMDHSA_BITS_ENUM_ENTRY
#define AMDHSA_BITS_ENUM_ENTRY(NAME, SHIFT, WIDTH) \
  NAME ## _SHIFT = (SHIFT),                        \
  NAME ## _WIDTH = (WIDTH),                        \
  NAME = (((1 << (WIDTH)) - 1) << (SHIFT))
#endif // AMDHSA_BITS_ENUM_ENTRY
// Gets bits for specified bit mask from specified source.
#ifndef AMDHSA_BITS_GET
#define AMDHSA_BITS_GET(SRC, MSK) ((SRC & MSK) >> MSK ## _SHIFT)
#endif // AMDHSA_BITS_GET
// Sets bits for specified bit mask in specified destination.
#ifndef AMDHSA_BITS_SET
#define AMDHSA_BITS_SET(DST, MSK, VAL)      \
  do {                                      \
    auto local = VAL;                       \
    DST &= ~MSK;                            \
    DST |= ((local << MSK##_SHIFT) & MSK);  \
  } while (0)
#endif // AMDHSA_BITS_SET
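
// Illustrative note (not part of the original header): AMDHSA_BITS_ENUM_ENTRY
// expands one logical field into three enumerators, and the accessor macros
// above use the generated _SHIFT and mask values. For a hypothetical field
// declared as AMDHSA_BITS_ENUM_ENTRY(EXAMPLE_FIELD, 4, 3) the expansion is:
//
//   EXAMPLE_FIELD_SHIFT = 4,
//   EXAMPLE_FIELD_WIDTH = 3,
//   EXAMPLE_FIELD = 0x70,   // mask: ((1 << 3) - 1) << 4
//
// which is then read and written in a packed word like so:
//
//   uint32_t Word = 0;
//   AMDHSA_BITS_SET(Word, EXAMPLE_FIELD, 5);             // Word == 0x50
//   uint32_t Val = AMDHSA_BITS_GET(Word, EXAMPLE_FIELD); // Val == 5
//
// EXAMPLE_FIELD is a made-up name used only for this sketch; the real entries
// are generated by the COMPUTE_PGM_RSRC* and KERNEL_CODE_PROPERTY macros below.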
// Compute program resource register 1. Must match hardware definition.
#define COMPUTE_PGM_RSRC1(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_ ## NAME, SHIFT, WIDTH)

#define COMPUTE_PGM_RSRC1_GFX6_GFX8(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX6_GFX8_ ## NAME, SHIFT, WIDTH)

#define COMPUTE_PGM_RSRC1_GFX6_GFX9(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX6_GFX9_ ## NAME, SHIFT, WIDTH)

#define COMPUTE_PGM_RSRC1_GFX6_GFX11(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX6_GFX11_##NAME, SHIFT, WIDTH)

#define COMPUTE_PGM_RSRC1_GFX9_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX9_PLUS_ ## NAME, SHIFT, WIDTH)

#define COMPUTE_PGM_RSRC1_GFX10_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX10_PLUS_ ## NAME, SHIFT, WIDTH)

#define COMPUTE_PGM_RSRC1_GFX12_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_GFX12_PLUS_##NAME, SHIFT, WIDTH)
#undef COMPUTE_PGM_RSRC1
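
// Usage sketch (not part of the original header): assuming the elided enum
// body declares COMPUTE_PGM_RSRC1(FLOAT_ROUND_MODE_32, ...), the generated
// COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32 mask decodes the 32-bit float rounding
// mode from a compute_pgm_rsrc1 word:
//
//   uint32_t Rsrc1 = KD.compute_pgm_rsrc1;   // KD is a kernel_descriptor_t
//   uint32_t RoundMode32 =
//       AMDHSA_BITS_GET(Rsrc1, COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32);
//   bool IsNearEven = (RoundMode32 == FLOAT_ROUND_MODE_NEAR_EVEN);
//
// Fields whose layout differs between targets use the suffixed macro families
// above, e.g. COMPUTE_PGM_RSRC1_GFX6_GFX8_* entries only apply to GFX6-GFX8.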
// Compute program resource register 2. Must match hardware definition.
#define COMPUTE_PGM_RSRC2(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_ ## NAME, SHIFT, WIDTH)

#define COMPUTE_PGM_RSRC2_GFX6_GFX11(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_GFX6_GFX11_##NAME, SHIFT, WIDTH)

#define COMPUTE_PGM_RSRC2_GFX12_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_GFX12_PLUS_##NAME, SHIFT, WIDTH)
#undef COMPUTE_PGM_RSRC2
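
// Usage sketch (not part of the original header): writing a rsrc2 field with
// AMDHSA_BITS_SET, assuming the elided enum body declares
// COMPUTE_PGM_RSRC2(USER_SGPR_COUNT, ...):
//
//   uint32_t Rsrc2 = 0;
//   AMDHSA_BITS_SET(Rsrc2, COMPUTE_PGM_RSRC2_USER_SGPR_COUNT, UserSGPRCount);
//   KD.compute_pgm_rsrc2 = Rsrc2;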
// Compute program resource register 3 for GFX90A+. Must match hardware
// definition.
#define COMPUTE_PGM_RSRC3_GFX90A(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX90A_ ## NAME, SHIFT, WIDTH)
#undef COMPUTE_PGM_RSRC3_GFX90A
// Compute program resource register 3 for GFX10+. Must match hardware
// definition.
#define COMPUTE_PGM_RSRC3_GFX10_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX10_PLUS_ ## NAME, SHIFT, WIDTH)

#define COMPUTE_PGM_RSRC3_GFX10(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX10_##NAME, SHIFT, WIDTH)

#define COMPUTE_PGM_RSRC3_GFX10_GFX11(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX10_GFX11_##NAME, SHIFT, WIDTH)

#define COMPUTE_PGM_RSRC3_GFX11_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX11_PLUS_ ## NAME, SHIFT, WIDTH)

#define COMPUTE_PGM_RSRC3_GFX11(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX11_##NAME, SHIFT, WIDTH)

#define COMPUTE_PGM_RSRC3_GFX12_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX12_PLUS_##NAME, SHIFT, WIDTH)
#undef COMPUTE_PGM_RSRC3_GFX10_PLUS
// Kernel code properties. Must be kept backwards compatible.
#define KERNEL_CODE_PROPERTY(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(KERNEL_CODE_PROPERTY_ ## NAME, SHIFT, WIDTH)
#undef KERNEL_CODE_PROPERTY
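
// Usage sketch (not part of the original header): kernel_code_properties is a
// 16-bit flag word; individual flags are tested with AMDHSA_BITS_GET, assuming
// the elided enum body declares
// KERNEL_CODE_PROPERTY(ENABLE_WAVEFRONT_SIZE32, ...):
//
//   uint16_t Props = KD.kernel_code_properties;
//   bool IsWave32 =
//       AMDHSA_BITS_GET(Props, KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);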
// Kernarg preload specification.
#define KERNARG_PRELOAD_SPEC(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(KERNARG_PRELOAD_SPEC_##NAME, SHIFT, WIDTH)
#undef KERNARG_PRELOAD_SPEC
static_assert(
    sizeof(kernel_descriptor_t) == 64,
    "invalid size for kernel_descriptor_t");
static_assert(
    offsetof(kernel_descriptor_t, group_segment_fixed_size) ==
        GROUP_SEGMENT_FIXED_SIZE_OFFSET,
    "invalid offset for group_segment_fixed_size");
static_assert(
    offsetof(kernel_descriptor_t, private_segment_fixed_size) ==
        PRIVATE_SEGMENT_FIXED_SIZE_OFFSET,
    "invalid offset for private_segment_fixed_size");
static_assert(
    offsetof(kernel_descriptor_t, kernarg_size) ==
        KERNARG_SIZE_OFFSET,
    "invalid offset for kernarg_size");
static_assert(
    offsetof(kernel_descriptor_t, reserved0) ==
        RESERVED0_OFFSET,
    "invalid offset for reserved0");
static_assert(
    offsetof(kernel_descriptor_t, kernel_code_entry_byte_offset) ==
        KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET,
    "invalid offset for kernel_code_entry_byte_offset");
static_assert(
    offsetof(kernel_descriptor_t, reserved1) ==
        RESERVED1_OFFSET,
    "invalid offset for reserved1");
static_assert(
    offsetof(kernel_descriptor_t, compute_pgm_rsrc3) ==
        COMPUTE_PGM_RSRC3_OFFSET,
    "invalid offset for compute_pgm_rsrc3");
static_assert(
    offsetof(kernel_descriptor_t, compute_pgm_rsrc1) ==
        COMPUTE_PGM_RSRC1_OFFSET,
    "invalid offset for compute_pgm_rsrc1");
static_assert(
    offsetof(kernel_descriptor_t, compute_pgm_rsrc2) ==
        COMPUTE_PGM_RSRC2_OFFSET,
    "invalid offset for compute_pgm_rsrc2");
static_assert(
    offsetof(kernel_descriptor_t, kernel_code_properties) ==
        KERNEL_CODE_PROPERTIES_OFFSET,
    "invalid offset for kernel_code_properties");
static_assert(
    offsetof(kernel_descriptor_t, kernarg_preload) ==
        KERNARG_PRELOAD_SPEC_OFFSET,
    "invalid offset for kernarg_preload");
static_assert(
    offsetof(kernel_descriptor_t, reserved3) ==
        RESERVED3_OFFSET,
    "invalid offset for reserved3");
#undef offsetof

#endif // LLVM_SUPPORT_AMDHSAKERNELDESCRIPTOR_H