#ifndef __CLANG_CUDA_INTRINSICS_H__
#define __CLANG_CUDA_INTRINSICS_H__
#ifndef __CUDA__
#error "This file is for CUDA compilation only."
#endif

// sm_30 intrinsics: __shfl_{up,down,xor}.

#define __SM_30_INTRINSICS_H__
#define __SM_30_INTRINSICS_HPP__

#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300

#pragma push_macro("__MAKE_SHUFFLES")
#define __MAKE_SHUFFLES(__FnName, __IntIntrinsic, __FloatIntrinsic, __Mask,    \
                        __Type)                                                \
  inline __device__ int __FnName(int __val, __Type __offset,                  \
                                 int __width = warpSize) {                     \
    return __IntIntrinsic(__val, __offset,                                    \
                          ((warpSize - __width) << 8) | (__Mask));            \
  }                                                                            \
  inline __device__ float __FnName(float __val, __Type __offset,              \
                                   int __width = warpSize) {                   \
    return __FloatIntrinsic(__val, __offset,                                  \
                            ((warpSize - __width) << 8) | (__Mask));          \
  }                                                                            \
  inline __device__ unsigned int __FnName(unsigned int __val, __Type __offset,\
                                          int __width = warpSize) {            \
    return static_cast<unsigned int>(                                         \
        ::__FnName(static_cast<int>(__val), __offset, __width));              \
  }                                                                            \
  inline __device__ long long __FnName(long long __val, __Type __offset,      \
                                       int __width = warpSize) {               \
    struct __Bits {                                                           \
      int __a, __b;                                                           \
    };                                                                        \
    _Static_assert(sizeof(__val) == sizeof(__Bits));                          \
    _Static_assert(sizeof(__Bits) == 2 * sizeof(int));                        \
    __Bits __tmp;                                                             \
    memcpy(&__tmp, &__val, sizeof(__val));                                    \
    __tmp.__a = ::__FnName(__tmp.__a, __offset, __width);                     \
    __tmp.__b = ::__FnName(__tmp.__b, __offset, __width);                     \
    long long __ret;                                                          \
    memcpy(&__ret, &__tmp, sizeof(__tmp));                                    \
    return __ret;                                                             \
  }                                                                            \
  inline __device__ long __FnName(long __val, __Type __offset,                \
                                  int __width = warpSize) {                    \
    _Static_assert(sizeof(long) == sizeof(long long) ||                       \
                   sizeof(long) == sizeof(int));                              \
    if (sizeof(long) == sizeof(long long)) {                                  \
      return static_cast<long>(                                               \
          ::__FnName(static_cast<long long>(__val), __offset, __width));      \
    } else if (sizeof(long) == sizeof(int)) {                                 \
      return static_cast<long>(                                               \
          ::__FnName(static_cast<int>(__val), __offset, __width));            \
    }                                                                         \
  }                                                                            \
  inline __device__ unsigned long __FnName(                                   \
      unsigned long __val, __Type __offset, int __width = warpSize) {          \
    return static_cast<unsigned long>(                                        \
        ::__FnName(static_cast<long>(__val), __offset, __width));             \
  }                                                                            \
  inline __device__ unsigned long long __FnName(                              \
      unsigned long long __val, __Type __offset, int __width = warpSize) {     \
    return static_cast<unsigned long long>(                                   \
        ::__FnName(static_cast<long long>(__val), __offset, __width));        \
  }                                                                            \
  inline __device__ double __FnName(double __val, __Type __offset,            \
                                    int __width = warpSize) {                  \
    long long __tmp;                                                          \
    _Static_assert(sizeof(__tmp) == sizeof(__val));                           \
    memcpy(&__tmp, &__val, sizeof(__val));                                    \
    __tmp = ::__FnName(__tmp, __offset, __width);                             \
    double __ret;                                                             \
    memcpy(&__ret, &__tmp, sizeof(__ret));                                    \
    return __ret;                                                             \
  }

__MAKE_SHUFFLES(__shfl, __nvvm_shfl_idx_i32, __nvvm_shfl_idx_f32, 0x1f,
                int);
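// Example (illustrative only, not part of this header): __shfl reads __val
// from the lane named by __offset. Broadcasting lane 0's value to the whole
// warp, assuming all 32 lanes are active:
//
//   __device__ int broadcast_lane0(int v) {
//     return __shfl(v, 0); // every lane receives lane 0's v
//   }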
// We use 0 rather than 31 as our mask, because shfl.up applies to lanes >=
// maxLane.
__MAKE_SHUFFLES(__shfl_up, __nvvm_shfl_up_i32, __nvvm_shfl_up_f32, 0,
                unsigned int);
__MAKE_SHUFFLES(__shfl_down, __nvvm_shfl_down_i32, __nvvm_shfl_down_f32, 0x1f,
                unsigned int);
__MAKE_SHUFFLES(__shfl_xor, __nvvm_shfl_bfly_i32, __nvvm_shfl_bfly_f32, 0x1f,
                int);
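// Example (illustrative only): the classic warp-wide sum built on
// __shfl_down. Each step halves the number of lanes holding a partial sum.
// Assumes a full warp of 32 active lanes; warp_sum is a hypothetical name.
//
//   __device__ int warp_sum(int v) {
//     for (int delta = warpSize / 2; delta > 0; delta /= 2)
//       v += __shfl_down(v, delta);
//     return v; // the total is valid in lane 0
//   }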
#pragma pop_macro("__MAKE_SHUFFLES")

#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300

#if CUDA_VERSION >= 9000
#if (!defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300)
// __shfl_sync_* variants available in CUDA-9
#pragma push_macro("__MAKE_SYNC_SHUFFLES")
#define __MAKE_SYNC_SHUFFLES(__FnName, __IntIntrinsic, __FloatIntrinsic,       \
                             __Mask, __Type)                                   \
  inline __device__ int __FnName(unsigned int __mask, int __val,              \
                                 __Type __offset, int __width = warpSize) {    \
    return __IntIntrinsic(__mask, __val, __offset,                            \
                          ((warpSize - __width) << 8) | (__Mask));            \
  }                                                                            \
  inline __device__ float __FnName(unsigned int __mask, float __val,          \
                                   __Type __offset, int __width = warpSize) {  \
    return __FloatIntrinsic(__mask, __val, __offset,                          \
                            ((warpSize - __width) << 8) | (__Mask));          \
  }                                                                            \
  inline __device__ unsigned int __FnName(unsigned int __mask,                \
                                          unsigned int __val, __Type __offset,\
                                          int __width = warpSize) {            \
    return static_cast<unsigned int>(                                         \
        ::__FnName(__mask, static_cast<int>(__val), __offset, __width));      \
  }                                                                            \
  inline __device__ long long __FnName(unsigned int __mask, long long __val,  \
                                       __Type __offset,                       \
                                       int __width = warpSize) {               \
    struct __Bits {                                                           \
      int __a, __b;                                                           \
    };                                                                        \
    _Static_assert(sizeof(__val) == sizeof(__Bits));                          \
    _Static_assert(sizeof(__Bits) == 2 * sizeof(int));                        \
    __Bits __tmp;                                                             \
    memcpy(&__tmp, &__val, sizeof(__val));                                    \
    __tmp.__a = ::__FnName(__mask, __tmp.__a, __offset, __width);             \
    __tmp.__b = ::__FnName(__mask, __tmp.__b, __offset, __width);             \
    long long __ret;                                                          \
    memcpy(&__ret, &__tmp, sizeof(__tmp));                                    \
    return __ret;                                                             \
  }                                                                            \
  inline __device__ unsigned long long __FnName(                              \
      unsigned int __mask, unsigned long long __val, __Type __offset,         \
      int __width = warpSize) {                                               \
    return static_cast<unsigned long long>(::__FnName(                        \
        __mask, static_cast<long long>(__val), __offset, __width));           \
  }                                                                            \
  inline __device__ long __FnName(unsigned int __mask, long __val,            \
                                  __Type __offset, int __width = warpSize) {   \
    _Static_assert(sizeof(long) == sizeof(long long) ||                       \
                   sizeof(long) == sizeof(int));                              \
    if (sizeof(long) == sizeof(long long)) {                                  \
      return static_cast<long>(::__FnName(                                    \
          __mask, static_cast<long long>(__val), __offset, __width));         \
    } else if (sizeof(long) == sizeof(int)) {                                 \
      return static_cast<long>(                                               \
          ::__FnName(__mask, static_cast<int>(__val), __offset, __width));    \
    }                                                                         \
  }                                                                            \
  inline __device__ unsigned long __FnName(                                   \
      unsigned int __mask, unsigned long __val, __Type __offset,              \
      int __width = warpSize) {                                               \
    return static_cast<unsigned long>(                                        \
        ::__FnName(__mask, static_cast<long>(__val), __offset, __width));     \
  }                                                                            \
  inline __device__ double __FnName(unsigned int __mask, double __val,        \
                                    __Type __offset, int __width = warpSize) { \
    long long __tmp;                                                          \
    _Static_assert(sizeof(__tmp) == sizeof(__val));                           \
    memcpy(&__tmp, &__val, sizeof(__val));                                    \
    __tmp = ::__FnName(__mask, __tmp, __offset, __width);                     \
    double __ret;                                                             \
    memcpy(&__ret, &__tmp, sizeof(__ret));                                    \
    return __ret;                                                             \
  }

__MAKE_SYNC_SHUFFLES(__shfl_sync, __nvvm_shfl_sync_idx_i32,
                     __nvvm_shfl_sync_idx_f32, 0x1f, int);
// We use 0 rather than 31 as our mask, because shfl.up applies to lanes >=
// maxLane.
__MAKE_SYNC_SHUFFLES(__shfl_up_sync, __nvvm_shfl_sync_up_i32,
                     __nvvm_shfl_sync_up_f32, 0, unsigned int);
__MAKE_SYNC_SHUFFLES(__shfl_down_sync, __nvvm_shfl_sync_down_i32,
                     __nvvm_shfl_sync_down_f32, 0x1f, unsigned int);
__MAKE_SYNC_SHUFFLES(__shfl_xor_sync, __nvvm_shfl_sync_bfly_i32,
                     __nvvm_shfl_sync_bfly_f32, 0x1f, int);
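// Example (illustrative only): the CUDA-9 *_sync forms take an explicit
// member mask naming the participating lanes. A butterfly (XOR) reduction
// over a full warp leaves the total in every lane:
//
//   __device__ int warp_sum_sync(int v) {
//     for (int delta = warpSize / 2; delta > 0; delta /= 2)
//       v += __shfl_xor_sync(0xffffffff, v, delta);
//     return v;
//   }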
#pragma pop_macro("__MAKE_SYNC_SHUFFLES")

inline __device__ void __syncwarp(unsigned int mask = 0xffffffff) {
  return __nvvm_bar_warp_sync(mask);
}
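// Example (illustrative only): __syncwarp re-converges the lanes named in
// the mask, e.g. between a divergent write and a read of shared memory.
// shared_buf and compute() below are hypothetical:
//
//   if (threadIdx.x % 2 == 0)
//     shared_buf[threadIdx.x] = compute();
//   __syncwarp(); // default mask 0xffffffff: wait for all 32 lanes
//   int v = shared_buf[threadIdx.x ^ 1];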
inline __device__ void __barrier_sync(unsigned int id) {
  __nvvm_barrier_sync(id);
}

inline __device__ void __barrier_sync_count(unsigned int id,
                                            unsigned int count) {
  __nvvm_barrier_sync_cnt(id, count);
}

inline __device__ int __all_sync(unsigned int mask, int pred) {
  return __nvvm_vote_all_sync(mask, pred);
}

inline __device__ int __any_sync(unsigned int mask, int pred) {
  return __nvvm_vote_any_sync(mask, pred);
}

inline __device__ int __uni_sync(unsigned int mask, int pred) {
  return __nvvm_vote_uni_sync(mask, pred);
}

inline __device__ unsigned int __ballot_sync(unsigned int mask, int pred) {
  return __nvvm_vote_ballot_sync(mask, pred);
}
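// Example (illustrative only): the vote intrinsics evaluate a predicate
// across the lanes named in the mask. Counting the lanes that satisfy it
// combines __ballot_sync with a population count:
//
//   unsigned ballot = __ballot_sync(0xffffffff, pred); // bit i = lane i's pred
//   int count = __popc(ballot);                        // lanes with pred != 0
//   bool everyone = __all_sync(0xffffffff, pred) != 0;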
inline __device__ unsigned int __activemask() { return __nvvm_vote_ballot(1); }
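// Example (illustrative only): __activemask reports the lanes that are
// converged at the call site; a common use is electing a leader among the
// currently active lanes (lane id computed assuming a 1-D block):
//
//   unsigned active = __activemask();
//   int lane = threadIdx.x % 32;
//   bool is_leader = lane == __ffs(active) - 1; // lowest active lane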
inline __device__ unsigned int __fns(unsigned mask, unsigned base, int offset) {
  return __nvvm_fns(mask, base, offset);
}

#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300

// Define __match* builtins CUDA-9 headers expect to see.
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
inline __device__ unsigned int __match32_any_sync(unsigned int mask,
                                                  unsigned int value) {
  return __nvvm_match_any_sync_i32(mask, value);
}
inline __device__ unsigned long long
__match64_any_sync(unsigned int mask, unsigned long long value) {
  return __nvvm_match_any_sync_i64(mask, value);
}

inline __device__ unsigned int
__match32_all_sync(unsigned int mask, unsigned int value, int *pred) {
  return __nvvm_match_all_sync_i32p(mask, value, pred);
}

inline __device__ unsigned long long
__match64_all_sync(unsigned int mask, unsigned long long value, int *pred) {
  return __nvvm_match_all_sync_i64p(mask, value, pred);
}
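// Example (illustrative only, sm_70+): __match32_any_sync tells each lane
// which other lanes hold the same 32-bit value, e.g. to deduplicate atomic
// updates so that only one lane per distinct key performs them:
//
//   unsigned peers = __match32_any_sync(0xffffffff, key);
//   int lane = threadIdx.x % 32;                // assumes a 1-D block
//   bool is_leader = lane == __ffs(peers) - 1;  // lowest lane in the group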
#include "crt/sm_70_rt.hpp"

#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
#endif // CUDA_VERSION >= 9000

// sm_32 intrinsics: __ldg and __funnelshift_{l,lc,r,rc}.

// Prevent the vanilla sm_32 intrinsics header from being included.
#define __SM_32_INTRINSICS_H__
#define __SM_32_INTRINSICS_HPP__

#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320

inline __device__ char __ldg(const char *ptr) { return __nvvm_ldg_c(ptr); }
inline __device__ short __ldg(const short *ptr) { return __nvvm_ldg_s(ptr); }
inline __device__ int __ldg(const int *ptr) { return __nvvm_ldg_i(ptr); }
inline __device__ long __ldg(const long *ptr) { return __nvvm_ldg_l(ptr); }
inline __device__ long long __ldg(const long long *ptr) {
  return __nvvm_ldg_ll(ptr);
}
inline __device__ unsigned char __ldg(const unsigned char *ptr) {
  return __nvvm_ldg_uc(ptr);
}
inline __device__ signed char __ldg(const signed char *ptr) {
  return __nvvm_ldg_uc((const unsigned char *)ptr);
}
inline __device__ unsigned short __ldg(const unsigned short *ptr) {
  return __nvvm_ldg_us(ptr);
}
inline __device__ unsigned int __ldg(const unsigned int *ptr) {
  return __nvvm_ldg_ui(ptr);
}
inline __device__ unsigned long __ldg(const unsigned long *ptr) {
  return __nvvm_ldg_ul(ptr);
}
inline __device__ unsigned long long __ldg(const unsigned long long *ptr) {
  return __nvvm_ldg_ull(ptr);
}
inline __device__ float __ldg(const float *ptr) { return __nvvm_ldg_f(ptr); }
inline __device__ double __ldg(const double *ptr) { return __nvvm_ldg_d(ptr); }
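// Example (illustrative only): __ldg loads through the read-only data cache
// and is appropriate for memory that no thread writes for the duration of
// the kernel:
//
//   __global__ void scale(float *out, const float *in, int n) {
//     int i = blockIdx.x * blockDim.x + threadIdx.x;
//     if (i < n)
//       out[i] = 2.0f * __ldg(&in[i]); // cached, read-only load
//   }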
// Vector overloads: perform the read-only cache load as a Clang
// ext_vector_type and repack the result into the corresponding CUDA vector
// struct. This is safe as long as the ext_vector_type's alignment does not
// exceed the CUDA vector type's alignment.
inline __device__ char2 __ldg(const char2 *ptr) {
  typedef char c2 __attribute__((ext_vector_type(2)));
  c2 rv = __nvvm_ldg_c2(reinterpret_cast<const c2 *>(ptr));
  char2 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  return ret;
}
inline __device__ char4 __ldg(const char4 *ptr) {
  typedef char c4 __attribute__((ext_vector_type(4)));
  c4 rv = __nvvm_ldg_c4(reinterpret_cast<const c4 *>(ptr));
  char4 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  ret.z = rv[2];
  ret.w = rv[3];
  return ret;
}
inline __device__ short2 __ldg(const short2 *ptr) {
  typedef short s2 __attribute__((ext_vector_type(2)));
  s2 rv = __nvvm_ldg_s2(reinterpret_cast<const s2 *>(ptr));
  short2 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  return ret;
}
inline __device__ short4 __ldg(const short4 *ptr) {
  typedef short s4 __attribute__((ext_vector_type(4)));
  s4 rv = __nvvm_ldg_s4(reinterpret_cast<const s4 *>(ptr));
  short4 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  ret.z = rv[2];
  ret.w = rv[3];
  return ret;
}
inline __device__ int2 __ldg(const int2 *ptr) {
  typedef int i2 __attribute__((ext_vector_type(2)));
  i2 rv = __nvvm_ldg_i2(reinterpret_cast<const i2 *>(ptr));
  int2 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  return ret;
}
inline __device__ int4 __ldg(const int4 *ptr) {
  typedef int i4 __attribute__((ext_vector_type(4)));
  i4 rv = __nvvm_ldg_i4(reinterpret_cast<const i4 *>(ptr));
  int4 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  ret.z = rv[2];
  ret.w = rv[3];
  return ret;
}
inline __device__ longlong2 __ldg(const longlong2 *ptr) {
  typedef long long ll2 __attribute__((ext_vector_type(2)));
  ll2 rv = __nvvm_ldg_ll2(reinterpret_cast<const ll2 *>(ptr));
  longlong2 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  return ret;
}
inline __device__ uchar2 __ldg(const uchar2 *ptr) {
  typedef unsigned char uc2 __attribute__((ext_vector_type(2)));
  uc2 rv = __nvvm_ldg_uc2(reinterpret_cast<const uc2 *>(ptr));
  uchar2 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  return ret;
}
inline __device__ uchar4 __ldg(const uchar4 *ptr) {
  typedef unsigned char uc4 __attribute__((ext_vector_type(4)));
  uc4 rv = __nvvm_ldg_uc4(reinterpret_cast<const uc4 *>(ptr));
  uchar4 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  ret.z = rv[2];
  ret.w = rv[3];
  return ret;
}
inline __device__ ushort2 __ldg(const ushort2 *ptr) {
  typedef unsigned short us2 __attribute__((ext_vector_type(2)));
  us2 rv = __nvvm_ldg_us2(reinterpret_cast<const us2 *>(ptr));
  ushort2 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  return ret;
}
inline __device__ ushort4 __ldg(const ushort4 *ptr) {
  typedef unsigned short us4 __attribute__((ext_vector_type(4)));
  us4 rv = __nvvm_ldg_us4(reinterpret_cast<const us4 *>(ptr));
  ushort4 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  ret.z = rv[2];
  ret.w = rv[3];
  return ret;
}
inline __device__ uint2 __ldg(const uint2 *ptr) {
  typedef unsigned int ui2 __attribute__((ext_vector_type(2)));
  ui2 rv = __nvvm_ldg_ui2(reinterpret_cast<const ui2 *>(ptr));
  uint2 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  return ret;
}
inline __device__ uint4 __ldg(const uint4 *ptr) {
  typedef unsigned int ui4 __attribute__((ext_vector_type(4)));
  ui4 rv = __nvvm_ldg_ui4(reinterpret_cast<const ui4 *>(ptr));
  uint4 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  ret.z = rv[2];
  ret.w = rv[3];
  return ret;
}
inline __device__ ulonglong2 __ldg(const ulonglong2 *ptr) {
  typedef unsigned long long ull2 __attribute__((ext_vector_type(2)));
  ull2 rv = __nvvm_ldg_ull2(reinterpret_cast<const ull2 *>(ptr));
  ulonglong2 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  return ret;
}
inline __device__ float2 __ldg(const float2 *ptr) {
  typedef float f2 __attribute__((ext_vector_type(2)));
  f2 rv = __nvvm_ldg_f2(reinterpret_cast<const f2 *>(ptr));
  float2 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  return ret;
}
inline __device__ float4 __ldg(const float4 *ptr) {
  typedef float f4 __attribute__((ext_vector_type(4)));
  f4 rv = __nvvm_ldg_f4(reinterpret_cast<const f4 *>(ptr));
  float4 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  ret.z = rv[2];
  ret.w = rv[3];
  return ret;
}
inline __device__ double2 __ldg(const double2 *ptr) {
  typedef double d2 __attribute__((ext_vector_type(2)));
  d2 rv = __nvvm_ldg_d2(reinterpret_cast<const d2 *>(ptr));
  double2 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  return ret;
}
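// Example (illustrative only): the vector overloads fetch several elements
// in a single cached load; the pointer must be aligned to the vector type
// (16 bytes for float4):
//
//   float4 v = __ldg(&in4[i]); // in4 is a const float4 * into read-only data
//   float sum = v.x + v.y + v.z + v.w;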
// Funnel shifts treat {high32:low32} as one 64-bit value. __funnelshift_l
// returns the most-significant 32 bits of the left shift; __funnelshift_r
// returns the least-significant 32 bits of the right shift. The plain forms
// wrap shiftWidth modulo 32; the *c forms clamp it to at most 32.
inline __device__ unsigned __funnelshift_l(unsigned low32, unsigned high32,
                                           unsigned shiftWidth) {
  unsigned result;
  asm("shf.l.wrap.b32 %0, %1, %2, %3;"
      : "=r"(result)
      : "r"(low32), "r"(high32), "r"(shiftWidth));
  return result;
}
inline __device__ unsigned __funnelshift_lc(unsigned low32, unsigned high32,
                                            unsigned shiftWidth) {
  unsigned result;
  asm("shf.l.clamp.b32 %0, %1, %2, %3;"
      : "=r"(result)
      : "r"(low32), "r"(high32), "r"(shiftWidth));
  return result;
}
inline __device__ unsigned __funnelshift_r(unsigned low32, unsigned high32,
                                           unsigned shiftWidth) {
  unsigned result;
  asm("shf.r.wrap.b32 %0, %1, %2, %3;"
      : "=r"(result)
      : "r"(low32), "r"(high32), "r"(shiftWidth));
  return result;
}
inline __device__ unsigned __funnelshift_rc(unsigned low32, unsigned high32,
                                            unsigned shiftWidth) {
  unsigned result;
  asm("shf.r.clamp.b32 %0, %1, %2, %3;"
      : "=r"(result)
      : "r"(low32), "r"(high32), "r"(shiftWidth));
  return result;
}
#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320

#endif // defined(__CLANG_CUDA_INTRINSICS_H__)