13 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
14 #ifndef cl_khr_depth_images
15 #define cl_khr_depth_images
16 #endif //cl_khr_depth_images
17 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
19 #if __OPENCL_C_VERSION__ < CL_VERSION_2_0
20 #ifdef cl_khr_3d_image_writes
21 #pragma OPENCL EXTENSION cl_khr_3d_image_writes : enable
22 #endif //cl_khr_3d_image_writes
23 #endif //__OPENCL_C_VERSION__ < CL_VERSION_2_0
25 #define __ovld __attribute__((overloadable))
26 #define __conv __attribute__((convergent))
29 #define __purefn __attribute__((pure))
30 #define __cnfn __attribute__((const))
131 #pragma OPENCL EXTENSION cl_khr_fp16 : enable
139 #if __OPENCL_C_VERSION__ < CL_VERSION_1_2
140 #pragma OPENCL EXTENSION cl_khr_fp64 : enable
146 typedef double double16 __attribute__((ext_vector_type(16)));
149 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
150 #define NULL ((void*)0)
157 #define MAXFLOAT 0x1.fffffep127f
164 #define HUGE_VALF (__builtin_huge_valf())
171 #define HUGE_VAL (__builtin_huge_val())
177 #define INFINITY (__builtin_inff())
182 #define NAN as_float(INT_MAX)
184 #define FP_ILOGB0 INT_MIN
185 #define FP_ILOGBNAN INT_MAX
188 #define FLT_MANT_DIG 24
189 #define FLT_MAX_10_EXP +38
190 #define FLT_MAX_EXP +128
191 #define FLT_MIN_10_EXP -37
192 #define FLT_MIN_EXP -125
194 #define FLT_MAX 0x1.fffffep127f
195 #define FLT_MIN 0x1.0p-126f
196 #define FLT_EPSILON 0x1.0p-23f
198 #define M_E_F 2.71828182845904523536028747135266250f
199 #define M_LOG2E_F 1.44269504088896340735992468100189214f
200 #define M_LOG10E_F 0.434294481903251827651128918916605082f
201 #define M_LN2_F 0.693147180559945309417232121458176568f
202 #define M_LN10_F 2.30258509299404568401799145468436421f
203 #define M_PI_F 3.14159265358979323846264338327950288f
204 #define M_PI_2_F 1.57079632679489661923132169163975144f
205 #define M_PI_4_F 0.785398163397448309615660845819875721f
206 #define M_1_PI_F 0.318309886183790671537767526745028724f
207 #define M_2_PI_F 0.636619772367581343075535053490057448f
208 #define M_2_SQRTPI_F 1.12837916709551257389615890312154517f
209 #define M_SQRT2_F 1.41421356237309504880168872420969808f
210 #define M_SQRT1_2_F 0.707106781186547524400844362104849039f
213 #define DBL_MANT_DIG 53
214 #define DBL_MAX_10_EXP +308
215 #define DBL_MAX_EXP +1024
216 #define DBL_MIN_10_EXP -307
217 #define DBL_MIN_EXP -1021
219 #define DBL_MAX 0x1.fffffffffffffp1023
220 #define DBL_MIN 0x1.0p-1022
221 #define DBL_EPSILON 0x1.0p-52
223 #define M_E 0x1.5bf0a8b145769p+1
224 #define M_LOG2E 0x1.71547652b82fep+0
225 #define M_LOG10E 0x1.bcb7b1526e50ep-2
226 #define M_LN2 0x1.62e42fefa39efp-1
227 #define M_LN10 0x1.26bb1bbb55516p+1
228 #define M_PI 0x1.921fb54442d18p+1
229 #define M_PI_2 0x1.921fb54442d18p+0
230 #define M_PI_4 0x1.921fb54442d18p-1
231 #define M_1_PI 0x1.45f306dc9c883p-2
232 #define M_2_PI 0x1.45f306dc9c883p-1
233 #define M_2_SQRTPI 0x1.20dd750429b6dp+0
234 #define M_SQRT2 0x1.6a09e667f3bcdp+0
235 #define M_SQRT1_2 0x1.6a09e667f3bcdp-1
240 #define HALF_MANT_DIG 11
241 #define HALF_MAX_10_EXP +4
242 #define HALF_MAX_EXP +16
243 #define HALF_MIN_10_EXP -4
244 #define HALF_MIN_EXP -13
246 #define HALF_MAX ((0x1.ffcp15h))
247 #define HALF_MIN ((0x1.0p-14h))
248 #define HALF_EPSILON ((0x1.0p-10h))
250 #define M_E_H 2.71828182845904523536028747135266250h
251 #define M_LOG2E_H 1.44269504088896340735992468100189214h
252 #define M_LOG10E_H 0.434294481903251827651128918916605082h
253 #define M_LN2_H 0.693147180559945309417232121458176568h
254 #define M_LN10_H 2.30258509299404568401799145468436421h
255 #define M_PI_H 3.14159265358979323846264338327950288h
256 #define M_PI_2_H 1.57079632679489661923132169163975144h
257 #define M_PI_4_H 0.785398163397448309615660845819875721h
258 #define M_1_PI_H 0.318309886183790671537767526745028724h
259 #define M_2_PI_H 0.636619772367581343075535053490057448h
260 #define M_2_SQRTPI_H 1.12837916709551257389615890312154517h
261 #define M_SQRT2_H 1.41421356237309504880168872420969808h
262 #define M_SQRT1_2_H 0.707106781186547524400844362104849039h
267 #define SCHAR_MAX 127
268 #define SCHAR_MIN (-128)
269 #define UCHAR_MAX 255
270 #define CHAR_MAX SCHAR_MAX
271 #define CHAR_MIN SCHAR_MIN
272 #define USHRT_MAX 65535
273 #define SHRT_MAX 32767
274 #define SHRT_MIN (-32768)
275 #define UINT_MAX 0xffffffff
276 #define INT_MAX 2147483647
277 #define INT_MIN (-2147483647-1)
278 #define ULONG_MAX 0xffffffffffffffffUL
279 #define LONG_MAX 0x7fffffffffffffffL
280 #define LONG_MIN (-0x7fffffffffffffffL-1)
5698 #endif //cl_khr_fp64
6579 #endif //cl_khr_fp64
6581 #endif // cl_khr_fp16
6587 #define as_char(x) __builtin_astype((x), char)
6588 #define as_char2(x) __builtin_astype((x), char2)
6589 #define as_char3(x) __builtin_astype((x), char3)
6590 #define as_char4(x) __builtin_astype((x), char4)
6591 #define as_char8(x) __builtin_astype((x), char8)
6592 #define as_char16(x) __builtin_astype((x), char16)
6594 #define as_uchar(x) __builtin_astype((x), uchar)
6595 #define as_uchar2(x) __builtin_astype((x), uchar2)
6596 #define as_uchar3(x) __builtin_astype((x), uchar3)
6597 #define as_uchar4(x) __builtin_astype((x), uchar4)
6598 #define as_uchar8(x) __builtin_astype((x), uchar8)
6599 #define as_uchar16(x) __builtin_astype((x), uchar16)
6601 #define as_short(x) __builtin_astype((x), short)
6602 #define as_short2(x) __builtin_astype((x), short2)
6603 #define as_short3(x) __builtin_astype((x), short3)
6604 #define as_short4(x) __builtin_astype((x), short4)
6605 #define as_short8(x) __builtin_astype((x), short8)
6606 #define as_short16(x) __builtin_astype((x), short16)
6608 #define as_ushort(x) __builtin_astype((x), ushort)
6609 #define as_ushort2(x) __builtin_astype((x), ushort2)
6610 #define as_ushort3(x) __builtin_astype((x), ushort3)
6611 #define as_ushort4(x) __builtin_astype((x), ushort4)
6612 #define as_ushort8(x) __builtin_astype((x), ushort8)
6613 #define as_ushort16(x) __builtin_astype((x), ushort16)
6615 #define as_int(x) __builtin_astype((x), int)
6616 #define as_int2(x) __builtin_astype((x), int2)
6617 #define as_int3(x) __builtin_astype((x), int3)
6618 #define as_int4(x) __builtin_astype((x), int4)
6619 #define as_int8(x) __builtin_astype((x), int8)
6620 #define as_int16(x) __builtin_astype((x), int16)
6622 #define as_uint(x) __builtin_astype((x), uint)
6623 #define as_uint2(x) __builtin_astype((x), uint2)
6624 #define as_uint3(x) __builtin_astype((x), uint3)
6625 #define as_uint4(x) __builtin_astype((x), uint4)
6626 #define as_uint8(x) __builtin_astype((x), uint8)
6627 #define as_uint16(x) __builtin_astype((x), uint16)
6629 #define as_long(x) __builtin_astype((x), long)
6630 #define as_long2(x) __builtin_astype((x), long2)
6631 #define as_long3(x) __builtin_astype((x), long3)
6632 #define as_long4(x) __builtin_astype((x), long4)
6633 #define as_long8(x) __builtin_astype((x), long8)
6634 #define as_long16(x) __builtin_astype((x), long16)
6636 #define as_ulong(x) __builtin_astype((x), ulong)
6637 #define as_ulong2(x) __builtin_astype((x), ulong2)
6638 #define as_ulong3(x) __builtin_astype((x), ulong3)
6639 #define as_ulong4(x) __builtin_astype((x), ulong4)
6640 #define as_ulong8(x) __builtin_astype((x), ulong8)
6641 #define as_ulong16(x) __builtin_astype((x), ulong16)
6643 #define as_float(x) __builtin_astype((x), float)
6644 #define as_float2(x) __builtin_astype((x), float2)
6645 #define as_float3(x) __builtin_astype((x), float3)
6646 #define as_float4(x) __builtin_astype((x), float4)
6647 #define as_float8(x) __builtin_astype((x), float8)
6648 #define as_float16(x) __builtin_astype((x), float16)
6651 #define as_double(x) __builtin_astype((x), double)
6652 #define as_double2(x) __builtin_astype((x), double2)
6653 #define as_double3(x) __builtin_astype((x), double3)
6654 #define as_double4(x) __builtin_astype((x), double4)
6655 #define as_double8(x) __builtin_astype((x), double8)
6656 #define as_double16(x) __builtin_astype((x), double16)
6657 #endif //cl_khr_fp64
6660 #define as_half(x) __builtin_astype((x), half)
6661 #define as_half2(x) __builtin_astype((x), half2)
6662 #define as_half3(x) __builtin_astype((x), half3)
6663 #define as_half4(x) __builtin_astype((x), half4)
6664 #define as_half8(x) __builtin_astype((x), half8)
6665 #define as_half16(x) __builtin_astype((x), half16)
6666 #endif //cl_khr_fp16
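// Usage sketch: the as_<type> macros reinterpret the bit pattern of a value
// (no numeric conversion, unlike a cast). The kernel below is a hypothetical
// illustration, assuming matching float/uint buffers.
__kernel void exponent_field(__global const float *in, __global uint *out) {
    size_t gid = get_global_id(0);
    uint bits = as_uint(in[gid]);       // raw IEEE-754 bits of the float
    out[gid] = (bits >> 23) & 0xffu;    // extract the biased exponent field
}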
6670 #define __kernel_exec(X, typen) __kernel \
6671 __attribute__((work_group_size_hint(X, 1, 1))) \
6672 __attribute__((vec_type_hint(typen)))
6674 #define kernel_exec(X, typen) __kernel \
6675 __attribute__((work_group_size_hint(X, 1, 1))) \
6676 __attribute__((vec_type_hint(typen)))
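// Usage sketch: __kernel_exec(X, typen) expands to the kernel qualifier plus a
// work_group_size_hint(X, 1, 1) and a vec_type_hint(typen). The kernel below is
// a hypothetical example of applying it.
__kernel_exec(64, float4) void scale_by(__global float4 *buf, float k) {
    buf[get_global_id(0)] *= k;
}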
6763 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
6767 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
6787 #endif //cl_khr_fp64
6795 #endif //cl_khr_fp16
6813 #endif //cl_khr_fp64
6821 #endif //cl_khr_fp16
6839 #endif //cl_khr_fp64
6847 #endif //cl_khr_fp16
6865 #endif //cl_khr_fp64
6873 #endif //cl_khr_fp16
6891 #endif //cl_khr_fp64
6899 #endif //cl_khr_fp16
6917 #endif //cl_khr_fp64
6925 #endif //cl_khr_fp16
6943 #endif //cl_khr_fp64
6951 #endif //cl_khr_fp16
6969 #endif //cl_khr_fp64
6977 #endif //cl_khr_fp16
6995 #endif //cl_khr_fp64
7003 #endif //cl_khr_fp16
7021 #endif //cl_khr_fp64
7029 #endif //cl_khr_fp16
7047 #endif //cl_khr_fp64
7055 #endif //cl_khr_fp16
7073 #endif //cl_khr_fp64
7081 #endif //cl_khr_fp16
7100 #endif //cl_khr_fp64
7108 #endif //cl_khr_fp16
7126 #endif //cl_khr_fp64
7134 #endif //cl_khr_fp16
7152 #endif //cl_khr_fp64
7160 #endif //cl_khr_fp16
7178 #endif //cl_khr_fp64
7186 #endif //cl_khr_fp16
7204 #endif //cl_khr_fp64
7212 #endif //cl_khr_fp16
7230 #endif //cl_khr_fp64
7238 #endif //cl_khr_fp16
7257 #endif //cl_khr_fp64
7265 #endif //cl_khr_fp16
7283 #endif //cl_khr_fp64
7291 #endif //cl_khr_fp16
7309 #endif //cl_khr_fp64
7317 #endif //cl_khr_fp16
7335 #endif //cl_khr_fp64
7343 #endif //cl_khr_fp16
7361 #endif //cl_khr_fp64
7369 #endif //cl_khr_fp16
7387 #endif //cl_khr_fp64
7395 #endif //cl_khr_fp16
7413 #endif //cl_khr_fp64
7421 #endif //cl_khr_fp16
7440 #endif //cl_khr_fp64
7448 #endif //cl_khr_fp16
7470 #endif //cl_khr_fp64
7478 #endif //cl_khr_fp16
7509 #endif //cl_khr_fp64
7522 #endif //cl_khr_fp16
7553 #endif //cl_khr_fp64
7566 #endif //cl_khr_fp16
7584 #endif //cl_khr_fp64
7592 #endif //cl_khr_fp16
7598 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
7611 double16 __ovld fract(double16 x, double16 *iptr);
7612 #endif //cl_khr_fp64
7620 #endif //cl_khr_fp16
7622 float __ovld fract(float x, __global float *iptr);
7623 float2 __ovld fract(float2 x, __global float2 *iptr);
7624 float3 __ovld fract(float3 x, __global float3 *iptr);
7625 float4 __ovld fract(float4 x, __global float4 *iptr);
7626 float8 __ovld fract(float8 x, __global float8 *iptr);
7627 float16 __ovld fract(float16 x, __global float16 *iptr);
7629 float2 __ovld fract(float2 x, __local float2 *iptr);
7630 float3 __ovld fract(float3 x, __local float3 *iptr);
7631 float4 __ovld fract(float4 x, __local float4 *iptr);
7632 float8 __ovld fract(float8 x, __local float8 *iptr);
7633 float16 __ovld fract(float16 x, __local float16 *iptr);
7634 float __ovld fract(float x, __private float *iptr);
7635 float2 __ovld fract(float2 x, __private float2 *iptr);
7636 float3 __ovld fract(float3 x, __private float3 *iptr);
7637 float4 __ovld fract(float4 x, __private float4 *iptr);
7638 float8 __ovld fract(float8 x, __private float8 *iptr);
7639 float16 __ovld fract(float16 x, __private float16 *iptr);
7641 double __ovld fract(double x, __global double *iptr);
7642 double2 __ovld fract(double2 x, __global double2 *iptr);
7643 double3 __ovld fract(double3 x, __global double3 *iptr);
7644 double4 __ovld fract(double4 x, __global double4 *iptr);
7645 double8 __ovld fract(double8 x, __global double8 *iptr);
7646 double16 __ovld fract(double16 x, __global double16 *iptr);
7647 double __ovld fract(double x, __local double *iptr);
7648 double2 __ovld fract(double2 x, __local double2 *iptr);
7649 double3 __ovld fract(double3 x, __local double3 *iptr);
7650 double4 __ovld fract(double4 x, __local double4 *iptr);
7651 double8 __ovld fract(double8 x, __local double8 *iptr);
7652 double16 __ovld fract(double16 x, __local double16 *iptr);
7653 double __ovld fract(double x, __private double *iptr);
7654 double2 __ovld fract(double2 x, __private double2 *iptr);
7655 double3 __ovld fract(double3 x, __private double3 *iptr);
7656 double4 __ovld fract(double4 x, __private double4 *iptr);
7657 double8 __ovld fract(double8 x, __private double8 *iptr);
7658 double16 __ovld fract(double16 x, __private double16 *iptr);
7659 #endif //cl_khr_fp64
7662 half2 __ovld fract(half2 x, __global half2 *iptr);
7663 half3 __ovld fract(half3 x, __global half3 *iptr);
7664 half4 __ovld fract(half4 x, __global half4 *iptr);
7665 half8 __ovld fract(half8 x, __global half8 *iptr);
7666 half16 __ovld fract(half16 x, __global half16 *iptr);
7672 half16 __ovld fract(half16 x, __local half16 *iptr);
7674 half2 __ovld fract(half2 x, __private half2 *iptr);
7675 half3 __ovld fract(half3 x, __private half3 *iptr);
7676 half4 __ovld fract(half4 x, __private half4 *iptr);
7677 half8 __ovld fract(half8 x, __private half8 *iptr);
7678 half16 __ovld fract(half16 x, __private half16 *iptr);
7679 #endif //cl_khr_fp16
7680 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
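// Usage sketch: fract(x, iptr) returns fmin(x - floor(x), 0x1.fffffep-1f) and
// writes floor(x) through iptr. The kernel below is a hypothetical example
// using the __private overload (a pointer to a local variable).
__kernel void split_fractional(__global const float *x,
                               __global float *frac_part,
                               __global float *floor_part) {
    size_t i = get_global_id(0);
    float fl;                        // __private by default inside a kernel
    frac_part[i] = fract(x[i], &fl);
    floor_part[i] = fl;
}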
7688 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
7702 #endif //cl_khr_fp64
7710 #endif //cl_khr_fp16
7749 #endif //cl_khr_fp64
7769 #endif //cl_khr_fp16
7770 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
7789 #endif //cl_khr_fp64
7797 #endif //cl_khr_fp16
7815 #endif //cl_khr_fp64
7823 #endif //cl_khr_fp16
7851 #endif //cl_khr_fp64
7864 #endif //cl_khr_fp16
7885 #endif //cl_khr_fp64
7893 #endif //cl_khr_fp16
7895 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
7909 #endif //cl_khr_fp64
7917 #endif //cl_khr_fp16
7956 #endif //cl_khr_fp64
7976 #endif //cl_khr_fp16
7977 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
7995 #endif //cl_khr_fp64
8003 #endif //cl_khr_fp16
8021 #endif //cl_khr_fp64
8029 #endif //cl_khr_fp16
8047 #endif //cl_khr_fp64
8055 #endif //cl_khr_fp16
8073 #endif //cl_khr_fp64
8081 #endif //cl_khr_fp16
8100 #endif //cl_khr_fp64
8108 #endif //cl_khr_fp16
8130 #endif //cl_khr_fp64
8138 #endif //cl_khr_fp16
8157 #endif //cl_khr_fp64
8165 #endif //cl_khr_fp16
8184 #endif //cl_khr_fp64
8192 #endif //cl_khr_fp16
8201 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
8207 float16 __ovld modf(float16 x, float16 *iptr);
8210 double2 __ovld modf(double2 x, double2 *iptr);
8211 double3 __ovld modf(double3 x, double3 *iptr);
8212 double4 __ovld modf(double4 x, double4 *iptr);
8213 double8 __ovld modf(double8 x, double8 *iptr);
8214 double16 __ovld modf(double16 x, double16 *iptr);
8215 #endif //cl_khr_fp64
8223 #endif //cl_khr_fp16
8225 float __ovld modf(float x, __global float *iptr);
8226 float2 __ovld modf(float2 x, __global float2 *iptr);
8227 float3 __ovld modf(float3 x, __global float3 *iptr);
8228 float4 __ovld modf(float4 x, __global float4 *iptr);
8229 float8 __ovld modf(float8 x, __global float8 *iptr);
8230 float16 __ovld modf(float16 x, __global float16 *iptr);
8231 float __ovld modf(float x, __local float *iptr);
8232 float2 __ovld modf(float2 x, __local float2 *iptr);
8233 float3 __ovld modf(float3 x, __local float3 *iptr);
8234 float4 __ovld modf(float4 x, __local float4 *iptr);
8235 float8 __ovld modf(float8 x, __local float8 *iptr);
8236 float16 __ovld modf(float16 x, __local float16 *iptr);
8237 float __ovld modf(float x, __private float *iptr);
8238 float2 __ovld modf(float2 x, __private float2 *iptr);
8239 float3 __ovld modf(float3 x, __private float3 *iptr);
8240 float4 __ovld modf(float4 x, __private float4 *iptr);
8241 float8 __ovld modf(float8 x, __private float8 *iptr);
8242 float16 __ovld modf(float16 x, __private float16 *iptr);
8244 double __ovld modf(double x, __global double *iptr);
8245 double2 __ovld modf(double2 x, __global double2 *iptr);
8246 double3 __ovld modf(double3 x, __global double3 *iptr);
8247 double4 __ovld modf(double4 x, __global double4 *iptr);
8248 double8 __ovld modf(double8 x, __global double8 *iptr);
8249 double16 __ovld modf(double16 x, __global double16 *iptr);
8250 double __ovld modf(double x, __local double *iptr);
8251 double2 __ovld modf(double2 x, __local double2 *iptr);
8252 double3 __ovld modf(double3 x, __local double3 *iptr);
8253 double4 __ovld modf(double4 x, __local double4 *iptr);
8254 double8 __ovld modf(double8 x, __local double8 *iptr);
8255 double16 __ovld modf(double16 x, __local double16 *iptr);
8256 double __ovld modf(double x, __private double *iptr);
8257 double2 __ovld modf(double2 x, __private double2 *iptr);
8258 double3 __ovld modf(double3 x, __private double3 *iptr);
8259 double4 __ovld modf(double4 x, __private double4 *iptr);
8260 double8 __ovld modf(double8 x, __private double8 *iptr);
8261 double16 __ovld modf(double16 x, __private double16 *iptr);
8262 #endif //cl_khr_fp64
8264 half __ovld modf(half x, __global half *iptr);
8265 half2 __ovld modf(half2 x, __global half2 *iptr);
8266 half3 __ovld modf(half3 x, __global half3 *iptr);
8267 half4 __ovld modf(half4 x, __global half4 *iptr);
8268 half8 __ovld modf(half8 x, __global half8 *iptr);
8269 half16 __ovld modf(half16 x, __global half16 *iptr);
8270 half __ovld modf(half x, __local half *iptr);
8271 half2 __ovld modf(half2 x, __local half2 *iptr);
8272 half3 __ovld modf(half3 x, __local half3 *iptr);
8273 half4 __ovld modf(half4 x, __local half4 *iptr);
8274 half8 __ovld modf(half8 x, __local half8 *iptr);
8275 half16 __ovld modf(half16 x, __local half16 *iptr);
8276 half __ovld modf(half x, __private half *iptr);
8277 half2 __ovld modf(half2 x, __private half2 *iptr);
8278 half3 __ovld modf(half3 x, __private half3 *iptr);
8279 half4 __ovld modf(half4 x, __private half4 *iptr);
8280 half8 __ovld modf(half8 x, __private half8 *iptr);
8281 half16 __ovld modf(half16 x, __private half16 *iptr);
8282 #endif //cl_khr_fp16
8283 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
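// Usage sketch: modf(x, iptr) splits x into fractional and integral parts that
// each keep the sign of x; the integral part is stored through iptr. The
// kernel below is a hypothetical example.
__kernel void split_modf(__global const float *x,
                         __global float *frac_part,
                         __global float *int_part) {
    size_t i = get_global_id(0);
    float ip;
    frac_part[i] = modf(x[i], &ip);
    int_part[i] = ip;     // e.g. modf(-3.25f, &ip) yields -0.25f with ip == -3.0f
}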
8302 #endif //cl_khr_fp64
8310 #endif //cl_khr_fp16
8332 #endif //cl_khr_fp64
8340 #endif //cl_khr_fp16
8358 #endif //cl_khr_fp64
8366 #endif //cl_khr_fp16
8384 #endif //cl_khr_fp64
8392 #endif //cl_khr_fp16
8410 #endif //cl_khr_fp64
8418 #endif //cl_khr_fp16
8439 #endif //cl_khr_fp64
8447 #endif //cl_khr_fp16
8461 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
8467 float16 __ovld remquo(float16 x, float16 y, int16 *quo);
8470 double2 __ovld remquo(double2 x, double2 y, int2 *quo);
8471 double3 __ovld remquo(double3 x, double3 y, int3 *quo);
8472 double4 __ovld remquo(double4 x, double4 y, int4 *quo);
8473 double8 __ovld remquo(double8 x, double8 y, int8 *quo);
8474 double16 __ovld remquo(double16 x, double16 y, int16 *quo);
8475 #endif //cl_khr_fp64
8484 #endif //cl_khr_fp16
8486 float __ovld remquo(float x, float y, __global int *quo);
8487 float2 __ovld remquo(float2 x, float2 y, __global int2 *quo);
8488 float3 __ovld remquo(float3 x, float3 y, __global int3 *quo);
8489 float4 __ovld remquo(float4 x, float4 y, __global int4 *quo);
8490 float8 __ovld remquo(float8 x, float8 y, __global int8 *quo);
8491 float16 __ovld remquo(float16 x, float16 y, __global int16 *quo);
8492 float __ovld remquo(float x, float y, __local int *quo);
8493 float2 __ovld remquo(float2 x, float2 y, __local int2 *quo);
8494 float3 __ovld remquo(float3 x, float3 y, __local int3 *quo);
8495 float4 __ovld remquo(float4 x, float4 y, __local int4 *quo);
8496 float8 __ovld remquo(float8 x, float8 y, __local int8 *quo);
8497 float16 __ovld remquo(float16 x, float16 y, __local int16 *quo);
8498 float __ovld remquo(float x, float y, __private int *quo);
8499 float2 __ovld remquo(float2 x, float2 y, __private int2 *quo);
8500 float3 __ovld remquo(float3 x, float3 y, __private int3 *quo);
8501 float4 __ovld remquo(float4 x, float4 y, __private int4 *quo);
8502 float8 __ovld remquo(float8 x, float8 y, __private int8 *quo);
8503 float16 __ovld remquo(float16 x, float16 y, __private int16 *quo);
8505 double __ovld remquo(double x, double y, __global int *quo);
8506 double2 __ovld remquo(double2 x, double2 y, __global int2 *quo);
8507 double3 __ovld remquo(double3 x, double3 y, __global int3 *quo);
8508 double4 __ovld remquo(double4 x, double4 y, __global int4 *quo);
8509 double8 __ovld remquo(double8 x, double8 y, __global int8 *quo);
8510 double16 __ovld remquo(double16 x, double16 y, __global int16 *quo);
8511 double __ovld remquo(double x, double y, __local int *quo);
8512 double2 __ovld remquo(double2 x, double2 y, __local int2 *quo);
8513 double3 __ovld remquo(double3 x, double3 y, __local int3 *quo);
8514 double4 __ovld remquo(double4 x, double4 y, __local int4 *quo);
8515 double8 __ovld remquo(double8 x, double8 y, __local int8 *quo);
8516 double16 __ovld remquo(double16 x, double16 y, __local int16 *quo);
8517 double __ovld remquo(double x, double y, __private int *quo);
8518 double2 __ovld remquo(double2 x, double2 y, __private int2 *quo);
8519 double3 __ovld remquo(double3 x, double3 y, __private int3 *quo);
8520 double4 __ovld remquo(double4 x, double4 y, __private int4 *quo);
8521 double8 __ovld remquo(double8 x, double8 y, __private int8 *quo);
8522 double16 __ovld remquo(double16 x, double16 y, __private int16 *quo);
8523 #endif //cl_khr_fp64
8526 half2 __ovld remquo(half2 x, half2 y, __global int2 *quo);
8527 half3 __ovld remquo(half3 x, half3 y, __global int3 *quo);
8528 half4 __ovld remquo(half4 x, half4 y, __global int4 *quo);
8529 half8 __ovld remquo(half8 x, half8 y, __global int8 *quo);
8530 half16 __ovld remquo(half16 x, half16 y, __global int16 *quo);
8532 half2 __ovld remquo(half2 x, half2 y, __local int2 *quo);
8533 half3 __ovld remquo(half3 x, half3 y, __local int3 *quo);
8534 half4 __ovld remquo(half4 x, half4 y, __local int4 *quo);
8535 half8 __ovld remquo(half8 x, half8 y, __local int8 *quo);
8536 half16 __ovld remquo(half16 x, half16 y, __local int16 *quo);
8537 half __ovld remquo(half x, half y, __private int *quo);
8538 half2 __ovld remquo(half2 x, half2 y, __private int2 *quo);
8539 half3 __ovld remquo(half3 x, half3 y, __private int3 *quo);
8540 half4 __ovld remquo(half4 x, half4 y, __private int4 *quo);
8541 half8 __ovld remquo(half8 x, half8 y, __private int8 *quo);
8542 half16 __ovld remquo(half16 x, half16 y, __private int16 *quo);
8543 #endif //cl_khr_fp16
8544 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
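// Usage sketch: remquo(x, y, quo) returns the same remainder as remainder(x, y)
// and stores the sign and low-order bits of the integral quotient x/y in *quo,
// which is handy for argument reduction. Hypothetical example:
__kernel void rem_with_quotient(__global const float *x, __global const float *y,
                                __global float *rem, __global int *quo_out) {
    size_t i = get_global_id(0);
    int q;
    rem[i] = remquo(x[i], y[i], &q);
    quo_out[i] = q;
}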
8564 #endif //cl_khr_fp64
8572 #endif //cl_khr_fp16
8590 #endif //cl_khr_fp64
8598 #endif //cl_khr_fp16
8618 #endif //cl_khr_fp64
8626 #endif //cl_khr_fp16
8644 #endif //cl_khr_fp64
8652 #endif //cl_khr_fp16
8670 #endif //cl_khr_fp64
8678 #endif //cl_khr_fp16
8685 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
8699 #endif //cl_khr_fp64
8707 #endif //cl_khr_fp16
8710 float2 __ovld sincos(float2 x, __global float2 *cosval);
8711 float3 __ovld sincos(float3 x, __global float3 *cosval);
8712 float4 __ovld sincos(float4 x, __global float4 *cosval);
8713 float8 __ovld sincos(float8 x, __global float8 *cosval);
8714 float16 __ovld sincos(float16 x, __global float16 *cosval);
8716 float2 __ovld sincos(float2 x, __local float2 *cosval);
8717 float3 __ovld sincos(float3 x, __local float3 *cosval);
8718 float4 __ovld sincos(float4 x, __local float4 *cosval);
8719 float8 __ovld sincos(float8 x, __local float8 *cosval);
8720 float16 __ovld sincos(float16 x, __local float16 *cosval);
8722 float2 __ovld sincos(float2 x, __private float2 *cosval);
8723 float3 __ovld sincos(float3 x, __private float3 *cosval);
8724 float4 __ovld sincos(float4 x, __private float4 *cosval);
8725 float8 __ovld sincos(float8 x, __private float8 *cosval);
8726 float16 __ovld sincos(float16 x, __private float16 *cosval);
8728 double __ovld sincos(double x, __global double *cosval);
8729 double2 __ovld sincos(double2 x, __global double2 *cosval);
8730 double3 __ovld sincos(double3 x, __global double3 *cosval);
8731 double4 __ovld sincos(double4 x, __global double4 *cosval);
8732 double8 __ovld sincos(double8 x, __global double8 *cosval);
8733 double16 __ovld sincos(double16 x, __global double16 *cosval);
8734 double __ovld sincos(double x, __local double *cosval);
8735 double2 __ovld sincos(double2 x, __local double2 *cosval);
8736 double3 __ovld sincos(double3 x, __local double3 *cosval);
8737 double4 __ovld sincos(double4 x, __local double4 *cosval);
8738 double8 __ovld sincos(double8 x, __local double8 *cosval);
8739 double16 __ovld sincos(double16 x, __local double16 *cosval);
8740 double __ovld sincos(double x, __private double *cosval);
8741 double2 __ovld sincos(double2 x, __private double2 *cosval);
8742 double3 __ovld sincos(double3 x, __private double3 *cosval);
8743 double4 __ovld sincos(double4 x, __private double4 *cosval);
8744 double8 __ovld sincos(double8 x, __private double8 *cosval);
8745 double16 __ovld sincos(double16 x, __private double16 *cosval);
8746 #endif //cl_khr_fp64
8753 half16 __ovld sincos(half16 x, __global half16 *cosval);
8759 half16 __ovld sincos(half16 x, __local half16 *cosval);
8765 half16 __ovld sincos(half16 x, __private half16 *cosval);
8766 #endif //cl_khr_fp16
8767 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
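// Usage sketch: sincos(x, cosval) returns sin(x) and writes cos(x) through
// cosval, typically cheaper than separate sin() and cos() calls. Hypothetical
// 2-D rotation kernel:
__kernel void rotate_points(__global float2 *pts, float angle) {
    size_t i = get_global_id(0);
    float c;
    float s = sincos(angle, &c);
    float2 p = pts[i];
    pts[i] = (float2)(p.x * c - p.y * s, p.x * s + p.y * c);
}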
8785 #endif //cl_khr_fp64
8793 #endif //cl_khr_fp16
8811 #endif //cl_khr_fp64
8819 #endif //cl_khr_fp16
8837 #endif //cl_khr_fp64
8845 #endif //cl_khr_fp16
8863 #endif //cl_khr_fp64
8871 #endif //cl_khr_fp16
8889 #endif //cl_khr_fp64
8897 #endif //cl_khr_fp16
8915 #endif //cl_khr_fp64
8923 #endif //cl_khr_fp16
8941 #endif //cl_khr_fp64
8949 #endif //cl_khr_fp16
8968 #endif //cl_khr_fp64
8976 #endif //cl_khr_fp16
9700 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
9749 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
10390 #endif //cl_khr_fp64
10403 #endif //cl_khr_fp16
10422 #endif //cl_khr_fp64
10430 #endif //cl_khr_fp16
10459 #endif //cl_khr_fp64
10472 #endif //cl_khr_fp16
10501 #endif //cl_khr_fp64
10514 #endif //cl_khr_fp16
10546 #endif //cl_khr_fp64
10559 #endif //cl_khr_fp16
10578 #endif //cl_khr_fp64
10586 #endif //cl_khr_fp16
10614 #endif //cl_khr_fp64
10628 #endif //cl_khr_fp16
10666 #endif //cl_khr_fp64
10680 #endif //cl_khr_fp16
10699 #endif //cl_khr_fp64
10707 #endif //cl_khr_fp16
10720 #endif //cl_khr_fp64
10724 #endif //cl_khr_fp16
10738 #endif //cl_khr_fp64
10744 #endif //cl_khr_fp16
10759 #endif //cl_khr_fp64
10765 #endif //cl_khr_fp16
10780 #endif //cl_khr_fp64
10786 #endif //cl_khr_fp16
10801 #endif //cl_khr_fp64
10807 #endif //cl_khr_fp16
10821 #endif //cl_khr_fp16
10836 #endif //cl_khr_fp16
10868 #endif //cl_khr_fp16
10889 #endif //cl_khr_fp64
10897 #endif //cl_khr_fp16
10915 #endif //cl_khr_fp64
10923 #endif //cl_khr_fp16
10941 #endif //cl_khr_fp64
10949 #endif //cl_khr_fp16
10967 #endif //cl_khr_fp64
10975 #endif //cl_khr_fp16
10993 #endif //cl_khr_fp64
11001 #endif //cl_khr_fp16
11019 #endif //cl_khr_fp64
11027 #endif //cl_khr_fp16
11046 #endif //cl_khr_fp64
11054 #endif //cl_khr_fp16
11072 #endif //cl_khr_fp64
11080 #endif //cl_khr_fp16
11098 #endif //cl_khr_fp64
11106 #endif //cl_khr_fp16
11124 #endif //cl_khr_fp64
11132 #endif //cl_khr_fp16
11150 #endif //cl_khr_fp64
11158 #endif //cl_khr_fp16
11178 #endif //cl_khr_fp64
11186 #endif //cl_khr_fp16
11206 #endif //cl_khr_fp64
11214 #endif //cl_khr_fp16
11236 #endif //cl_khr_fp64
11244 #endif //cl_khr_fp16
11370 #endif //cl_khr_fp64
11378 #endif //cl_khr_fp16
11830 #endif //cl_khr_fp64
11844 #endif //cl_khr_fp16
11864 char2 __ovld vload2(size_t offset, const __constant char *p);
11866 short2 __ovld vload2(size_t offset, const __constant short *p);
11868 int2 __ovld vload2(size_t offset, const __constant int *p);
11870 long2 __ovld vload2(size_t offset, const __constant long *p);
11872 float2 __ovld vload2(size_t offset, const __constant float *p);
11873 char3 __ovld vload3(size_t offset, const __constant char *p);
11875 short3 __ovld vload3(size_t offset, const __constant short *p);
11877 int3 __ovld vload3(size_t offset, const __constant int *p);
11879 long3 __ovld vload3(size_t offset, const __constant long *p);
11881 float3 __ovld vload3(size_t offset, const __constant float *p);
11882 char4 __ovld vload4(size_t offset, const __constant char *p);
11884 short4 __ovld vload4(size_t offset, const __constant short *p);
11886 int4 __ovld vload4(size_t offset, const __constant int *p);
11888 long4 __ovld vload4(size_t offset, const __constant long *p);
11890 float4 __ovld vload4(size_t offset, const __constant float *p);
11891 char8 __ovld vload8(size_t offset, const __constant char *p);
11893 short8 __ovld vload8(size_t offset, const __constant short *p);
11895 int8 __ovld vload8(size_t offset, const __constant int *p);
11897 long8 __ovld vload8(size_t offset, const __constant long *p);
11899 float8 __ovld vload8(size_t offset, const __constant float *p);
11900 char16 __ovld vload16(size_t offset, const __constant char *p);
11902 short16 __ovld vload16(size_t offset, const __constant short *p);
11904 int16 __ovld vload16(size_t offset, const __constant int *p);
11906 long16 __ovld vload16(size_t offset, const __constant long *p);
11908 float16 __ovld vload16(size_t offset, const __constant float *p);
11910 double2 __ovld vload2(size_t offset, const __constant double *p);
11911 double3 __ovld vload3(size_t offset, const __constant double *p);
11912 double4 __ovld vload4(size_t offset, const __constant double *p);
11913 double8 __ovld vload8(size_t offset, const __constant double *p);
11914 double16 __ovld vload16(size_t offset, const __constant double *p);
11915 #endif //cl_khr_fp64
11918 half __ovld vload(size_t offset, const __constant half *p);
11919 half2 __ovld vload2(size_t offset, const __constant half *p);
11920 half3 __ovld vload3(size_t offset, const __constant half *p);
11921 half4 __ovld vload4(size_t offset, const __constant half *p);
11922 half8 __ovld vload8(size_t offset, const __constant half *p);
11923 half16 __ovld vload16(size_t offset, const __constant half *p);
11924 #endif //cl_khr_fp16
11926 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
11974 double2 __ovld vload2(size_t offset, const double *p);
11975 double3 __ovld vload3(size_t offset, const double *p);
11976 double4 __ovld vload4(size_t offset, const double *p);
11977 double8 __ovld vload8(size_t offset, const double *p);
11979 #endif //cl_khr_fp64
11982 half __ovld vload(size_t offset, const half *p);
11988 #endif //cl_khr_fp16
11990 char2 __ovld vload2(size_t offset, const __global char *p);
11992 short2 __ovld vload2(size_t offset, const __global short *p);
11994 int2 __ovld vload2(size_t offset, const __global int *p);
11996 long2 __ovld vload2(size_t offset, const __global long *p);
11998 float2 __ovld vload2(size_t offset, const __global float *p);
11999 char3 __ovld vload3(size_t offset, const __global char *p);
12001 short3 __ovld vload3(size_t offset, const __global short *p);
12003 int3 __ovld vload3(size_t offset, const __global int *p);
12005 long3 __ovld vload3(size_t offset, const __global long *p);
12007 float3 __ovld vload3(size_t offset, const __global float *p);
12008 char4 __ovld vload4(size_t offset, const __global char *p);
12010 short4 __ovld vload4(size_t offset, const __global short *p);
12012 int4 __ovld vload4(size_t offset, const __global int *p);
12014 long4 __ovld vload4(size_t offset, const __global long *p);
12016 float4 __ovld vload4(size_t offset, const __global float *p);
12017 char8 __ovld vload8(size_t offset, const __global char *p);
12019 short8 __ovld vload8(size_t offset, const __global short *p);
12021 int8 __ovld vload8(size_t offset, const __global int *p);
12023 long8 __ovld vload8(size_t offset, const __global long *p);
12025 float8 __ovld vload8(size_t offset, const __global float *p);
12026 char16 __ovld vload16(size_t offset, const __global char *p);
12028 short16 __ovld vload16(size_t offset, const __global short *p);
12030 int16 __ovld vload16(size_t offset, const __global int *p);
12032 long16 __ovld vload16(size_t offset, const __global long *p);
12034 float16 __ovld vload16(size_t offset, const __global float *p);
12035 char2 __ovld vload2(size_t offset, const __local char *p);
12037 short2 __ovld vload2(size_t offset, const __local short *p);
12039 int2 __ovld vload2(size_t offset, const __local int *p);
12041 long2 __ovld vload2(size_t offset, const __local long *p);
12043 float2 __ovld vload2(size_t offset, const __local float *p);
12044 char3 __ovld vload3(size_t offset, const __local char *p);
12046 short3 __ovld vload3(size_t offset, const __local short *p);
12048 int3 __ovld vload3(size_t offset, const __local int *p);
12050 long3 __ovld vload3(size_t offset, const __local long *p);
12052 float3 __ovld vload3(size_t offset, const __local float *p);
12053 char4 __ovld vload4(size_t offset, const __local char *p);
12055 short4 __ovld vload4(size_t offset, const __local short *p);
12057 int4 __ovld vload4(size_t offset, const __local int *p);
12059 long4 __ovld vload4(size_t offset, const __local long *p);
12061 float4 __ovld vload4(size_t offset, const __local float *p);
12062 char8 __ovld vload8(size_t offset, const __local char *p);
12064 short8 __ovld vload8(size_t offset, const __local short *p);
12066 int8 __ovld vload8(size_t offset, const __local int *p);
12068 long8 __ovld vload8(size_t offset, const __local long *p);
12070 float8 __ovld vload8(size_t offset, const __local float *p);
12071 char16 __ovld vload16(size_t offset, const __local char *p);
12073 short16 __ovld vload16(size_t offset, const __local short *p);
12077 long16 __ovld vload16(size_t offset, const __local long *p);
12079 float16 __ovld vload16(size_t offset, const __local float *p);
12080 char2 __ovld vload2(size_t offset, const __private char *p);
12082 short2 __ovld vload2(size_t offset, const __private short *p);
12084 int2 __ovld vload2(size_t offset, const __private int *p);
12086 long2 __ovld vload2(size_t offset, const __private long *p);
12088 float2 __ovld vload2(size_t offset, const __private float *p);
12089 char3 __ovld vload3(size_t offset, const __private char *p);
12091 short3 __ovld vload3(size_t offset, const __private short *p);
12093 int3 __ovld vload3(size_t offset, const __private int *p);
12095 long3 __ovld vload3(size_t offset, const __private long *p);
12097 float3 __ovld vload3(size_t offset, const __private float *p);
12098 char4 __ovld vload4(size_t offset, const __private char *p);
12100 short4 __ovld vload4(size_t offset, const __private short *p);
12102 int4 __ovld vload4(size_t offset, const __private int *p);
12104 long4 __ovld vload4(size_t offset, const __private long *p);
12106 float4 __ovld vload4(size_t offset, const __private float *p);
12107 char8 __ovld vload8(size_t offset, const __private char *p);
12109 short8 __ovld vload8(size_t offset, const __private short *p);
12111 int8 __ovld vload8(size_t offset, const __private int *p);
12113 long8 __ovld vload8(size_t offset, const __private long *p);
12115 float8 __ovld vload8(size_t offset, const __private float *p);
12116 char16 __ovld vload16(size_t offset, const __private char *p);
12118 short16 __ovld vload16(size_t offset, const __private short *p);
12120 int16 __ovld vload16(size_t offset, const __private int *p);
12122 long16 __ovld vload16(size_t offset, const __private long *p);
12124 float16 __ovld vload16(size_t offset, const __private float *p);
12127 double2 __ovld vload2(size_t offset, const __global double *p);
12128 double3 __ovld vload3(size_t offset, const __global double *p);
12129 double4 __ovld vload4(size_t offset, const __global double *p);
12130 double8 __ovld vload8(size_t offset, const __global double *p);
12131 double16 __ovld vload16(size_t offset, const __global double *p);
12132 double2 __ovld vload2(size_t offset, const __local double *p);
12133 double3 __ovld vload3(size_t offset, const __local double *p);
12134 double4 __ovld vload4(size_t offset, const __local double *p);
12135 double8 __ovld vload8(size_t offset, const __local double *p);
12136 double16 __ovld vload16(size_t offset, const __local double *p);
12137 double2 __ovld vload2(size_t offset, const __private double *p);
12138 double3 __ovld vload3(size_t offset, const __private double *p);
12139 double4 __ovld vload4(size_t offset, const __private double *p);
12140 double8 __ovld vload8(size_t offset, const __private double *p);
12141 double16 __ovld vload16(size_t offset, const __private double *p);
12142 #endif //cl_khr_fp64
12145 half __ovld vload(size_t offset, const __global half *p);
12146 half2 __ovld vload2(size_t offset, const __global half *p);
12147 half3 __ovld vload3(size_t offset, const __global half *p);
12148 half4 __ovld vload4(size_t offset, const __global half *p);
12149 half8 __ovld vload8(size_t offset, const __global half *p);
12150 half16 __ovld vload16(size_t offset, const __global half *p);
12151 half __ovld vload(size_t offset, const __local half *p);
12152 half2 __ovld vload2(size_t offset, const __local half *p);
12153 half3 __ovld vload3(size_t offset, const __local half *p);
12154 half4 __ovld vload4(size_t offset, const __local half *p);
12155 half8 __ovld vload8(size_t offset, const __local half *p);
12156 half16 __ovld vload16(size_t offset, const __local half *p);
12157 half __ovld vload(size_t offset, const __private half *p);
12158 half2 __ovld vload2(size_t offset, const __private half *p);
12159 half3 __ovld vload3(size_t offset, const __private half *p);
12160 half4 __ovld vload4(size_t offset, const __private half *p);
12161 half8 __ovld vload8(size_t offset, const __private half *p);
12162 half16 __ovld vload16(size_t offset, const __private half *p);
12163 #endif //cl_khr_fp16
12164 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
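// Usage sketch: vloadn(offset, p) reads n consecutive elements starting at
// p + offset * n, so p only needs the alignment of the element type.
// Hypothetical kernel summing groups of four floats:
__kernel void sum_quads(__global const float *in, __global float *out) {
    size_t i = get_global_id(0);
    float4 v = vload4(i, in);        // reads in[4*i] .. in[4*i + 3]
    out[i] = v.x + v.y + v.z + v.w;
}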
12166 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
12213 void __ovld vstore2(double2 data, size_t offset, double *p);
12214 void __ovld vstore3(double3 data, size_t offset, double *p);
12215 void __ovld vstore4(double4 data, size_t offset, double *p);
12216 void __ovld vstore8(double8 data, size_t offset, double *p);
12218 #endif //cl_khr_fp64
12220 void __ovld vstore(half data, size_t offset, half *p);
12226 #endif //cl_khr_fp16
12228 void __ovld vstore2(char2 data, size_t offset, __global char *p);
12230 void __ovld vstore2(short2 data, size_t offset, __global short *p);
12232 void __ovld vstore2(int2 data, size_t offset, __global int *p);
12234 void __ovld vstore2(long2 data, size_t offset, __global long *p);
12236 void __ovld vstore2(float2 data, size_t offset, __global float *p);
12237 void __ovld vstore3(char3 data, size_t offset, __global char *p);
12239 void __ovld vstore3(short3 data, size_t offset, __global short *p);
12241 void __ovld vstore3(int3 data, size_t offset, __global int *p);
12243 void __ovld vstore3(long3 data, size_t offset, __global long *p);
12245 void __ovld vstore3(float3 data, size_t offset, __global float *p);
12246 void __ovld vstore4(char4 data, size_t offset, __global char *p);
12248 void __ovld vstore4(short4 data, size_t offset, __global short *p);
12250 void __ovld vstore4(int4 data, size_t offset, __global int *p);
12252 void __ovld vstore4(long4 data, size_t offset, __global long *p);
12254 void __ovld vstore4(float4 data, size_t offset, __global float *p);
12255 void __ovld vstore8(char8 data, size_t offset, __global char *p);
12257 void __ovld vstore8(short8 data, size_t offset, __global short *p);
12259 void __ovld vstore8(int8 data, size_t offset, __global int *p);
12261 void __ovld vstore8(long8 data, size_t offset, __global long *p);
12263 void __ovld vstore8(float8 data, size_t offset, __global float *p);
12264 void __ovld vstore16(char16 data, size_t offset, __global char *p);
12266 void __ovld vstore16(short16 data, size_t offset, __global short *p);
12268 void __ovld vstore16(int16 data, size_t offset, __global int *p);
12270 void __ovld vstore16(long16 data, size_t offset, __global long *p);
12272 void __ovld vstore16(float16 data, size_t offset, __global float *p);
12273 void __ovld vstore2(char2 data, size_t offset, __local char *p);
12275 void __ovld vstore2(short2 data, size_t offset, __local short *p);
12277 void __ovld vstore2(int2 data, size_t offset, __local int *p);
12279 void __ovld vstore2(long2 data, size_t offset, __local long *p);
12281 void __ovld vstore2(float2 data, size_t offset, __local float *p);
12282 void __ovld vstore3(char3 data, size_t offset, __local char *p);
12284 void __ovld vstore3(short3 data, size_t offset, __local short *p);
12286 void __ovld vstore3(int3 data, size_t offset, __local int *p);
12288 void __ovld vstore3(long3 data, size_t offset, __local long *p);
12290 void __ovld vstore3(float3 data, size_t offset, __local float *p);
12291 void __ovld vstore4(char4 data, size_t offset, __local char *p);
12293 void __ovld vstore4(short4 data, size_t offset, __local short *p);
12295 void __ovld vstore4(int4 data, size_t offset, __local int *p);
12297 void __ovld vstore4(long4 data, size_t offset, __local long *p);
12299 void __ovld vstore4(float4 data, size_t offset, __local float *p);
12300 void __ovld vstore8(char8 data, size_t offset, __local char *p);
12302 void __ovld vstore8(short8 data, size_t offset, __local short *p);
12304 void __ovld vstore8(int8 data, size_t offset, __local int *p);
12306 void __ovld vstore8(long8 data, size_t offset, __local long *p);
12308 void __ovld vstore8(float8 data, size_t offset, __local float *p);
12309 void __ovld vstore16(char16 data, size_t offset, __local char *p);
12311 void __ovld vstore16(short16 data, size_t offset, __local short *p);
12313 void __ovld vstore16(int16 data, size_t offset, __local int *p);
12315 void __ovld vstore16(long16 data, size_t offset, __local long *p);
12317 void __ovld vstore16(float16 data, size_t offset, __local float *p);
12318 void __ovld vstore2(char2 data, size_t offset, __private char *p);
12320 void __ovld vstore2(short2 data, size_t offset, __private short *p);
12322 void __ovld vstore2(int2 data, size_t offset, __private int *p);
12324 void __ovld vstore2(long2 data, size_t offset, __private long *p);
12326 void __ovld vstore2(float2 data, size_t offset, __private float *p);
12327 void __ovld vstore3(char3 data, size_t offset, __private char *p);
12329 void __ovld vstore3(short3 data, size_t offset, __private short *p);
12331 void __ovld vstore3(int3 data, size_t offset, __private int *p);
12333 void __ovld vstore3(long3 data, size_t offset, __private long *p);
12335 void __ovld vstore3(float3 data, size_t offset, __private float *p);
12336 void __ovld vstore4(char4 data, size_t offset, __private char *p);
12338 void __ovld vstore4(short4 data, size_t offset, __private short *p);
12340 void __ovld vstore4(int4 data, size_t offset, __private int *p);
12342 void __ovld vstore4(long4 data, size_t offset, __private long *p);
12344 void __ovld vstore4(float4 data, size_t offset, __private float *p);
12345 void __ovld vstore8(char8 data, size_t offset, __private char *p);
12347 void __ovld vstore8(short8 data, size_t offset, __private short *p);
12349 void __ovld vstore8(int8 data, size_t offset, __private int *p);
12351 void __ovld vstore8(long8 data, size_t offset, __private long *p);
12353 void __ovld vstore8(float8 data, size_t offset, __private float *p);
12354 void __ovld vstore16(char16 data, size_t offset, __private char *p);
12356 void __ovld vstore16(short16 data, size_t offset, __private short *p);
12358 void __ovld vstore16(int16 data, size_t offset, __private int *p);
12360 void __ovld vstore16(long16 data, size_t offset, __private long *p);
12362 void __ovld vstore16(float16 data, size_t offset, __private float *p);
12364 void __ovld vstore2(double2 data, size_t offset, __global double *p);
12365 void __ovld vstore3(double3 data, size_t offset, __global double *p);
12366 void __ovld vstore4(double4 data, size_t offset, __global double *p);
12367 void __ovld vstore8(double8 data, size_t offset, __global double *p);
12368 void __ovld vstore16(double16 data, size_t offset, __global double *p);
12369 void __ovld vstore2(double2 data, size_t offset, __local double *p);
12370 void __ovld vstore3(double3 data, size_t offset, __local double *p);
12371 void __ovld vstore4(double4 data, size_t offset, __local double *p);
12372 void __ovld vstore8(double8 data, size_t offset, __local double *p);
12373 void __ovld vstore16(double16 data, size_t offset, __local double *p);
12374 void __ovld vstore2(double2 data, size_t offset, __private double *p);
12375 void __ovld vstore3(double3 data, size_t offset, __private double *p);
12376 void __ovld vstore4(double4 data, size_t offset, __private double *p);
12377 void __ovld vstore8(double8 data, size_t offset, __private double *p);
12378 void __ovld vstore16(double16 data, size_t offset, __private double *p);
12379 #endif //cl_khr_fp64
12381 void __ovld vstore(half data, size_t offset, __global half *p);
12382 void __ovld vstore2(half2 data, size_t offset, __global half *p);
12383 void __ovld vstore3(half3 data, size_t offset, __global half *p);
12384 void __ovld vstore4(half4 data, size_t offset, __global half *p);
12385 void __ovld vstore8(half8 data, size_t offset, __global half *p);
12386 void __ovld vstore16(half16 data, size_t offset, __global half *p);
12387 void __ovld vstore(half data, size_t offset, __local half *p);
12388 void __ovld vstore2(half2 data, size_t offset, __local half *p);
12389 void __ovld vstore3(half3 data, size_t offset, __local half *p);
12390 void __ovld vstore4(half4 data, size_t offset, __local half *p);
12391 void __ovld vstore8(half8 data, size_t offset, __local half *p);
12392 void __ovld vstore16(half16 data, size_t offset, __local half *p);
12393 void __ovld vstore(half data, size_t offset, __private half *p);
12394 void __ovld vstore2(half2 data, size_t offset, __private half *p);
12395 void __ovld vstore3(half3 data, size_t offset, __private half *p);
12396 void __ovld vstore4(half4 data, size_t offset, __private half *p);
12397 void __ovld vstore8(half8 data, size_t offset, __private half *p);
12398 void __ovld vstore16(half16 data, size_t offset, __private half *p);
12399 #endif //cl_khr_fp16
12400 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
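// Usage sketch: vstoren(data, offset, p) mirrors vloadn and writes the n
// components of data to p + offset * n. Hypothetical example:
__kernel void splat_quads(__global float *out, float value) {
    size_t i = get_global_id(0);
    vstore4((float4)(value), i, out);   // writes out[4*i] .. out[4*i + 3]
}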
12411 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
12417 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
12432 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
12454 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
12467 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
12479 #endif //cl_khr_fp64
12512 #endif //cl_khr_fp64
12513 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
12526 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
12578 #endif //cl_khr_fp64
12731 #endif //cl_khr_fp64
12732 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
12753 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
12779 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
12797 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
12868 #endif //cl_khr_fp64
13081 #endif //cl_khr_fp64
13082 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
13093 #define CLK_LOCAL_MEM_FENCE 0x01
13099 #define CLK_GLOBAL_MEM_FENCE 0x02
13101 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
13107 #define CLK_IMAGE_MEM_FENCE 0x04
13108 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
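// Usage sketch: the CLK_*_MEM_FENCE flags select which address space a
// work-group barrier makes consistent. Hypothetical tile-reversal kernel that
// needs its local-memory writes to be visible before they are read back:
__kernel void reverse_tile(__global float *data, __local float *tile) {
    size_t lid = get_local_id(0);
    size_t lsz = get_local_size(0);
    size_t base = get_group_id(0) * lsz;
    tile[lid] = data[base + lid];
    barrier(CLK_LOCAL_MEM_FENCE);       // all local stores visible to the group
    data[base + lid] = tile[lsz - 1 - lid];
}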
13142 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
13155 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
13200 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
13211 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
13363 #endif //cl_khr_fp64
13377 #endif //cl_khr_fp16
13526 #endif //cl_khr_fp64
13540 #endif //cl_khr_fp16
13562 void __ovld prefetch(const __global char *p, size_t num_elements);
13564 void __ovld prefetch(const __global short *p, size_t num_elements);
13566 void __ovld prefetch(const __global int *p, size_t num_elements);
13568 void __ovld prefetch(const __global long *p, size_t num_elements);
13570 void __ovld prefetch(const __global float *p, size_t num_elements);
13571 void __ovld prefetch(const __global char2 *p, size_t num_elements);
13572 void __ovld prefetch(const __global uchar2 *p, size_t num_elements);
13573 void __ovld prefetch(const __global short2 *p, size_t num_elements);
13574 void __ovld prefetch(const __global ushort2 *p, size_t num_elements);
13575 void __ovld prefetch(const __global int2 *p, size_t num_elements);
13576 void __ovld prefetch(const __global uint2 *p, size_t num_elements);
13577 void __ovld prefetch(const __global long2 *p, size_t num_elements);
13578 void __ovld prefetch(const __global ulong2 *p, size_t num_elements);
13579 void __ovld prefetch(const __global float2 *p, size_t num_elements);
13580 void __ovld prefetch(const __global char3 *p, size_t num_elements);
13581 void __ovld prefetch(const __global uchar3 *p, size_t num_elements);
13582 void __ovld prefetch(const __global short3 *p, size_t num_elements);
13583 void __ovld prefetch(const __global ushort3 *p, size_t num_elements);
13584 void __ovld prefetch(const __global int3 *p, size_t num_elements);
13585 void __ovld prefetch(const __global uint3 *p, size_t num_elements);
13586 void __ovld prefetch(const __global long3 *p, size_t num_elements);
13587 void __ovld prefetch(const __global ulong3 *p, size_t num_elements);
13588 void __ovld prefetch(const __global float3 *p, size_t num_elements);
13589 void __ovld prefetch(const __global char4 *p, size_t num_elements);
13590 void __ovld prefetch(const __global uchar4 *p, size_t num_elements);
13591 void __ovld prefetch(const __global short4 *p, size_t num_elements);
13592 void __ovld prefetch(const __global ushort4 *p, size_t num_elements);
13593 void __ovld prefetch(const __global int4 *p, size_t num_elements);
13594 void __ovld prefetch(const __global uint4 *p, size_t num_elements);
13595 void __ovld prefetch(const __global long4 *p, size_t num_elements);
13596 void __ovld prefetch(const __global ulong4 *p, size_t num_elements);
13597 void __ovld prefetch(const __global float4 *p, size_t num_elements);
13598 void __ovld prefetch(const __global char8 *p, size_t num_elements);
13599 void __ovld prefetch(const __global uchar8 *p, size_t num_elements);
13600 void __ovld prefetch(const __global short8 *p, size_t num_elements);
13601 void __ovld prefetch(const __global ushort8 *p, size_t num_elements);
13602 void __ovld prefetch(const __global int8 *p, size_t num_elements);
13603 void __ovld prefetch(const __global uint8 *p, size_t num_elements);
13604 void __ovld prefetch(const __global long8 *p, size_t num_elements);
13605 void __ovld prefetch(const __global ulong8 *p, size_t num_elements);
13606 void __ovld prefetch(const __global float8 *p, size_t num_elements);
13607 void __ovld prefetch(const __global char16 *p, size_t num_elements);
13608 void __ovld prefetch(const __global uchar16 *p, size_t num_elements);
13609 void __ovld prefetch(const __global short16 *p, size_t num_elements);
13610 void __ovld prefetch(const __global ushort16 *p, size_t num_elements);
13611 void __ovld prefetch(const __global int16 *p, size_t num_elements);
13612 void __ovld prefetch(const __global uint16 *p, size_t num_elements);
13613 void __ovld prefetch(const __global long16 *p, size_t num_elements);
13614 void __ovld prefetch(const __global ulong16 *p, size_t num_elements);
13615 void __ovld prefetch(const __global float16 *p, size_t num_elements);
13617 void __ovld prefetch(const __global double *p, size_t num_elements);
13618 void __ovld prefetch(const __global double2 *p, size_t num_elements);
13619 void __ovld prefetch(const __global double3 *p, size_t num_elements);
13620 void __ovld prefetch(const __global double4 *p, size_t num_elements);
13621 void __ovld prefetch(const __global double8 *p, size_t num_elements);
13622 void __ovld prefetch(const __global double16 *p, size_t num_elements);
13623 #endif //cl_khr_fp64
13625 void __ovld prefetch(const __global half *p, size_t num_elements);
13626 void __ovld prefetch(const __global half2 *p, size_t num_elements);
13627 void __ovld prefetch(const __global half3 *p, size_t num_elements);
13628 void __ovld prefetch(const __global half4 *p, size_t num_elements);
13629 void __ovld prefetch(const __global half8 *p, size_t num_elements);
13630 void __ovld prefetch(const __global half16 *p, size_t num_elements);
13631 #endif // cl_khr_fp16
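// Usage sketch: prefetch(p, num_elements) is only a hint that num_elements
// elements starting at p may be brought into the global-memory cache; it never
// changes results. Hypothetical kernel prefetching the chunk it is about to process:
__kernel void axpy_chunked(__global const float *x, __global float *y,
                           float a, int chunk) {
    size_t base = get_global_id(0) * (size_t)chunk;
    prefetch(&x[base], (size_t)chunk);  // safe to omit; purely a performance hint
    for (int k = 0; k < chunk; ++k)
        y[base + k] += a * x[base + k];
}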
13635 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
13636 #pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
13637 #pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
13646 unsigned int __ovld atomic_add(
volatile __global
unsigned int *p,
unsigned int val);
13648 unsigned int __ovld atomic_add(
volatile __local
unsigned int *p,
unsigned int val);
13650 #if defined(cl_khr_global_int32_base_atomics)
13651 int __ovld atom_add(
volatile __global
int *p,
int val);
13652 unsigned int __ovld atom_add(
volatile __global
unsigned int *p,
unsigned int val);
13654 #if defined(cl_khr_local_int32_base_atomics)
13655 int __ovld atom_add(
volatile __local
int *p,
int val);
13656 unsigned int __ovld atom_add(
volatile __local
unsigned int *p,
unsigned int val);
13659 #if defined(cl_khr_int64_base_atomics)
13660 long __ovld atom_add(
volatile __global
long *p,
long val);
13661 unsigned long __ovld atom_add(
volatile __global
unsigned long *p,
unsigned long val);
13662 long __ovld atom_add(
volatile __local
long *p,
long val);
13663 unsigned long __ovld atom_add(
volatile __local
unsigned long *p, unsigned long val);
13672 unsigned int __ovld atomic_sub(volatile __global unsigned int *p, unsigned int val);
13674 unsigned int __ovld atomic_sub(volatile __local unsigned int *p, unsigned int val);
13676 #if defined(cl_khr_global_int32_base_atomics)
13677 int __ovld atom_sub(volatile __global int *p, int val);
13678 unsigned int __ovld atom_sub(volatile __global unsigned int *p, unsigned int val);
13680 #if defined(cl_khr_local_int32_base_atomics)
13681 int __ovld atom_sub(volatile __local int *p, int val);
13682 unsigned int __ovld atom_sub(volatile __local unsigned int *p, unsigned int val);
13685 #if defined(cl_khr_int64_base_atomics)
13686 long __ovld atom_sub(volatile __global long *p, long val);
13687 unsigned long __ovld atom_sub(volatile __global unsigned long *p, unsigned long val);
13688 long __ovld atom_sub(volatile __local long *p, long val);
13689 unsigned long __ovld atom_sub(volatile __local unsigned long *p, unsigned long val);
13698 unsigned int __ovld atomic_xchg(volatile __global unsigned int *p, unsigned int val);
13700 unsigned int __ovld atomic_xchg(volatile __local unsigned int *p, unsigned int val);
13704 #if defined(cl_khr_global_int32_base_atomics)
13705 int __ovld atom_xchg(volatile __global int *p, int val);
13706 unsigned int __ovld atom_xchg(volatile __global unsigned int *p, unsigned int val);
13708 #if defined(cl_khr_local_int32_base_atomics)
13709 int __ovld atom_xchg(volatile __local int *p, int val);
13710 unsigned int __ovld atom_xchg(volatile __local unsigned int *p, unsigned int val);
13713 #if defined(cl_khr_int64_base_atomics)
13714 long __ovld atom_xchg(volatile __global long *p, long val);
13715 long __ovld atom_xchg(volatile __local long *p, long val);
13716 unsigned long __ovld atom_xchg(volatile __global unsigned long *p, unsigned long val);
13717 unsigned long __ovld atom_xchg(volatile __local unsigned long *p, unsigned long val);
13731 #if defined(cl_khr_global_int32_base_atomics)
13732 int __ovld atom_inc(volatile __global int *p);
13733 unsigned int __ovld atom_inc(volatile __global unsigned int *p);
13735 #if defined(cl_khr_local_int32_base_atomics)
13736 int __ovld atom_inc(volatile __local int *p);
13737 unsigned int __ovld atom_inc(volatile __local unsigned int *p);
13740 #if defined(cl_khr_int64_base_atomics)
13741 long __ovld atom_inc(volatile __global long *p);
13742 unsigned long __ovld atom_inc(volatile __global unsigned long *p);
13743 long __ovld atom_inc(volatile __local long *p);
13744 unsigned long __ovld atom_inc(volatile __local unsigned long *p);
13758 #if defined(cl_khr_global_int32_base_atomics)
13759 int __ovld atom_dec(volatile __global int *p);
13760 unsigned int __ovld atom_dec(volatile __global unsigned int *p);
13762 #if defined(cl_khr_local_int32_base_atomics)
13763 int __ovld atom_dec(volatile __local int *p);
13764 unsigned int __ovld atom_dec(volatile __local unsigned int *p);
13767 #if defined(cl_khr_int64_base_atomics)
13768 long __ovld atom_dec(volatile __global long *p);
13769 unsigned long __ovld atom_dec(volatile __global unsigned long *p);
13770 long __ovld atom_dec(volatile __local long *p);
13771 unsigned long __ovld atom_dec(volatile __local unsigned long *p);
13782 unsigned int __ovld atomic_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val);
13784 unsigned int __ovld atomic_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val);
13786 #if defined(cl_khr_global_int32_base_atomics)
13787 int __ovld atom_cmpxchg(volatile __global int *p, int cmp, int val);
13788 unsigned int __ovld atom_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val);
13790 #if defined(cl_khr_local_int32_base_atomics)
13791 int __ovld atom_cmpxchg(volatile __local int *p, int cmp, int val);
13792 unsigned int __ovld atom_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val);
13795 #if defined(cl_khr_int64_base_atomics)
13796 long __ovld atom_cmpxchg(volatile __global long *p, long cmp, long val);
13797 unsigned long __ovld atom_cmpxchg(volatile __global unsigned long *p, unsigned long cmp, unsigned long val);
13798 long __ovld atom_cmpxchg(volatile __local long *p, long cmp, long val);
13799 unsigned long __ovld atom_cmpxchg(volatile __local unsigned long *p, unsigned long cmp, unsigned long val);
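The atom_* names are the pre-OpenCL-1.1 extension spellings of the same operations; atomic_cmpxchg is the portable compare-and-swap. Below is a minimal sketch of the usual CAS retry loop, here emulating an atomic maximum on non-negative floats by comparing bit patterns; the function name and the non-negativity assumption are illustrative, not part of the header.

// Illustrative sketch only: assumes *p and val hold non-negative floats, so
// their IEEE-754 bit patterns compare in the same order as the values.
void atomic_fmax_nonneg(volatile __global int *p, float val) {
    int old = *p;
    while (as_float(old) < val) {
        int seen = atomic_cmpxchg(p, old, as_int(val));
        if (seen == old)
            break;      // our value was installed
        old = seen;     // another work-item won; re-check against its value
    }
}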
13810 unsigned int __ovld atomic_min(volatile __global unsigned int *p, unsigned int val);
13812 unsigned int __ovld atomic_min(volatile __local unsigned int *p, unsigned int val);
13814 #if defined(cl_khr_global_int32_extended_atomics)
13815 int __ovld atom_min(volatile __global int *p, int val);
13816 unsigned int __ovld atom_min(volatile __global unsigned int *p, unsigned int val);
13818 #if defined(cl_khr_local_int32_extended_atomics)
13819 int __ovld atom_min(volatile __local int *p, int val);
13820 unsigned int __ovld atom_min(volatile __local unsigned int *p, unsigned int val);
13823 #if defined(cl_khr_int64_extended_atomics)
13824 long __ovld atom_min(volatile __global long *p, long val);
13825 unsigned long __ovld atom_min(volatile __global unsigned long *p, unsigned long val);
13826 long __ovld atom_min(volatile __local long *p, long val);
13827 unsigned long __ovld atom_min(volatile __local unsigned long *p, unsigned long val);
13838 unsigned int __ovld atomic_max(volatile __global unsigned int *p, unsigned int val);
13840 unsigned int __ovld atomic_max(volatile __local unsigned int *p, unsigned int val);
13842 #if defined(cl_khr_global_int32_extended_atomics)
13843 int __ovld atom_max(volatile __global int *p, int val);
13844 unsigned int __ovld atom_max(volatile __global unsigned int *p, unsigned int val);
13846 #if defined(cl_khr_local_int32_extended_atomics)
13847 int __ovld atom_max(volatile __local int *p, int val);
13848 unsigned int __ovld atom_max(volatile __local unsigned int *p, unsigned int val);
13851 #if defined(cl_khr_int64_extended_atomics)
13852 long __ovld atom_max(volatile __global long *p, long val);
13853 unsigned long __ovld atom_max(volatile __global unsigned long *p, unsigned long val);
13854 long __ovld atom_max(volatile __local long *p, long val);
13855 unsigned long __ovld atom_max(volatile __local unsigned long *p, unsigned long val);
13865 unsigned int __ovld atomic_and(volatile __global unsigned int *p, unsigned int val);
13867 unsigned int __ovld atomic_and(volatile __local unsigned int *p, unsigned int val);
13869 #if defined(cl_khr_global_int32_extended_atomics)
13870 int __ovld atom_and(volatile __global int *p, int val);
13871 unsigned int __ovld atom_and(volatile __global unsigned int *p, unsigned int val);
13873 #if defined(cl_khr_local_int32_extended_atomics)
13874 int __ovld atom_and(volatile __local int *p, int val);
13875 unsigned int __ovld atom_and(volatile __local unsigned int *p, unsigned int val);
13878 #if defined(cl_khr_int64_extended_atomics)
13879 long __ovld atom_and(volatile __global long *p, long val);
13880 unsigned long __ovld atom_and(volatile __global unsigned long *p, unsigned long val);
13881 long __ovld atom_and(volatile __local long *p, long val);
13882 unsigned long __ovld atom_and(volatile __local unsigned long *p, unsigned long val);
13892 unsigned int __ovld atomic_or(volatile __global unsigned int *p, unsigned int val);
13894 unsigned int __ovld atomic_or(volatile __local unsigned int *p, unsigned int val);
13896 #if defined(cl_khr_global_int32_extended_atomics)
13897 int __ovld atom_or(volatile __global int *p, int val);
13898 unsigned int __ovld atom_or(volatile __global unsigned int *p, unsigned int val);
13900 #if defined(cl_khr_local_int32_extended_atomics)
13901 int __ovld atom_or(volatile __local int *p, int val);
13902 unsigned int __ovld atom_or(volatile __local unsigned int *p, unsigned int val);
13905 #if defined(cl_khr_int64_extended_atomics)
13906 long __ovld atom_or(volatile __global long *p, long val);
13907 unsigned long __ovld atom_or(volatile __global unsigned long *p, unsigned long val);
13908 long __ovld atom_or(volatile __local long *p, long val);
13909 unsigned long __ovld atom_or(volatile __local unsigned long *p, unsigned long val);
13919 unsigned int __ovld atomic_xor(volatile __global unsigned int *p, unsigned int val);
13921 unsigned int __ovld atomic_xor(volatile __local unsigned int *p, unsigned int val);
13923 #if defined(cl_khr_global_int32_extended_atomics)
13924 int __ovld atom_xor(volatile __global int *p, int val);
13925 unsigned int __ovld atom_xor(volatile __global unsigned int *p, unsigned int val);
13927 #if defined(cl_khr_local_int32_extended_atomics)
13928 int __ovld atom_xor(volatile __local int *p, int val);
13929 unsigned int __ovld atom_xor(volatile __local unsigned int *p, unsigned int val);
13932 #if defined(cl_khr_int64_extended_atomics)
13933 long __ovld atom_xor(volatile __global long *p, long val);
13934 unsigned long __ovld atom_xor(volatile __global unsigned long *p, unsigned long val);
13935 long __ovld atom_xor(volatile __local long *p, long val);
13936 unsigned long __ovld atom_xor(volatile __local unsigned long *p, unsigned long val);
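The extended atomics (min, max, and, or, xor) follow the same global/local and 32/64-bit pattern. A small sketch, assuming a hypothetical flags buffer, that ORs a per-item bit into a shared 32-bit mask:

// Illustrative sketch: mask must be zero-initialised by the host.
__kernel void mark_buckets(__global const uchar *flags,
                           volatile __global unsigned int *mask) {
    size_t gid = get_global_id(0);
    if (flags[gid])
        atomic_or(mask, 1u << (uint)(gid % 32));
}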
13939 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
13940 #pragma OPENCL EXTENSION cl_khr_int64_base_atomics : disable
13941 #pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : disable
13946 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
13947 #ifndef ATOMIC_VAR_INIT
13948 #define ATOMIC_VAR_INIT(x) (x)
13949 #endif //ATOMIC_VAR_INIT
13950 #define ATOMIC_FLAG_INIT 0
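ATOMIC_VAR_INIT and ATOMIC_FLAG_INIT mirror the C11 initializers. A sketch, assuming OpenCL 2.0 program-scope variables are available; the name tally is illustrative:

__global atomic_int tally = ATOMIC_VAR_INIT(0);   // program-scope atomic

__kernel void accumulate(__global const int *data) {
    // Relaxed ordering is enough for a pure counter.
    atomic_fetch_add_explicit(&tally, data[get_global_id(0)],
                              memory_order_relaxed, memory_scope_device);
}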
13963 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
13964 #pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
13965 #pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
13972 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
13977 #endif //cl_khr_fp64
14034 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
14083 #endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
14089 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
14141 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
14146 #endif //cl_khr_fp64
14166 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
14171 #endif //cl_khr_fp64
14191 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
14196 #endif //cl_khr_fp64
14237 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
14249 #endif //cl_khr_fp64
14281 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
14527 #endif //cl_khr_fp64
14549 #endif //cl_khr_fp16
14751 #endif //cl_khr_fp64
14773 #endif //cl_khr_fp16
14775 #if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
14778 int printf(__constant const char* st, ...);
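Device-side printf follows the C99 format rules plus OpenCL's vector specifiers. A throwaway sketch that prints one value from work-item 0 (the buffer name is illustrative):

__kernel void debug_dump(__global const float *v) {
    if (get_global_id(0) == 0)
        printf("v[0] = %f\n", v[0]);
}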
14787 #define CLK_ADDRESS_NONE 0
14788 #define CLK_ADDRESS_CLAMP_TO_EDGE 2
14789 #define CLK_ADDRESS_CLAMP 4
14790 #define CLK_ADDRESS_REPEAT 6
14791 #define CLK_ADDRESS_MIRRORED_REPEAT 8
14796 #define CLK_NORMALIZED_COORDS_FALSE 0
14797 #define CLK_NORMALIZED_COORDS_TRUE 1
14802 #define CLK_FILTER_NEAREST 0x10
14803 #define CLK_FILTER_LINEAR 0x20
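These CLK_* flags are OR-ed together to build a sampler_t, typically at program scope. A minimal sketch:

__constant sampler_t nearest_clamp = CLK_NORMALIZED_COORDS_FALSE |
                                     CLK_ADDRESS_CLAMP_TO_EDGE |
                                     CLK_FILTER_NEAREST;

__kernel void copy_texel(read_only image2d_t src, write_only image2d_t dst) {
    int2 pos = (int2)(get_global_id(0), get_global_id(1));
    write_imagef(dst, pos, read_imagef(src, nearest_clamp, pos));
}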
14805 #ifdef cl_khr_gl_msaa_sharing
14806 #pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable
14807 #endif //cl_khr_gl_msaa_sharing
14942 #ifdef cl_khr_depth_images
14948 #endif //cl_khr_depth_images
14950 #if defined(cl_khr_gl_msaa_sharing)
14962 #endif //cl_khr_gl_msaa_sharing
14965 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
14966 #ifdef cl_khr_mipmap_image
14992 float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
14993 int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
14994 uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
14996 float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
14997 int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
14998 uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
15000 float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
15001 int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
15002 uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
15004 float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
15006 float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
15007 int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
15008 uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
15010 float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
15012 float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
15013 int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
15014 uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
15040 #endif //cl_khr_mipmap_image
15041 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
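The gradient overloads let the implementation pick a mip level from explicit derivatives instead of computing them. A sketch, assuming cl_khr_mipmap_image; the host-supplied gradients are illustrative:

// duvdx/duvdy are the per-pixel changes of the normalized coordinate,
// supplied by the host for this illustration.
__kernel void sample_with_gradients(read_only image2d_t img, sampler_t smp,
                                    __global float4 *out,
                                    float2 duvdx, float2 duvdy) {
    int2 pos = (int2)(get_global_id(0), get_global_id(1));
    int2 dim = get_image_dim(img);
    float2 uv = (convert_float2(pos) + 0.5f) / convert_float2(dim);
    out[pos.y * dim.x + pos.x] = read_imagef(img, smp, uv, duvdx, duvdy);
}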
15067 #ifdef cl_khr_depth_images
15070 #endif //cl_khr_depth_images
15078 half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, int coord);
15079 half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, float coord);
15080 half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, int2 coord);
15081 half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, float2 coord);
15082 half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, int2 coord);
15083 half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, float2 coord);
15084 half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, int4 coord);
15085 half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, float4 coord);
15086 half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, int4 coord);
15087 half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, float4 coord);
15088 half4 __purefn __ovld read_imageh(read_only image1d_t image, int coord);
15089 half4 __purefn __ovld read_imageh(read_only image2d_t image, int2 coord);
15090 half4 __purefn __ovld read_imageh(read_only image3d_t image, int4 coord);
15091 half4 __purefn __ovld read_imageh(read_only image1d_array_t image, int2 coord);
15092 half4 __purefn __ovld read_imageh(read_only image2d_array_t image, int4 coord);
15093 half4 __purefn __ovld read_imageh(read_only image1d_buffer_t image, int coord);
15094 #endif //cl_khr_fp16
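read_imageh returns half4 directly, which avoids the float4 round trip when the destination is also half precision. A sketch assuming cl_khr_fp16 (the kernel and buffer names are illustrative):

__kernel void gather_half(read_only image2d_t src, sampler_t smp,
                          __global half4 *dst, int width) {
    int2 pos = (int2)(get_global_id(0), get_global_id(1));
    dst[pos.y * width + pos.x] = read_imageh(src, smp, pos);
}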
15097 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15122 #ifdef cl_khr_depth_images
15125 #endif //cl_khr_depth_images
15127 #if cl_khr_gl_msaa_sharing
15138 #endif //cl_khr_gl_msaa_sharing
15140 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15141 #ifdef cl_khr_mipmap_image
15146 float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
15156 float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
15166 float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
15167 int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
15168 uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
15170 float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
15171 int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
15172 uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
15174 float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
15175 int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
15176 uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
15178 float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
15180 float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
15181 int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
15182 uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
15184 float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
15186 float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
15187 int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
15188 uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
15194 float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
15204 float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
15213 #endif //cl_khr_mipmap_image
15214 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15218 half4 __purefn __ovld read_imageh(read_write image1d_t image, int coord);
15219 half4 __purefn __ovld read_imageh(read_write image2d_t image, int2 coord);
15220 half4 __purefn __ovld read_imageh(read_write image3d_t image, int4 coord);
15221 half4 __purefn __ovld read_imageh(read_write image1d_array_t image, int2 coord);
15222 half4 __purefn __ovld read_imageh(read_write image2d_array_t image, int4 coord);
15223 half4 __purefn __ovld read_imageh(read_write image1d_buffer_t image, int coord);
15224 #endif //cl_khr_fp16
15225 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15298 void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, float4 color);
15299 void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int4 color);
15300 void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, uint4 color);
15306 void __ovld write_imagef(write_only image1d_buffer_t image, int coord, float4 color);
15310 void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, float4 color);
15311 void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int4 color);
15312 void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, uint4 color);
15314 #ifdef cl_khr_3d_image_writes
15320 #ifdef cl_khr_depth_images
15321 void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, float color);
15322 void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, float color);
15323 #endif //cl_khr_depth_images
15326 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15327 #ifdef cl_khr_mipmap_image
15328 void __ovld write_imagef(write_only image1d_t image, int coord, int lod, float4 color);
15329 void __ovld write_imagei(write_only image1d_t image, int coord, int lod, int4 color);
15332 void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, int lod, float4 color);
15333 void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int lod, int4 color);
15334 void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, int lod, uint4 color);
15336 void __ovld write_imagef(write_only image2d_t image, int2 coord, int lod, float4 color);
15337 void __ovld write_imagei(write_only image2d_t image, int2 coord, int lod, int4 color);
15340 void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, int lod, float4 color);
15341 void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int lod, int4 color);
15342 void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, int lod, uint4 color);
15344 void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, int lod, float color);
15345 void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, int lod, float color);
15347 #ifdef cl_khr_3d_image_writes
15348 void __ovld write_imagef(write_only image3d_t image, int4 coord, int lod, float4 color);
15349 void __ovld write_imagei(write_only image3d_t image, int4 coord, int lod, int4 color);
15352 #endif //cl_khr_mipmap_image
15353 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15357 void __ovld write_imageh(write_only image1d_t image, int coord, half4 color);
15358 void __ovld write_imageh(write_only image2d_t image, int2 coord, half4 color);
15359 #ifdef cl_khr_3d_image_writes
15360 void __ovld write_imageh(write_only image3d_t image, int4 coord, half4 color);
15362 void __ovld write_imageh(write_only image1d_array_t image, int2 coord, half4 color);
15363 void __ovld write_imageh(write_only image2d_array_t image, int4 coord, half4 color);
15364 void __ovld write_imageh(write_only image1d_buffer_t image, int coord, half4 color);
15365 #endif //cl_khr_fp16
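With cl_khr_mipmap_image the write overloads gain an explicit lod argument. A sketch that clears one level of a mipmapped 2D image; it assumes the kernel is enqueued with that level's dimensions:

__kernel void clear_mip_level(write_only image2d_t img, int lod, float4 value) {
    int2 pos = (int2)(get_global_id(0), get_global_id(1));
    write_imagef(img, pos, lod, value);   // lod selects the mip level
}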
15368 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15373 void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, float4 color);
15374 void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int4 color);
15375 void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, uint4 color);
15381 void __ovld write_imagef(read_write image1d_buffer_t image, int coord, float4 color);
15385 void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, float4 color);
15386 void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int4 color);
15387 void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, uint4 color);
15389 #ifdef cl_khr_3d_image_writes
15395 #ifdef cl_khr_depth_images
15396 void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, float color);
15397 void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, float color);
15398 #endif //cl_khr_depth_images
15400 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15401 #ifdef cl_khr_mipmap_image
15402 void __ovld write_imagef(read_write image1d_t image, int coord, int lod, float4 color);
15403 void __ovld write_imagei(read_write image1d_t image, int coord, int lod, int4 color);
15406 void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, int lod, float4 color);
15407 void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int lod, int4 color);
15408 void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, int lod, uint4 color);
15410 void __ovld write_imagef(read_write image2d_t image, int2 coord, int lod, float4 color);
15411 void __ovld write_imagei(read_write image2d_t image, int2 coord, int lod, int4 color);
15414 void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, int lod, float4 color);
15415 void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int lod, int4 color);
15416 void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, int lod, uint4 color);
15418 void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, int lod, float color);
15419 void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, int lod, float color);
15421 #ifdef cl_khr_3d_image_writes
15422 void __ovld write_imagef(read_write image3d_t image, int4 coord, int lod, float4 color);
15423 void __ovld write_imagei(read_write image3d_t image, int4 coord, int lod, int4 color);
15426 #endif //cl_khr_mipmap_image
15427 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15431 void __ovld write_imageh(read_write image1d_t image, int coord, half4 color);
15432 void __ovld write_imageh(read_write image2d_t image, int2 coord, half4 color);
15433 #ifdef cl_khr_3d_image_writes
15434 void __ovld write_imageh(read_write image3d_t image, int4 coord, half4 color);
15436 void __ovld write_imageh(read_write image1d_array_t image, int2 coord, half4 color);
15437 void __ovld write_imageh(read_write image2d_array_t image, int4 coord, half4 color);
15438 void __ovld write_imageh(read_write image1d_buffer_t image, int coord, half4 color);
15439 #endif //cl_khr_fp16
15440 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15453 #ifdef cl_khr_3d_image_writes
15458 #ifdef cl_khr_depth_images
15461 #endif //cl_khr_depth_images
15462 #if defined(cl_khr_gl_msaa_sharing)
15467 #endif //cl_khr_gl_msaa_sharing
15472 #ifdef cl_khr_3d_image_writes
15477 #ifdef cl_khr_depth_images
15480 #endif //cl_khr_depth_images
15481 #if defined(cl_khr_gl_msaa_sharing)
15486 #endif //cl_khr_gl_msaa_sharing
15488 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15495 #ifdef cl_khr_depth_images
15498 #endif //cl_khr_depth_images
15499 #if defined(cl_khr_gl_msaa_sharing)
15504 #endif //cl_khr_gl_msaa_sharing
15505 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15513 #ifdef cl_khr_depth_images
15516 #endif //cl_khr_depth_images
15517 #if defined(cl_khr_gl_msaa_sharing)
15522 #endif //cl_khr_gl_msaa_sharing
15525 #ifdef cl_khr_3d_image_writes
15529 #ifdef cl_khr_depth_images
15532 #endif //cl_khr_depth_images
15533 #if defined(cl_khr_gl_msaa_sharing)
15538 #endif //cl_khr_gl_msaa_sharing
15540 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15544 #ifdef cl_khr_depth_images
15547 #endif //cl_khr_depth_images
15548 #if defined(cl_khr_gl_msaa_sharing)
15553 #endif //cl_khr_gl_msaa_sharing
15554 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15561 #ifdef cl_khr_3d_image_writes
15565 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15567 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15570 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15571 #ifdef cl_khr_mipmap_image
15576 int __ovld get_image_num_mip_levels(read_only image1d_t image);
15577 int __ovld get_image_num_mip_levels(read_only image2d_t image);
15578 int __ovld get_image_num_mip_levels(read_only image3d_t image);
15580 int __ovld get_image_num_mip_levels(write_only image1d_t image);
15581 int __ovld get_image_num_mip_levels(write_only image2d_t image);
15582 #ifdef cl_khr_3d_image_writes
15583 int __ovld get_image_num_mip_levels(write_only image3d_t image);
15586 int __ovld get_image_num_mip_levels(read_write image1d_t image);
15587 int __ovld get_image_num_mip_levels(read_write image2d_t image);
15588 int __ovld get_image_num_mip_levels(read_write image3d_t image);
15590 int __ovld get_image_num_mip_levels(read_only image1d_array_t image);
15591 int __ovld get_image_num_mip_levels(read_only image2d_array_t image);
15592 int __ovld get_image_num_mip_levels(read_only image2d_array_depth_t image);
15593 int __ovld get_image_num_mip_levels(read_only image2d_depth_t image);
15595 int __ovld get_image_num_mip_levels(write_only image1d_array_t image);
15596 int __ovld get_image_num_mip_levels(write_only image2d_array_t image);
15597 int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t image);
15598 int __ovld get_image_num_mip_levels(write_only image2d_depth_t image);
15600 int __ovld get_image_num_mip_levels(read_write image1d_array_t image);
15601 int __ovld get_image_num_mip_levels(read_write image2d_array_t image);
15602 int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t image);
15603 int __ovld get_image_num_mip_levels(read_write image2d_depth_t image);
15605 #endif //cl_khr_mipmap_image
15606 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15630 #define CLK_SNORM_INT8 0x10D0
15631 #define CLK_SNORM_INT16 0x10D1
15632 #define CLK_UNORM_INT8 0x10D2
15633 #define CLK_UNORM_INT16 0x10D3
15634 #define CLK_UNORM_SHORT_565 0x10D4
15635 #define CLK_UNORM_SHORT_555 0x10D5
15636 #define CLK_UNORM_INT_101010 0x10D6
15637 #define CLK_SIGNED_INT8 0x10D7
15638 #define CLK_SIGNED_INT16 0x10D8
15639 #define CLK_SIGNED_INT32 0x10D9
15640 #define CLK_UNSIGNED_INT8 0x10DA
15641 #define CLK_UNSIGNED_INT16 0x10DB
15642 #define CLK_UNSIGNED_INT32 0x10DC
15643 #define CLK_HALF_FLOAT 0x10DD
15644 #define CLK_FLOAT 0x10DE
15645 #define CLK_UNORM_INT24 0x10DF
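get_image_channel_data_type returns one of the CLK_* values above, which is occasionally useful for run-time dispatch. A tiny sketch (kernel and buffer names are illustrative):

__kernel void report_format(read_only image2d_t img, __global int *is_float) {
    if (get_global_id(0) == 0)
        *is_float = (get_image_channel_data_type(img) == CLK_FLOAT);
}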
15653 #ifdef cl_khr_depth_images
15656 #endif //cl_khr_depth_images
15657 #if defined(cl_khr_gl_msaa_sharing)
15662 #endif //cl_khr_gl_msaa_sharing
15667 #ifdef cl_khr_3d_image_writes
15672 #ifdef cl_khr_depth_images
15675 #endif //cl_khr_depth_images
15676 #if defined(cl_khr_gl_msaa_sharing)
15681 #endif //cl_khr_gl_msaa_sharing
15683 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15690 #ifdef cl_khr_depth_images
15693 #endif //cl_khr_depth_images
15694 #if defined(cl_khr_gl_msaa_sharing)
15699 #endif //cl_khr_gl_msaa_sharing
15700 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15720 #define CLK_R 0x10B0
15721 #define CLK_A 0x10B1
15722 #define CLK_RG 0x10B2
15723 #define CLK_RA 0x10B3
15724 #define CLK_RGB 0x10B4
15725 #define CLK_RGBA 0x10B5
15726 #define CLK_BGRA 0x10B6
15727 #define CLK_ARGB 0x10B7
15728 #define CLK_INTENSITY 0x10B8
15729 #define CLK_LUMINANCE 0x10B9
15730 #define CLK_Rx 0x10BA
15731 #define CLK_RGx 0x10BB
15732 #define CLK_RGBx 0x10BC
15733 #define CLK_DEPTH 0x10BD
15734 #define CLK_DEPTH_STENCIL 0x10BE
15735 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15736 #define CLK_sRGB 0x10BF
15737 #define CLK_sRGBA 0x10C1
15738 #define CLK_sRGBx 0x10C0
15739 #define CLK_sBGRA 0x10C2
15740 #define CLK_ABGR 0x10C3
15741 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
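The channel-order query pairs with these constants in the same way; for example, a kernel can decide whether a manual red/blue swizzle is needed (sketch, names illustrative):

__kernel void needs_swizzle(read_only image2d_t img, __global int *swap_rb) {
    if (get_global_id(0) == 0)
        *swap_rb = (get_image_channel_order(img) == CLK_BGRA);
}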
15749 #ifdef cl_khr_depth_images
15752 #endif //cl_khr_depth_images
15753 #if defined(cl_khr_gl_msaa_sharing)
15758 #endif //cl_khr_gl_msaa_sharing
15763 #ifdef cl_khr_3d_image_writes
15768 #ifdef cl_khr_depth_images
15771 #endif //cl_khr_depth_images
15772 #if defined(cl_khr_gl_msaa_sharing)
15777 #endif //cl_khr_gl_msaa_sharing
15779 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15786 #ifdef cl_khr_depth_images
15789 #endif //cl_khr_depth_images
15790 #if defined(cl_khr_gl_msaa_sharing)
15795 #endif //cl_khr_gl_msaa_sharing
15796 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15805 #ifdef cl_khr_depth_images
15808 #endif //cl_khr_depth_images
15809 #if defined(cl_khr_gl_msaa_sharing)
15814 #endif //cl_khr_gl_msaa_sharing
15818 #ifdef cl_khr_depth_images
15821 #endif //cl_khr_depth_images
15822 #if defined(cl_khr_gl_msaa_sharing)
15827 #endif //cl_khr_gl_msaa_sharing
15829 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15832 #ifdef cl_khr_depth_images
15835 #endif //cl_khr_depth_images
15836 #if defined(cl_khr_gl_msaa_sharing)
15841 #endif //cl_khr_gl_msaa_sharing
15842 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15851 #ifdef cl_khr_3d_image_writes
15854 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15856 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15864 #ifdef cl_khr_depth_images
15866 #endif //cl_khr_depth_images
15867 #if defined(cl_khr_gl_msaa_sharing)
15870 #endif //cl_khr_gl_msaa_sharing
15874 #ifdef cl_khr_depth_images
15876 #endif //cl_khr_depth_images
15877 #if defined(cl_khr_gl_msaa_sharing)
15880 #endif //cl_khr_gl_msaa_sharing
15882 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15885 #ifdef cl_khr_depth_images
15887 #endif //cl_khr_depth_images
15888 #if defined(cl_khr_gl_msaa_sharing)
15891 #endif //cl_khr_gl_msaa_sharing
15892 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15897 #if defined(cl_khr_gl_msaa_sharing)
15898 int __ovld get_image_num_samples(read_only image2d_msaa_t image);
15899 int __ovld get_image_num_samples(read_only image2d_msaa_depth_t image);
15900 int __ovld get_image_num_samples(read_only image2d_array_msaa_depth_t image);
15901 int __ovld get_image_num_samples(read_only image2d_array_msaa_t image);
15902 int __ovld get_image_num_samples(read_only image2d_array_msaa_depth_t image);
15904 int __ovld get_image_num_samples(write_only image2d_msaa_t image);
15905 int __ovld get_image_num_samples(write_only image2d_msaa_depth_t image);
15906 int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image);
15907 int __ovld get_image_num_samples(write_only image2d_array_msaa_t image);
15908 int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image);
15910 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15911 int __ovld get_image_num_samples(read_write image2d_msaa_t image);
15912 int __ovld get_image_num_samples(read_write image2d_msaa_depth_t image);
15913 int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image);
15914 int __ovld get_image_num_samples(read_write image2d_array_msaa_t image);
15915 int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image);
15916 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
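get_image_num_samples combines naturally with the sample-indexed read overloads of cl_khr_gl_msaa_sharing. A resolve sketch that averages all samples into a single-sample image (assuming that extension is enabled):

__kernel void resolve_msaa(read_only image2d_msaa_t src,
                           write_only image2d_t dst) {
    int2 pos = (int2)(get_global_id(0), get_global_id(1));
    int n = get_image_num_samples(src);
    float4 acc = (float4)(0.0f);
    for (int s = 0; s < n; ++s)
        acc += read_imagef(src, pos, s);   // sample-indexed read
    write_imagef(dst, pos, acc / (float)n);
}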
15921 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15949 #endif //cl_khr_fp64
16017 #endif //cl_khr_fp64
16019 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
16022 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
16023 #define PIPE_RESERVE_ID_VALID_BIT (1U << 30)
16024 #define CLK_NULL_RESERVE_ID (__builtin_astype(((void*)(__SIZE_MAX__)), reserve_id_t))
16026 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
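CLK_NULL_RESERVE_ID and PIPE_RESERVE_ID_VALID_BIT belong to the OpenCL 2.0 pipe machinery. A producer sketch using an explicit reservation; the pipe element type int and the kernel name are illustrative:

__kernel void produce(__write_only pipe int out, __global const int *src) {
    reserve_id_t rid = reserve_write_pipe(out, 1);
    if (is_valid_reserve_id(rid)) {
        write_pipe(out, rid, 0, &src[get_global_id(0)]);
        commit_write_pipe(out, rid);
    }
}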
16030 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
16032 #define CL_COMPLETE 0x0
16033 #define CL_RUNNING 0x1
16034 #define CL_SUBMITTED 0x2
16035 #define CL_QUEUED 0x3
16037 #define CLK_SUCCESS 0
16038 #define CLK_ENQUEUE_FAILURE -101
16039 #define CLK_INVALID_QUEUE -102
16040 #define CLK_INVALID_NDRANGE -160
16041 #define CLK_INVALID_EVENT_WAIT_LIST -57
16042 #define CLK_DEVICE_QUEUE_FULL -161
16043 #define CLK_INVALID_ARG_SIZE -51
16044 #define CLK_EVENT_ALLOCATION_FAILURE -100
16045 #define CLK_OUT_OF_RESOURCES -5
16047 #define CLK_NULL_QUEUE 0
16048 #define CLK_NULL_EVENT (__builtin_astype(((void*)(__SIZE_MAX__)), clk_event_t))
16051 #define CLK_ENQUEUE_FLAGS_NO_WAIT 0x0
16052 #define CLK_ENQUEUE_FLAGS_WAIT_KERNEL 0x1
16053 #define CLK_ENQUEUE_FLAGS_WAIT_WORK_GROUP 0x2
16059 #define CLK_PROFILING_COMMAND_EXEC_TIME 0x1
16061 #define MAX_WORK_DIM 3
16097 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
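These status codes and flags are consumed by device-side enqueue. A sketch in which the first work-item launches a child grid; the doubling block is illustrative:

__kernel void parent(__global int *data, int n) {
    if (get_global_id(0) == 0) {
        int err = enqueue_kernel(get_default_queue(),
                                 CLK_ENQUEUE_FLAGS_NO_WAIT,
                                 ndrange_1D((size_t)n),
                                 ^{ data[get_global_linear_id()] *= 2; });
        if (err != CLK_SUCCESS) {
            // e.g. CLK_DEVICE_QUEUE_FULL; a real kernel would record this
        }
    }
}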
16101 #if defined(cl_intel_subgroups) || defined(cl_khr_subgroups)
16106 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
16107 uint __ovld get_enqueued_num_sub_groups(void);
16108 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
16112 void __ovld __conv sub_group_barrier(cl_mem_fence_flags flags);
16113 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
16115 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
16124 float __ovld __conv sub_group_broadcast(float x, uint sub_group_local_id);
16142 int __ovld __conv sub_group_scan_exclusive_add(int x);
16144 long __ovld __conv sub_group_scan_exclusive_add(long x);
16146 float __ovld __conv sub_group_scan_exclusive_add(float x);
16147 int __ovld __conv sub_group_scan_exclusive_min(int x);
16149 long __ovld __conv sub_group_scan_exclusive_min(long x);
16151 float __ovld __conv sub_group_scan_exclusive_min(float x);
16152 int __ovld __conv sub_group_scan_exclusive_max(int x);
16154 long __ovld __conv sub_group_scan_exclusive_max(long x);
16156 float __ovld __conv sub_group_scan_exclusive_max(float x);
16158 int __ovld __conv sub_group_scan_inclusive_add(int x);
16160 long __ovld __conv sub_group_scan_inclusive_add(long x);
16162 float __ovld __conv sub_group_scan_inclusive_add(float x);
16163 int __ovld __conv sub_group_scan_inclusive_min(int x);
16165 long __ovld __conv sub_group_scan_inclusive_min(long x);
16167 float __ovld __conv sub_group_scan_inclusive_min(float x);
16168 int __ovld __conv sub_group_scan_inclusive_max(int x);
16170 long __ovld __conv sub_group_scan_inclusive_max(long x);
16172 float __ovld __conv sub_group_scan_inclusive_max(float x);
16179 half __ovld __conv sub_group_scan_exclusive_add(half x);
16180 half __ovld __conv sub_group_scan_exclusive_min(half x);
16181 half __ovld __conv sub_group_scan_exclusive_max(half x);
16182 half __ovld __conv sub_group_scan_inclusive_add(half x);
16183 half __ovld __conv sub_group_scan_inclusive_min(half x);
16184 half __ovld __conv sub_group_scan_inclusive_max(half x);
16185 #endif //cl_khr_fp16
16188 double __ovld __conv sub_group_broadcast(double x, uint sub_group_local_id);
16192 double __ovld __conv sub_group_scan_exclusive_add(double x);
16193 double __ovld __conv sub_group_scan_exclusive_min(double x);
16194 double __ovld __conv sub_group_scan_exclusive_max(double x);
16195 double __ovld __conv sub_group_scan_inclusive_add(double x);
16196 double __ovld __conv sub_group_scan_inclusive_min(double x);
16197 double __ovld __conv sub_group_scan_inclusive_max(double x);
16198 #endif //cl_khr_fp64
16200 #endif //cl_khr_subgroups cl_intel_subgroups
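The sub-group scans are the work-horse of warp/wave-level prefix sums. A sketch assuming cl_khr_subgroups (or cl_intel_subgroups) is available:

__kernel void subgroup_prefix_sum(__global const int *in, __global int *out) {
    size_t gid = get_global_id(0);
    int prefix = sub_group_scan_inclusive_add(in[gid]);
    // The last lane holds the sub-group total; broadcast it if needed.
    int total = sub_group_broadcast(prefix, get_sub_group_size() - 1);
    out[gid] = prefix;
    (void)total;
}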
16202 #ifdef cl_amd_media_ops
16204 uint2 __ovld amd_bitalign(uint2 a, uint2 b, uint2 c);
16205 uint3 __ovld amd_bitalign(uint3 a, uint3 b, uint3 c);
16206 uint4 __ovld amd_bitalign(uint4 a, uint4 b, uint4 c);
16207 uint8 __ovld amd_bitalign(uint8 a, uint8 b, uint8 c);
16208 uint16 __ovld amd_bitalign(uint16 a, uint16 b, uint16 c);
16211 uint2 __ovld amd_bytealign(uint2 a, uint2 b, uint2 c);
16212 uint3 __ovld amd_bytealign(uint3 a, uint3 b, uint3 c);
16213 uint4 __ovld amd_bytealign(uint4 a, uint4 b, uint4 c);
16214 uint8 __ovld amd_bytealign(uint8 a, uint8 b, uint8 c);
16215 uint16 __ovld amd_bytealign(uint16 a, uint16 b, uint16 c);
16218 uint2 __ovld amd_lerp(uint2 a, uint2 b, uint2 c);
16219 uint3 __ovld amd_lerp(uint3 a, uint3 b, uint3 c);
16220 uint4 __ovld amd_lerp(uint4 a, uint4 b, uint4 c);
16221 uint8 __ovld amd_lerp(uint8 a, uint8 b, uint8 c);
16222 uint16 __ovld amd_lerp(uint16 a, uint16 b, uint16 c);
16229 uint2 __ovld amd_sadhi(uint2 a, uint2 b, uint2 c);
16230 uint3 __ovld amd_sadhi(uint3 a, uint3 b, uint3 c);
16231 uint4 __ovld amd_sadhi(uint4 a, uint4 b, uint4 c);
16232 uint8 __ovld amd_sadhi(uint8 a, uint8 b, uint8 c);
16233 uint16 __ovld amd_sadhi(uint16 a, uint16 b, uint16 c);
16236 uint2 __ovld amd_sad(uint2 a, uint2 b, uint2 c);
16237 uint3 __ovld amd_sad(uint3 a, uint3 b, uint3 c);
16238 uint4 __ovld amd_sad(uint4 a, uint4 b, uint4 c);
16239 uint8 __ovld amd_sad(uint8 a, uint8 b, uint8 c);
16240 uint16 __ovld amd_sad(uint16 a, uint16 b, uint16 c);
16243 float2 __ovld amd_unpack0(uint2 a);
16244 float3 __ovld amd_unpack0(uint3 a);
16245 float4 __ovld amd_unpack0(uint4 a);
16246 float8 __ovld amd_unpack0(uint8 a);
16247 float16 __ovld amd_unpack0(uint16 a);
16250 float2 __ovld amd_unpack1(uint2 a);
16251 float3 __ovld amd_unpack1(uint3 a);
16252 float4 __ovld amd_unpack1(uint4 a);
16253 float8 __ovld amd_unpack1(uint8 a);
16254 float16 __ovld amd_unpack1(uint16 a);
16257 float2 __ovld amd_unpack2(uint2 a);
16258 float3 __ovld amd_unpack2(uint3 a);
16259 float4 __ovld amd_unpack2(uint4 a);
16260 float8 __ovld amd_unpack2(uint8 a);
16261 float16 __ovld amd_unpack2(uint16 a);
16264 float2 __ovld amd_unpack3(uint2 a);
16265 float3 __ovld amd_unpack3(uint3 a);
16266 float4 __ovld amd_unpack3(uint4 a);
16267 float8 __ovld amd_unpack3(uint8 a);
16268 float16 __ovld amd_unpack3(uint16 a);
16269 #endif // cl_amd_media_ops
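amd_sad accumulates per-byte absolute differences, the core of block-matching metrics. A sketch, assuming cl_amd_media_ops and packed 8-bit pixels, built on the uint4 overload declared above; the helper name is illustrative:

uint4 block_sad4(__global const uint4 *a, __global const uint4 *b, int n) {
    uint4 acc = (uint4)(0u);
    for (int i = 0; i < n; ++i)
        acc = amd_sad(a[i], b[i], acc);   // per-byte |a - b| added into acc
    return acc;
}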
16271 #ifdef cl_amd_media_ops2
16273 int2 __ovld amd_bfe(int2 src0, uint2 src1, uint2 src2);
16274 int3 __ovld amd_bfe(int3 src0, uint3 src1, uint3 src2);
16275 int4 __ovld amd_bfe(int4 src0, uint4 src1, uint4 src2);
16276 int8 __ovld amd_bfe(int8 src0, uint8 src1, uint8 src2);
16277 int16 __ovld amd_bfe(int16 src0, uint16 src1, uint16 src2);
16280 uint2 __ovld amd_bfe(uint2 src0, uint2 src1, uint2 src2);
16281 uint3 __ovld amd_bfe(uint3 src0, uint3 src1, uint3 src2);
16282 uint4 __ovld amd_bfe(uint4 src0, uint4 src1, uint4 src2);
16283 uint8 __ovld amd_bfe(uint8 src0, uint8 src1, uint8 src2);
16284 uint16 __ovld amd_bfe(uint16 src0, uint16 src1, uint16 src2);
16287 uint2 __ovld amd_bfm(uint2 src0, uint2 src1);
16288 uint3 __ovld amd_bfm(uint3 src0, uint3 src1);
16289 uint4 __ovld amd_bfm(uint4 src0, uint4 src1);
16290 uint8 __ovld amd_bfm(uint8 src0, uint8 src1);
16291 uint16 __ovld amd_bfm(uint16 src0, uint16 src1);
16293 float __ovld amd_max3(float src0, float src1, float src2);
16294 float2 __ovld amd_max3(float2 src0, float2 src1, float2 src2);
16295 float3 __ovld amd_max3(float3 src0, float3 src1, float3 src2);
16296 float4 __ovld amd_max3(float4 src0, float4 src1, float4 src2);
16297 float8 __ovld amd_max3(float8 src0, float8 src1, float8 src2);
16298 float16 __ovld amd_max3(float16 src0, float16 src1, float16 src2);
16300 int __ovld amd_max3(int src0, int src1, int src2);
16301 int2 __ovld amd_max3(int2 src0, int2 src1, int2 src2);
16302 int3 __ovld amd_max3(int3 src0, int3 src1, int3 src2);
16303 int4 __ovld amd_max3(int4 src0, int4 src1, int4 src2);
16304 int8 __ovld amd_max3(int8 src0, int8 src1, int8 src2);
16305 int16 __ovld amd_max3(int16 src0, int16 src1, int16 src2);
16308 uint2 __ovld amd_max3(uint2 src0, uint2 src1, uint2 src2);
16309 uint3 __ovld amd_max3(uint3 src0, uint3 src1, uint3 src2);
16310 uint4 __ovld amd_max3(uint4 src0, uint4 src1, uint4 src2);
16311 uint8 __ovld amd_max3(uint8 src0, uint8 src1, uint8 src2);
16312 uint16 __ovld amd_max3(uint16 src0, uint16 src1, uint16 src2);
16314 float __ovld amd_median3(float src0, float src1, float src2);
16315 float2 __ovld amd_median3(float2 src0, float2 src1, float2 src2);
16316 float3 __ovld amd_median3(float3 src0, float3 src1, float3 src2);
16317 float4 __ovld amd_median3(float4 src0, float4 src1, float4 src2);
16318 float8 __ovld amd_median3(float8 src0, float8 src1, float8 src2);
16319 float16 __ovld amd_median3(float16 src0, float16 src1, float16 src2);
16321 int __ovld amd_median3(int src0, int src1, int src2);
16322 int2 __ovld amd_median3(int2 src0, int2 src1, int2 src2);
16323 int3 __ovld amd_median3(int3 src0, int3 src1, int3 src2);
16324 int4 __ovld amd_median3(int4 src0, int4 src1, int4 src2);
16325 int8 __ovld amd_median3(int8 src0, int8 src1, int8 src2);
16326 int16 __ovld amd_median3(int16 src0, int16 src1, int16 src2);
16329 uint2 __ovld amd_median3(uint2 src0, uint2 src1, uint2 src2);
16330 uint3 __ovld amd_median3(uint3 src0, uint3 src1, uint3 src2);
16331 uint4 __ovld amd_median3(uint4 src0, uint4 src1, uint4 src2);
16332 uint8 __ovld amd_median3(uint8 src0, uint8 src1, uint8 src2);
16333 uint16 __ovld amd_median3(uint16 src0, uint16 src1, uint16 src2);
16335 float __ovld amd_min3(float src0, float src1, float src);
16336 float2 __ovld amd_min3(float2 src0, float2 src1, float2 src);
16337 float3 __ovld amd_min3(float3 src0, float3 src1, float3 src);
16338 float4 __ovld amd_min3(float4 src0, float4 src1, float4 src);
16339 float8 __ovld amd_min3(float8 src0, float8 src1, float8 src);
16340 float16 __ovld amd_min3(float16 src0, float16 src1, float16 src);
16342 int __ovld amd_min3(int src0, int src1, int src2);
16343 int2 __ovld amd_min3(int2 src0, int2 src1, int2 src2);
16344 int3 __ovld amd_min3(int3 src0, int3 src1, int3 src2);
16345 int4 __ovld amd_min3(int4 src0, int4 src1, int4 src2);
16346 int8 __ovld amd_min3(int8 src0, int8 src1, int8 src2);
16347 int16 __ovld amd_min3(int16 src0, int16 src1, int16 src2);
16350 uint2 __ovld amd_min3(uint2 src0, uint2 src1, uint2 src2);
16351 uint3 __ovld amd_min3(uint3 src0, uint3 src1, uint3 src2);
16352 uint4 __ovld amd_min3(uint4 src0, uint4 src1, uint4 src2);
16353 uint8 __ovld amd_min3(uint8 src0, uint8 src1, uint8 src2);
16354 uint16 __ovld amd_min3(uint16 src0, uint16 src1, uint16 src2);
16357 ulong2 __ovld amd_mqsad(ulong2 src0, uint2 src1, ulong2 src2);
16358 ulong3 __ovld amd_mqsad(ulong3 src0, uint3 src1, ulong3 src2);
16359 ulong4 __ovld amd_mqsad(ulong4 src0, uint4 src1, ulong4 src2);
16360 ulong8 __ovld amd_mqsad(ulong8 src0, uint8 src1, ulong8 src2);
16361 ulong16 __ovld amd_mqsad(ulong16 src0, uint16 src1, ulong16 src2);
16364 ulong2 __ovld amd_qsad(ulong2 src0, uint2 src1, ulong2 src2);
16365 ulong3 __ovld amd_qsad(ulong3 src0, uint3 src1, ulong3 src2);
16366 ulong4 __ovld amd_qsad(ulong4 src0, uint4 src1, ulong4 src2);
16367 ulong8 __ovld amd_qsad(ulong8 src0, uint8 src1, ulong8 src2);
16368 ulong16 __ovld amd_qsad(ulong16 src0, uint16 src1, ulong16 src2);
16371 uint2 __ovld amd_msad(uint2 src0, uint2 src1, uint2 src2);
16372 uint3 __ovld amd_msad(uint3 src0, uint3 src1, uint3 src2);
16373 uint4 __ovld amd_msad(uint4 src0, uint4 src1, uint4 src2);
16374 uint8 __ovld amd_msad(uint8 src0, uint8 src1, uint8 src2);
16375 uint16 __ovld amd_msad(uint16 src0, uint16 src1, uint16 src2);
16378 uint2 __ovld amd_sadd(uint2 src0, uint2 src1, uint2 src2);
16379 uint3 __ovld amd_sadd(uint3 src0, uint3 src1, uint3 src2);
16380 uint4 __ovld amd_sadd(uint4 src0, uint4 src1, uint4 src2);
16381 uint8 __ovld amd_sadd(uint8 src0, uint8 src1, uint8 src2);
16382 uint16 __ovld amd_sadd(uint16 src0, uint16 src1, uint16 src2);
16385 uint2 __ovld amd_sadw(uint2 src0, uint2 src1, uint2 src2);
16386 uint3 __ovld amd_sadw(uint3 src0, uint3 src1, uint3 src2);
16387 uint4 __ovld amd_sadw(uint4 src0, uint4 src1, uint4 src2);
16388 uint8 __ovld amd_sadw(uint8 src0, uint8 src1, uint8 src2);
16389 uint16 __ovld amd_sadw(uint16 src0, uint16 src1, uint16 src2);
16390 #endif // cl_amd_media_ops2
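amd_median3 doubles as a clamp when the bounds are ordered: the median of {x, lo, hi} equals clamp(x, lo, hi) for lo <= hi. A one-liner sketch (assuming cl_amd_media_ops2) using the float4 overload declared above:

float4 clamp_via_median(float4 x, float4 lo, float4 hi) {
    return amd_median3(x, lo, hi);   // same as clamp(x, lo, hi) when lo <= hi
}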
16393 #pragma OPENCL EXTENSION all : disable
16397 #endif //_OPENCL_H_
uchar16 __ovld __cnfn convert_uchar16_rtn(char16)
void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order)
ushort __ovld __cnfn convert_ushort_sat_rtn(char)
uchar __ovld __cnfn convert_uchar_sat_rtz(char)
int16 __ovld __cnfn convert_int16_sat_rtp(char16)
short4 __ovld __cnfn convert_short4_rtp(char4)
float __ovld __cnfn logb(float x)
Compute the exponent of x, which is the integral part of log_r | x |.
short16 __ovld __cnfn convert_short16_sat_rtp(char16)
float __ovld __cnfn erfc(float)
Complementary error function.
void __ovld vstore_half3_rtz(float3 data, size_t offset, half *p)
uchar4 __ovld __cnfn convert_uchar4_rte(char4)
float __ovld __cnfn tanh(float)
Compute hyperbolic tangent.
uchar16 __ovld __cnfn convert_uchar16_sat_rte(char16)
ulong8 __ovld __cnfn convert_ulong8_rtz(char8)
float __ovld __cnfn minmag(float x, float y)
Returns x if | x | < | y |, y if | y | < | x |, otherwise fmin(x, y).
long4 __ovld __cnfn convert_long4_sat_rtz(char4)
float __ovld __cnfn half_divide(float x, float y)
Compute x / y.
ushort2 __ovld __cnfn convert_ushort2_rtp(char2)
long2 __ovld __cnfn convert_long2_sat_rtz(char2)
uint3 __ovld __cnfn convert_uint3(char3)
int2 __ovld __cnfn get_image_dim(read_only image2d_t image)
Return the 2D image width and height as an int2 type.
short3 __ovld __cnfn convert_short3_sat_rte(char3)
long __ovld __cnfn convert_long_sat_rte(char)
int16 __ovld __cnfn convert_int16_sat(char16)
float __ovld __cnfn trunc(float)
Round to integral value using the round to zero rounding mode.
int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order)
short16 __ovld __cnfn convert_short16_rte(char16)
uchar4 __ovld __cnfn convert_uchar4_sat_rte(char4)
ulong8 __ovld __cnfn convert_ulong8_rte(char8)
float __ovld __cnfn cospi(float x)
Compute cos (PI * x).
short16 __ovld __cnfn convert_short16_sat_rtn(char16)
float __ovld __cnfn remainder(float x, float y)
Compute the value r such that r = x - n*y, where n is the integer nearest the exact value of x/y...
float3 __ovld __cnfn convert_float3_rtn(char3)
void __ovld write_imagei(write_only image2d_t image, int2 coord, int4 color)
float __ovld __cnfn fmod(float x, float y)
Modulus.
float __ovld __cnfn native_rsqrt(float x)
Compute inverse square root over an implementation-defined range.
float __ovld __cnfn native_exp(float x)
Compute the base-e exponential of x over an implementation-defined range.
char2 __ovld __cnfn convert_char2_rtz(char2)
int __ovld __cnfn convert_int_rte(char)
ulong4 __ovld __cnfn convert_ulong4_sat_rtp(char4)
void __ovld vstore_half8_rtn(float8 data, size_t offset, half *p)
uint3 __ovld __cnfn convert_uint3_sat_rtp(char3)
uint8 __ovld __cnfn convert_uint8_rtn(char8)
bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected, int desired, memory_order success, memory_order failure)
void __ovld vstore_half8(float8 data, size_t offset, half *p)
uchar2 __ovld __cnfn convert_uchar2_sat(char2)
uint8 __ovld __cnfn convert_uint8_sat(char8)
void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order)
short2 __ovld __cnfn convert_short2_sat_rtz(char2)
__SIZE_TYPE__ size_t
The unsigned integer type of the result of the sizeof operator.
ulong3 __ovld __cnfn convert_ulong3_sat_rtp(char3)
int3 __ovld __cnfn convert_int3_sat_rtz(char3)
long4 __ovld __cnfn convert_long4_rtz(char4)
float __ovld __cnfn ceil(float)
Round to integral value using the round to positive infinity rounding mode.
ulong4 __ovld __cnfn convert_ulong4_sat_rtn(char4)
uchar3 __ovld __cnfn convert_uchar3_sat_rtz(char3)
long8 __ovld __cnfn convert_long8_rtn(char8)
int __ovld __cnfn get_image_width(read_only image1d_t image)
Return the image width in pixels.
char3 __ovld __cnfn convert_char3(char3)
float3 __ovld vload_half3(size_t offset, const __constant half *p)
int __ovld __cnfn mul24(int x, int y)
Multiply two 24-bit integer values x and y.
uchar __ovld __cnfn convert_uchar_rtz(char)
ushort16 __ovld __cnfn convert_ushort16_sat_rtn(char16)
int __ovld atomic_or(volatile __global int *p, int val)
Read the 32-bit value (referred to as old) stored at the location pointed to by p.
void __ovld vstorea_half16_rtn(float16 data, size_t offset, half *p)
ushort __ovld __cnfn convert_ushort_rtn(char)
int __ovld __cnfn all(char x)
Returns 1 if the most significant bit in all components of x is set; otherwise returns 0...
float __ovld __cnfn native_powr(float x, float y)
Compute x to the power y, where x is >= 0.
char __ovld __cnfn convert_char_sat_rte(char)
void __ovld vstorea_half16_rtp(float16 data, size_t offset, half *p)
float __ovld __cnfn tgamma(float)
Compute the gamma function.
uint3 __ovld __cnfn convert_uint3_rte(char3)
ulong __ovld __cnfn convert_ulong_sat_rtp(char)
char4 __ovld __cnfn convert_char4_rtn(char4)
bool __ovld atomic_compare_exchange_weak(volatile atomic_int *object, int *expected, int desired)
long16 __ovld __cnfn convert_long16_sat_rtz(char16)
int __ovld __cnfn isgreater(float x, float y)
Returns the component-wise compare of x > y.
uchar3 __ovld __cnfn convert_uchar3_rtn(char3)
uint16 __ovld __cnfn convert_uint16_sat(char16)
ulong2 __ovld __cnfn convert_ulong2(char2)
short3 __ovld __cnfn convert_short3_sat_rtn(char3)
int __ovld __cnfn signbit(float)
Test for sign bit.
uint __ovld __cnfn convert_uint_sat_rte(char)
ushort3 __ovld __cnfn convert_ushort3_rte(char3)
int4 __ovld __cnfn convert_int4_sat_rtn(char4)
long __ovld __cnfn convert_long_rtp(char)
long3 __ovld __cnfn convert_long3_sat_rtz(char3)
float __ovld __cnfn tanpi(float x)
Compute tan (PI * x).
void __ovld vstorea_half_rtz(float data, size_t offset, half *p)
float8 __ovld __cnfn convert_float8_rte(char8)
float16 __ovld __cnfn convert_float16_rtz(char16)
char4 __ovld __cnfn convert_char4_sat_rtz(char4)
float __ovld __cnfn expm1(float x)
Compute e^x - 1.0.
int __ovld atomic_fetch_add(volatile atomic_int *object, int operand)
queue_t __ovld get_default_queue(void)
int __ovld __conv work_group_scan_inclusive_min(int x)
char16 __ovld vload16(size_t offset, const __constant char *p)
float __ovld __cnfn pown(float x, int y)
Compute x to the power y, where y is an integer.
ulong __ovld __cnfn convert_ulong_rtz(char)
float __ovld __cnfn fmax(float x, float y)
Returns y if x < y, otherwise it returns x.
float __ovld __cnfn nextafter(float x, float y)
Computes the next representable single-precision floating-point value following x in the direction of...
float __ovld __cnfn atan(float y_over_x)
Arc tangent function.
ushort16 __ovld __cnfn convert_ushort16_sat_rte(char16)
int2 __ovld __cnfn convert_int2_sat_rtp(char2)
int16 __ovld __cnfn convert_int16_rtp(char16)
uint3 __ovld __cnfn convert_uint3_rtn(char3)
long16 __ovld __cnfn convert_long16_sat(char16)
int __ovld atomic_fetch_sub(volatile atomic_int *object, int operand)
int __ovld __conv work_group_scan_exclusive_min(int x)
uchar __ovld __cnfn convert_uchar_sat_rtp(char)
float __ovld __cnfn native_tan(float x)
Compute tangent over an implementation-defined range.
ushort3 __ovld __cnfn convert_ushort3_rtz(char3)
uchar3 __ovld __cnfn convert_uchar3_rtp(char3)
float __ovld __cnfn asinpi(float x)
Compute asin (x) / PI.
int __ovld atomic_min(volatile __global int *p, int val)
Read the 32-bit value (referred to as old) stored at the location pointed to by p.
uchar2 __ovld __cnfn convert_uchar2_sat_rte(char2)
char8 __ovld __cnfn convert_char8_rtp(char8)
float __ovld __cnfn cbrt(float)
Compute cube-root.
bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order)
char __ovld __cnfn clamp(char x, char minval, char maxval)
Returns min(max(x, minval), maxval).
uchar __ovld __cnfn convert_uchar_rtp(char)
float __ovld __cnfn log10(float)
Compute a base 10 logarithm.
uchar2 __ovld __cnfn convert_uchar2_rte(char2)
float __ovld __cnfn half_log10(float x)
Compute a base 10 logarithm.
ndrange_t __ovld ndrange_1D(size_t)
uint2 __ovld __cnfn convert_uint2_sat_rtz(char2)
float __ovld __cnfn native_exp10(float x)
Compute the base-10 exponential of x over an implementation-defined range.
int __ovld atomic_xor(volatile __global int *p, int val)
Read the 32-bit value (referred to as old) stored at the location pointed to by p.
float __ovld __cnfn distance(float p0, float p1)
Returns the distance between p0 and p1.
uchar8 __ovld __cnfn convert_uchar8_rte(char8)
char8 __ovld __cnfn convert_char8_sat(char8)
char16 __ovld __cnfn convert_char16(char16)
float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, int2 coord)
Use the coordinate (coord.xy) to do an element lookup in the 2D image object specified by image...
uchar8 __ovld __cnfn convert_uchar8_rtz(char8)
char16 __ovld __cnfn convert_char16_sat_rtp(char16)
char4 __ovld __cnfn convert_char4(char4)
size_t __ovld __cnfn get_global_id(uint dimindx)
Returns the unique global work-item ID value for dimension identified by dimindx. ...
float __ovld __cnfn half_sqrt(float x)
Compute square root.
ushort16 __ovld __cnfn convert_ushort16_rtn(char16)
uchar4 __ovld __cnfn convert_uchar4_rtz(char4)
void __ovld __conv barrier(cl_mem_fence_flags flags)
All work-items in a work-group executing the kernel on a processor must execute this function before ...
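A common use of barrier is to separate the load and combine phases of a work-group reduction. A sketch assuming the local size is a power of two and tmp holds one float per work-item:

__kernel void wg_sum(__global const float *in, __global float *out,
                     __local float *tmp) {
    size_t lid = get_local_id(0);
    tmp[lid] = in[get_global_id(0)];
    barrier(CLK_LOCAL_MEM_FENCE);                 // all stores visible
    for (size_t s = get_local_size(0) / 2; s > 0; s /= 2) {
        if (lid < s)
            tmp[lid] += tmp[lid + s];
        barrier(CLK_LOCAL_MEM_FENCE);             // level-by-level sync
    }
    if (lid == 0)
        out[get_group_id(0)] = tmp[0];
}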
ulong2 __ovld __cnfn convert_ulong2_sat_rte(char2)
void __ovld vstore_half3_rtn(float3 data, size_t offset, half *p)
float __ovld __cnfn native_log2(float x)
Compute a base 2 logarithm over an implementation-defined range.
float __ovld __cnfn radians(float degrees)
Converts degrees to radians, i.e. (PI / 180) * degrees.
size_t __ovld __cnfn get_group_id(uint dimindx)
get_group_id returns the work-group ID, which is a number from 0 to get_num_groups(dimindx) - 1.
uint4 __ovld __cnfn convert_uint4_rtz(char4)
void __ovld vstore_half4_rtz(float4 data, size_t offset, half *p)
void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void *value)
uint16 __ovld __cnfn convert_uint16(char16)
char16 __ovld __cnfn convert_char16_rtn(char16)
float __ovld __cnfn erf(float)
Error function encountered in integrating the normal distribution.
uint16 __ovld __cnfn convert_uint16_rte(char16)
float __ovld __cnfn asinh(float)
Inverse hyperbolic sine.
void __ovld vstorea_half3_rtz(float3 data, size_t offset, half *p)
ulong16 __ovld __cnfn convert_ulong16_sat_rtn(char16)
int2 __ovld __cnfn convert_int2_sat_rtz(char2)
char2 __ovld __cnfn convert_char2_sat(char2)
char __ovld __cnfn popcount(char x)
void __ovld vstore_half_rtz(float data, size_t offset, half *p)
uint2 __ovld __cnfn convert_uint2_rtz(char2)
int __ovld atomic_cmpxchg(volatile __global int *p, int cmp, int val)
Read the 32-bit value (referred to as old) stored at the location pointed to by p.
char __ovld __cnfn convert_char_sat_rtn(char)
void __ovld vstorea_half_rtp(float data, size_t offset, half *p)
long8 __ovld __cnfn convert_long8_sat(char8)
char __ovld __cnfn hadd(char x, char y)
Returns (x + y) >> 1.
void __ovld read_mem_fence(cl_mem_fence_flags flags)
Read memory barrier that orders only loads.
float2 __ovld __cnfn convert_float2_rtp(char2)
ulong4 __ovld __cnfn convert_ulong4_sat(char4)
ushort __ovld __cnfn convert_ushort_sat_rte(char)
void __ovld vstore_half16_rtz(float16 data, size_t offset, half *p)
ulong4 __ovld __cnfn convert_ulong4_sat_rte(char4)
float __ovld __cnfn normalize(float p)
Returns a vector in the same direction as p but with a length of 1.
long8 __ovld __cnfn convert_long8_rtz(char8)
float __ovld __cnfn copysign(float x, float y)
Returns x with its sign changed to match the sign of y.
void __ovld write_imageui(write_only image2d_t image, int2 coord, uint4 color)
unsigned short ushort
An unsigned 16-bit integer.
float __ovld __cnfn mad(float a, float b, float c)
mad approximates a * b + c.
float __ovld __cnfn half_rsqrt(float x)
Compute inverse square root.
uchar2 __ovld __cnfn convert_uchar2_sat_rtn(char2)
ushort16 __ovld __cnfn convert_ushort16_rtz(char16)
char2 __ovld __cnfn convert_char2_rte(char2)
int8 __ovld __cnfn convert_int8_rtz(char8)
long2 __ovld __cnfn convert_long2_rtp(char2)
int3 __ovld __cnfn convert_int3_rte(char3)
long2 __ovld __cnfn convert_long2_rtz(char2)
uint __ovld __cnfn convert_uint_sat_rtz(char)
char8 __ovld vload8(size_t offset, const __constant char *p)
uchar3 __ovld __cnfn convert_uchar3_sat_rtp(char3)
int __ovld __cnfn mad24(int x, int y, int z)
Multiply two 24-bit integer values x and y and add the 32-bit integer result to the 32-bit integer z...
ushort __ovld __cnfn convert_ushort_rtp(char)
size_t __ovld get_global_linear_id(void)
void __ovld prefetch(const __global char *p, size_t num_elements)
Prefetch num_elements * sizeof(gentype) bytes into the global cache.
char2 __ovld __cnfn convert_char2_sat_rte(char2)
ushort2 __ovld __cnfn convert_ushort2_sat_rtp(char2)
ndrange_t __ovld ndrange_2D(const size_t[2])
void __ovld vstore_half8_rtp(float8 data, size_t offset, half *p)
ulong2 __ovld __cnfn convert_ulong2_sat_rtz(char2)
uchar8 __ovld __cnfn convert_uchar8_sat(char8)
clk_event_t __ovld create_user_event(void)
int __ovld __cnfn ilogb(float x)
Return the exponent as an integer value.
float __ovld __cnfn sin(float)
Compute sine.
short __ovld __cnfn convert_short_rtz(char)
uint4 __ovld __cnfn convert_uint4(char4)
bool __ovld atomic_compare_exchange_strong(volatile atomic_int *object, int *expected, int desired)
ulong3 __ovld __cnfn convert_ulong3(char3)
uint __ovld __cnfn convert_uint_sat_rtn(char)
int __ovld __conv work_group_reduce_max(int x)
short8 __ovld __cnfn convert_short8_rtp(char8)
short2 __ovld __cnfn convert_short2_sat(char2)
float __ovld __cnfn native_exp2(float x)
Compute the base-2 exponential of x over an implementation-defined range.
ulong __ovld __cnfn convert_ulong_sat_rtn(char)
int __ovld __cnfn isfinite(float)
Test for finite value.
short8 __ovld __cnfn convert_short8_sat_rtp(char8)
int8 __ovld __cnfn convert_int8_rtn(char8)
char8 __ovld __cnfn convert_char8_rte(char8)
uchar __ovld __cnfn convert_uchar(char)
ulong16 __ovld __cnfn convert_ulong16_sat_rte(char16)
float __ovld __cnfn nan(uint nancode)
Returns a quiet NaN.
int __ovld __cnfn islessequal(float x, float y)
Returns the component-wise compare of x <= y.
ulong4 __ovld __cnfn convert_ulong4_rtz(char4)
void __ovld vstorea_half4_rtp(float4 data, size_t offset, half *p)
char __ovld __cnfn convert_char_sat_rtz(char)
uchar4 __ovld __cnfn convert_uchar4_rtp(char4)
int __ovld __cnfn isunordered(float x, float y)
Test if arguments are unordered.
char4 __ovld __cnfn convert_char4_sat_rtp(char4)
void __ovld vstore_half2_rtn(float2 data, size_t offset, half *p)
int __ovld atomic_fetch_xor(volatile atomic_int *object, int operand)
ulong2 __ovld __cnfn convert_ulong2_rtz(char2)
long3 __ovld __cnfn convert_long3_rtp(char3)
char __ovld __cnfn mad_hi(char a, char b, char c)
Returns mul_hi(a, b) + c.
uchar3 __ovld __cnfn convert_uchar3_sat(char3)
ulong __ovld __cnfn convert_ulong_sat_rte(char)
uint __ovld __cnfn convert_uint_rte(char)
long8 __ovld __cnfn convert_long8_sat_rte(char8)
long8 __ovld __cnfn convert_long8(char8)
uint16 __ovld __cnfn convert_uint16_rtz(char16)
long __ovld __cnfn convert_long_sat_rtp(char)
void __ovld vstore3(char3 data, size_t offset, char *p)
uint4 __ovld __cnfn convert_uint4_rte(char4)
float __ovld __cnfn log2(float)
Compute a base 2 logarithm.
uchar16 __ovld __cnfn convert_uchar16_rtz(char16)
uchar4 __ovld __cnfn convert_uchar4_sat(char4)
float16 __ovld __cnfn convert_float16_rtn(char16)
uchar8 __ovld __cnfn convert_uchar8_sat_rte(char8)
float __ovld __cnfn powr(float x, float y)
Compute x to the power y, where x is >= 0.
uint __ovld __cnfn convert_uint_rtp(char)
short16 __ovld __cnfn convert_short16_rtn(char16)
float8 __ovld vloada_half8(size_t offset, const __constant half *p)
short16 __ovld __cnfn convert_short16_rtz(char16)
void __ovld vstore_half2_rtp(float2 data, size_t offset, half *p)
ulong4 __ovld __cnfn convert_ulong4_sat_rtz(char4)
float __ovld __cnfn round(float x)
Return the integral value nearest to x rounding halfway cases away from zero, regardless of the curre...
int __ovld __cnfn isless(float x, float y)
Returns the component-wise compare of x < y.
uchar16 __ovld __cnfn convert_uchar16_sat_rtn(char16)
uint16 __ovld __cnfn convert_uint16_sat_rte(char16)
float __ovld __cnfn tan(float)
Compute tangent.
char4 __ovld __cnfn convert_char4_rte(char4)
void __ovld vstorea_half8_rtp(float8 data, size_t offset, half *p)
void __ovld vstore_half_rtp(float data, size_t offset, half *p)
short8 __ovld __cnfn convert_short8_sat_rtz(char8)
ushort3 __ovld __cnfn convert_ushort3(char3)
void __ovld write_mem_fence(cl_mem_fence_flags flags)
Write memory barrier that orders only stores.
uint3 __ovld __cnfn convert_uint3_sat(char3)
int __ovld atomic_xchg(volatile __global int *p, int val)
Swaps the old value stored at location p with new value given by val.
float4 __ovld __cnfn convert_float4_rtp(char4)
ulong16 __ovld __cnfn convert_ulong16_rtz(char16)
int4 __ovld __cnfn convert_int4_sat(char4)
int __ovld __conv work_group_scan_exclusive_max(int x)
float __ovld sincos(float x, float *cosval)
Compute sine and cosine of x.
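An illustrative sketch (names are hypothetical): sincos returns the sine and writes the cosine through its pointer argument, avoiding two separate evaluations.

__kernel void polar_to_cartesian(__global const float *angle, __global float2 *xy) {
    size_t i = get_global_id(0);
    float c;
    float s = sincos(angle[i], &c);  // s = sin(angle), c = cos(angle)
    xy[i] = (float2)(c, s);
}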
float __ovld __cnfn rint(float)
Round to integral value (using round to nearest even rounding mode) in floating-point format...
long3 __ovld __cnfn convert_long3_rte(char3)
void __ovld vstore16(char16 data, size_t offset, char *p)
ushort4 __ovld __cnfn convert_ushort4_sat_rtp(char4)
long16 __ovld __cnfn convert_long16_sat_rtp(char16)
ulong __ovld __cnfn convert_ulong_rtp(char)
uchar __ovld __cnfn convert_uchar_sat_rte(char)
short __ovld __cnfn convert_short_sat(char)
int4 __ovld __cnfn convert_int4_sat_rtz(char4)
uint16 __ovld __cnfn convert_uint16_rtp(char16)
bool __ovld atomic_flag_test_and_set(volatile atomic_flag *object)
int4 __ovld __cnfn convert_int4_rtp(char4)
float __ovld __cnfn degrees(float radians)
Converts radians to degrees, i.e. (180 / PI) * radians.
int __ovld __conv work_group_scan_inclusive_max(int x)
void __ovld vstore2(char2 data, size_t offset, char *p)
ushort3 __ovld __cnfn convert_ushort3_sat(char3)
long16 __ovld __cnfn convert_long16_rtp(char16)
void __ovld vstore_half_rte(float data, size_t offset, half *p)
void __ovld vstorea_half4(float4 data, size_t offset, half *p)
long3 __ovld __cnfn convert_long3(char3)
long8 __ovld __cnfn convert_long8_rtp(char8)
float4 __ovld __cnfn convert_float4(char4)
int4 __ovld __cnfn convert_int4_sat_rtp(char4)
uchar2 __ovld __cnfn convert_uchar2_rtp(char2)
ulong2 __ovld __cnfn convert_ulong2_rtn(char2)
char16 __ovld __cnfn convert_char16_rtz(char16)
uchar8 __ovld __cnfn convert_uchar8(char8)
uint4 __ovld __cnfn convert_uint4_sat_rte(char4)
int __ovld __cnfn convert_int(char)
void __ovld vstore_half8_rtz(float8 data, size_t offset, half *p)
int2 __ovld __cnfn convert_int2_sat_rte(char2)
char __ovld __cnfn convert_char_rte(char)
void __ovld vstore4(char4 data, size_t offset, char *p)
float3 __ovld __cnfn convert_float3_rtz(char3)
ulong16 __ovld __cnfn convert_ulong16_rtn(char16)
int16 __ovld __cnfn convert_int16_sat_rtz(char16)
ushort8 __ovld __cnfn convert_ushort8_sat(char8)
int __ovld __conv work_group_broadcast(int a, size_t local_id)
char3 __ovld __cnfn convert_char3_sat(char3)
int __ovld __cnfn isequal(float x, float y)
Returns the component-wise compare of x == y.
void __ovld vstore8(char8 data, size_t offset, char *p)
float __ovld __cnfn log1p(float x)
Compute a base e logarithm of (1.0 + x).
char8 __ovld __cnfn convert_char8_rtz(char8)
char __ovld __cnfn clz(char x)
Returns the number of leading 0-bits in x, starting at the most significant bit position.
float __ovld __cnfn exp10(float)
Exponential base 10 function.
__INTPTR_TYPE__ intptr_t
A signed integer type with the property that any valid pointer to void can be converted to this type...
float __ovld __cnfn half_exp10(float x)
Compute the base-10 exponential of x.
int3 __ovld __cnfn convert_int3_rtz(char3)
ushort3 __ovld __cnfn convert_ushort3_sat_rtn(char3)
int8 __ovld __cnfn convert_int8_rtp(char8)
short3 __ovld __cnfn convert_short3_sat_rtp(char3)
event_t __ovld async_work_group_copy(__local char *dst, const __global char *src, size_t num_elements, event_t event)
Perform an async copy of num_elements gentype elements from src to dst.
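A minimal sketch (kernel name, tile size, and the assumption that the work-group size does not exceed TILE are hypothetical) of staging a tile into __local memory and waiting on the returned event.

#define TILE 64
__kernel void stage_tile(__global const char *src, __global char *dst) {
    __local char tile[TILE];
    size_t group_offset = get_group_id(0) * TILE;
    // Every work-item in the group must issue the same copy.
    event_t e = async_work_group_copy(tile, src + group_offset, TILE, 0);
    wait_group_events(1, &e);
    size_t lid = get_local_id(0);
    if (lid < TILE)
        dst[group_offset + lid] = tile[lid];  // read the staged data
}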
char16 __ovld __cnfn convert_char16_sat_rtn(char16)
int __ovld atomic_fetch_or(volatile atomic_int *object, int operand)
int __ovld atomic_fetch_min(volatile atomic_int *object, int operand)
uint2 __ovld __cnfn convert_uint2_sat_rtn(char2)
ulong8 __ovld __cnfn convert_ulong8_rtp(char8)
ushort4 __ovld __cnfn convert_ushort4_rte(char4)
int4 __ovld __cnfn convert_int4(char4)
uint __ovld __cnfn convert_uint(char)
void __ovld vstore_half_rtn(float data, size_t offset, half *p)
int2 __ovld __cnfn convert_int2_rtp(char2)
long2 __ovld __cnfn convert_long2_sat_rtn(char2)
float __ovld __cnfn convert_float_rtz(char)
int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, int2 coord)
void __ovld vstore_half4_rtp(float4 data, size_t offset, half *p)
char __ovld __cnfn mul_hi(char x, char y)
Computes x * y and returns the high half of the product of x and y.
uchar3 __ovld __cnfn convert_uchar3_sat_rtn(char3)
int __ovld __conv work_group_all(int predicate)
Returns a non-zero value if predicate evaluates to non-zero for all work-items in the work-group.
int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order)
short8 __ovld __cnfn convert_short8_rtn(char8)
void __ovld vstore_half16(float16 data, size_t offset, half *p)
void __ovld vstorea_half4_rtn(float4 data, size_t offset, half *p)
long2 __ovld __cnfn convert_long2_sat_rte(char2)
int3 __ovld __cnfn convert_int3_sat(char3)
float __ovld __cnfn log(float)
Compute natural logarithm.
uint3 __ovld __cnfn convert_uint3_sat_rtn(char3)
int __ovld __cnfn isnotequal(float x, float y)
Returns the component-wise compare of x != y.
ushort4 __ovld __cnfn convert_ushort4_sat_rtn(char4)
long8 __ovld __cnfn convert_long8_sat_rtn(char8)
uint8 __ovld __cnfn convert_uint8_rtp(char8)
uchar8 __ovld __cnfn convert_uchar8_rtn(char8)
ushort16 __ovld __cnfn convert_ushort16_sat(char16)
float __ovld modf(float x, float *iptr)
Decompose a floating-point number.
size_t __ovld get_enqueued_local_size(uint dimindx)
ushort2 __ovld __cnfn convert_ushort2_rte(char2)
float4 __ovld __cnfn cross(float4 p0, float4 p1)
Returns the cross product of p0.xyz and p1.xyz.
float3 __ovld __cnfn convert_float3_rte(char3)
int3 __ovld __cnfn convert_int3(char3)
void __ovld mem_fence(cl_mem_fence_flags flags)
Orders loads and stores of a work-item executing a kernel.
ushort3 __ovld __cnfn convert_ushort3_rtn(char3)
ushort4 __ovld __cnfn convert_ushort4_rtn(char4)
long3 __ovld __cnfn convert_long3_rtn(char3)
float16 __ovld __cnfn convert_float16_rtp(char16)
char2 __ovld __cnfn convert_char2(char2)
char2 __ovld __cnfn shuffle(char2 x, uchar2 mask)
The shuffle and shuffle2 built-in functions construct a permutation of elements from one or two input...
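A brief sketch (names are hypothetical) using the char4/uchar4 overload: each mask component selects a source element by index, here reversing the vector.

__kernel void reverse4(__global const char4 *in, __global char4 *out) {
    size_t i = get_global_id(0);
    uchar4 mask = (uchar4)(3, 2, 1, 0);  // pick source elements in reverse order
    out[i] = shuffle(in[i], mask);
}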
float __ovld __cnfn exp(float x)
Compute the base e exponential function of x.
int8 __ovld __cnfn convert_int8(char8)
float4 __ovld vloada_half4(size_t offset, const __constant half *p)
char __ovld __cnfn mad_sat(char a, char b, char c)
Returns a * b + c and saturates the result.
int2 __ovld __cnfn convert_int2_rtn(char2)
float2 __ovld vloada_half2(size_t offset, const __constant half *p)
short __ovld __cnfn convert_short_rtp(char)
int __ovld atomic_inc(volatile __global int *p)
Read the 32-bit value (referred to as old) stored at location pointed by p. Compute (old + 1) and store result at location pointed by p. The function returns old.
int __ovld atomic_and(volatile __global int *p, int val)
Read the 32-bit value (referred to as old) stored at location pointed by p. Compute (old & val) and store result at location pointed by p. The function returns old.
float __ovld fract(float x, float *iptr)
Returns fmin(x - floor(x), 0x1.fffffep-1f).
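A short sketch (names are hypothetical) splitting a value into its floor and fractional parts; the pointer argument receives floor(x).

__kernel void split_frac(__global const float *x, __global float *frac_part, __global float *int_part) {
    size_t i = get_global_id(0);
    float ip;
    frac_part[i] = fract(x[i], &ip);  // fractional part, strictly less than 1.0f
    int_part[i] = ip;                 // floor(x[i])
}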
uchar __ovld __cnfn convert_uchar_rtn(char)
short4 __ovld __cnfn convert_short4_rtn(char4)
uchar2 __ovld __cnfn convert_uchar2_sat_rtp(char2)
char4 __ovld vload4(size_t offset, const __constant char *p)
float __ovld __cnfn smoothstep(float edge0, float edge1, float x)
Returns 0.0 if x <= edge0 and 1.0 if x >= edge1 and performs smooth Hermite interpolation between 0 a...
uint2 __ovld __cnfn convert_uint2_sat(char2)
ushort __ovld __cnfn convert_ushort_sat_rtp(char)
uint __ovld __cnfn convert_uint_rtn(char)
float __ovld __cnfn convert_float_rtn(char)
char4 __ovld __cnfn convert_char4_sat_rtn(char4)
void __ovld vstore_half3(float3 data, size_t offset, half *p)
char2 __ovld __cnfn convert_char2_rtn(char2)
size_t __ovld __cnfn get_local_id(uint dimindx)
Returns the unique local work-item ID i.e.
char __ovld __cnfn convert_char_rtp(char)
uint2 __ovld __cnfn convert_uint2_sat_rtp(char2)
float __ovld __cnfn acos(float)
Arc cosine function.
uchar8 __ovld __cnfn convert_uchar8_sat_rtp(char8)
void __ovld write_imagef(write_only image2d_t image, int2 coord, float4 color)
Write color value to location specified by coordinate (coord.x, coord.y) in the 2D image object speci...
uint4 __ovld __cnfn convert_uint4_sat_rtz(char4)
char16 __ovld __cnfn convert_char16_sat(char16)
int __ovld __cnfn islessgreater(float x, float y)
Returns the component-wise compare of (x < y) || (x > y).
long16 __ovld __cnfn convert_long16_sat_rte(char16)
uchar4 __ovld __cnfn convert_uchar4_rtn(char4)
void __ovld __conv work_group_barrier(cl_mem_fence_flags flags, memory_scope scope)
short4 __ovld __cnfn convert_short4(char4)
float8 __ovld vload_half8(size_t offset, const __constant half *p)
ushort16 __ovld __cnfn convert_ushort16_sat_rtp(char16)
ushort __ovld __cnfn convert_ushort_rte(char)
float8 __ovld __cnfn convert_float8(char8)
char2 __ovld __cnfn convert_char2_rtp(char2)
short3 __ovld __cnfn convert_short3_rte(char3)
long16 __ovld __cnfn convert_long16_rte(char16)
ushort8 __ovld __cnfn convert_ushort8_rtp(char8)
uchar4 __ovld __cnfn convert_uchar4_sat_rtz(char4)
float __ovld __cnfn rsqrt(float)
Compute inverse square root.
short16 __ovld __cnfn convert_short16_rtp(char16)
ulong16 __ovld __cnfn convert_ulong16_sat_rtp(char16)
void __ovld atomic_work_item_fence(cl_mem_fence_flags flags, memory_order order, memory_scope scope)
float2 __ovld __cnfn convert_float2(char2)
uint3 __ovld __cnfn convert_uint3_rtz(char3)
float __ovld __cnfn fabs(float)
Compute absolute value of a floating-point number.
char16 __ovld __cnfn convert_char16_rtp(char16)
uchar2 __ovld __cnfn convert_uchar2_sat_rtz(char2)
float2 __ovld vload_half2(size_t offset, const __constant half *p)
Read sizeof (halfn) bytes of data from address (p + (offset * n)).
long __ovld __cnfn convert_long_rtz(char)
void __ovld vstorea_half2_rtn(float2 data, size_t offset, half *p)
int3 __ovld __cnfn convert_int3_rtp(char3)
int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order)
ulong4 __ovld __cnfn convert_ulong4_rtp(char4)
short3 __ovld __cnfn convert_short3_rtn(char3)
size_t __ovld __cnfn get_global_size(uint dimindx)
Returns the number of global work-items specified for dimension identified by dimindx.
ulong16 __ovld __cnfn convert_ulong16_sat(char16)
void __ovld vstorea_half8_rte(float8 data, size_t offset, half *p)
uchar __ovld __cnfn abs_diff(char x, char y)
Returns | x - y | without modulo overflow.
long __ovld __cnfn convert_long_sat_rtz(char)
ulong16 __ovld __cnfn convert_ulong16(char16)
short3 __ovld __cnfn convert_short3_sat(char3)
void __ovld vstore_half16_rte(float16 data, size_t offset, half *p)
char __ovld __cnfn min(char x, char y)
Returns y if y < x, otherwise it returns x.
uchar3 __ovld __cnfn convert_uchar3_rtz(char3)
uchar __ovld __cnfn convert_uchar_rte(char)
long4 __ovld __cnfn convert_long4_sat_rtn(char4)
int __ovld __conv work_group_reduce_min(int x)
unsigned int workDimension
cl_mem_fence_flags __ovld get_fence(const void *ptr)
char3 __ovld __cnfn convert_char3_sat_rtp(char3)
ushort4 __ovld __cnfn convert_ushort4(char4)
long2 __ovld __cnfn convert_long2_rte(char2)
int2 __ovld __cnfn convert_int2(char2)
long __ovld __cnfn convert_long(char)
float __ovld __cnfn native_recip(float x)
Compute reciprocal over an implementation-defined range.
void __ovld vstore_half3_rte(float3 data, size_t offset, half *p)
float __ovld __cnfn asin(float)
Arc sine function.
ulong16 __ovld __cnfn convert_ulong16_rte(char16)
long4 __ovld __cnfn convert_long4_sat(char4)
ushort16 __ovld __cnfn convert_ushort16(char16)
float16 __ovld vload_half16(size_t offset, const __constant half *p)
short3 __ovld __cnfn convert_short3(char3)
void __ovld vstorea_half8_rtz(float8 data, size_t offset, half *p)
int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order)
uchar __ovld __cnfn abs(char x)
Returns | x |.
int8 __ovld __cnfn convert_int8_sat(char8)
int2 __ovld __cnfn convert_int2_rtz(char2)
uint8 __ovld __cnfn convert_uint8_sat_rtz(char8)
float __ovld __cnfn native_sqrt(float x)
Compute square root over an implementation-defined range.
int __ovld atomic_add(volatile __global int *p, int val)
Read the 32-bit value (referred to as old) stored at location pointed by p. Compute (old + val) and store result at location pointed by p. The function returns old.
float8 __ovld __cnfn convert_float8_rtz(char8)
ushort2 __ovld __cnfn convert_ushort2_sat_rte(char2)
int __ovld __cnfn convert_int_rtz(char)
ulong3 __ovld __cnfn convert_ulong3_rte(char3)
ulong3 __ovld __cnfn convert_ulong3_rtz(char3)
char3 __ovld __cnfn convert_char3_rtz(char3)
uint __ovld __cnfn get_work_dim(void)
Returns the number of dimensions in use.
long2 __ovld __cnfn convert_long2_sat(char2)
float __ovld __cnfn length(float p)
Return the length of vector p, i.e., sqrt(p.x^2 + p.y^2 + ...)
float __ovld __cnfn acosh(float)
Inverse hyperbolic cosine.
ulong __ovld __cnfn convert_ulong_rtn(char)
short __ovld __cnfn convert_short_sat_rtz(char)
__UINTPTR_TYPE__ uintptr_t
An unsigned integer type with the property that any valid pointer to void can be converted to this ty...
uint2 __ovld __cnfn convert_uint2_rtn(char2)
float __ovld __cnfn hypot(float x, float y)
Compute the value of the square root of x^2 + y^2 without undue overflow or underflow.
uint8 __ovld __cnfn convert_uint8_rte(char8)
float __ovld __cnfn atan2(float y, float x)
Arc tangent of y / x.
float __ovld remquo(float x, float y, int *quo)
The remquo function computes the value r such that r = x - n*y, where n is the integer nearest the ex...
int __ovld atomic_exchange(volatile atomic_int *object, int desired)
char8 __ovld __cnfn convert_char8_rtn(char8)
uint8 __ovld __cnfn convert_uint8_sat_rte(char8)
void __ovld vstorea_half3(float3 data, size_t offset, half *p)
int16 __ovld __cnfn convert_int16_rte(char16)
char16 __ovld __cnfn convert_char16_rte(char16)
long __ovld __cnfn convert_long_rtn(char)
ushort4 __ovld __cnfn convert_ushort4_sat(char4)
int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order)
ushort2 __ovld __cnfn convert_ushort2_sat_rtn(char2)
char4 __ovld __cnfn convert_char4_sat(char4)
char16 __ovld __cnfn convert_char16_sat_rte(char16)
float __ovld __cnfn acospi(float x)
Compute acos (x) / PI.
uint16 __ovld __cnfn convert_uint16_rtn(char16)
float2 __ovld __cnfn convert_float2_rtn(char2)
float __ovld __cnfn half_exp2(float x)
Compute the base-2 exponential of x.
float4 __ovld __cnfn convert_float4_rte(char4)
long4 __ovld __cnfn convert_long4_rte(char4)
uint16 __ovld __cnfn convert_uint16_sat_rtp(char16)
uint3 __ovld __cnfn convert_uint3_sat_rte(char3)
uint4 __ovld __cnfn convert_uint4_sat(char4)
int3 __ovld __cnfn convert_int3_sat_rte(char3)
ulong3 __ovld __cnfn convert_ulong3_rtp(char3)
float __ovld __cnfn native_log(float x)
Compute natural logarithm over an implementation-defined range.
float __ovld __cnfn rootn(float x, int y)
Compute x to the power 1/y.
void __ovld vstorea_half_rte(float data, size_t offset, half *p)
short __ovld __cnfn upsample(char hi, uchar lo)
result[i] = ((short)hi[i] << 8) | lo[i] for the signed forms; result[i] = ((ushort)hi[i] << 8) | lo[i] for the unsigned forms.
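A small sketch (names are hypothetical) packing separate high- and low-byte streams into 16-bit values.

__kernel void pack16(__global const char *hi, __global const uchar *lo, __global short *out) {
    size_t i = get_global_id(0);
    // out[i] = ((short)hi[i] << 8) | lo[i]
    out[i] = upsample(hi[i], lo[i]);
}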
char __ovld __cnfn bitselect(char a, char b, char c)
Each bit of the result is the corresponding bit of a if the corresponding bit of c is 0...
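A small sketch (names are hypothetical) using the uchar overload of bitselect as a per-bit merge, taking the low nibble from a and the high nibble from b.

__kernel void merge_nibbles(__global const uchar *a, __global const uchar *b, __global uchar *out) {
    size_t i = get_global_id(0);
    // Bits where the mask is 0 come from a, bits where it is 1 come from b.
    out[i] = bitselect(a[i], b[i], (uchar)0xF0);
}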
short __ovld __cnfn convert_short_rtn(char)
void __ovld vstorea_half4_rte(float4 data, size_t offset, half *p)
int __ovld __cnfn convert_int_sat_rtn(char)
void __ovld vstore_half2_rtz(float2 data, size_t offset, half *p)
char3 __ovld __cnfn convert_char3_sat_rte(char3)
uchar16 __ovld __cnfn convert_uchar16(char16)
uint4 __ovld __cnfn convert_uint4_sat_rtp(char4)
ushort8 __ovld __cnfn convert_ushort8(char8)
short8 __ovld __cnfn convert_short8(char8)
float __ovld __cnfn mix(float x, float y, float a)
Returns the linear blend of x and y implemented as: x + (y - x) * a. a must be a value in the range 0...
char3 __ovld __cnfn convert_char3_sat_rtz(char3)
float __ovld __cnfn fmin(float x, float y)
Returns y if y < x, otherwise it returns x.
float4 __ovld __cnfn convert_float4_rtz(char4)
ushort16 __ovld __cnfn convert_ushort16_rte(char16)
void __ovld vstorea_half4_rtz(float4 data, size_t offset, half *p)
uchar2 __ovld __cnfn convert_uchar2(char2)
ulong __ovld __cnfn convert_ulong_sat_rtz(char)
char __ovld ctz(char x)
Returns the count of trailing 0-bits in x.
int8 __ovld __cnfn convert_int8_sat_rtp(char8)
ulong2 __ovld __cnfn convert_ulong2_rtp(char2)
char3 __ovld __cnfn convert_char3_rte(char3)
uint4 __ovld __cnfn convert_uint4_sat_rtn(char4)
char4 __ovld __cnfn convert_char4_rtz(char4)
bool __ovld is_valid_event(clk_event_t event)
ushort8 __ovld __cnfn convert_ushort8_sat_rte(char8)
uchar4 __ovld __cnfn convert_uchar4_sat_rtn(char4)
void __ovld vstorea_half3_rtp(float3 data, size_t offset, half *p)
float __ovld __cnfn sinh(float)
Compute hyperbolic sine.
char2 __ovld vload2(size_t offset, const __constant char *p)
Use generic type gentype to indicate the built-in data types char, uchar, short, ushort, int, uint, long, ulong, float, double or half.
int __ovld __conv work_group_reduce_add(int x)
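A sketch (names are hypothetical; requires the OpenCL 2.0 work-group functions) that sums each work-group's values and lets one work-item write the result.

__kernel void group_sums(__global const int *in, __global int *per_group_sum) {
    int x = in[get_global_id(0)];
    // Every work-item in the group must execute this call.
    int sum = work_group_reduce_add(x);
    if (get_local_id(0) == 0)
        per_group_sum[get_group_id(0)] = sum;
}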
long16 __ovld __cnfn convert_long16_rtn(char16)
ushort3 __ovld __cnfn convert_ushort3_sat_rtp(char3)
ulong8 __ovld __cnfn convert_ulong8_rtn(char8)
long4 __ovld __cnfn convert_long4_sat_rtp(char4)
int __ovld __conv work_group_any(int predicate)
int __ovld __cnfn any(char x)
Returns 1 if the most significant bit in any component of x is set; otherwise returns 0...
void __ovld set_user_event_status(clk_event_t e, int state)
char __ovld __cnfn convert_char(char)
unsigned int uint
An unsigned 32-bit integer.
int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order)
short16 __ovld __cnfn convert_short16(char16)
uchar16 __ovld __cnfn convert_uchar16_rte(char16)
float __ovld __cnfn half_cos(float x)
Compute cosine.
size_t __ovld __cnfn get_num_groups(uint dimindx)
Returns the number of work-groups that will execute a kernel for dimension identified by dimindx...
ushort3 __ovld __cnfn convert_ushort3_sat_rte(char3)
float __ovld __cnfn native_sin(float x)
Compute sine over an implementation-defined range.
void __ovld vstorea_half16(float16 data, size_t offset, half *p)
float __ovld __cnfn native_log10(float x)
Compute a base 10 logarithm over an implementation-defined range.
short8 __ovld __cnfn convert_short8_rte(char8)
ushort4 __ovld __cnfn convert_ushort4_sat_rtz(char4)
size_t __ovld __cnfn get_image_array_size(read_only image1d_array_t image_array)
Return the image array size.
float __ovld frexp(float x, int *exp)
Extract mantissa and exponent from x.
long2 __ovld __cnfn convert_long2_sat_rtp(char2)
char8 __ovld __cnfn convert_char8_sat_rte(char8)
float __ovld __cnfn maxmag(float x, float y)
Returns x if | x | > | y |, y if | y | > | x |, otherwise fmax(x, y).
int __ovld __cnfn get_image_height(read_only image2d_t image)
Return the image height in pixels.
char3 __ovld __cnfn convert_char3_rtp(char3)
uint4 __ovld __cnfn convert_uint4_rtp(char4)
uchar8 __ovld __cnfn convert_uchar8_sat_rtz(char8)
char3 __ovld vload3(size_t offset, const __constant char *p)
int __ovld __conv work_group_scan_inclusive_add(int x)
uint2 __ovld __cnfn convert_uint2_rte(char2)
int __ovld atomic_fetch_max(volatile atomic_int *object, int operand)
char __ovld __cnfn select(char a, char b, char c)
For each component of a vector type, result[i] = if MSB of c[i] is set ? b[i] : a[i].
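A brief sketch (names are hypothetical): for vector types, select pairs naturally with vector comparisons, whose true lanes are all-ones and therefore have the MSB set.

__kernel void clamp_negatives(__global const int4 *in, __global int4 *out) {
    size_t i = get_global_id(0);
    int4 v = in[i];
    int4 is_neg = v < (int4)(0);            // -1 (MSB set) where v < 0
    out[i] = select(v, (int4)(0), is_neg);  // replace negative lanes with 0
}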
ulong __ovld __cnfn convert_ulong_sat(char)
void __ovld atomic_init(volatile atomic_int *object, int value)
float __ovld __cnfn sqrt(float)
Compute square root.
int __ovld __cnfn get_image_depth(read_only image3d_t image)
Return the image depth in pixels.
uint __ovld __cnfn convert_uint_rtz(char)
long3 __ovld __cnfn convert_long3_sat_rtp(char3)
void __ovld vstore_half4(float4 data, size_t offset, half *p)
float __ovld __cnfn fast_normalize(float p)
Returns a vector in the same direction as p but with a length of 1.
uint3 __ovld __cnfn convert_uint3_sat_rtz(char3)
long __ovld __cnfn convert_long_sat_rtn(char)
char __ovld __cnfn rotate(char v, char i)
For each element in v, the bits are shifted left by the number of bits given by the corresponding ele...
char16 __ovld __cnfn convert_char16_sat_rtz(char16)
short2 __ovld __cnfn convert_short2(char2)
uint __ovld __cnfn convert_uint_sat(char)
float __ovld __cnfn native_cos(float x)
Compute cosine over an implementation-defined range.
ulong8 __ovld __cnfn convert_ulong8_sat_rtz(char8)
ulong16 __ovld __cnfn convert_ulong16_rtp(char16)
char8 __ovld __cnfn convert_char8(char8)
__PTRDIFF_TYPE__ ptrdiff_t
A signed integer type that is the result of subtracting two pointers.
uchar3 __ovld __cnfn convert_uchar3_sat_rte(char3)
float __ovld __cnfn ldexp(float x, int n)
Multiply x by 2 to the power n.
ushort2 __ovld __cnfn convert_ushort2(char2)
size_t __ovld __cnfn get_global_offset(uint dimindx)
get_global_offset returns the offset values specified in global_work_offset argument to clEnqueueNDRa...
short16 __ovld __cnfn convert_short16_sat_rtz(char16)
uint2 __ovld __cnfn convert_uint2(char2)
ulong2 __ovld __cnfn convert_ulong2_rte(char2)
int16 __ovld __cnfn convert_int16_rtz(char16)
int8 __ovld __cnfn convert_int8_rte(char8)
uchar3 __ovld __cnfn convert_uchar3(char3)
ulong4 __ovld __cnfn convert_ulong4(char4)
int2 __ovld __cnfn convert_int2_rte(char2)
ulong8 __ovld __cnfn convert_ulong8_sat(char8)
float __ovld __cnfn convert_float_rte(char)
void __ovld vstorea_half16_rtz(float16 data, size_t offset, half *p)
float3 __ovld __cnfn convert_float3(char3)
float __ovld __cnfn half_sin(float x)
Compute sine.
uchar16 __ovld __cnfn convert_uchar16_rtp(char16)
uint2 __ovld __cnfn convert_uint2_rtp(char2)
char8 __ovld __cnfn convert_char8_sat_rtn(char8)
ulong3 __ovld __cnfn convert_ulong3_sat_rtz(char3)
short2 __ovld __cnfn convert_short2_rtp(char2)
ulong3 __ovld __cnfn convert_ulong3_sat(char3)
float __ovld vload_half(size_t offset, const __constant half *p)
Read sizeof (half) bytes of data from address (p + offset).
int2 __ovld __cnfn convert_int2_sat(char2)
int __ovld __conv work_group_scan_exclusive_add(int x)
ushort2 __ovld __cnfn convert_ushort2_rtn(char2)
void __ovld vstorea_half3_rte(float3 data, size_t offset, half *p)
int __ovld __cnfn get_image_channel_data_type(read_only image1d_t image)
uchar3 __ovld __cnfn convert_uchar3_rte(char3)
short __ovld __cnfn convert_short_sat_rtn(char)
ushort8 __ovld __cnfn convert_ushort8_rtn(char8)
float __ovld __cnfn half_powr(float x, float y)
Compute x to the power y, where x is >= 0.
char3 __ovld __cnfn convert_char3_sat_rtn(char3)
short3 __ovld __cnfn convert_short3_rtp(char3)
uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, int2 coord)
float __ovld __cnfn native_divide(float x, float y)
Compute x / y over an implementation-defined range.
char __ovld __cnfn sub_sat(char x, char y)
Returns x - y and saturates the result.
uint8 __ovld __cnfn convert_uint8_sat_rtp(char8)
int4 __ovld __cnfn convert_int4_rtz(char4)
uchar16 __ovld __cnfn convert_uchar16_sat_rtp(char16)
float4 __ovld __cnfn convert_float4_rtn(char4)
float __ovld __cnfn dot(float p0, float p1)
Compute dot product.
char __ovld __cnfn convert_char_sat_rtp(char)
uint8 __ovld __cnfn convert_uint8_rtz(char8)
long __ovld __cnfn convert_long_sat(char)
ushort2 __ovld __cnfn convert_ushort2_rtz(char2)
int __ovld __cnfn convert_int_rtn(char)
int kernel_enqueue_flags_t
long8 __ovld __cnfn convert_long8_sat_rtz(char8)
float2 __ovld __cnfn convert_float2_rte(char2)
long3 __ovld __cnfn convert_long3_sat_rte(char3)
long4 __ovld __cnfn convert_long4(char4)
int4 __ovld __cnfn convert_int4_sat_rte(char4)
ushort __ovld __cnfn convert_ushort_rtz(char)
short8 __ovld __cnfn convert_short8_sat_rte(char8)
float __ovld __cnfn fdim(float x, float y)
x - y if x > y, +0 if x is less than or equal to y.
int __ovld __cnfn convert_int_sat_rtz(char)
ulong3 __ovld __cnfn convert_ulong3_rtn(char3)
ndrange_t __ovld ndrange_3D(const size_t[3])
int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order)
long8 __ovld __cnfn convert_long8_rte(char8)
size_t __ovld __cnfn get_local_size(uint dimindx)
Returns the number of local work-items specified in dimension identified by dimindx.
ushort8 __ovld __cnfn convert_ushort8_rtz(char8)
ulong3 __ovld __cnfn convert_ulong3_sat_rte(char3)
ushort16 __ovld __cnfn convert_ushort16_rtp(char16)
char8 __ovld __cnfn convert_char8_sat_rtz(char8)
bool __ovld is_valid_reserve_id(reserve_id_t reserve_id)
int printf(__constant const char *st,...)
long4 __ovld __cnfn convert_long4_rtp(char4)
ulong2 __ovld __cnfn convert_ulong2_sat(char2)
float __ovld __cnfn half_log2(float x)
Compute a base 2 logarithm.
char char2 __attribute__((ext_vector_type(2)))
float __ovld __cnfn sign(float x)
Returns 1.0 if x > 0, -0.0 if x = -0.0, +0.0 if x = +0.0, or -1.0 if x < 0.
int2 __ovld __cnfn convert_int2_sat_rtn(char2)
long16 __ovld __cnfn convert_long16_rtz(char16)
float __ovld __cnfn convert_float_rtp(char)
void __ovld vstore_half2(float2 data, size_t offset, half *p)
The floatn value given by data is converted to a halfn value using the appropriate rounding mode...
float __ovld __cnfn fma(float a, float b, float c)
Returns the correctly rounded floating-point representation of the sum of c with the infinitely preci...
ulong4 __ovld __cnfn convert_ulong4_rte(char4)
float __ovld __cnfn fast_distance(float p0, float p1)
Returns fast_length(p0 - p1).
int __ovld __cnfn convert_int_sat(char)
uchar8 __ovld __cnfn convert_uchar8_rtp(char8)
short2 __ovld __cnfn convert_short2_sat_rtn(char2)
short8 __ovld __cnfn convert_short8_sat(char8)
int8 __ovld __cnfn convert_int8_sat_rte(char8)
ushort8 __ovld __cnfn convert_ushort8_sat_rtz(char8)
int16 __ovld __cnfn convert_int16_sat_rte(char16)
short3 __ovld __cnfn convert_short3_rtz(char3)
float __ovld __cnfn half_tan(float x)
Compute tangent.
uint4 __ovld __cnfn convert_uint4_rtn(char4)
event_t __ovld async_work_group_strided_copy(__local char *dst, const __global char *src, size_t num_elements, size_t src_stride, event_t event)
Perform an async gather of num_elements gentype elements from src to dst.
short __ovld __cnfn convert_short_rte(char)
short2 __ovld __cnfn convert_short2_sat_rtp(char2)
long16 __ovld __cnfn convert_long16_sat_rtn(char16)
ulong8 __ovld __cnfn convert_ulong8(char8)
void __ovld vstore_half4_rte(float4 data, size_t offset, half *p)
char2 __ovld __cnfn convert_char2_sat_rtz(char2)
short3 __ovld __cnfn convert_short3_sat_rtz(char3)
ulong4 __ovld __cnfn convert_ulong4_rtn(char4)
char __ovld __cnfn convert_char_rtz(char)
short4 __ovld __cnfn convert_short4_sat(char4)
short4 __ovld __cnfn convert_short4_sat_rte(char4)
ulong2 __ovld __cnfn convert_ulong2_sat_rtp(char2)
float __ovld __cnfn step(float edge, float x)
Returns 0.0 if x < edge, otherwise it returns 1.0.
ulong8 __ovld __cnfn convert_ulong8_sat_rtp(char8)
uchar __ovld __cnfn convert_uchar_sat(char)
float __ovld __cnfn exp2(float)
Exponential base 2 function.
float __ovld __cnfn cosh(float)
Compute hyperbolic cosine.
char __ovld __cnfn convert_char_sat(char)
ushort4 __ovld __cnfn convert_ushort4_rtp(char4)
float16 __ovld vloada_half16(size_t offset, const __constant half *p)
ushort3 __ovld __cnfn convert_ushort3_sat_rtz(char3)
float16 __ovld __cnfn convert_float16(char16)
void __ovld atomic_store(volatile atomic_int *object, int desired)
int3 __ovld __cnfn convert_int3_rtn(char3)
short8 __ovld __cnfn convert_short8_sat_rtn(char8)
float __ovld __cnfn floor(float)
Round to integral value using the round to -ve infinity rounding mode.
uint2 __ovld __cnfn convert_uint2_sat_rte(char2)
void __ovld vstorea_half8(float8 data, size_t offset, half *p)
long8 __ovld __cnfn convert_long8_sat_rtp(char8)
long2 __ovld __cnfn convert_long2_rtn(char2)
int16 __ovld __cnfn convert_int16(char16)
ushort2 __ovld __cnfn convert_ushort2_sat_rtz(char2)
ushort4 __ovld __cnfn convert_ushort4_sat_rte(char4)
ushort3 __ovld __cnfn convert_ushort3_rtp(char3)
void __ovld vstore_half3_rtp(float3 data, size_t offset, half *p)
long3 __ovld __cnfn convert_long3_sat(char3)
char __ovld __cnfn max(char x, char y)
Returns y if x < y, otherwise it returns x.
void __ovld vstorea_half2_rtz(float2 data, size_t offset, half *p)
short8 __ovld __cnfn convert_short8_rtz(char8)
float __ovld __cnfn lgamma(float x)
Log gamma function.
void __ovld vstore_half16_rtp(float16 data, size_t offset, half *p)
uchar4 __ovld __cnfn convert_uchar4_sat_rtp(char4)
uchar2 __ovld __cnfn convert_uchar2_rtn(char2)
ushort __ovld __cnfn convert_ushort_sat_rtz(char)
int __ovld __cnfn convert_int_sat_rte(char)
short2 __ovld __cnfn convert_short2_rtn(char2)
float2 __ovld __cnfn convert_float2_rtz(char2)
short2 __ovld __cnfn convert_short2_sat_rte(char2)
uchar __ovld __cnfn convert_uchar_sat_rtn(char)
float __ovld __cnfn atan2pi(float y, float x)
Compute atan2 (y, x) / PI.
uint16 __ovld __cnfn convert_uint16_sat_rtn(char16)
void __ovld vstorea_half(float data, size_t offset, half *p)
The floatn value given by data is converted to a halfn value using the appropriate rounding mode...
int4 __ovld __cnfn convert_int4_rtn(char4)
int __ovld __cnfn convert_int_sat_rtp(char)
uint16 __ovld __cnfn convert_uint16_sat_rtz(char16)
short __ovld __cnfn convert_short(char)
float __ovld lgamma_r(float x, int *signp)
ulong2 __ovld __cnfn convert_ulong2_sat_rtn(char2)
ushort8 __ovld __cnfn convert_ushort8_sat_rtp(char8)
unsigned char uchar
An unsigned 8-bit integer.
int8 __ovld __cnfn convert_int8_sat_rtz(char8)
uchar16 __ovld __cnfn convert_uchar16_sat(char16)
ulong3 __ovld __cnfn convert_ulong3_sat_rtn(char3)
void __ovld vstorea_half16_rte(float16 data, size_t offset, half *p)
float __ovld __cnfn atanpi(float x)
Compute atan (x) / PI.
void __ovld vstore_half16_rtn(float16 data, size_t offset, half *p)
ulong __ovld __cnfn convert_ulong_rte(char)
void __ovld retain_event(clk_event_t)
short16 __ovld __cnfn convert_short16_sat_rte(char16)
char4 __ovld __cnfn convert_char4_rtp(char4)
int16 __ovld __cnfn convert_int16_rtn(char16)
short16 __ovld __cnfn convert_short16_sat(char16)
void __ovld vstore_half2_rte(float2 data, size_t offset, half *p)
int3 __ovld __cnfn convert_int3_sat_rtp(char3)
void __ovld vstorea_half_rtn(float data, size_t offset, half *p)
int __ovld atomic_dec(volatile __global int *p)
Read the 32-bit value (referred to as old) stored at location pointed by p. Compute (old - 1) and store result at location pointed by p. The function returns old.
char2 __ovld __cnfn shuffle2(char2 x, char2 y, uchar2 mask)
short4 __ovld __cnfn convert_short4_sat_rtz(char4)
float __ovld vloada_half(size_t offset, const __constant half *p)
For n = 1, 2, 4, 8 and 16 read sizeof (halfn) bytes of data from address (p + (offset * n))...
int __ovld __cnfn isgreaterequal(float x, float y)
Returns the component-wise compare of x >= y.
int __ovld enqueue_marker(queue_t, uint, const __private clk_event_t *, __private clk_event_t *)
void __ovld vstorea_half8_rtn(float8 data, size_t offset, half *p)
uchar8 __ovld __cnfn convert_uchar8_sat_rtn(char8)
long3 __ovld __cnfn convert_long3_rtz(char3)
short4 __ovld __cnfn convert_short4_rtz(char4)
float4 __ovld vload_half4(size_t offset, const __constant half *p)
uint8 __ovld __cnfn convert_uint8(char8)
uchar4 __ovld __cnfn convert_uchar4(char4)
ulong8 __ovld __cnfn convert_ulong8_sat_rtn(char8)
float3 __ovld __cnfn convert_float3_rtp(char3)
ulong16 __ovld __cnfn convert_ulong16_sat_rtz(char16)
short __ovld __cnfn convert_short_sat_rte(char)
uint3 __ovld __cnfn convert_uint3_rtp(char3)
uchar2 __ovld __cnfn convert_uchar2_rtz(char2)
long16 __ovld __cnfn convert_long16(char16)
char __ovld __cnfn add_sat(char x, char y)
Returns x + y and saturates the result.
float __ovld __cnfn fast_length(float p)
Returns the length of vector p computed as: half_sqrt(p.x^2 + p.y^2 + ...)
long4 __ovld __cnfn convert_long4_rtn(char4)
int3 __ovld __cnfn convert_int3_sat_rtn(char3)
char __ovld __cnfn rhadd(char x, char y)
Returns (x + y + 1) >> 1.
void __ovld vstorea_half2(float2 data, size_t offset, half *p)
bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected, int desired, memory_order success, memory_order failure)
short2 __ovld __cnfn convert_short2_rtz(char2)
short4 __ovld __cnfn convert_short4_sat_rtn(char4)
char3 __ovld __cnfn convert_char3_rtn(char3)
int __ovld __cnfn isordered(float x, float y)
Test if arguments are ordered.
float __ovld __cnfn cos(float)
Compute cosine.
short2 __ovld __cnfn convert_short2_rte(char2)
long __ovld __cnfn convert_long_rte(char)
short4 __ovld __cnfn convert_short4_sat_rtp(char4)
int __ovld __cnfn isnan(float)
Test for a NaN.
int __ovld __cnfn get_image_channel_order(read_only image1d_t image)
void __ovld vstorea_half2_rte(float2 data, size_t offset, half *p)
long2 __ovld __cnfn convert_long2(char2)
void __ovld atomic_flag_clear(volatile atomic_flag *object)
int __ovld __cnfn isinf(float)
Test for infinity value (+ve or -ve).
void __ovld vstorea_half2_rtp(float2 data, size_t offset, half *p)
uint __ovld __cnfn convert_uint_sat_rtp(char)
ushort __ovld __cnfn convert_ushort(char)
char8 __ovld __cnfn convert_char8_sat_rtp(char8)
int4 __ovld __cnfn convert_int4_rte(char4)
float8 __ovld __cnfn convert_float8_rtp(char8)
float __ovld __cnfn half_log(float x)
Compute natural logarithm.
ushort __ovld __cnfn convert_ushort_sat(char)
ulong8 __ovld __cnfn convert_ulong8_sat_rte(char8)
void __ovld vstore_half(float data, size_t offset, half *p)
The float value given by data is first converted to a half value using the appropriate rounding mode...
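An illustrative sketch (names are hypothetical; the __global overloads of vstore_half/vload_half are assumed in addition to the __constant one listed above) converting floats to half precision and reading them back.

__kernel void float_to_half(__global const float *in, __global half *out16, __global float *roundtrip) {
    size_t i = get_global_id(0);
    vstore_half(in[i], i, out16);         // rounds to nearest even by default
    roundtrip[i] = vload_half(i, out16);  // read back, widened to float
}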
int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order)
int __ovld atomic_load(volatile atomic_int *object)
long3 __ovld __cnfn convert_long3_sat_rtn(char3)
ushort4 __ovld __cnfn convert_ushort4_rtz(char4)
char2 __ovld __cnfn convert_char2_sat_rtp(char2)
float __ovld __cnfn half_exp(float x)
Compute the base-e exponential of x.
int __ovld __cnfn isnormal(float)
Test for a normal value.
void __ovld vstore_half8_rte(float8 data, size_t offset, half *p)
long4 __ovld __cnfn convert_long4_sat_rte(char4)
ushort8 __ovld __cnfn convert_ushort8_rte(char8)
ushort8 __ovld __cnfn convert_ushort8_sat_rtn(char8)
int __ovld atomic_fetch_and(volatile atomic_int *object, int operand)
float __ovld __cnfn sinpi(float x)
Compute sin (PI * x).
int16 __ovld __cnfn convert_int16_sat_rtn(char16)
unsigned long ulong
An unsigned 64-bit integer.
float __ovld __cnfn atanh(float)
Hyperbolic arc tangent.
void __ovld wait_group_events(int num_events, event_t *event_list)
Wait for events that identify the async_work_group_copy operations to complete.
size_t __ovld get_local_linear_id(void)
short4 __ovld __cnfn convert_short4_rte(char4)
char4 __ovld __cnfn convert_char4_sat_rte(char4)
int __ovld atomic_sub(volatile __global int *p, int val)
Read the 32-bit value (referred to as old) stored at location pointed by p. Compute (old - val) and store result at location pointed by p. The function returns old.
float __ovld __cnfn half_recip(float x)
Compute reciprocal.
ushort2 __ovld __cnfn convert_ushort2_sat(char2)
ulong __ovld __cnfn convert_ulong(char)
float __ovld __cnfn pow(float x, float y)
Compute x to the power y.
int8 __ovld __cnfn convert_int8_sat_rtn(char8)
void __ovld release_event(clk_event_t)
char __ovld __cnfn convert_char_rtn(char)
ushort16 __ovld __cnfn convert_ushort16_sat_rtz(char16)
int __ovld __cnfn convert_int_rtp(char)
float16 __ovld __cnfn convert_float16_rte(char16)
int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order)
void __ovld vstore_half4_rtn(float4 data, size_t offset, half *p)
short __ovld __cnfn convert_short_sat_rtp(char)
float8 __ovld __cnfn convert_float8_rtn(char8)
void __ovld vstorea_half3_rtn(float3 data, size_t offset, half *p)
uchar16 __ovld __cnfn convert_uchar16_sat_rtz(char16)
int __ovld atomic_max(volatile __global int *p, int val)
Read the 32-bit value (referred to as old) stored at location pointed by p. Compute max(old, val) and store result at location pointed by p. The function returns old.
uint8 __ovld __cnfn convert_uint8_sat_rtn(char8)
float3 __ovld vloada_half3(size_t offset, const __constant half *p)
float __ovld __cnfn convert_float(char)
char2 __ovld __cnfn convert_char2_sat_rtn(char2)