23 #ifndef __CLANG_CUDA_CMATH_H__
24 #define __CLANG_CUDA_CMATH_H__
26 #error "This file is for CUDA compilation only."
47 #define __DEVICE__ static __device__ __inline__ __attribute__((always_inline))
65 return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
69 return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
73 return ::frexpf(__arg, __exp);
91 return __builtin_isgreater(__x, __y);
94 return __builtin_isgreater(__x, __y);
97 return __builtin_isgreaterequal(__x, __y);
100 return __builtin_isgreaterequal(__x, __y);
103 return __builtin_isless(__x, __y);
106 return __builtin_isless(__x, __y);
109 return __builtin_islessequal(__x, __y);
112 return __builtin_islessequal(__x, __y);
115 return __builtin_islessgreater(__x, __y);
118 return __builtin_islessgreater(__x, __y);
123 return __builtin_isunordered(__x, __y);
126 return __builtin_isunordered(__x, __y);
129 return ::ldexpf(__arg, __exp);
135 return __builtin_nexttowardf(__from, __to);
138 return __builtin_nexttoward(__from, __to);
141 return __builtin_nexttowardf(__from, __to);
144 return ::powf(__base, __exp);
147 return ::powif(__base, __iexp);
150 return ::powi(__base, __iexp);
175 template<
bool __B,
class __T =
void>
184 #define __CUDA_CLANG_FN_INTEGER_OVERLOAD_1(__retty, __fn) \
185 template <typename __T> \
187 typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer, \
190 return ::__fn((double)__x); \
198 #define __CUDA_CLANG_FN_INTEGER_OVERLOAD_2(__retty, __fn) \
199 template <typename __T1, typename __T2> \
200 __DEVICE__ typename __clang_cuda_enable_if< \
201 std::numeric_limits<__T1>::is_specialized && \
202 std::numeric_limits<__T2>::is_specialized, \
204 __fn(__T1 __x, __T2 __y) { \
205 return __fn((double)__x, (double)__y); \
269 #undef __CUDA_CLANG_FN_INTEGER_OVERLOAD_1
270 #undef __CUDA_CLANG_FN_INTEGER_OVERLOAD_2
274 template <
typename __T1,
typename __T2,
typename __T3>
276 std::numeric_limits<__T1>::is_specialized &&
277 std::numeric_limits<__T2>::is_specialized &&
278 std::numeric_limits<__T3>::is_specialized,
281 return std::fma((
double)__x, (
double)__y, (
double)__z);
284 template <
typename __T>
291 template <
typename __T>
292 __DEVICE__ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
298 template <
typename __T>
299 __DEVICE__ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
305 template <
typename __T1,
typename __T2>
307 std::numeric_limits<__T1>::is_specialized &&
308 std::numeric_limits<__T2>::is_specialized,
311 return std::remquo((
double)__x, (
double)__y, __quo);
314 template <
typename __T>
315 __DEVICE__ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
321 template <
typename __T>
322 __DEVICE__ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
332 #ifdef _LIBCPP_BEGIN_NAMESPACE_STD
333 _LIBCPP_BEGIN_NAMESPACE_STD
336 #ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
337 _GLIBCXX_BEGIN_NAMESPACE_VERSION
476 #ifdef _LIBCPP_END_NAMESPACE_STD
477 _LIBCPP_END_NAMESPACE_STD
479 #ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
480 _GLIBCXX_END_NAMESPACE_VERSION
static __inline unsigned char unsigned int unsigned int __y
__DEVICE__ __clang_cuda_enable_if< std::numeric_limits< __T1 >::is_specialized &&std::numeric_limits< __T2 >::is_specialized, double >::type remquo(__T1 __x, __T2 __y, int *__quo)
__DEVICE__ bool signbit(float __x)
Test for sign bit.
__DEVICE__ float sinh(float __x)
Compute hyperbolic sine.
__DEVICE__ float atan2(float __x, float __y)
Two-argument arc tangent: computes the arc tangent of __x / __y, using the signs of both arguments to determine the quadrant of the result.
__DEVICE__ long long abs(long long __n)
__DEVICE__ float floor(float __x)
Round to an integral value using the round-toward-negative-infinity rounding mode.
__DEVICE__ float sqrt(float __x)
Compute square root.
__DEVICE__ float modf(float __x, float *__iptr)
Decompose a floating-point number.
__DEVICE__ float asin(float __x)
Arc sine function.
__DEVICE__ float ceil(float __x)
Round to an integral value using the round-toward-positive-infinity rounding mode.
__DEVICE__ bool isinf(float __x)
Test for an infinity value (positive or negative).
#define __CUDA_CLANG_FN_INTEGER_OVERLOAD_1(__retty, __fn)
#define __CUDA_CLANG_FN_INTEGER_OVERLOAD_2(__retty, __fn)
__DEVICE__ float acos(float __x)
Arc cosine function.
__DEVICE__ float log(float __x)
Compute natural logarithm.
__DEVICE__ int fpclassify(float __x)
__DEVICE__ float sin(float __x)
Compute sine.
__DEVICE__ long labs(long)
#define remainder(__x, __y)
__DEVICE__ bool isfinite(float __x)
Test for finite value.
__DEVICE__ float fmod(float __x, float __y)
Modulus.
__DEVICE__ float tan(float __x)
Compute tangent.
__DEVICE__ bool islessequal(float __x, float __y)
Returns the component-wise compare of x <= y.
__DEVICE__ float ldexp(float __arg, int __exp)
Multiply x by 2 to the power n.
#define copysign(__x, __y)
__DEVICE__ float fabs(float __x)
Compute absolute value of a floating-point number.
__DEVICE__ float nexttoward(float __from, double __to)
__DEVICE__ bool isunordered(float __x, float __y)
Test if arguments are unordered.
__DEVICE__ float frexp(float __arg, int *__exp)
Extract mantissa and exponent from x.
static __inline unsigned char unsigned int __x
__DEVICE__ __clang_cuda_enable_if< std::numeric_limits< __T >::is_integer, double >::type scalbln(__T __x, long __exp)
__DEVICE__ bool isgreaterequal(float __x, float __y)
Returns the component-wise compare of x >= y.
__DEVICE__ float cos(float __x)
Compute cosine.
__DEVICE__ float log10(float __x)
Compute a base 10 logarithm.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
__DEVICE__ bool isnan(float __x)
Test for a NaN.
__DEVICE__ bool isgreater(float __x, float __y)
Returns the component-wise compare of x > y.
__DEVICE__ float atan(float __x)
Arc tangent function.
__DEVICE__ bool islessgreater(float __x, float __y)
Returns the component-wise compare of (x < y) || (x > y) .
__DEVICE__ long long llround(float)
__DEVICE__ long long llabs(long long)
__DEVICE__ __clang_cuda_enable_if< std::numeric_limits< __T >::is_integer, double >::type scalbn(__T __x, int __exp)
#define nextafter(__x, __y)
__DEVICE__ float cosh(float __x)
Compute hyperbolic cosine.
__DEVICE__ float pow(float __base, float __exp)
Compute x to the power y.
__DEVICE__ __clang_cuda_enable_if< std::numeric_limits< __T1 >::is_specialized &&std::numeric_limits< __T2 >::is_specialized &&std::numeric_limits< __T3 >::is_specialized, double >::type fma(__T1 __x, __T2 __y, __T3 __z)
__DEVICE__ float nexttowardf(float __from, double __to)
__DEVICE__ bool isless(float __x, float __y)
Returns the component-wise compare of x < y.
__DEVICE__ float tanh(float __x)
Compute hyperbolic tangent.
__DEVICE__ float exp(float __x)
Compute the base e exponential function of x.
__DEVICE__ bool isnormal(float __x)
Test for a normal value.