#error "Never use <avx512vlintrin.h> directly; include <immintrin.h> instead."

#ifndef __AVX512VLINTRIN_H
#define __AVX512VLINTRIN_H

#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl"), __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl"), __min_vector_width__(256)))

#define _mm_cmpeq_epi32_mask(A, B) _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ)
#define _mm_mask_cmpeq_epi32_mask(k, A, B) _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm_cmpge_epi32_mask(A, B) _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_GE)
#define _mm_mask_cmpge_epi32_mask(k, A, B) _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm_cmpgt_epi32_mask(A, B) _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_GT)
#define _mm_mask_cmpgt_epi32_mask(k, A, B) _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm_cmple_epi32_mask(A, B) _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_LE)
#define _mm_mask_cmple_epi32_mask(k, A, B) _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm_cmplt_epi32_mask(A, B) _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_LT)
#define _mm_mask_cmplt_epi32_mask(k, A, B) _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm_cmpneq_epi32_mask(A, B) _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_NE)
#define _mm_mask_cmpneq_epi32_mask(k, A, B) _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm256_cmpeq_epi32_mask(A, B) _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ)
#define _mm256_mask_cmpeq_epi32_mask(k, A, B) _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm256_cmpge_epi32_mask(A, B) _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_GE)
#define _mm256_mask_cmpge_epi32_mask(k, A, B) _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm256_cmpgt_epi32_mask(A, B) _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_GT)
#define _mm256_mask_cmpgt_epi32_mask(k, A, B) _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm256_cmple_epi32_mask(A, B) _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_LE)
#define _mm256_mask_cmple_epi32_mask(k, A, B) _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm256_cmplt_epi32_mask(A, B) _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_LT)
#define _mm256_mask_cmplt_epi32_mask(k, A, B) _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm256_cmpneq_epi32_mask(A, B) _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_NE)
#define _mm256_mask_cmpneq_epi32_mask(k, A, B) _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm_cmpeq_epu32_mask(A, B) _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ)
#define _mm_mask_cmpeq_epu32_mask(k, A, B) _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm_cmpge_epu32_mask(A, B) _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_GE)
#define _mm_mask_cmpge_epu32_mask(k, A, B) _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm_cmpgt_epu32_mask(A, B) _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_GT)
#define _mm_mask_cmpgt_epu32_mask(k, A, B) _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm_cmple_epu32_mask(A, B) _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_LE)
#define _mm_mask_cmple_epu32_mask(k, A, B) _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm_cmplt_epu32_mask(A, B) _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_LT)
#define _mm_mask_cmplt_epu32_mask(k, A, B) _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm_cmpneq_epu32_mask(A, B) _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_NE)
#define _mm_mask_cmpneq_epu32_mask(k, A, B) _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm256_cmpeq_epu32_mask(A, B) _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ)
#define _mm256_mask_cmpeq_epu32_mask(k, A, B) _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm256_cmpge_epu32_mask(A, B) _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_GE)
#define _mm256_mask_cmpge_epu32_mask(k, A, B) _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm256_cmpgt_epu32_mask(A, B) _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_GT)
#define _mm256_mask_cmpgt_epu32_mask(k, A, B) _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm256_cmple_epu32_mask(A, B) _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_LE)
#define _mm256_mask_cmple_epu32_mask(k, A, B) _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm256_cmplt_epu32_mask(A, B) _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_LT)
#define _mm256_mask_cmplt_epu32_mask(k, A, B) _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm256_cmpneq_epu32_mask(A, B) _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_NE)
#define _mm256_mask_cmpneq_epu32_mask(k, A, B) _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm_cmpeq_epi64_mask(A, B) _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ)
#define _mm_mask_cmpeq_epi64_mask(k, A, B) _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm_cmpge_epi64_mask(A, B) _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_GE)
#define _mm_mask_cmpge_epi64_mask(k, A, B) _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm_cmpgt_epi64_mask(A, B) _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_GT)
#define _mm_mask_cmpgt_epi64_mask(k, A, B) _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm_cmple_epi64_mask(A, B) _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_LE)
#define _mm_mask_cmple_epi64_mask(k, A, B) _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm_cmplt_epi64_mask(A, B) _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_LT)
#define _mm_mask_cmplt_epi64_mask(k, A, B) _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm_cmpneq_epi64_mask(A, B) _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_NE)
#define _mm_mask_cmpneq_epi64_mask(k, A, B) _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm256_cmpeq_epi64_mask(A, B) _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ)
#define _mm256_mask_cmpeq_epi64_mask(k, A, B) _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm256_cmpge_epi64_mask(A, B) _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_GE)
#define _mm256_mask_cmpge_epi64_mask(k, A, B) _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm256_cmpgt_epi64_mask(A, B) _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_GT)
#define _mm256_mask_cmpgt_epi64_mask(k, A, B) _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm256_cmple_epi64_mask(A, B) _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_LE)
#define _mm256_mask_cmple_epi64_mask(k, A, B) _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm256_cmplt_epi64_mask(A, B) _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_LT)
#define _mm256_mask_cmplt_epi64_mask(k, A, B) _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm256_cmpneq_epi64_mask(A, B) _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_NE)
#define _mm256_mask_cmpneq_epi64_mask(k, A, B) _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm_cmpeq_epu64_mask(A, B) _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ)
#define _mm_mask_cmpeq_epu64_mask(k, A, B) _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm_cmpge_epu64_mask(A, B) _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_GE)
#define _mm_mask_cmpge_epu64_mask(k, A, B) _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm_cmpgt_epu64_mask(A, B) _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_GT)
#define _mm_mask_cmpgt_epu64_mask(k, A, B) _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm_cmple_epu64_mask(A, B) _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_LE)
#define _mm_mask_cmple_epu64_mask(k, A, B) _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm_cmplt_epu64_mask(A, B) _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_LT)
#define _mm_mask_cmplt_epu64_mask(k, A, B) _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm_cmpneq_epu64_mask(A, B) _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_NE)
#define _mm_mask_cmpneq_epu64_mask(k, A, B) _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm256_cmpeq_epu64_mask(A, B) _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ)
#define _mm256_mask_cmpeq_epu64_mask(k, A, B) _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm256_cmpge_epu64_mask(A, B) _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_GE)
#define _mm256_mask_cmpge_epu64_mask(k, A, B) _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm256_cmpgt_epu64_mask(A, B) _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_GT)
#define _mm256_mask_cmpgt_epu64_mask(k, A, B) _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm256_cmple_epu64_mask(A, B) _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_LE)
#define _mm256_mask_cmple_epu64_mask(k, A, B) _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm256_cmplt_epu64_mask(A, B) _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_LT)
#define _mm256_mask_cmplt_epu64_mask(k, A, B) _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm256_cmpneq_epu64_mask(A, B) _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_NE)
#define _mm256_mask_cmpneq_epu64_mask(k, A, B) _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE)
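/*
 * Usage sketch for the comparison-macro family above (illustrative, not part
 * of the original header; assumes an AVX-512VL target). Each macro yields a
 * bitmask with bit i set when the predicate holds for lane i:
 *
 *   __m128i a  = _mm_set_epi32(3, 2, 1, 0);         // lanes 3..0
 *   __m128i b  = _mm_set_epi32(3, 0, 1, 9);
 *   __mmask8 k = _mm_cmpeq_epi32_mask(a, b);        // lanes 1 and 3 match: k == 0x0A
 *   __mmask8 m = _mm_mask_cmplt_epu32_mask(0x3, a, b); // only lanes 0-1 are tested
 */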
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
#define _mm_cmp_epi32_mask(a, b, p) \
  (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), (__v4si)(__m128i)(b), (int)(p), (__mmask8)-1)

#define _mm_mask_cmp_epi32_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), (__v4si)(__m128i)(b), (int)(p), (__mmask8)(m))

#define _mm_cmp_epu32_mask(a, b, p) \
  (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), (__v4si)(__m128i)(b), (int)(p), (__mmask8)-1)

#define _mm_mask_cmp_epu32_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), (__v4si)(__m128i)(b), (int)(p), (__mmask8)(m))

#define _mm256_cmp_epi32_mask(a, b, p) \
  (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), (__v8si)(__m256i)(b), (int)(p), (__mmask8)-1)

#define _mm256_mask_cmp_epi32_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), (__v8si)(__m256i)(b), (int)(p), (__mmask8)(m))

#define _mm256_cmp_epu32_mask(a, b, p) \
  (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), (__v8si)(__m256i)(b), (int)(p), (__mmask8)-1)

#define _mm256_mask_cmp_epu32_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), (__v8si)(__m256i)(b), (int)(p), (__mmask8)(m))

#define _mm_cmp_epi64_mask(a, b, p) \
  (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), (__v2di)(__m128i)(b), (int)(p), (__mmask8)-1)

#define _mm_mask_cmp_epi64_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), (__v2di)(__m128i)(b), (int)(p), (__mmask8)(m))

#define _mm_cmp_epu64_mask(a, b, p) \
  (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), (__v2di)(__m128i)(b), (int)(p), (__mmask8)-1)

#define _mm_mask_cmp_epu64_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), (__v2di)(__m128i)(b), (int)(p), (__mmask8)(m))

#define _mm256_cmp_epi64_mask(a, b, p) \
  (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), (__v4di)(__m256i)(b), (int)(p), (__mmask8)-1)

#define _mm256_mask_cmp_epi64_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), (__v4di)(__m256i)(b), (int)(p), (__mmask8)(m))

#define _mm256_cmp_epu64_mask(a, b, p) \
  (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), (__v4di)(__m256i)(b), (int)(p), (__mmask8)-1)

#define _mm256_mask_cmp_epu64_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), (__v4di)(__m256i)(b), (int)(p), (__mmask8)(m))

#define _mm256_cmp_ps_mask(a, b, p) \
  (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), (__v8sf)(__m256)(b), (int)(p), (__mmask8)-1)

#define _mm256_mask_cmp_ps_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), (__v8sf)(__m256)(b), (int)(p), (__mmask8)(m))

#define _mm256_cmp_pd_mask(a, b, p) \
  (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), (__v4df)(__m256d)(b), (int)(p), (__mmask8)-1)

#define _mm256_mask_cmp_pd_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), (__v4df)(__m256d)(b), (int)(p), (__mmask8)(m))

#define _mm_cmp_ps_mask(a, b, p) \
  (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), (int)(p), (__mmask8)-1)

#define _mm_mask_cmp_ps_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), (int)(p), (__mmask8)(m))

#define _mm_cmp_pd_mask(a, b, p) \
  (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), (int)(p), (__mmask8)-1)

#define _mm_mask_cmp_pd_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), (int)(p), (__mmask8)(m))
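/*
 * Usage sketch for the predicate-based macros above (illustrative, not part
 * of the original header): the integer forms take an _MM_CMPINT_* predicate,
 * while the ps/pd forms take a _CMP_* predicate as defined for AVX in
 * <avxintrin.h>.
 *
 *   __mmask8 k1 = _mm_cmp_epi32_mask(a, b, _MM_CMPINT_LE);
 *   __mmask8 k2 = _mm256_cmp_ps_mask(x, y, _CMP_LT_OQ);  // ordered, quiet less-than
 */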
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddpd ((__v2df) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddpd ((__v2df) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddpd ((__v2df) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddpd ((__v2df) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddpd ((__v2df) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddpd (-(__v2df) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddpd (-(__v2df) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddpd (-(__v2df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddpd256 ((__v4df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddpd256 ((__v4df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddpd256 ((__v4df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddpd256 ((__v4df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddpd256 ((__v4df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddps ((__v4sf) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddps ((__v4sf) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddps ((__v4sf) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddps ((__v4sf) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddps ((__v4sf) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddps (-(__v4sf) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddps (-(__v4sf) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddps (-(__v4sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddps256 ((__v8sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddps256 ((__v8sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddps256 ((__v8sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddps256 ((__v8sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddps256 ((__v8sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd ((__v2df) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd ((__v2df) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd ((__v2df) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd ((__v2df) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd ((__v2df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubps ((__v4sf) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubps ((__v4sf) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubps ((__v4sf) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubps ((__v4sf) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubps ((__v4sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddpd ((__v2df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddpd256 ((__v4df) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddps ((__v4sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddps256 ((__v8sf) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd ((__v2df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubps ((__v4sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddpd ((__v2df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddpd256 ((__v4df) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddps ((__v4sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddps256 ((__v8sf) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddpd ((__v2df) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddpd ((__v2df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddpd256 ((__v4df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddpd256 ((__v4df) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddps ((__v4sf) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddps ((__v4sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddps256 ((__v8sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddps256 ((__v8sf) __A,
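/*
 * The truncated bodies above belong to the masked FMA family (the mask,
 * maskz, and mask3 forms of fmadd/fmsub/fnmadd and fmaddsub, in ps/pd and
 * 128/256-bit widths). Usage sketch (illustrative, not part of the original
 * header):
 *
 *   // r[i] = bit i of m set ? a[i]*b[i] + c[i] : a[i]
 *   // (the mask form keeps the first source operand in unselected lanes)
 *   __m128d r = _mm_mask_fmadd_pd(a, m, b, c);
 */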
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
  return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
  return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
  return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
  return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
  return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
  return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
  return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
  return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
  return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
  return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
  return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
  return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
  return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
  return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
  return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
  return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
  return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
  __builtin_ia32_compressstoredf128_mask ((__v2df *) __P,
  __builtin_ia32_compressstoredf256_mask ((__v4df *) __P,
  __builtin_ia32_compressstoredi128_mask ((__v2di *) __P,
  __builtin_ia32_compressstoredi256_mask ((__v4di *) __P,
  __builtin_ia32_compressstoresf128_mask ((__v4sf *) __P,
  __builtin_ia32_compressstoresf256_mask ((__v8sf *) __P,
  __builtin_ia32_compressstoresi128_mask ((__v4si *) __P,
  __builtin_ia32_compressstoresi256_mask ((__v8si *) __P,
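/*
 * Usage sketch for the compress family above (illustrative, not part of the
 * original header): active lanes are packed toward element 0, and the store
 * form writes only the packed prefix to memory.
 *
 *   __m128d packed = _mm_maskz_compress_pd(m, v);
 *   _mm_mask_compressstoreu_pd(out, m, v);   // writes popcount(m) doubles
 */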
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
  return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
  return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
  return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
  return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
  return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
  return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
  return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
  return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
  return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
  return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
  return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
  return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
  return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
  return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
  return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
  return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
  return (__m128d) __builtin_convertvector(
      __builtin_shufflevector((__v4su)__A, (__v4su)__A, 0, 1), __v2df);
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
  return (__m256d)__builtin_convertvector((__v4su)__A, __v4df);
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
  return (__m128)__builtin_convertvector((__v4su)__A, __v4sf);
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_convertvector((__v8su)__A, __v8sf);
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
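/*
 * Note: the unsigned conversions above lower to the generic
 * __builtin_convertvector (with a shuffle to pick the low half where the
 * destination is narrower) rather than to a target-specific builtin. Usage
 * sketch (illustrative, not part of the original header):
 *
 *   __m128d lo2 = _mm_cvtepu32_pd(u32x4);   // converts the low two lanes
 *   __m256  all = _mm256_cvtepu32_ps(u32x8);
 */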
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
  return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
  return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
  return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
  return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
  return (__m128d) __builtin_ia32_expandloaddf128_mask ((__v2df *) __P,
  return (__m128d) __builtin_ia32_expandloaddf128_mask ((__v2df *) __P,
  return (__m256d) __builtin_ia32_expandloaddf256_mask ((__v4df *) __P,
  return (__m256d) __builtin_ia32_expandloaddf256_mask ((__v4df *) __P,
  return (__m128i) __builtin_ia32_expandloaddi128_mask ((__v2di *) __P,
  return (__m128i) __builtin_ia32_expandloaddi128_mask ((__v2di *) __P,
  return (__m256i) __builtin_ia32_expandloaddi256_mask ((__v4di *) __P,
  return (__m256i) __builtin_ia32_expandloaddi256_mask ((__v4di *) __P,
  return (__m128) __builtin_ia32_expandloadsf128_mask ((__v4sf *) __P,
  return (__m128) __builtin_ia32_expandloadsf128_mask ((__v4sf *) __P,
  return (__m256) __builtin_ia32_expandloadsf256_mask ((__v8sf *) __P,
  return (__m256) __builtin_ia32_expandloadsf256_mask ((__v8sf *) __P,
  return (__m128i) __builtin_ia32_expandloadsi128_mask ((__v4si *) __P,
  return (__m128i) __builtin_ia32_expandloadsi128_mask ((__v4si *) __P,
  return (__m256i) __builtin_ia32_expandloadsi256_mask ((__v8si *) __P,
  return (__m256i) __builtin_ia32_expandloadsi256_mask ((__v8si *) __P,
  return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
  return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
  return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
  return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
  return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
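/*
 * Usage sketch for the expand family above (illustrative, not part of the
 * original header): it is the inverse of compress, scattering consecutive
 * source elements into the lanes selected by the mask; the load form reads
 * only popcount(m) elements from memory.
 *
 *   __m128i e = _mm_maskz_expandloadu_epi32(m, src);
 */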
  return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
  return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
  return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
  return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
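/*
 * Usage sketch for getexp (illustrative, not part of the original header):
 * each lane receives floor(log2(|x|)) as a floating-point value, so for
 * x == 8.0 the result lane is 3.0.
 *
 *   __m128d e = _mm_getexp_pd(x);
 */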
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_pabsq128((__v2di)__A);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_pabsq256 ((__v4di)__A);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
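/*
 * Usage sketch for the 64-bit absolute-value bodies above (illustrative, not
 * part of the original header):
 *
 *   __m128i a = _mm_abs_epi64(v);            // |v[i]| per 64-bit lane
 *   __m128i b = _mm_maskz_abs_epi64(m, v);   // zeroes unselected lanes
 */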
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_pmaxsq128((__v2di)__A, (__v2di)__B);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_pmaxsq256((__v4di)__A, (__v4di)__B);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_pmaxuq128((__v2di)__A, (__v2di)__B);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_pmaxuq256((__v4di)__A, (__v4di)__B);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_pminsq128((__v2di)__A, (__v2di)__B);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_pminsq256((__v4di)__A, (__v4di)__B);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_pminuq128((__v2di)__A, (__v2di)__B);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_pminuq256((__v4di)__A, (__v4di)__B);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
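/*
 * Usage sketch for the 64-bit min/max bodies above (illustrative, not part
 * of the original header): AVX-512VL adds the 64-bit element width that
 * AVX2's min/max family lacked.
 *
 *   __m128i hi = _mm_max_epi64(a, b);            // signed 64-bit maximum
 *   __m128i lo = _mm_mask_min_epu64(w, m, a, b); // unselected lanes keep w
 */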
#define _mm_roundscale_pd(A, imm) \
  (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), (int)(imm), (__v2df)_mm_setzero_pd(), (__mmask8)-1)

#define _mm_mask_roundscale_pd(W, U, A, imm) \
  (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), (int)(imm), (__v2df)(__m128d)(W), (__mmask8)(U))

#define _mm_maskz_roundscale_pd(U, A, imm) \
  (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), (int)(imm), (__v2df)_mm_setzero_pd(), (__mmask8)(U))

#define _mm256_roundscale_pd(A, imm) \
  (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), (int)(imm), (__v4df)_mm256_setzero_pd(), (__mmask8)-1)

#define _mm256_mask_roundscale_pd(W, U, A, imm) \
  (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), (int)(imm), (__v4df)(__m256d)(W), (__mmask8)(U))

#define _mm256_maskz_roundscale_pd(U, A, imm) \
  (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), (int)(imm), (__v4df)_mm256_setzero_pd(), (__mmask8)(U))

#define _mm_roundscale_ps(A, imm) \
  (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), (__v4sf)_mm_setzero_ps(), (__mmask8)-1)

#define _mm_mask_roundscale_ps(W, U, A, imm) \
  (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), (__v4sf)(__m128)(W), (__mmask8)(U))

#define _mm_maskz_roundscale_ps(U, A, imm) \
  (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), (__v4sf)_mm_setzero_ps(), (__mmask8)(U))

#define _mm256_roundscale_ps(A, imm) \
  (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), (__v8sf)_mm256_setzero_ps(), (__mmask8)-1)

#define _mm256_mask_roundscale_ps(W, U, A, imm) \
  (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), (__v8sf)(__m256)(W), (__mmask8)(U))

#define _mm256_maskz_roundscale_ps(U, A, imm) \
  (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), (__v8sf)_mm256_setzero_ps(), (__mmask8)(U))
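/*
 * Usage sketch for roundscale (illustrative, not part of the original
 * header): imm[7:4] gives the scale M (each lane is rounded to a multiple of
 * 2^-M) and the low bits select the rounding mode.
 *
 *   __m128d r = _mm_roundscale_pd(x, 0x01);   // M = 0, round toward -inf
 */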
  return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
  return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
  return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
  return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
#define _mm_i64scatter_pd(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv2df((double *)(addr), (__mmask8)-1, (__v2di)(__m128i)(index), (__v2df)(__m128d)(v1), (int)(scale))

#define _mm_mask_i64scatter_pd(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv2df((double *)(addr), (__mmask8)(mask), (__v2di)(__m128i)(index), (__v2df)(__m128d)(v1), (int)(scale))

#define _mm_i64scatter_epi64(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv2di((long long *)(addr), (__mmask8)-1, (__v2di)(__m128i)(index), (__v2di)(__m128i)(v1), (int)(scale))

#define _mm_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv2di((long long *)(addr), (__mmask8)(mask), (__v2di)(__m128i)(index), (__v2di)(__m128i)(v1), (int)(scale))

#define _mm256_i64scatter_pd(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv4df((double *)(addr), (__mmask8)-1, (__v4di)(__m256i)(index), (__v4df)(__m256d)(v1), (int)(scale))

#define _mm256_mask_i64scatter_pd(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv4df((double *)(addr), (__mmask8)(mask), (__v4di)(__m256i)(index), (__v4df)(__m256d)(v1), (int)(scale))

#define _mm256_i64scatter_epi64(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv4di((long long *)(addr), (__mmask8)-1, (__v4di)(__m256i)(index), (__v4di)(__m256i)(v1), (int)(scale))

#define _mm256_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv4di((long long *)(addr), (__mmask8)(mask), (__v4di)(__m256i)(index), (__v4di)(__m256i)(v1), (int)(scale))

#define _mm_i64scatter_ps(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv4sf((float *)(addr), (__mmask8)-1, (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), (int)(scale))

#define _mm_mask_i64scatter_ps(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv4sf((float *)(addr), (__mmask8)(mask), (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), (int)(scale))

#define _mm_i64scatter_epi32(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv4si((int *)(addr), (__mmask8)-1, (__v2di)(__m128i)(index), (__v4si)(__m128i)(v1), (int)(scale))

#define _mm_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv4si((int *)(addr), (__mmask8)(mask), (__v2di)(__m128i)(index), (__v4si)(__m128i)(v1), (int)(scale))

#define _mm256_i64scatter_ps(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv8sf((float *)(addr), (__mmask8)-1, (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), (int)(scale))

#define _mm256_mask_i64scatter_ps(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv8sf((float *)(addr), (__mmask8)(mask), (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), (int)(scale))

#define _mm256_i64scatter_epi32(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv8si((int *)(addr), (__mmask8)-1, (__v4di)(__m256i)(index), (__v4si)(__m128i)(v1), (int)(scale))

#define _mm256_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv8si((int *)(addr), (__mmask8)(mask), (__v4di)(__m256i)(index), (__v4si)(__m128i)(v1), (int)(scale))

#define _mm_i32scatter_pd(addr, index, v1, scale) \
  __builtin_ia32_scattersiv2df((double *)(addr), (__mmask8)-1, (__v4si)(__m128i)(index), (__v2df)(__m128d)(v1), (int)(scale))

#define _mm_mask_i32scatter_pd(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv2df((double *)(addr), (__mmask8)(mask), (__v4si)(__m128i)(index), (__v2df)(__m128d)(v1), (int)(scale))

#define _mm_i32scatter_epi64(addr, index, v1, scale) \
  __builtin_ia32_scattersiv2di((long long *)(addr), (__mmask8)-1, (__v4si)(__m128i)(index), (__v2di)(__m128i)(v1), (int)(scale))

#define _mm_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv2di((long long *)(addr), (__mmask8)(mask), (__v4si)(__m128i)(index), (__v2di)(__m128i)(v1), (int)(scale))

#define _mm256_i32scatter_pd(addr, index, v1, scale) \
  __builtin_ia32_scattersiv4df((double *)(addr), (__mmask8)-1, (__v4si)(__m128i)(index), (__v4df)(__m256d)(v1), (int)(scale))

#define _mm256_mask_i32scatter_pd(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv4df((double *)(addr), (__mmask8)(mask), (__v4si)(__m128i)(index), (__v4df)(__m256d)(v1), (int)(scale))

#define _mm256_i32scatter_epi64(addr, index, v1, scale) \
  __builtin_ia32_scattersiv4di((long long *)(addr), (__mmask8)-1, (__v4si)(__m128i)(index), (__v4di)(__m256i)(v1), (int)(scale))

#define _mm256_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv4di((long long *)(addr), (__mmask8)(mask), (__v4si)(__m128i)(index), (__v4di)(__m256i)(v1), (int)(scale))

#define _mm_i32scatter_ps(addr, index, v1, scale) \
  __builtin_ia32_scattersiv4sf((float *)(addr), (__mmask8)-1, (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), (int)(scale))

#define _mm_mask_i32scatter_ps(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv4sf((float *)(addr), (__mmask8)(mask), (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), (int)(scale))

#define _mm_i32scatter_epi32(addr, index, v1, scale) \
  __builtin_ia32_scattersiv4si((int *)(addr), (__mmask8)-1, (__v4si)(__m128i)(index), (__v4si)(__m128i)(v1), (int)(scale))

#define _mm_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv4si((int *)(addr), (__mmask8)(mask), (__v4si)(__m128i)(index), (__v4si)(__m128i)(v1), (int)(scale))

#define _mm256_i32scatter_ps(addr, index, v1, scale) \
  __builtin_ia32_scattersiv8sf((float *)(addr), (__mmask8)-1, (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), (int)(scale))

#define _mm256_mask_i32scatter_ps(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv8sf((float *)(addr), (__mmask8)(mask), (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), (int)(scale))

#define _mm256_i32scatter_epi32(addr, index, v1, scale) \
  __builtin_ia32_scattersiv8si((int *)(addr), (__mmask8)-1, (__v8si)(__m256i)(index), (__v8si)(__m256i)(v1), (int)(scale))

#define _mm256_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv8si((int *)(addr), (__mmask8)(mask), (__v8si)(__m256i)(index), (__v8si)(__m256i)(v1), (int)(scale))
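/*
 * Usage sketch for the scatter macros above (illustrative, not part of the
 * original header): element i of v1 is written to addr + index[i] * scale,
 * where scale must be 1, 2, 4, or 8.
 *
 *   _mm_i64scatter_pd(buf, idx, vals, 8);           // doubles at buf[idx[i]]
 *   _mm_mask_i64scatter_pd(buf, m, idx, vals, 8);   // only lanes set in m
 */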
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_vpermi2vard128((__v4si) __A, (__v4si)__I,
  return (__m128i)__builtin_ia32_selectd_128(__U,
  return (__m128i)__builtin_ia32_selectd_128(__U,
  return (__m128i)__builtin_ia32_selectd_128(__U,
  return (__m256i)__builtin_ia32_vpermi2vard256((__v8si)__A, (__v8si) __I,
  return (__m256i)__builtin_ia32_selectd_256(__U,
  return (__m256i)__builtin_ia32_selectd_256(__U,
  return (__m256i)__builtin_ia32_selectd_256(__U,
  return (__m128d)__builtin_ia32_vpermi2varpd128((__v2df)__A, (__v2di)__I,
  return (__m128d)__builtin_ia32_selectpd_128(__U,
  return (__m128d)__builtin_ia32_selectpd_128(__U,
      (__v2df)(__m128d)__I);
  return (__m128d)__builtin_ia32_selectpd_128(__U,
  return (__m256d)__builtin_ia32_vpermi2varpd256((__v4df)__A, (__v4di)__I,
  return (__m256d)__builtin_ia32_selectpd_256(__U,
  return (__m256d)__builtin_ia32_selectpd_256(__U,
      (__v4df)(__m256d)__I);
  return (__m256d)__builtin_ia32_selectpd_256(__U,
  return (__m128)__builtin_ia32_vpermi2varps128((__v4sf)__A, (__v4si)__I,
  return (__m128)__builtin_ia32_selectps_128(__U,
  return (__m128)__builtin_ia32_selectps_128(__U,
      (__v4sf)(__m128)__I);
  return (__m128)__builtin_ia32_selectps_128(__U,
  return (__m256)__builtin_ia32_vpermi2varps256((__v8sf)__A, (__v8si)__I,
  return (__m256)__builtin_ia32_selectps_256(__U,
  return (__m256)__builtin_ia32_selectps_256(__U,
      (__v8sf)(__m256)__I);
  return (__m256)__builtin_ia32_selectps_256(__U,
  return (__m128i)__builtin_ia32_vpermi2varq128((__v2di)__A, (__v2di)__I,
  return (__m128i)__builtin_ia32_selectq_128(__U,
  return (__m128i)__builtin_ia32_selectq_128(__U,
  return (__m128i)__builtin_ia32_selectq_128(__U,
  return (__m256i)__builtin_ia32_vpermi2varq256((__v4di)__A, (__v4di) __I,
  return (__m256i)__builtin_ia32_selectq_256(__U,
  return (__m256i)__builtin_ia32_selectq_256(__U,
  return (__m256i)__builtin_ia32_selectq_256(__U,
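/*
 * The vpermi2var bodies above implement the two-source permutes
 * (_mm_permutex2var_epi32 and friends): each index lane selects an element
 * from the concatenation of the two data sources, and the mask2 form keeps
 * the index operand in unselected lanes. Usage sketch (illustrative, not
 * part of the original header):
 *
 *   __m128i r = _mm_permutex2var_epi32(a, idx, b); // elements drawn from a and b
 */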
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
#define _mm_rol_epi32(a, b) \
  (__m128i)__builtin_ia32_prold128((__v4si)(__m128i)(a), (int)(b))

#define _mm_mask_rol_epi32(w, u, a, b) \
  (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), (__v4si)_mm_rol_epi32((a), (b)), (__v4si)(__m128i)(w))

#define _mm_maskz_rol_epi32(u, a, b) \
  (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), (__v4si)_mm_rol_epi32((a), (b)), (__v4si)_mm_setzero_si128())

#define _mm256_rol_epi32(a, b) \
  (__m256i)__builtin_ia32_prold256((__v8si)(__m256i)(a), (int)(b))

#define _mm256_mask_rol_epi32(w, u, a, b) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), (__v8si)_mm256_rol_epi32((a), (b)), (__v8si)(__m256i)(w))

#define _mm256_maskz_rol_epi32(u, a, b) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), (__v8si)_mm256_rol_epi32((a), (b)), (__v8si)_mm256_setzero_si256())

#define _mm_rol_epi64(a, b) \
  (__m128i)__builtin_ia32_prolq128((__v2di)(__m128i)(a), (int)(b))

#define _mm_mask_rol_epi64(w, u, a, b) \
  (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), (__v2di)_mm_rol_epi64((a), (b)), (__v2di)(__m128i)(w))

#define _mm_maskz_rol_epi64(u, a, b) \
  (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), (__v2di)_mm_rol_epi64((a), (b)), (__v2di)_mm_setzero_si128())

#define _mm256_rol_epi64(a, b) \
  (__m256i)__builtin_ia32_prolq256((__v4di)(__m256i)(a), (int)(b))

#define _mm256_mask_rol_epi64(w, u, a, b) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), (__v4di)_mm256_rol_epi64((a), (b)), (__v4di)(__m256i)(w))

#define _mm256_maskz_rol_epi64(u, a, b) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), (__v4di)_mm256_rol_epi64((a), (b)), (__v4di)_mm256_setzero_si256())

  return (__m128i)__builtin_ia32_prolvd128((__v4si)__A, (__v4si)__B);
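/*
 * Usage sketch for the rotate family (illustrative, not part of the original
 * header): rol/ror take an immediate count, while rolv/rorv take a per-lane
 * count vector.
 *
 *   __m128i r1 = _mm_rol_epi32(v, 8);         // rotate each lane left by 8
 *   __m128i r2 = _mm_rolv_epi32(v, counts);   // per-lane variable rotate
 */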
/* [Truncated in this listing: the _mm*_mask_rolv_* and _mm*_maskz_rolv_*
   wrappers, each selecting between the rotated result and __W or zero via
   __builtin_ia32_select{d,q}_{128,256}(__U, ...).] */

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_rolv_epi32 (__m256i __A, __m256i __B)
{
  return (__m256i)__builtin_ia32_prolvd256((__v8si)__A, (__v8si)__B);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_rolv_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_prolvq128((__v2di)__A, (__v2di)__B);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_rolv_epi64 (__m256i __A, __m256i __B)
{
  return (__m256i)__builtin_ia32_prolvq256((__v4di)__A, (__v4di)__B);
}
#define _mm_ror_epi32(a, b) \
  (__m128i)__builtin_ia32_prord128((__v4si)(__m128i)(a), (int)(b))

#define _mm_mask_ror_epi32(w, u, a, b) \
  (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
                                      (__v4si)_mm_ror_epi32((a), (b)), \
                                      (__v4si)(__m128i)(w))

#define _mm_maskz_ror_epi32(u, a, b) \
  (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
                                      (__v4si)_mm_ror_epi32((a), (b)), \
                                      (__v4si)_mm_setzero_si128())

#define _mm256_ror_epi32(a, b) \
  (__m256i)__builtin_ia32_prord256((__v8si)(__m256i)(a), (int)(b))

#define _mm256_mask_ror_epi32(w, u, a, b) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
                                      (__v8si)_mm256_ror_epi32((a), (b)), \
                                      (__v8si)(__m256i)(w))

#define _mm256_maskz_ror_epi32(u, a, b) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
                                      (__v8si)_mm256_ror_epi32((a), (b)), \
                                      (__v8si)_mm256_setzero_si256())

#define _mm_ror_epi64(a, b) \
  (__m128i)__builtin_ia32_prorq128((__v2di)(__m128i)(a), (int)(b))

#define _mm_mask_ror_epi64(w, u, a, b) \
  (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
                                      (__v2di)_mm_ror_epi64((a), (b)), \
                                      (__v2di)(__m128i)(w))

#define _mm_maskz_ror_epi64(u, a, b) \
  (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
                                      (__v2di)_mm_ror_epi64((a), (b)), \
                                      (__v2di)_mm_setzero_si128())

#define _mm256_ror_epi64(a, b) \
  (__m256i)__builtin_ia32_prorq256((__v4di)(__m256i)(a), (int)(b))

#define _mm256_mask_ror_epi64(w, u, a, b) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
                                      (__v4di)_mm256_ror_epi64((a), (b)), \
                                      (__v4di)(__m256i)(w))

#define _mm256_maskz_ror_epi64(u, a, b) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
                                      (__v4di)_mm256_ror_epi64((a), (b)), \
                                      (__v4di)_mm256_setzero_si256())

/* [Truncated in this listing: sixteen merge/zero-masking wrappers built on
   __builtin_ia32_select{d,q}_{128,256}((__mmask8)__U, ...); signatures and
   remaining operands are lost.] */
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_rorv_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_prorvd128((__v4si)__A, (__v4si)__B);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_rorv_epi32 (__m256i __A, __m256i __B)
{
  return (__m256i)__builtin_ia32_prorvd256((__v8si)__A, (__v8si)__B);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_rorv_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_prorvq128((__v2di)__A, (__v2di)__B);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_rorv_epi64 (__m256i __A, __m256i __B)
{
  return (__m256i)__builtin_ia32_prorvq256((__v4di)__A, (__v4di)__B);
}

/* [Truncated in this listing: the _mm*_mask_rorv_* and _mm*_maskz_rorv_*
   wrappers, each selecting between the rotated result and __W or zero via
   __builtin_ia32_select{d,q}_{128,256}(__U, ...).] */
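/*
 * Usage sketch (editorial addition, not part of the original header): the
 * rotate macros and rolv/rorv intrinsics above compile to VPROLD, VPROLVD
 * and friends. Assumes <immintrin.h> is included and the translation unit is
 * built with -mavx512vl; the helper name below is hypothetical.
 */
static inline __m128i demo_rotl32_masked(__m128i w, __mmask8 u, __m128i a)
{
  /* Rotate each 32-bit lane left by 7; lanes whose mask bit is 0 keep the
     corresponding lane of w (merge masking). */
  return _mm_mask_rol_epi32(w, u, a, 7);
}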
/*
 * [Truncated in this listing: nine families of four merge/zero-masking
 * wrapper functions, each of the form
 *   return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, ...
 * alternating between selectd/selectq and the 128/256-bit builtins; the
 * signatures and remaining operands are lost.]
 */
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_srav_epi64 (__m128i __X, __m128i __Y)
{
  return (__m128i)__builtin_ia32_psravq128((__v2di)__X, (__v2di)__Y);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srav_epi64 (__m256i __X, __m256i __Y)
{
  return (__m256i)__builtin_ia32_psravq256((__v4di)__X, (__v4di)__Y);
}

/* [Truncated in this listing: the _mm*_mask_srav_epi64 and
   _mm*_maskz_srav_epi64 wrappers built on
   __builtin_ia32_selectq_{128,256}((__mmask8)__U, ...).] */
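/*
 * Usage sketch (editorial addition): _mm_srav_epi64 performs a per-lane
 * arithmetic right shift on 64-bit elements (VPSRAVQ), which AVX2 does not
 * provide. Hypothetical helper; assumes -mavx512vl.
 */
static inline __m128i demo_ashr64_per_lane(__m128i x)
{
  /* Shift lane 0 right by 1 and lane 1 by 33, preserving each sign bit. */
  const __m128i counts = _mm_set_epi64x(33, 1);
  return _mm_srav_epi64(x, counts);
}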
/*
 * [Truncated in this listing: likely the masked mov_epi32/mov_epi64 blend
 * wrappers, the masked aligned loads and stores built on
 * __builtin_ia32_movdqa32load{128,256}_mask,
 * __builtin_ia32_movdqa32store{128,256}_mask and their movdqa64
 * counterparts (the _mm*_mask_load_epi32/epi64, _mm*_maskz_load_epi32/epi64
 * and _mm*_mask_store_epi32/epi64 family), four selectpd-based wrappers,
 * and select-based set1 broadcasts taking an __mmask8 __M.]
 */
#define _mm_fixupimm_pd(A, B, C, imm) \
  (__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
                                             (__v2df)(__m128d)(B), \
                                             (__v2di)(__m128i)(C), (int)(imm), \
                                             (__mmask8)-1)

#define _mm_mask_fixupimm_pd(A, U, B, C, imm) \
  (__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
                                             (__v2df)(__m128d)(B), \
                                             (__v2di)(__m128i)(C), (int)(imm), \
                                             (__mmask8)(U))

#define _mm_maskz_fixupimm_pd(U, A, B, C, imm) \
  (__m128d)__builtin_ia32_fixupimmpd128_maskz((__v2df)(__m128d)(A), \
                                              (__v2df)(__m128d)(B), \
                                              (__v2di)(__m128i)(C), \
                                              (int)(imm), (__mmask8)(U))

#define _mm256_fixupimm_pd(A, B, C, imm) \
  (__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
                                             (__v4df)(__m256d)(B), \
                                             (__v4di)(__m256i)(C), (int)(imm), \
                                             (__mmask8)-1)

#define _mm256_mask_fixupimm_pd(A, U, B, C, imm) \
  (__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
                                             (__v4df)(__m256d)(B), \
                                             (__v4di)(__m256i)(C), (int)(imm), \
                                             (__mmask8)(U))

#define _mm256_maskz_fixupimm_pd(U, A, B, C, imm) \
  (__m256d)__builtin_ia32_fixupimmpd256_maskz((__v4df)(__m256d)(A), \
                                              (__v4df)(__m256d)(B), \
                                              (__v4di)(__m256i)(C), \
                                              (int)(imm), (__mmask8)(U))

#define _mm_fixupimm_ps(A, B, C, imm) \
  (__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
                                            (__v4sf)(__m128)(B), \
                                            (__v4si)(__m128i)(C), (int)(imm), \
                                            (__mmask8)-1)

#define _mm_mask_fixupimm_ps(A, U, B, C, imm) \
  (__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
                                            (__v4sf)(__m128)(B), \
                                            (__v4si)(__m128i)(C), (int)(imm), \
                                            (__mmask8)(U))

#define _mm_maskz_fixupimm_ps(U, A, B, C, imm) \
  (__m128)__builtin_ia32_fixupimmps128_maskz((__v4sf)(__m128)(A), \
                                             (__v4sf)(__m128)(B), \
                                             (__v4si)(__m128i)(C), (int)(imm), \
                                             (__mmask8)(U))

#define _mm256_fixupimm_ps(A, B, C, imm) \
  (__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
                                            (__v8sf)(__m256)(B), \
                                            (__v8si)(__m256i)(C), (int)(imm), \
                                            (__mmask8)-1)

#define _mm256_mask_fixupimm_ps(A, U, B, C, imm) \
  (__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
                                            (__v8sf)(__m256)(B), \
                                            (__v8si)(__m256i)(C), (int)(imm), \
                                            (__mmask8)(U))

#define _mm256_maskz_fixupimm_ps(U, A, B, C, imm) \
  (__m256)__builtin_ia32_fixupimmps256_maskz((__v8sf)(__m256)(A), \
                                             (__v8sf)(__m256)(B), \
                                             (__v8si)(__m256i)(C), (int)(imm), \
                                             (__mmask8)(U))

/*
 * [Truncated in this listing: the masked load and store family built on
 * __builtin_ia32_loadap{d,s}{128,256}_mask, loaddqu{di,si}{128,256}_mask,
 * loadup{d,s}{128,256}_mask and the matching storeap, storedqu and storeup
 * builtins, i.e. _mm*_mask_load_pd/ps, _mm*_mask_loadu_epi32/epi64/pd/ps
 * and their maskz and store counterparts.]
 */
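/*
 * Usage sketch (editorial addition): the masked load/store intrinsics
 * summarized above make tail handling branch-free; a zero-masked load
 * suppresses faults on the masked-off lanes, so a partial final block can
 * be read with a computed mask. Hypothetical helper; assumes -mavx512vl.
 */
static inline __m256 demo_load_tail(const float *p, unsigned n /* 0..8 */)
{
  /* Build a mask with the low n bits set, then load only those lanes;
     masked-off lanes are zeroed. */
  __mmask8 m = (__mmask8)((1u << n) - 1u);
  return _mm256_maskz_loadu_ps(m, p);
}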
/* [Truncated in this listing: likely the masked unpackhi/unpacklo wrappers
   for pd and ps, via __builtin_ia32_selectp{d,s}_{128,256}((__mmask8)__U,
   ...); signatures and operands are lost.] */
/* [Truncated in this listing: the rcp14 reciprocal-approximation family
   built on __builtin_ia32_rcp14p{d,s}{128,256}_mask, in unmasked,
   merge-masked and zero-masked forms (_mm_rcp14_pd, _mm_mask_rcp14_pd,
   _mm_maskz_rcp14_pd and the ps and 256-bit relatives).] */
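/*
 * Usage sketch (editorial addition): rcp14 returns a reciprocal
 * approximation with relative error below 2^-14; one Newton-Raphson step
 * refines it to nearly full single precision. Hypothetical helper; assumes
 * -mavx512vl (and FMA, present on all AVX-512 parts).
 */
static inline __m256 demo_fast_recip(__m256 a)
{
  __m256 r = _mm256_rcp14_ps(a);                        /* r ~= 1/a       */
  __m256 two = _mm256_set1_ps(2.0f);
  return _mm256_mul_ps(r, _mm256_fnmadd_ps(a, r, two)); /* r * (2 - a*r)  */
}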
#define _mm_mask_permute_pd(W, U, X, C) \
  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
                                       (__v2df)_mm_permute_pd((X), (C)), \
                                       (__v2df)(__m128d)(W))

#define _mm_maskz_permute_pd(U, X, C) \
  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
                                       (__v2df)_mm_permute_pd((X), (C)), \
                                       (__v2df)_mm_setzero_pd())

#define _mm256_mask_permute_pd(W, U, X, C) \
  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                       (__v4df)_mm256_permute_pd((X), (C)), \
                                       (__v4df)(__m256d)(W))

#define _mm256_maskz_permute_pd(U, X, C) \
  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                       (__v4df)_mm256_permute_pd((X), (C)), \
                                       (__v4df)_mm256_setzero_pd())

#define _mm_mask_permute_ps(W, U, X, C) \
  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
                                      (__v4sf)_mm_permute_ps((X), (C)), \
                                      (__v4sf)(__m128)(W))

#define _mm_maskz_permute_ps(U, X, C) \
  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
                                      (__v4sf)_mm_permute_ps((X), (C)), \
                                      (__v4sf)_mm_setzero_ps())

#define _mm256_mask_permute_ps(W, U, X, C) \
  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                      (__v8sf)_mm256_permute_ps((X), (C)), \
                                      (__v8sf)(__m256)(W))

#define _mm256_maskz_permute_ps(U, X, C) \
  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                      (__v8sf)_mm256_permute_ps((X), (C)), \
                                      (__v8sf)_mm256_setzero_ps())
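/*
 * Usage sketch (editorial addition): _mm256_mask_permute_ps applies a
 * VPERMILPS-style in-lane permute and merges under a writemask in one
 * instruction. Hypothetical helper; assumes -mavx512vl.
 */
static inline __m256 demo_masked_swap_pairs(__m256 w, __mmask8 u, __m256 x)
{
  /* _MM_SHUFFLE(2,3,0,1) swaps adjacent element pairs within each 128-bit
     lane; lanes with a 0 mask bit keep w. */
  return _mm256_mask_permute_ps(w, u, x, _MM_SHUFFLE(2, 3, 0, 1));
}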
/* [Truncated in this listing: likely the masked permutevar_pd/permutevar_ps
   wrappers, selecting between the permuted result and __W or zero via
   __builtin_ia32_selectp{d,s}_{128,256}((__mmask8)__U, ...).] */
/* [Truncated in this listing: six families of four select-based wrappers
   taking (__mmask8)__U, likely the masked unpackhi/unpacklo epi32/epi64 and
   sra/srai epi32 forms, via __builtin_ia32_select{d,q}_{128,256}; signatures
   and operands are lost.] */
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_sra_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psraq128((__v2di)__A, (__v2di)__B);
}

/* [Truncated in this listing: _mm_mask_sra_epi64 and _mm_maskz_sra_epi64 via
   __builtin_ia32_selectq_128((__mmask8)__U, ...).] */

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sra_epi64 (__m256i __A, __m128i __B)
{
  return (__m256i)__builtin_ia32_psraq256((__v4di)__A, (__v2di)__B);
}

/* [Truncated in this listing: the 256-bit mask/maskz sra_epi64 forms via
   __builtin_ia32_selectq_256.] */

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_srai_epi64 (__m128i __A, int __imm)
{
  return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, __imm);
}

/* [Truncated in this listing: _mm_mask_srai_epi64 and _mm_maskz_srai_epi64.] */

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srai_epi64 (__m256i __A, int __imm)
{
  return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, __imm);
}

/* [Truncated in this listing: the 256-bit mask/maskz srai_epi64 forms.] */
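/*
 * Usage sketch (editorial addition): AVX-512VL adds the 64-bit arithmetic
 * right shifts missing from SSE/AVX2. _mm256_srai_epi64(x, 63) broadcasts
 * each lane's sign bit, which enables a branch-free 64-bit absolute value.
 * Hypothetical helper; assumes -mavx512vl.
 */
static inline __m256i demo_abs_epi64(__m256i x)
{
  __m256i s = _mm256_srai_epi64(x, 63);               /* 0 or ~0 per lane */
  return _mm256_sub_epi64(_mm256_xor_si256(x, s), s); /* (x ^ s) - s      */
}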
#define _mm_ternarylogic_epi32(A, B, C, imm) \
  (__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
                                            (__v4si)(__m128i)(B), \
                                            (__v4si)(__m128i)(C), (int)(imm), \
                                            (__mmask8)-1)

#define _mm_mask_ternarylogic_epi32(A, U, B, C, imm) \
  (__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
                                            (__v4si)(__m128i)(B), \
                                            (__v4si)(__m128i)(C), (int)(imm), \
                                            (__mmask8)(U))

#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm) \
  (__m128i)__builtin_ia32_pternlogd128_maskz((__v4si)(__m128i)(A), \
                                             (__v4si)(__m128i)(B), \
                                             (__v4si)(__m128i)(C), (int)(imm), \
                                             (__mmask8)(U))

#define _mm256_ternarylogic_epi32(A, B, C, imm) \
  (__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
                                            (__v8si)(__m256i)(B), \
                                            (__v8si)(__m256i)(C), (int)(imm), \
                                            (__mmask8)-1)

#define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm) \
  (__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
                                            (__v8si)(__m256i)(B), \
                                            (__v8si)(__m256i)(C), (int)(imm), \
                                            (__mmask8)(U))

#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm) \
  (__m256i)__builtin_ia32_pternlogd256_maskz((__v8si)(__m256i)(A), \
                                             (__v8si)(__m256i)(B), \
                                             (__v8si)(__m256i)(C), (int)(imm), \
                                             (__mmask8)(U))

#define _mm_ternarylogic_epi64(A, B, C, imm) \
  (__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
                                            (__v2di)(__m128i)(B), \
                                            (__v2di)(__m128i)(C), (int)(imm), \
                                            (__mmask8)-1)

#define _mm_mask_ternarylogic_epi64(A, U, B, C, imm) \
  (__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
                                            (__v2di)(__m128i)(B), \
                                            (__v2di)(__m128i)(C), (int)(imm), \
                                            (__mmask8)(U))

#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm) \
  (__m128i)__builtin_ia32_pternlogq128_maskz((__v2di)(__m128i)(A), \
                                             (__v2di)(__m128i)(B), \
                                             (__v2di)(__m128i)(C), (int)(imm), \
                                             (__mmask8)(U))

#define _mm256_ternarylogic_epi64(A, B, C, imm) \
  (__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
                                            (__v4di)(__m256i)(B), \
                                            (__v4di)(__m256i)(C), (int)(imm), \
                                            (__mmask8)-1)

#define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm) \
  (__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
                                            (__v4di)(__m256i)(B), \
                                            (__v4di)(__m256i)(C), (int)(imm), \
                                            (__mmask8)(U))

#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm) \
  (__m256i)__builtin_ia32_pternlogq256_maskz((__v4di)(__m256i)(A), \
                                             (__v4di)(__m256i)(B), \
                                             (__v4di)(__m256i)(C), (int)(imm), \
                                             (__mmask8)(U))

#define _mm256_shuffle_f32x4(A, B, imm) \
  (__m256)__builtin_ia32_shuf_f32x4_256((__v8sf)(__m256)(A), \
                                        (__v8sf)(__m256)(B), (int)(imm))

#define _mm256_mask_shuffle_f32x4(W, U, A, B, imm) \
  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                      (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
                                      (__v8sf)(__m256)(W))

#define _mm256_maskz_shuffle_f32x4(U, A, B, imm) \
  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                      (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
                                      (__v8sf)_mm256_setzero_ps())

#define _mm256_shuffle_f64x2(A, B, imm) \
  (__m256d)__builtin_ia32_shuf_f64x2_256((__v4df)(__m256d)(A), \
                                         (__v4df)(__m256d)(B), (int)(imm))

#define _mm256_mask_shuffle_f64x2(W, U, A, B, imm) \
  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                       (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
                                       (__v4df)(__m256d)(W))

#define _mm256_maskz_shuffle_f64x2(U, A, B, imm) \
  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                       (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
                                       (__v4df)_mm256_setzero_pd())

#define _mm256_shuffle_i32x4(A, B, imm) \
  (__m256i)__builtin_ia32_shuf_i32x4_256((__v8si)(__m256i)(A), \
                                         (__v8si)(__m256i)(B), (int)(imm))

#define _mm256_mask_shuffle_i32x4(W, U, A, B, imm) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                      (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
                                      (__v8si)(__m256i)(W))

#define _mm256_maskz_shuffle_i32x4(U, A, B, imm) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                      (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
                                      (__v8si)_mm256_setzero_si256())

#define _mm256_shuffle_i64x2(A, B, imm) \
  (__m256i)__builtin_ia32_shuf_i64x2_256((__v4di)(__m256i)(A), \
                                         (__v4di)(__m256i)(B), (int)(imm))

#define _mm256_mask_shuffle_i64x2(W, U, A, B, imm) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                      (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
                                      (__v4di)(__m256i)(W))

#define _mm256_maskz_shuffle_i64x2(U, A, B, imm) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                      (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
                                      (__v4di)_mm256_setzero_si256())

#define _mm_mask_shuffle_pd(W, U, A, B, M) \
  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
                                       (__v2df)_mm_shuffle_pd((A), (B), (M)), \
                                       (__v2df)(__m128d)(W))

#define _mm_maskz_shuffle_pd(U, A, B, M) \
  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
                                       (__v2df)_mm_shuffle_pd((A), (B), (M)), \
                                       (__v2df)_mm_setzero_pd())

#define _mm256_mask_shuffle_pd(W, U, A, B, M) \
  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                       (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
                                       (__v4df)(__m256d)(W))

#define _mm256_maskz_shuffle_pd(U, A, B, M) \
  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                       (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
                                       (__v4df)_mm256_setzero_pd())

#define _mm_mask_shuffle_ps(W, U, A, B, M) \
  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
                                      (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
                                      (__v4sf)(__m128)(W))

#define _mm_maskz_shuffle_ps(U, A, B, M) \
  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
                                      (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
                                      (__v4sf)_mm_setzero_ps())

#define _mm256_mask_shuffle_ps(W, U, A, B, M) \
  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                      (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
                                      (__v8sf)(__m256)(W))

#define _mm256_maskz_shuffle_ps(U, A, B, M) \
  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                      (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
                                      (__v8sf)_mm256_setzero_ps())
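/*
 * Usage sketch (editorial addition): the ternarylogic macros above evaluate
 * any 3-input boolean function chosen by the 8-bit truth table `imm`; the
 * bit at index (a<<2)|(b<<1)|c of imm gives the result for inputs (a,b,c),
 * so 0xCA encodes the bitwise select a ? b : c. Hypothetical helper;
 * assumes -mavx512vl.
 */
static inline __m256i demo_bitwise_select(__m256i mask, __m256i t, __m256i f)
{
  /* One VPTERNLOGD replaces the usual and/andnot/or three-instruction blend. */
  return _mm256_ternarylogic_epi32(mask, t, f, 0xCA);
}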
/* [Truncated in this listing: the rsqrt14 reciprocal-square-root
   approximation family built on __builtin_ia32_rsqrt14p{d,s}{128,256}_mask,
   in unmasked, merge-masked and zero-masked forms (_mm_rsqrt14_pd and
   relatives).] */
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_broadcast_f32x4(__m128 __A)
{
  return (__m256)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
                                         0, 1, 2, 3, 0, 1, 2, 3);
}

/* [Truncated in this listing: _mm256_mask_broadcast_f32x4 and
   _mm256_maskz_broadcast_f32x4 via
   __builtin_ia32_selectps_256((__mmask8)__M, ...).] */

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcast_i32x4(__m128i __A)
{
  return (__m256i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
                                          0, 1, 2, 3, 0, 1, 2, 3);
}

/* [Truncated in this listing: the mask/maskz broadcast_i32x4 forms via
   __builtin_ia32_selectd_256((__mmask8)__M, ...).] */
/* [Truncated in this listing: likely the masked broadcast-scalar wrappers
   (_mm256_mask_broadcastsd_pd, _mm_mask_broadcastss_ps, the broadcastd and
   broadcastq integer forms, and their maskz counterparts), each selecting
   with __M over the broadcast result.] */
/*
 * [Truncated in this listing: the signed- and unsigned-saturating narrowing
 * conversions built on the __builtin_ia32_pmovs..._mask and
 * __builtin_ia32_pmovus..._mask builtins, i.e. _mm*_cvtsepi32_epi8/epi16,
 * _mm*_cvtsepi64_epi8/epi16/epi32, _mm*_cvtusepi32_epi8/epi16 and
 * _mm*_cvtusepi64_epi8/epi16/epi32, each with unmasked, mask, maskz and
 * mask_..._storeu_... forms; the store forms call the matching ...mem_mask
 * builtins with a pointer, the source vector and __M.]
 */
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtepi32_epi8 (__m128i __A)
{
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v4si)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1,
      2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7);
}

/* [Truncated in this listing: the mask/maskz/storeu forms via
   __builtin_ia32_pmovdb128_mask and __builtin_ia32_pmovdb128mem_mask.] */

static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtepi32_epi8 (__m256i __A)
{
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v8si)__A, __v8qi),
      (__v8qi){0, 0, 0, 0, 0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
      12, 13, 14, 15);
}

/* [Truncated in this listing: the mask/maskz/storeu forms via pmovdb256.] */

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtepi32_epi16 (__m128i __A)
{
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v4si)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1,
      2, 3, 4, 5, 6, 7);
}

/* [Truncated in this listing: the mask/maskz/storeu forms via pmovdw128.] */

static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtepi32_epi16 (__m256i __A)
{
  return (__m128i)__builtin_convertvector((__v8si)__A, __v8hi);
}

/* [Truncated in this listing: the mask/maskz/storeu forms via pmovdw256.] */

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtepi64_epi8 (__m128i __A)
{
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v2di)__A, __v2qi), (__v2qi){0, 0}, 0, 1, 2, 3,
      3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3);
}

/* [Truncated in this listing: the mask/maskz/storeu forms via pmovqb128.] */

static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtepi64_epi8 (__m256i __A)
{
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v4di)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1,
      2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7);
}

/* [Truncated in this listing: the mask/maskz/storeu forms via pmovqb256.] */

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtepi64_epi32 (__m128i __A)
{
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v2di)__A, __v2si), (__v2si){0, 0}, 0, 1, 2, 3);
}

/* [Truncated in this listing: the mask/maskz/storeu forms via pmovqd128.] */

static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtepi64_epi32 (__m256i __A)
{
  return (__m128i)__builtin_convertvector((__v4di)__A, __v4si);
}

/* [Truncated in this listing: _mm256_mask_cvtepi64_epi32 and
   _mm256_maskz_cvtepi64_epi32 via __builtin_ia32_selectd_128((__mmask8)__M,
   ...), plus the storeu form via pmovqd256mem_mask.] */

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtepi64_epi16 (__m128i __A)
{
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v2di)__A, __v2hi), (__v2hi){0, 0}, 0, 1, 2, 3,
      3, 3, 3, 3);
}

/* [Truncated in this listing: the mask/maskz/storeu forms via pmovqw128.] */

static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtepi64_epi16 (__m256i __A)
{
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v4di)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1,
      2, 3, 4, 5, 6, 7);
}

/* [Truncated in this listing: the mask/maskz/storeu forms via pmovqw256.] */
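/*
 * Usage sketch (editorial addition): the epi32-to-epi8 down-converts differ
 * in overflow behaviour: cvtepi32 truncates low bits, cvtsepi32 saturates
 * signed, cvtusepi32 saturates unsigned. Hypothetical helper; assumes
 * -mavx512vl.
 */
static inline void demo_downconvert(__m128i v /* 4 x i32 */)
{
  __m128i trunc = _mm_cvtepi32_epi8(v);   /* e.g. 300 -> 44 (mod 256) */
  __m128i ssat  = _mm_cvtsepi32_epi8(v);  /* e.g. 300 -> 127          */
  __m128i usat  = _mm_cvtusepi32_epi8(v); /* e.g. 300 -> 255          */
  (void)trunc; (void)ssat; (void)usat;
}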
#define _mm256_extractf32x4_ps(A, imm) \
  (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
                                               (int)(imm), \
                                               (__v4sf)_mm_undefined_ps(), \
                                               (__mmask8)-1)

#define _mm256_mask_extractf32x4_ps(W, U, A, imm) \
  (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
                                               (int)(imm), \
                                               (__v4sf)(__m128)(W), \
                                               (__mmask8)(U))

#define _mm256_maskz_extractf32x4_ps(U, A, imm) \
  (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
                                               (int)(imm), \
                                               (__v4sf)_mm_setzero_ps(), \
                                               (__mmask8)(U))

#define _mm256_extracti32x4_epi32(A, imm) \
  (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
                                                (int)(imm), \
                                                (__v4si)_mm_undefined_si128(), \
                                                (__mmask8)-1)

#define _mm256_mask_extracti32x4_epi32(W, U, A, imm) \
  (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
                                                (int)(imm), \
                                                (__v4si)(__m128i)(W), \
                                                (__mmask8)(U))

#define _mm256_maskz_extracti32x4_epi32(U, A, imm) \
  (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
                                                (int)(imm), \
                                                (__v4si)_mm_setzero_si128(), \
                                                (__mmask8)(U))

#define _mm256_insertf32x4(A, B, imm) \
  (__m256)__builtin_ia32_insertf32x4_256((__v8sf)(__m256)(A), \
                                         (__v4sf)(__m128)(B), (int)(imm))

#define _mm256_mask_insertf32x4(W, U, A, B, imm) \
  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                      (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \
                                      (__v8sf)(__m256)(W))

#define _mm256_maskz_insertf32x4(U, A, B, imm) \
  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                      (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \
                                      (__v8sf)_mm256_setzero_ps())

#define _mm256_inserti32x4(A, B, imm) \
  (__m256i)__builtin_ia32_inserti32x4_256((__v8si)(__m256i)(A), \
                                          (__v4si)(__m128i)(B), (int)(imm))

#define _mm256_mask_inserti32x4(W, U, A, B, imm) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                      (__v8si)_mm256_inserti32x4((A), (B), (imm)), \
                                      (__v8si)(__m256i)(W))

#define _mm256_maskz_inserti32x4(U, A, B, imm) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                      (__v8si)_mm256_inserti32x4((A), (B), (imm)), \
                                      (__v8si)_mm256_setzero_si256())

#define _mm_getmant_pd(A, B, C) \
  (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v2df)_mm_setzero_pd(), \
                                            (__mmask8)-1)

#define _mm_mask_getmant_pd(W, U, A, B, C) \
  (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v2df)(__m128d)(W), \
                                            (__mmask8)(U))

#define _mm_maskz_getmant_pd(U, A, B, C) \
  (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v2df)_mm_setzero_pd(), \
                                            (__mmask8)(U))

#define _mm256_getmant_pd(A, B, C) \
  (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v4df)_mm256_setzero_pd(), \
                                            (__mmask8)-1)

#define _mm256_mask_getmant_pd(W, U, A, B, C) \
  (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v4df)(__m256d)(W), \
                                            (__mmask8)(U))

#define _mm256_maskz_getmant_pd(U, A, B, C) \
  (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v4df)_mm256_setzero_pd(), \
                                            (__mmask8)(U))

#define _mm_getmant_ps(A, B, C) \
  (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
                                           (int)(((C)<<2) | (B)), \
                                           (__v4sf)_mm_setzero_ps(), \
                                           (__mmask8)-1)

#define _mm_mask_getmant_ps(W, U, A, B, C) \
  (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
                                           (int)(((C)<<2) | (B)), \
                                           (__v4sf)(__m128)(W), \
                                           (__mmask8)(U))

#define _mm_maskz_getmant_ps(U, A, B, C) \
  (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
                                           (int)(((C)<<2) | (B)), \
                                           (__v4sf)_mm_setzero_ps(), \
                                           (__mmask8)(U))

#define _mm256_getmant_ps(A, B, C) \
  (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
                                           (int)(((C)<<2) | (B)), \
                                           (__v8sf)_mm256_setzero_ps(), \
                                           (__mmask8)-1)

#define _mm256_mask_getmant_ps(W, U, A, B, C) \
  (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
                                           (int)(((C)<<2) | (B)), \
                                           (__v8sf)(__m256)(W), \
                                           (__mmask8)(U))

#define _mm256_maskz_getmant_ps(U, A, B, C) \
  (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
                                           (int)(((C)<<2) | (B)), \
                                           (__v8sf)_mm256_setzero_ps(), \
                                           (__mmask8)(U))

#define _mm_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \
  (__m128d)__builtin_ia32_gather3div2df((__v2df)(__m128d)(v1_old), \
                                        (double const *)(addr), \
                                        (__v2di)(__m128i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \
  (__m128i)__builtin_ia32_gather3div2di((__v2di)(__m128i)(v1_old), \
                                        (long long const *)(addr), \
                                        (__v2di)(__m128i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm256_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \
  (__m256d)__builtin_ia32_gather3div4df((__v4df)(__m256d)(v1_old), \
                                        (double const *)(addr), \
                                        (__v4di)(__m256i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm256_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \
  (__m256i)__builtin_ia32_gather3div4di((__v4di)(__m256i)(v1_old), \
                                        (long long const *)(addr), \
                                        (__v4di)(__m256i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
  (__m128)__builtin_ia32_gather3div4sf((__v4sf)(__m128)(v1_old), \
                                       (float const *)(addr), \
                                       (__v2di)(__m128i)(index), \
                                       (__mmask8)(mask), (int)(scale))

#define _mm_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
  (__m128i)__builtin_ia32_gather3div4si((__v4si)(__m128i)(v1_old), \
                                        (int const *)(addr), \
                                        (__v2di)(__m128i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm256_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
  (__m128)__builtin_ia32_gather3div8sf((__v4sf)(__m128)(v1_old), \
                                       (float const *)(addr), \
                                       (__v4di)(__m256i)(index), \
                                       (__mmask8)(mask), (int)(scale))

#define _mm256_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
  (__m128i)__builtin_ia32_gather3div8si((__v4si)(__m128i)(v1_old), \
                                        (int const *)(addr), \
                                        (__v4di)(__m256i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \
  (__m128d)__builtin_ia32_gather3siv2df((__v2df)(__m128d)(v1_old), \
                                        (double const *)(addr), \
                                        (__v4si)(__m128i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \
  (__m128i)__builtin_ia32_gather3siv2di((__v2di)(__m128i)(v1_old), \
                                        (long long const *)(addr), \
                                        (__v4si)(__m128i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm256_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \
  (__m256d)__builtin_ia32_gather3siv4df((__v4df)(__m256d)(v1_old), \
                                        (double const *)(addr), \
                                        (__v4si)(__m128i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm256_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \
  (__m256i)__builtin_ia32_gather3siv4di((__v4di)(__m256i)(v1_old), \
                                        (long long const *)(addr), \
                                        (__v4si)(__m128i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
  (__m128)__builtin_ia32_gather3siv4sf((__v4sf)(__m128)(v1_old), \
                                       (float const *)(addr), \
                                       (__v4si)(__m128i)(index), \
                                       (__mmask8)(mask), (int)(scale))

#define _mm_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
  (__m128i)__builtin_ia32_gather3siv4si((__v4si)(__m128i)(v1_old), \
                                        (int const *)(addr), \
                                        (__v4si)(__m128i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm256_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
  (__m256)__builtin_ia32_gather3siv8sf((__v8sf)(__m256)(v1_old), \
                                       (float const *)(addr), \
                                       (__v8si)(__m256i)(index), \
                                       (__mmask8)(mask), (int)(scale))

#define _mm256_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
  (__m256i)__builtin_ia32_gather3siv8si((__v8si)(__m256i)(v1_old), \
                                        (int const *)(addr), \
                                        (__v8si)(__m256i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm256_permutex_pd(X, C) \
  (__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(X), (int)(C))

#define _mm256_mask_permutex_pd(W, U, X, C) \
  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                       (__v4df)_mm256_permutex_pd((X), (C)), \
                                       (__v4df)(__m256d)(W))

#define _mm256_maskz_permutex_pd(U, X, C) \
  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                       (__v4df)_mm256_permutex_pd((X), (C)), \
                                       (__v4df)_mm256_setzero_pd())

#define _mm256_permutex_epi64(X, C) \
  (__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(X), (int)(C))

#define _mm256_mask_permutex_epi64(W, U, X, C) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                      (__v4di)_mm256_permutex_epi64((X), (C)), \
                                      (__v4di)(__m256i)(W))

#define _mm256_maskz_permutex_epi64(U, X, C) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                      (__v4di)_mm256_permutex_epi64((X), (C)), \
                                      (__v4di)_mm256_setzero_si256())

static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_permutexvar_pd (__m256i __X, __m256d __Y)
{
  return (__m256d)__builtin_ia32_permvardf256((__v4df)__Y, (__v4di)__X);
}
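/*
 * Usage sketch (editorial addition): the masked-gather macros above wrap
 * VGATHERDPS-class instructions; lanes with a 0 mask bit keep v1_old and
 * their memory access is suppressed. Hypothetical helper; assumes
 * -mavx512vl.
 */
static inline __m128 demo_gather4(const float *table, __m128i idx,
                                  __mmask8 m, __m128 old)
{
  /* scale = 4: indices are element indices into a float array. */
  return _mm_mmask_i32gather_ps(old, m, idx, table, 4);
}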
/* [Truncated in this listing: _mm256_mask_permutexvar_pd and
   _mm256_maskz_permutexvar_pd via __builtin_ia32_selectpd_256.] */

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_permutexvar_epi64 (__m256i __X, __m256i __Y)
{
  return (__m256i)__builtin_ia32_permvardi256((__v4di)__Y, (__v4di)__X);
}

/* [Truncated in this listing: the mask/maskz permutexvar_epi64 forms via
   __builtin_ia32_selectq_256((__mmask8)__M, ...).] */
#define _mm256_permutexvar_ps(A, B) _mm256_permutevar8x32_ps((B), (A))

/* [Truncated in this listing: _mm256_mask_permutexvar_ps and
   _mm256_maskz_permutexvar_ps via __builtin_ia32_selectps_256((__mmask8)__U,
   ...).] */

#define _mm256_permutexvar_epi32(A, B) _mm256_permutevar8x32_epi32((B), (A))

/* [Truncated in this listing: the mask/maskz permutexvar_epi32 forms via
   __builtin_ia32_selectd_256((__mmask8)__M, ...).] */
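/*
 * Usage sketch (editorial addition): _mm256_permutexvar_epi32 (VPERMD) takes
 * the index vector first and can move any dword to any lane, unlike in-lane
 * shuffles. Hypothetical helper; assumes -mavx512vl.
 */
static inline __m256i demo_reverse_epi32(__m256i x)
{
  const __m256i rev = _mm256_setr_epi32(7, 6, 5, 4, 3, 2, 1, 0);
  return _mm256_permutexvar_epi32(rev, x);
}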
#define _mm_alignr_epi32(A, B, imm) \
  (__m128i)__builtin_ia32_alignd128((__v4si)(__m128i)(A), \
                                    (__v4si)(__m128i)(B), (int)(imm))

#define _mm_mask_alignr_epi32(W, U, A, B, imm) \
  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                      (__v4si)_mm_alignr_epi32((A), (B), (imm)), \
                                      (__v4si)(__m128i)(W))

#define _mm_maskz_alignr_epi32(U, A, B, imm) \
  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                      (__v4si)_mm_alignr_epi32((A), (B), (imm)), \
                                      (__v4si)_mm_setzero_si128())

#define _mm256_alignr_epi32(A, B, imm) \
  (__m256i)__builtin_ia32_alignd256((__v8si)(__m256i)(A), \
                                    (__v8si)(__m256i)(B), (int)(imm))

#define _mm256_mask_alignr_epi32(W, U, A, B, imm) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                      (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \
                                      (__v8si)(__m256i)(W))

#define _mm256_maskz_alignr_epi32(U, A, B, imm) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                      (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \
                                      (__v8si)_mm256_setzero_si256())

#define _mm_alignr_epi64(A, B, imm) \
  (__m128i)__builtin_ia32_alignq128((__v2di)(__m128i)(A), \
                                    (__v2di)(__m128i)(B), (int)(imm))

#define _mm_mask_alignr_epi64(W, U, A, B, imm) \
  (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
                                      (__v2di)_mm_alignr_epi64((A), (B), (imm)), \
                                      (__v2di)(__m128i)(W))

#define _mm_maskz_alignr_epi64(U, A, B, imm) \
  (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
                                      (__v2di)_mm_alignr_epi64((A), (B), (imm)), \
                                      (__v2di)_mm_setzero_si128())

#define _mm256_alignr_epi64(A, B, imm) \
  (__m256i)__builtin_ia32_alignq256((__v4di)(__m256i)(A), \
                                    (__v4di)(__m256i)(B), (int)(imm))

#define _mm256_mask_alignr_epi64(W, U, A, B, imm) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                      (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \
                                      (__v4di)(__m256i)(W))

#define _mm256_maskz_alignr_epi64(U, A, B, imm) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                      (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \
                                      (__v4di)_mm256_setzero_si256())
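/*
 * Usage sketch (editorial addition): valignd concatenates A:B (A high) and
 * shifts right by imm 32-bit elements, giving a cross-lane "sliding window"
 * that AVX2's palignr cannot express. Hypothetical helper; assumes
 * -mavx512vl.
 */
static inline __m256i demo_shift_in_epi32(__m256i hi, __m256i lo)
{
  /* Result lanes 0..6 come from lo lanes 1..7; lane 7 comes from hi lane 0. */
  return _mm256_alignr_epi32(hi, lo, 1);
}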
/* [Truncated in this listing: likely the masked movehdup/moveldup wrappers,
   via __builtin_ia32_selectps_{128,256}((__mmask8)__U, ...).] */
#define _mm256_mask_shuffle_epi32(W, U, A, I) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                      (__v8si)_mm256_shuffle_epi32((A), (I)), \
                                      (__v8si)(__m256i)(W))

#define _mm256_maskz_shuffle_epi32(U, A, I) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                      (__v8si)_mm256_shuffle_epi32((A), (I)), \
                                      (__v8si)_mm256_setzero_si256())

#define _mm_mask_shuffle_epi32(W, U, A, I) \
  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                      (__v4si)_mm_shuffle_epi32((A), (I)), \
                                      (__v4si)(__m128i)(W))

#define _mm_maskz_shuffle_epi32(U, A, I) \
  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                      (__v4si)_mm_shuffle_epi32((A), (I)), \
                                      (__v4si)_mm_setzero_si128())
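/*
 * Usage sketch (editorial addition): the masked shuffle_epi32 macros merge a
 * PSHUFD-style dword shuffle under a writemask in a single instruction.
 * Hypothetical helper; assumes -mavx512vl.
 */
static inline __m128i demo_masked_bcast_lane0(__m128i w, __mmask8 u, __m128i a)
{
  /* Broadcast lane 0 of a to all lanes; keep w where the mask bit is 0. */
  return _mm_mask_shuffle_epi32(w, u, a, _MM_SHUFFLE(0, 0, 0, 0));
}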
/* [Truncated in this listing: likely the masked mov_pd/mov_ps blend
   wrappers, via __builtin_ia32_selectp{d,s}_{128,256}((__mmask8)__U, ...).] */
/* [Truncated in this listing: _mm_mask_cvtph_ps, _mm_maskz_cvtph_ps,
   _mm256_mask_cvtph_ps and _mm256_maskz_cvtph_ps, built on
   __builtin_ia32_vcvtph2ps_mask and __builtin_ia32_vcvtph2ps256_mask.] */
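/*
 * Usage sketch (editorial addition): the masked cvtph_ps wrappers summarized
 * above, together with the cvt_roundps_ph macros that follow, round-trip
 * between float and IEEE half precision. Hypothetical helper; assumes
 * -mavx512vl (the unmasked conversion below is the older F16C intrinsic).
 */
static inline __m128i demo_float_to_half(__m256 x)
{
  /* Convert 8 floats to 8 halves, rounding to nearest even. */
  return _mm256_cvtps_ph(x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}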
#define _mm_mask_cvt_roundps_ph(W, U, A, I) \
  (__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
                                         (__v8hi)(__m128i)(W), \
                                         (__mmask8)(U))

#define _mm_maskz_cvt_roundps_ph(U, A, I) \
  (__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
                                         (__v8hi)_mm_setzero_si128(), \
                                         (__mmask8)(U))

/* [Gap in this listing between the 128-bit and 256-bit cvt_roundps_ph
   macros.] */

#define _mm256_mask_cvt_roundps_ph(W, U, A, I) \
  (__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
                                            (__v8hi)(__m128i)(W), \
                                            (__mmask8)(U))

#define _mm256_maskz_cvt_roundps_ph(U, A, I) \
  (__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
                                            (__v8hi)_mm_setzero_si128(), \
                                            (__mmask8)(U))

#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256

#endif /* __AVX512VLINTRIN_H */
Compares the corresponding elements of two 128-bit vectors of [4 x i32] and returns a 128-bit vector ...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_min_epu32(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_mask_testn_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_compress_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtusepi32_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_loadu_epi32(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_fmaddsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_expandloadu_ps(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_unpacklo_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask2_permutex2var_epi32(__m256i __A, __m256i __I, __mmask8 __U, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_abs_epi64(__m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_scalef_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_andnot_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_compress_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi32(__m128i __a, int __count)
Left-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits...
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu32_ps(__mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvttpd_epu32(__m128i __W, __mmask8 __U, __m256d __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_mul_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_xor_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_fmsubadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_compressstoreu_epi32(void *__P, __mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_rorv_epi64(__m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_min_epi64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_getexp_pd(__m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sllv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_compress_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_compressstoreu_ps(void *__P, __mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvttps_epu32(__m256i __W, __mmask8 __U, __m256 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_min_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srav_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_sub_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtusepi64_epi8(__m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_fmaddsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_permutex2var_ps(__m256 __A, __m256i __I, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_max_epi64(__mmask8 __M, __m128i __A, __m128i __B)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_permutevar_pd(__m256d __a, __m256i __c)
Copies the values in a 256-bit vector of [4 x double] as specified by the 256-bit integer vector oper...
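A minimal sketch with hypothetical values; only bit 1 of each 64-bit control element is consulted, and selection stays within the same 128-bit lane. Assumes AVX support (e.g. -mavx).

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256d a = _mm256_setr_pd(1.0, 2.0, 3.0, 4.0);
    /* bit 1 clear picks the lane's even element, bit 1 set picks the odd one */
    __m256i c = _mm256_setr_epi64x(2, 0, 2, 0);
    __m256d r = _mm256_permutevar_pd(a, c);
    double out[4];
    _mm256_storeu_pd(out, r);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 2 1 4 3 */
    return 0;
}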
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_min_epu32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_set1_epi32(__mmask8 __M, int __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_load_epi32(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtepi64_epi16(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_min_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepu16_epi64(__m128i __V)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_setzero_ps(void)
Constructs a 256-bit floating-point vector of [8 x float] with all vector elements initialized to zer...
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_pd(__m256d __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_set1_epi32(__mmask8 __M, int __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_rolv_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_unpacklo_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_getexp_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_min_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_div_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu32_epi64(__m256i __W, __mmask8 __U, __m128i __X)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_loadu_epi32(__mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_rcp14_ps(__m128 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_expandloadu_ps(__m256 __W, __mmask8 __U, void const *__P)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_storeu_epi32(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_abs_epi32(__m256i __a)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_srav_epi64(__m128i __X, __m128i __Y)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_mask_testn_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_moveldup_ps(__mmask8 __U, __m256 __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_mask_test_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_unpackhi_ps(__m128 __a, __m128 __b)
Unpacks the high-order (index 2,3) values from two 128-bit vectors of [4 x float] and interleaves the...
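A minimal sketch of the interleave pattern, with hypothetical values; the result is { a2, b2, a3, b3 }. Assumes SSE support.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128 a = _mm_setr_ps(0.0f, 1.0f, 2.0f, 3.0f);
    __m128 b = _mm_setr_ps(4.0f, 5.0f, 6.0f, 7.0f);
    __m128 r = _mm_unpackhi_ps(a, b);  /* -> { a2, b2, a3, b3 } */
    float out[4];
    _mm_storeu_ps(out, r);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 2 6 3 7 */
    return 0;
}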
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_andnot_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srlv_epi32(__m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_abs_epi64(__m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_unpacklo_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtusepi32_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_fmsubadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_broadcastd_epi32(__mmask8 __M, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_broadcast_f32x4(__m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_unpacklo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_andnot_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_add_epi32(__mmask8 __U, __m256i __A, __m256i __B)
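Most AVX-512VL entries in this list come in a _mask_ (merge) and a _maskz_ (zero) form; a minimal sketch of the convention with hypothetical values, assuming a compiler with AVX-512VL enabled (e.g. -mavx512vl):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256i a = _mm256_set1_epi32(10);
    __m256i b = _mm256_set1_epi32(1);
    __m256i w = _mm256_set1_epi32(-1);
    __mmask8 k = 0x0F;  /* operate on the low four lanes only */
    /* maskz_: lanes with a clear mask bit are zeroed */
    __m256i z = _mm256_maskz_add_epi32(k, a, b);   /* {11,11,11,11, 0, 0, 0, 0} */
    /* mask_: lanes with a clear mask bit are copied from the __W operand */
    __m256i m = _mm256_mask_add_epi32(w, k, a, b); /* {11,11,11,11,-1,-1,-1,-1} */
    int zo[8], mo[8];
    _mm256_storeu_si256((__m256i *)zo, z);
    _mm256_storeu_si256((__m256i *)mo, m);
    for (int i = 0; i < 8; i++) printf("%d/%d ", zo[i], mo[i]);
    printf("\n");
    return 0;
}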
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_pd(__m128d __a)
Calculates the square root of each of the two values stored in a 128-bit vector of [2 x double]...
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_broadcastss_ps(__m256 __O, __mmask8 __M, __m128 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_rsqrt14_pd(__m256d __A)
#define _mm_mask_cmpneq_epi64_mask(k, A, B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_ps(__m128 __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi32(__m128i __a, __m128i __count)
Left-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_rorv_epi32(__m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mullo_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_unpacklo_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_add_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_min_ps(__m128 __a, __m128 __b)
Compares two 128-bit vectors of [4 x float] and returns the lesser of each pair of values...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_load_epi64(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu16_epi32(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_broadcastd_epi32(__m256i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_loadu_epi64(__mmask8 __U, void const *__P)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_moveldup_ps(__m256 __a)
Moves and duplicates even-indexed values from a 256-bit vector of [8 x float] to float values in a 25...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sllv_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtepi32_pd(__m128i __a)
Converts the lower two integer elements of a 128-bit vector of [4 x i32] into two double-precision fl...
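A minimal sketch with hypothetical values; the upper two source lanes are ignored. Assumes SSE2 support.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128i v = _mm_setr_epi32(-3, 7, 100, 200); /* 100 and 200 are ignored */
    __m128d d = _mm_cvtepi32_pd(v);              /* -> { -3.0, 7.0 } */
    double out[2];
    _mm_storeu_pd(out, d);
    printf("%g %g\n", out[0], out[1]);
    return 0;
}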
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_permutevar_ps(__mmask8 __U, __m256 __A, __m256i __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_permutexvar_epi32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi32(__m128i __a, int __count)
Right-shifts each of the 32-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_add_ps(__m128 __a, __m128 __b)
Adds two 128-bit vectors of [4 x float], and returns the results of the addition. ...
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_expand_ps(__mmask8 __U, __m256 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_unpacklo_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_loadu_pd(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi64(__m128i __V)
Sign-extends each of the lower two 16-bit integer elements of a 128-bit integer vector of [8 x i16] t...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi32_epi8(__mmask8 __M, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_store_ps(void *__P, __mmask8 __U, __m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi64x(long long __q)
Initializes both values in a 128-bit integer vector with the specified 64-bit integer value...
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask2_permutex2var_ps(__m128 __A, __m128i __I, __mmask8 __U, __m128 __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_permutex2var_ps(__mmask8 __U, __m256 __A, __m256i __I, __m256 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_cvtps_pd(__mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtpd_epi32(__mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srl_epi64(__m256i __a, __m128i __count)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_cvtps_pd(__m256d __W, __mmask8 __U, __m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_getexp_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtusepi64_epi8(__mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_min_epi64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sub_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_unpacklo_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_blend_epi32(__mmask8 __U, __m256i __A, __m256i __W)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_rsqrt14_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_getexp_ps(__m256 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtps_pd(__mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_abs_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_scalef_pd(__m256d __A, __m256d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_compressstoreu_pd(void *__P, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_permutex2var_epi64(__m128i __A, __mmask8 __U, __m128i __I, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_broadcast_i32x4(__m256i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_sqrt_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_expandloadu_ps(__m128 __W, __mmask8 __U, void const *__P)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtpd_epu32(__m256d __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi32_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_rolv_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
#define _mm256_cmpeq_epi64_mask(A, B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_store_ps(void *__P, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi8_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtsepi32_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu32_ps(__m256 __W, __mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_set1_epi64(__m256i __O, __mmask8 __M, long long __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_mul_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi16_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_loadu_epi32(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_storeu_pd(void *__P, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtsepi64_epi32(__mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi32(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [4 x i32], saving the lower 32 bits of each...
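A minimal sketch with hypothetical values, showing that keeping only the low 32 bits of each sum means the addition wraps rather than saturates. Assumes SSE2 support.

#include <immintrin.h>
#include <limits.h>
#include <stdio.h>

int main(void) {
    __m128i a = _mm_setr_epi32(1, 2, INT_MAX, -1);
    __m128i b = _mm_setr_epi32(10, 20, 1, 1);
    __m128i r = _mm_add_epi32(a, b);  /* INT_MAX + 1 wraps to INT_MIN */
    int out[4];
    _mm_storeu_si128((__m128i *)out, r);
    printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* 11 22 -2147483648 0 */
    return 0;
}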
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mov_epi64(__mmask8 __U, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mul_epu32(__mmask8 __M, __m256i __X, __m256i __Y)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_mul_ps(__m256 __a, __m256 __b)
Multiplies two 256-bit vectors of [8 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_blend_ps(__mmask8 __U, __m128 __A, __m128 __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_unpackhi_epi64(__m256i __a, __m256i __b)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_max_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_mov_pd(__mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_unpacklo_epi32(__m256i __a, __m256i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_expand_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_load_pd(__m256d __W, __mmask8 __U, void const *__P)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu32(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [4 x u32] and returns a 128-bit vector ...
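A minimal sketch with hypothetical values, highlighting the unsigned interpretation: a bit pattern of -1 is 0xFFFFFFFF as a [4 x u32] lane, so the unsigned max prefers it, where the signed _mm_max_epi32 would not. Assumes SSE4.1 support.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128i a = _mm_setr_epi32(-1, 5, 10, 0);
    __m128i b = _mm_setr_epi32(1, 6, 3, 0);
    __m128i r = _mm_max_epu32(a, b);
    unsigned out[4];
    _mm_storeu_si128((__m128i *)out, r);
    printf("%u %u %u %u\n", out[0], out[1], out[2], out[3]); /* 4294967295 6 10 0 */
    return 0;
}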
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_set1_epi32(__m256i __O, __mmask8 __M, int __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_div_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_fmsubadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtps_epi32(__m128i __W, __mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_min_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi64(__m128i __a, __m128i __b)
Subtracts the corresponding elements of two [2 x i64] vectors.
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_unpackhi_pd(__m256d __a, __m256d __b)
Unpacks the odd-indexed vector elements from two 256-bit vectors of [4 x double] and interleaves them...
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_scalef_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi32_pd(__mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask3_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvttps_epi32(__mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mullo_epi32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sub_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtps_epu32(__m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi64(__m128i __a, int __count)
Right-shifts each of the 64-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_abs_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_permutex2var_epi32(__m256i __A, __mmask8 __U, __m256i __I, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_max_epu64(__m128i __A, __m128i __B)
#define _mm256_mask_cmpeq_epi64_mask(k, A, B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_fmaddsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_max_epu64(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtusepi64_epi8(__m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_unpacklo_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_unpackhi_ps(__m256 __a, __m256 __b)
Unpacks the 32-bit vector elements 2, 3, 6 and 7 from each of the two 256-bit vectors of [8 x float] ...
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_rorv_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
#define _mm256_cmpneq_epi64_mask(A, B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_broadcastss_ps(__m128 __O, __mmask8 __M, __m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_unpacklo_ps(__m128 __a, __m128 __b)
Unpacks the low-order (index 0,1) values from two 128-bit vectors of [4 x float] and interleaves them...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpacklo_pd(__m128d __a, __m128d __b)
Unpacks the low-order 64-bit elements from two 128-bit vectors of [2 x double] and interleaves them i...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_permutex2var_epi64(__m256i __A, __mmask8 __U, __m256i __I, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_or_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mul_epi32(__mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mov_epi32(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_cvtph_ps(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srai_epi64(__m256i __A, int __imm)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mullo_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_add_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask2_permutex2var_ps(__m256 __A, __m256i __I, __mmask8 __U, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_testn_epi32_mask(__m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_div_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_loadu_ps(__m128 __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_unpackhi_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvttpd_epi32(__m128i __W, __mmask8 __U, __m128d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_rsqrt14_pd(__m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask2_permutex2var_epi64(__m256i __A, __m256i __I, __mmask8 __U, __m256i __B)
static __inline __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtps_ph(__m128i __W, __mmask8 __U, __m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_rcp14_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sllv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_max_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srai_epi32(__m256i __a, int __count)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi32(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [4 x i32] and returns a 128-bit vector ...
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_fmsubadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi32_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_max_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_sub_ps(__m128 __a, __m128 __b)
Subtracts each of the values of the second operand from the first operand, both of which are 128-bit ...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_max_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_min_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_min_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_moveldup_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_unpackhi_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi32_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sub_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_and_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtsepi32_epi8(__mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi32(__m128i __a, __m128i __b)
Unpacks the high-order (index 2,3) values from two 128-bit vectors of [4 x i32] and interleaves them ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_abs_epi32(__m128i __a)
Computes the absolute value of each of the packed 32-bit signed integers in the source operand and st...
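A minimal sketch with hypothetical values, including the usual two's-complement caveat that abs(INT_MIN) wraps back to INT_MIN, since +2147483648 is not representable in 32 bits. Assumes SSSE3 support.

#include <immintrin.h>
#include <limits.h>
#include <stdio.h>

int main(void) {
    __m128i v = _mm_setr_epi32(-5, 7, INT_MIN, 0);
    __m128i r = _mm_abs_epi32(v);
    int out[4];
    _mm_storeu_si128((__m128i *)out, r);
    printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* 5 7 -2147483648 0 */
    return 0;
}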
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sll_epi32(__m256i __a, __m128i __count)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_xor_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_mov_pd(__m128d __W, __mmask8 __U, __m128d __A)
#define _mm256_cmpeq_epi32_mask(A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srav_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi64(__m128i __V)
Zero-extends each of the lower two 16-bit integer elements of a 128-bit integer vector of [8 x i16] t...
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtepi64_epi32(__m256i __A)
#define _mm256_mask_cmpneq_epi64_mask(k, A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srl_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtps_epu32(__mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sub_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_unpackhi_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mul_epu32(__mmask8 __M, __m128i __X, __m128i __Y)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_moveldup_ps(__m128 __a)
Duplicates even-indexed values from a 128-bit vector of [4 x float] to float values stored in a 128-b...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_permutexvar_epi32(__mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_srai_epi64(__m128i __A, int __imm)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsepi64_epi8(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, int __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_store_epi32(void *__P, __mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, int __B)
static __inline __m128i __DEFAULT_FN_ATTRS _mm256_cvtpd_epi32(__m256d __a)
Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi32_epi64(__m128i __V)
Sign-extends each of the lower two 32-bit integer elements of a 128-bit integer vector of [4 x i32] t...
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask3_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu32_pd(__m256d __W, __mmask8 __U, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttpd_epu32(__m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvttpd_epu32(__mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_unpacklo_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_add_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_mov_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtpd_ps(__mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sra_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_rsqrt14_ps(__m256 __A)
#define _mm_cmpneq_epi32_mask(A, B)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set1_epi64x(long long __q)
Constructs a 256-bit integer vector of [4 x i64], with each of the 64-bit integral vector elements se...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_srlv_epi64(__m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvttpd_epi32(__mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_rcp14_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_loadu_pd(__m128d __W, __mmask8 __U, void const *__P)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_fmsubadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_cvtepu32_ps(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_sllv_epi64(__m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srl_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtusepi64_epi16(__m128i __A)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_sqrt_pd(__m256d __a)
Calculates the square roots of the values in a 256-bit vector of [4 x double].
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srli_epi64(__m256i __a, int __count)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sll_epi64(__m256i __a, __m128i __count)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_broadcast_f32x4(__mmask8 __M, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_loadu_epi64(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_permutex2var_epi32(__mmask8 __U, __m256i __A, __m256i __I, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_cvtps_pd(__m128d __W, __mmask8 __U, __m128 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi32_epi8(__mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_expand_epi64(__mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srl_epi32(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mul_epu32(__m256i __a, __m256i __b)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_loadu_epi32(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srav_epi64(__m256i __X, __m256i __Y)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_sub_pd(__m256d __a, __m256d __b)
Subtracts two 256-bit vectors of [4 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtusepi64_epi32(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_min_epi64(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi64_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_or_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_and_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_abs_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_max_ps(__m128 __a, __m128 __b)
Compares two 128-bit vectors of [4 x float] and returns the greater of each pair of values...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_rorv_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_unpackhi_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sll_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_sub_ps(__m256 __a, __m256 __b)
Subtracts two 256-bit vectors of [8 x float].
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_broadcastq_epi64(__mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_add_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_xor_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_cvtepu32_ps(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_min_epu64(__m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask2_permutex2var_pd(__m128d __A, __m128i __I, __mmask8 __U, __m128d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_expand_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_min_epi64(__m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_scalef_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_load_pd(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_unpacklo_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvttps_epu32(__mmask8 __U, __m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_min_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_expandloadu_epi64(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_max_epi64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_compress_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi64_epi8(__mmask8 __M, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_mul_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_rolv_epi64(__m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_fmaddsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtsepi32_epi16(__m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepi32_epi64(__m128i __V)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_rolv_epi32(__m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_mul_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi64(__m128i __a, __m128i __count)
Left-shifts each 64-bit value in the 128-bit integer vector operand by the specified number of bits...
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_max_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_mul_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srlv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_compress_ps(__mmask8 __U, __m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_blend_pd(__mmask8 __U, __m128d __A, __m128d __W)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_permutevar_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256i __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_div_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_mask_testn_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi32(__m128i __V1, __m128i __V2)
Multiplies corresponding elements of two 128-bit vectors of [4 x i32] and returns the lower 32 bits of...
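A minimal sketch with hypothetical values; the full product is 64 bits wide, so keeping only the low 32 bits makes large products wrap modulo 2^32. Assumes SSE4.1 support.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128i a = _mm_setr_epi32(3, 100000, -2, 7);
    __m128i b = _mm_setr_epi32(4, 100000, 5, 0);
    __m128i r = _mm_mullo_epi32(a, b);  /* 100000*100000 wraps mod 2^32 */
    int out[4];
    _mm_storeu_si128((__m128i *)out, r);
    printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* 12 1410065408 -10 0 */
    return 0;
}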
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_broadcastss_ps(__mmask8 __M, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_unpackhi_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_min_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_sub_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi32_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi8_epi32(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sra_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtusepi64_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_add_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_expand_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sll_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsepi64_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_getexp_pd(__m256d __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi32_ps(__mmask8 __U, __m256i __A)
static __inline __m128i __DEFAULT_FN_ATTRS _mm256_cvttpd_epi32(__m256d __a)
Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 x i32], truncating the result b...
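A minimal sketch with hypothetical values, contrasting this truncating form with the _mm256_cvtpd_epi32 entry listed above, which uses the current rounding mode (round-to-nearest-even by default). Assumes AVX support.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256d v = _mm256_setr_pd(1.5, 2.5, -1.5, -2.5);
    __m128i rounded   = _mm256_cvtpd_epi32(v);   /* -> { 2, 2, -2, -2 } */
    __m128i truncated = _mm256_cvttpd_epi32(v);  /* -> { 1, 2, -1, -2 } */
    int r[4], t[4];
    _mm_storeu_si128((__m128i *)r, rounded);
    _mm_storeu_si128((__m128i *)t, truncated);
    printf("%d %d %d %d / %d %d %d %d\n",
           r[0], r[1], r[2], r[3], t[0], t[1], t[2], t[3]);
    return 0;
}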
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_sqrt_ps(__m128 __a)
Calculates the square roots of the values stored in a 128-bit vector of [4 x float].
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu16_epi64(__m256i __W, __mmask8 __U, __m128i __A)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_add_ps(__m256 __a, __m256 __b)
Adds two 256-bit vectors of [8 x float].
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtusepi32_epi16(__m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_rsqrt14_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_permutevar_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256i __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_expand_ps(__mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_unpacklo_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_set1_epi32(__m128i __O, __mmask8 __M, int __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_expandloadu_pd(__m256d __W, __mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_scalef_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtusepi64_epi32(__m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi16_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_blend_ps(__mmask8 __U, __m256 __A, __m256 __W)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_max_epi64(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_andnot_si256(__m256i __a, __m256i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_min_epi32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask3_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_movehdup_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepi16_epi64(__m128i __V)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_srlv_epi32(__m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srl_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sll_epi64(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sub_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_andnot_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_unpacklo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_rorv_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_unpackhi_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sll_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtusepi32_epi16(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_andnot_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepu16_epi32(__m128i __V)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_set1_epi64(__mmask8 __M, long long __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_broadcastq_epi64(__m128i __X)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepi8_epi64(__m128i __V)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srl_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_broadcastss_ps(__m128 __X)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi32(__m128i __a, int __count)
Right-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi32(__m128i __a, __m128i __b)
Subtracts the corresponding 32-bit integer values in the operands.
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mov_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_or_si256(__m256i __a, __m256i __b)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi32_pd(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_xor_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_storeu_epi32(void *__P, __mmask8 __M, __m256i __A)
#define _mm_mask_cmpeq_epi64_mask(k, A, B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi32_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtpd_epu32(__mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtusepi64_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_abs_epi64(__mmask8 __U, __m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_expand_pd(__mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_permutex2var_epi64(__mmask8 __U, __m256i __A, __m256i __I, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_permutex2var_pd(__m256d __A, __mmask8 __U, __m256i __I, __m256d __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_permutexvar_ps(__m256 __W, __mmask8 __U, __m256i __X, __m256 __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_compress_epi32(__mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_rorv_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_storeu_epi64(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_unpackhi_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_add_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_compressstoreu_epi32(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_broadcastsd_pd(__m256d __O, __mmask8 __M, __m128d __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_sub_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_min_ps(__m256 __a, __m256 __b)
Compares two 256-bit vectors of [8 x float] and returns the lesser of each pair of values...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mov_epi32(__mmask8 __U, __m256i __A)
typedef short __v2hi __attribute__((__vector_size__(4)))
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_max_epu64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_setzero_ps(void)
Constructs a 128-bit floating-point vector of [4 x float] initialized to zero.
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_min_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_unpacklo_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_store_epi64(void *__P, __mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_max_epi32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtps_epi32(__mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtps_epu32(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_movedup_pd(__m256d __a)
Moves and duplicates double-precision floating point values from a 256-bit vector of [4 x double] to ...
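A minimal sketch with hypothetical values; the even-indexed elements are duplicated into the following odd positions. Assumes AVX support.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256d v = _mm256_setr_pd(1.0, 2.0, 3.0, 4.0);
    __m256d r = _mm256_movedup_pd(v);  /* -> { 1, 1, 3, 3 } */
    double out[4];
    _mm256_storeu_pd(out, r);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
    return 0;
}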
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_rolv_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_permutex2var_ps(__mmask8 __U, __m128 __A, __m128i __I, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sub_epi64(__m256i __a, __m256i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_permutexvar_epi64(__mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_or_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mullo_epi32(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setzero_pd(void)
Constructs a 128-bit floating-point vector of [2 x double] initialized to zero.
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu32_pd(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsepi64_epi32(__mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtsepi64_epi32(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi32_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_expandloadu_epi32(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sub_pd(__m128d __a, __m128d __b)
Subtracts two 128-bit vectors of [2 x double].
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_unpackhi_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_permutex2var_ps(__m256 __A, __mmask8 __U, __m256i __I, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsepi64_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_permutevar_pd(__mmask8 __U, __m256d __A, __m256i __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_unpacklo_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline __m128 __DEFAULT_FN_ATTRS128 _mm_permutevar_ps(__m128 __a, __m128i __c)
Copies the values stored in a 128-bit vector of [4 x float] as specified by the 128-bit integer vecto...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtps_pd(__m128 __a)
Converts the lower two single-precision floating-point elements of a 128-bit vector of [4 x float] in...
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_cvtepu32_pd(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi64(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [2 x i64], saving the lower 64 bits of each...
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_div_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_min_epi64(__m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask3_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_mask_testn_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_sqrt_ps(__mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_permutex2var_epi64(__m256i __A, __m256i __I, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_max_epu32(__m256i __a, __m256i __b)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_rsqrt14_pd(__mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, int __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_fmsubadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srlv_epi64(__m256i __X, __m256i __Y)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi32_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srlv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_sra_epi64(__m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_expandloadu_ps(__mmask8 __U, void const *__P)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_add_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_expand_epi32(__mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttps_epi32(__m128 __a)
Converts a vector of [4 x float] into a vector of [4 x i32], truncating the result when it is inexact...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_sllv_epi32(__m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sllv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtpd_epi32(__mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi64(__m128i __a, int __count)
Left-shifts each 64-bit value in the 128-bit integer vector operand by the specified number of bits...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_broadcast_i32x4(__mmask8 __M, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_expandloadu_epi32(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_loadu_ps(__m256 __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mov_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epi32(__m128i __V1, __m128i __V2)
Multiplies corresponding even-indexed elements of two 128-bit vectors of [4 x i32] and returns a 128-...
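A minimal sketch with hypothetical values; only lanes 0 and 2 participate, and each signed 32x32 product is kept in full as a 64-bit result, so unlike _mm_mullo_epi32 it never wraps. Assumes SSE4.1 support.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128i a = _mm_setr_epi32(100000, 999, -3, 999);  /* lanes 1 and 3 ignored */
    __m128i b = _mm_setr_epi32(100000, 999, 7, 999);
    __m128i r = _mm_mul_epi32(a, b);  /* -> { 10000000000, -21 } as [2 x i64] */
    long long out[2];
    _mm_storeu_si128((__m128i *)out, r);
    printf("%lld %lld\n", out[0], out[1]);
    return 0;
}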
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_scalef_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mul_epi32(__m256i __a, __m256i __b)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_load_ps(__m128 __W, __mmask8 __U, void const *__P)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_add_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_mov_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_permutexvar_pd(__m256d __W, __mmask8 __U, __m256i __X, __m256d __Y)
#define _mm_mask_cmpeq_epi32_mask(k, A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsepi32_epi16(__m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepu8_epi64(__m128i __V)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_rsqrt14_ps(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsepi64_epi16(__m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_getexp_pd(__mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtepi32_epi8(__m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_add_pd(__m128d __a, __m128d __b)
Adds two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_rcp14_pd(__m128d __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_broadcastss_ps(__m128 __X)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi32_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_rolv_epi64(__m256i __A, __m256i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi32_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_unpackhi_epi32(__m256i __a, __m256i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_loadu_epi64(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_min_epi32(__m256i __a, __m256i __b)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_scalef_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi64(__m128i __V)
Zero-extends each of the lower two 8-bit integer elements of a 128-bit integer vector of [16 x i8] to...
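A minimal sketch with hypothetical values; only the two lowest bytes are used, and 0xFF zero-extends to 255, where the sign-extending _mm_cvtepi8_epi64 would give -1. Assumes SSE4.1 support.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128i v = _mm_setr_epi8((char)0xFF, 2, 3, 4, 5, 6, 7, 8,
                              9, 10, 11, 12, 13, 14, 15, 16);
    __m128i r = _mm_cvtepu8_epi64(v);  /* -> { 255, 2 } as [2 x i64] */
    long long out[2];
    _mm_storeu_si128((__m128i *)out, r);
    printf("%lld %lld\n", out[0], out[1]);
    return 0;
}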
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_expandloadu_pd(__m128d __W, __mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_unpacklo_epi64(__m256i __a, __m256i __b)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_compress_pd(__mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mov_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_permutevar_ps(__mmask8 __U, __m128 __A, __m128i __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_test_epi32_mask(__m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline __m128 __DEFAULT_FN_ATTRS _mm256_cvtpd_ps(__m256d __a)
Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 x float].
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtph_ps(__mmask8 __U, __m128i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_test_epi64_mask(__m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_and_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
#define __DEFAULT_FN_ATTRS256
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_min_epu64(__m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu32_pd(__m128d __W, __mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_broadcastss_ps(__mmask8 __M, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask2_permutex2var_epi32(__m128i __A, __m128i __I, __mmask8 __U, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_storeu_ps(void *__P, __mmask8 __U, __m256 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask3_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_permutex2var_ps(__m128 __A, __mmask8 __U, __m128i __I, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mullo_epi32(__m256i __a, __m256i __b)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srlv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi64_epi32(__mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtusepi64_epi16(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srlv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_div_pd(__m128d __a, __m128d __b)
Performs an element-by-element division of two 128-bit vectors of [2 x double].
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_andnot_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvttps_epi32(__mmask8 __U, __m256 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_min_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi64(__m128i __a, __m128i __b)
Unpacks the low-order 64-bit elements from two 128-bit vectors of [2 x i64] and interleaves them into a 128-bit vector of [2 x i64].
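Example (illustrative sketch, SSE2): the unpack pair on [2 x i64]; unpacklo takes element 0 of each source, unpackhi element 1.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128i a = _mm_set_epi64x(11, 10);  /* a = {10, 11}, low lane first */
    __m128i b = _mm_set_epi64x(21, 20);  /* b = {20, 21} */
    long long lo[2], hi[2];
    _mm_storeu_si128((__m128i *)lo, _mm_unpacklo_epi64(a, b)); /* {10, 20} */
    _mm_storeu_si128((__m128i *)hi, _mm_unpackhi_epi64(a, b)); /* {11, 21} */
    printf("lo: %lld %lld  hi: %lld %lld\n", lo[0], lo[1], hi[0], hi[1]);
    return 0;
}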
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_permutex2var_epi32(__m128i __A, __m128i __I, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_rsqrt14_ps(__m128 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_rcp14_ps(__m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtepi64_epi8(__m128i __A)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_unpacklo_pd(__m256d __a, __m256d __b)
Unpacks the even-indexed vector elements from two 256-bit vectors of [4 x double] and interleaves them into a 256-bit vector of [4 x double].
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_div_ps(__m256 __a, __m256 __b)
Divides two 256-bit vectors of [8 x float].
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_srav_epi32(__m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_or_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sra_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_permutex2var_pd(__m128d __A, __mmask8 __U, __m128i __I, __m128d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_expandloadu_pd(__mmask8 __U, void const *__P)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_max_ps(__m256 __a, __m256 __b)
Compares two 256-bit vectors of [8 x float] and returns the greater of each pair of values.
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsepi32_epi8(__m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_div_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpackhi_pd(__m128d __a, __m128d __b)
Unpacks the high-order 64-bit elements from two 128-bit vectors of [2 x double] and interleaves them into a 128-bit vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_expand_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_and_si256(__m256i __a, __m256i __b)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_loadu_ps(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srav_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_permutevar_pd(__mmask8 __U, __m128d __A, __m128i __C)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srlv_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sll_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_min_epu64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sllv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srlv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtepi64_epi8(__m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_store_epi32(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask2_permutex2var_epi64(__m128i __A, __m128i __I, __mmask8 __U, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_max_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_broadcastd_epi32(__m128i __X)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_cvtepi32_ps(__m256i __a)
Converts a vector of [8 x i32] into a vector of [8 x float].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi32(__m128i __V)
Zero-extends each of the lower four 16-bit integer elements of a 128-bit integer vector of [8 x i16] to 32-bit values and returns them in a 128-bit vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi32(__m128i __a, __m128i __count)
Right-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits; high-order bits are filled with the sign bit of the initial value.
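Example (illustrative sketch, SSE2) contrasting the arithmetic shift with the logical _mm_srl_epi32 listed below: one preserves the sign, the other shifts in zeros.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128i v = _mm_set1_epi32(-8);        /* 0xFFFFFFF8 in every lane */
    __m128i n = _mm_cvtsi32_si128(2);      /* shift count in the low 64 bits */
    int a[4], l[4];
    _mm_storeu_si128((__m128i *)a, _mm_sra_epi32(v, n)); /* arithmetic: -2 */
    _mm_storeu_si128((__m128i *)l, _mm_srl_epi32(v, n)); /* logical: 0x3FFFFFFE */
    printf("sra: %d  srl: 0x%X\n", a[0], (unsigned)l[0]);
    return 0;
}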
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_storeu_epi32(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_and_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_permutex2var_epi64(__m128i __A, __m128i __I, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvttps_epu32(__mmask8 __U, __m256 __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_mask_test_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_fmaddsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_rolv_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_permutex2var_pd(__mmask8 __U, __m128d __A, __m128i __I, __m128d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sra_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_expand_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu32_ps(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srav_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_loadu_epi64(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_sqrt_ps(__mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_or_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_pd(__m128d __W, __mmask8 __U, __m128i __A)
#define _mm256_mask_cmpeq_epi32_mask(k, A, B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_movedup_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_rorv_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi16_epi32(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_slli_epi64(__m256i __a, int __count)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_expand_pd(__mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_rorv_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mul_epi32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_getexp_pd(__mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sllv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, int __imm)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_load_epi64(__mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_mov_ps(__mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_load_epi64(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_moveldup_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtps_ph(__mmask8 __U, __m256 __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_store_pd(void *__P, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsepi64_epi32(__m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sub_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi64(__m128i __V)
Sign-extends each of the lower two 8-bit integer elements of a 128-bit integer vector of [16 x i8] to 64-bit values and returns them in a 128-bit vector of [2 x i64].
#define _mm256_permutexvar_epi32(A, B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_abs_epi32(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_permutexvar_pd(__m256i __X, __m256d __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi8_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_and_si128(__m128i __a, __m128i __b)
Performs a bitwise AND of two 128-bit integer vectors.
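Example (illustrative): plain _mm_and_si128 next to the AVX512VL zero-masked _mm_maskz_and_epi32 from this header; a sketch assuming -mavx512vl.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128i a = _mm_set1_epi32(0x0FF0);
    __m128i b = _mm_set1_epi32(0x00FF);
    int full[4], part[4];
    _mm_storeu_si128((__m128i *)full, _mm_and_si128(a, b));            /* 0x00F0 x4 */
    _mm_storeu_si128((__m128i *)part, _mm_maskz_and_epi32(0x5, a, b)); /* lanes 0,2 only */
    printf("0x%X | 0x%X 0x%X 0x%X 0x%X\n", (unsigned)full[0], (unsigned)part[0],
           (unsigned)part[1], (unsigned)part[2], (unsigned)part[3]);
    return 0;
}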
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_expandloadu_epi32(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepu32_epi64(__m128i __V)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtps_epi32(__m256i __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_and_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_expandloadu_epi64(__m128i __W, __mmask8 __U, void const *__P)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_div_pd(__m256d __a, __m256d __b)
Divides two 256-bit vectors of [4 x double].
#define _mm256_permutexvar_ps(A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_max_epi64(__m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_unpacklo_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_add_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_sub_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srlv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srav_epi32(__m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepi16_epi32(__m128i __V)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __imm)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_max_epi64(__m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_min_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi32(__m128i __a, __m128i __count)
Right-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits; high-order bits are cleared.
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_sub_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi32(__m128i __a, __m128i __b)
Unpacks the low-order (index 0,1) values from two 128-bit vectors of [4 x i32] and interleaves them into a 128-bit vector of [4 x i32].
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_testn_epi64_mask(__m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_load_epi32(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi64_epi32(__mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_epi64(__m128i __W, __mmask8 __U, __m128i __X)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_ps(__m256 __W, __mmask8 __U, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_load_ps(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi64_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_rolv_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_cvtph_ps(__m128 __W, __mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_moveldup_ps(__mmask8 __U, __m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_movehdup_ps(__mmask8 __U, __m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_rsqrt14_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_rsqrt14_pd(__mmask8 __U, __m128d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_pd(__m128d __a, __m128d __b)
Performs element-by-element comparison of the two 128-bit vectors of [2 x double] and returns the vector containing the lesser of each pair of values.
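Example (illustrative sketch, SSE2): _mm_min_pd and _mm_max_pd pick per lane; for NaN or signed-zero inputs the result follows the underlying MINPD/MAXPD rules, where the second operand is returned.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128d a = _mm_setr_pd(1.5, 9.0);
    __m128d b = _mm_setr_pd(2.5, 3.0);
    double mn[2], mx[2];
    _mm_storeu_pd(mn, _mm_min_pd(a, b));  /* {1.5, 3.0} */
    _mm_storeu_pd(mx, _mm_max_pd(a, b));  /* {2.5, 9.0} */
    printf("min: %g %g  max: %g %g\n", mn[0], mn[1], mx[0], mx[1]);
    return 0;
}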
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_sqrt_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtusepi32_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_compress_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtepi64_epi32(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_permutevar_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128i __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srl_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, int __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srl_epi32(__m256i __a, __m128i __count)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_load_pd(__mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_compress_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_add_epi32(__m256i __a, __m256i __b)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi8_epi64(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_and_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtsepi64_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_sqrt_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_rorv_epi32(__m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_compress_ps(__mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_expand_epi32(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_scalef_ps(__m256 __A, __m256 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_sqrt_pd(__mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, int __imm)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_mul_pd(__m256d __a, __m256d __b)
Multiplies two 256-bit vectors of [4 x double].
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_compressstoreu_epi64(void *__P, __mmask8 __U, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_compressstoreu_pd(void *__P, __mmask8 __U, __m256d __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mul_ps(__m128 __a, __m128 __b)
Multiplies two 128-bit vectors of [4 x float] and returns the results of the multiplication.
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_blend_epi32(__mmask8 __U, __m128i __A, __m128i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtpd_epu32(__m128i __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_broadcastq_epi64(__m128i __X)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtusepi64_epi32(__mmask8 __M, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_div_ps(__m128 __a, __m128 __b)
Divides two 128-bit vectors of [4 x float].
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_mov_ps(__mmask8 __U, __m256 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_rcp14_pd(__mmask8 __U, __m128d __A)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_sqrt_ps(__m256 __a)
Calculates the square roots of the values in a 256-bit vector of [8 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_permutex2var_ps(__m128 __A, __m128i __I, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_and_epi64(__mmask8 __U, __m128i __A, __m128i __B)
#define _mm_cmpeq_epi32_mask(A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtpd_epu32(__mmask8 __U, __m128d __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_broadcastsd_pd(__m128d __X)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_fmaddsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_permutex2var_epi64(__mmask8 __U, __m128i __A, __m128i __I, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvttpd_epi32(__m128i __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsepi32_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_blend_epi64(__mmask8 __U, __m256i __A, __m256i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setzero_si128(void)
Creates a 128-bit integer vector initialized to zero.
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtusepi32_epi8(__m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_slli_epi32(__m256i __a, int __count)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epu32(__m128i __a, __m128i __b)
Multiplies 32-bit unsigned integer values contained in the lower bits of the corresponding elements of two [2 x i64] vectors and returns the 64-bit products in the corresponding elements of a [2 x i64] vector.
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_add_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_rsqrt14_ps(__mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_compress_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_min_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
#define __DEFAULT_FN_ATTRS128
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_add_epi64(__m256i __a, __m256i __b)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi64(__m128i __a, __m128i __b)
Unpacks the high-order 64-bit elements from two 128-bit vectors of [2 x i64] and interleaves them int...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_rolv_epi32(__m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_max_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_xor_si256(__m256i __a, __m256i __b)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_broadcastq_epi64(__mmask8 __M, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_scalef_pd(__m128d __A, __m128d __B)
#define _MM_FROUND_CUR_DIRECTION
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_andnot_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_rcp14_ps(__mmask8 __U, __m256 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_getexp_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_unpackhi_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_load_pd(__m128d __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi32(int __i)
Initializes all values in a 128-bit vector of [4 x i32] with the specified 32-bit value.
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_add_pd(__m256d __a, __m256d __b)
Adds two 256-bit vectors of [4 x double].
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_broadcastsd_pd(__mmask8 __M, __m128d __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_sqrt_pd(__mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sub_epi32(__m256i __a, __m256i __b)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_storeu_epi32(void *__P, __mmask8 __M, __m256i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sllv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_movedup_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_unpackhi_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, int __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepu8_epi32(__m128i __V)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtps_epu32(__m128i __W, __mmask8 __U, __m128 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_sub_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_max_epi32(__m256i __a, __m256i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi64_epi8(__mmask8 __M, __m256i __A)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set1_epi32(int __i)
Constructs a 256-bit integer vector of [8 x i32], with each of the 32-bit integral vector elements set to the specified 32-bit integral value.
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srli_epi32(__m256i __a, int __count)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_permutevar_ps(__m256 __a, __m256i __c)
Copies the values stored in a 256-bit vector of [8 x float] as specified by the 256-bit integer vector operand.
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_expandloadu_epi64(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_min_epu32(__m256i __a, __m256i __b)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_permutex2var_pd(__mmask8 __U, __m256d __A, __m256i __I, __m256d __B)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_cvttps_epi32(__m256 __a)
Converts a vector of [8 x float] into a vector of [8 x i32], truncating the result by rounding toward zero when it is inexact.
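Example (illustrative sketch, AVX): _mm256_cvttps_epi32 always truncates, while _mm256_cvtps_epi32 (listed elsewhere in this index) rounds under the current rounding mode, round-to-nearest-even by default.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256 v = _mm256_set1_ps(2.7f);
    int r[8], t[8];
    _mm256_storeu_si256((__m256i *)r, _mm256_cvtps_epi32(v));  /* rounds: 3 */
    _mm256_storeu_si256((__m256i *)t, _mm256_cvttps_epi32(v)); /* truncates: 2 */
    printf("rounded: %d  truncated: %d\n", r[0], t[0]);
    return 0;
}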
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_fmaddsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_loadu_pd(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_broadcast_i32x4(__m128i __A)
#define _mm_cmpneq_epi64_mask(A, B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mul_epi32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu32(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [4 x u32] and returns a 128-bit vector of [4 x u32] containing the lesser of the two values.
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_div_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvttps_epi32(__m128i __W, __mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, int __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_or_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_max_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_div_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_rsqrt14_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_rorv_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_mov_pd(__mmask8 __U, __m128d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_movedup_pd(__mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtps_epu32(__m256 __A)
static __inline __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtps_ph(__mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_broadcastd_epi32(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sub_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_scalef_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_unpackhi_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_max_epu64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mul_epu32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_min_epu64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_movehdup_ps(__m128 __a)
Moves and duplicates odd-indexed values from a 128-bit vector of [4 x float] to float values stored in a 128-bit vector of [4 x float].
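Example (illustrative sketch, SSE3): the movehdup/moveldup pair; one broadcasts odd-indexed lanes downward, the other even-indexed lanes upward.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128 v = _mm_setr_ps(0.0f, 1.0f, 2.0f, 3.0f);
    float h[4], l[4];
    _mm_storeu_ps(h, _mm_movehdup_ps(v)); /* {1, 1, 3, 3} */
    _mm_storeu_ps(l, _mm_moveldup_ps(v)); /* {0, 0, 2, 2} */
    printf("hdup: %g %g %g %g  ldup: %g %g %g %g\n",
           h[0], h[1], h[2], h[3], l[0], l[1], l[2], l[3]);
    return 0;
}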
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_movedup_pd(__mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_max_epu64(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_rolv_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_add_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_testn_epi32_mask(__m256i __A, __m256i __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_test_epi64_mask(__m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtusepi32_epi8(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvttps_epu32(__m128i __W, __mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __imm)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_cvtpd_ps(__mmask8 __U, __m128d __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_getexp_ps(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_undefined_si128(void)
Generates a 128-bit vector of [4 x i32] with unspecified content.
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sra_epi32(__m256i __a, __m128i __count)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_loadu_ps(__mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi32_ps(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi32(__m128i __V)
Sign-extends each of the lower four 16-bit integer elements of a 128-bit integer vector of [8 x i16] to 32-bit values and returns them in a 128-bit vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvttpd_epu32(__m128i __W, __mmask8 __U, __m128d __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu32_pd(__mmask8 __U, __m128i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_testn_epi64_mask(__m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_max_epu32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sll_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_compress_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_compressstoreu_epi64(void *__P, __mmask8 __U, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_movedup_pd(__m128d __a)
Moves and duplicates the double-precision value in the lower bits of a 128-bit vector of [2 x double] to double-precision values stored in a 128-bit vector of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_rcp14_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_max_epi32(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mov_epi64(__mmask8 __U, __m128i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_mask_test_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtps_epi32(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtepi32_epi16(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu16_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_max_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_mul_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi64(__m128i __a, __m128i __count)
Right-shifts each 64-bit value in the 128-bit integer vector operand by the specified number of bits; high-order bits are cleared.
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_max_epi64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_sub_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi32(__m128i __V)
Sign-extends each of the lower four 8-bit integer elements of a 128-bit vector of [16 x i8] to 32-bit values and returns them in a 128-bit vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_permutex2var_epi32(__mmask8 __U, __m128i __A, __m128i __I, __m128i __B)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_cvtepi32_pd(__m128i __a)
Converts a vector of [4 x i32] into a vector of [4 x double].
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepi8_epi32(__m128i __V)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtsepi64_epi16(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtsepi64_epi8(__mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtpd_epu32(__m128d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_expandloadu_pd(__mmask8 __U, void const *__P)
static __inline __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtps_ph(__m128i __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mul_epu32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_compressstoreu_ps(void *__P, __mmask8 __U, __m128 __A)
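Example (illustrative): _mm_mask_compressstoreu_ps packs the lanes whose mask bits are set and writes only that many floats; trailing memory is untouched. A sketch assuming -mavx512vl.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128 v = _mm_setr_ps(10.0f, 11.0f, 12.0f, 13.0f);
    float out[4] = {-1.0f, -1.0f, -1.0f, -1.0f};
    _mm_mask_compressstoreu_ps(out, 0xA, v);  /* mask 1010b: lanes 1 and 3 */
    /* out is now {11, 13, -1, -1}: two floats written, the rest untouched */
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
    return 0;
}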
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sllv_epi32(__m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu32_ps(__m128 __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sub_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_loadu_pd(__m256d __W, __mmask8 __U, void const *__P)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_permutevar_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128i __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_getexp_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_load_epi64(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_expandloadu_epi64(__mmask8 __U, void const *__P)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_permutexvar_ps(__mmask8 __U, __m256i __X, __m256 __Y)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_compress_pd(__mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sllv_epi64(__m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtpd_epu32(__m128i __W, __mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_and_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_mul_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
#define _mm256_cmpneq_epi32_mask(A, B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu8_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_expand_epi64(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_load_ps(__m256 __W, __mmask8 __U, void const *__P)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_unpacklo_ps(__m256 __a, __m256 __b)
Unpacks the 32-bit vector elements 0, 1, 4 and 5 from each of the two 256-bit vectors of [8 x float] and interleaves them into a 256-bit vector of [8 x float].
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srav_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_permutexvar_epi64(__m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srav_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srlv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_cvtps_epi32(__m256 __a)
Converts a vector of [8 x float] into a vector of [8 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_broadcastd_epi32(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtpd_epi32(__m128i __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtps_epi32(__m128 __a)
Converts a vector of [4 x float] into a vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_load_epi32(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_rcp14_pd(__mmask8 __U, __m256d __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_mov_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_rolv_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_add_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_setzero_si256(void)
Constructs a 256-bit integer vector initialized to zero.
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvttpd_epu32(__m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtps_epu32(__m256i __W, __mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sra_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_or_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_movehdup_ps(__m256 __a)
Moves and duplicates odd-indexed values from a 256-bit vector of [8 x float] to float values in a 256-bit vector of [8 x float].
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mul_epi32(__mmask8 __M, __m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_epi64(__m256i __W, __mmask8 __U, __m128i __X)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_movehdup_ps(__mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srav_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_broadcastq_epi64(__m256i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu8_epi64(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_cvtepu32_pd(__m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_pd(__m128d __a, __m128d __b)
Performs element-by-element comparison of the two 128-bit vectors of [2 x double] and returns the vector containing the greater of each pair of values.
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_blend_epi64(__mmask8 __U, __m128i __A, __m128i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_xor_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_xor_si128(__m128i __a, __m128i __b)
Performs a bitwise exclusive OR of two 128-bit integer vectors.
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_min_epi64(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V)
Zero-extends each of the lower two 32-bit integer elements of a 128-bit integer vector of [4 x i32] to 64-bit values and returns them in a 128-bit vector of [2 x i64].
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_getexp_ps(__mmask8 __U, __m128 __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_storeu_epi32(void *__P, __mmask8 __U, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_scalef_ps(__m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask3_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_rcp14_pd(__m256d __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi32_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_max_epu64(__m256i __A, __m256i __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_test_epi32_mask(__m128i __A, __m128i __B)
static __inline __m128d __DEFAULT_FN_ATTRS128 _mm_permutevar_pd(__m128d __a, __m128i __c)
Copies the values in a 128-bit vector of [2 x double] as specified by the 128-bit integer vector operand.
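Example (illustrative sketch, AVX): bit 1 of each 64-bit control lane selects the source element for that lane; here the control {2, 0} swaps the two doubles.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128d v = _mm_setr_pd(1.0, 2.0);
    __m128i c = _mm_set_epi64x(0, 2);  /* lane 0 <- element 1, lane 1 <- element 0 */
    double out[2];
    _mm_storeu_pd(out, _mm_permutevar_pd(v, c)); /* {2.0, 1.0} */
    printf("%g %g\n", out[0], out[1]);
    return 0;
}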
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mov_epi64(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sra_epi32(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_xor_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu32_epi64(__m128i __W, __mmask8 __U, __m128i __X)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_or_si128(__m128i __a, __m128i __b)
Performs a bitwise OR of two 128-bit integer vectors.
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_unpackhi_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_unpacklo_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_permutexvar_pd(__mmask8 __U, __m256i __X, __m256d __Y)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_movehdup_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask3_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sllv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_xor_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srl_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_max_epu32(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sra_epi64(__m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi32(__m128i __V)
Zero-extends each of the lower four 8-bit integer elements of a 128-bit vector of [16 x i8] to 32-bit values and returns them in a 128-bit vector of [4 x i32].
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sll_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvttpd_epu32(__mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_compress_epi64(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_set1_epi64(__m128i __O, __mmask8 __M, long long __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_expand_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_unpackhi_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_storeu_epi32(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_permutexvar_epi64(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_min_epu64(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtepi32_epi16(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_or_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mul_pd(__m128d __a, __m128d __b)
Multiplies two 128-bit vectors of [2 x double].
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvttps_epu32(__m256 __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_setzero_pd(void)
Constructs a 256-bit floating-point vector of [4 x double] with all vector elements initialized to zero.
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtepi32_epi8(__m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_storeu_epi32(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_permutex2var_epi32(__m128i __A, __mmask8 __U, __m128i __I, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_scalef_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttps_epu32(__m128 __A)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_max_pd(__m256d __a, __m256d __b)
Compares two 256-bit vectors of [4 x double] and returns the greater of each pair of values.
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_storeu_pd(void *__P, __mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_abs_epi32(__mmask8 __U, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_load_ps(__mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_unpackhi_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_unpackhi_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtpd_epi32(__m128i __W, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_broadcastd_epi32(__m128i __X)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_rolv_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_storeu_ps(void *__P, __mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_compress_epi64(__mmask8 __U, __m256i __A)
#define _mm256_mask_cmpneq_epi32_mask(k, A, B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_permutex2var_pd(__m128d __A, __m128i __I, __m128d __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtepi32_ps(__m128i __a)
Converts a vector of [4 x i32] into a vector of [4 x float].
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_abs_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_rcp14_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_max_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtusepi64_epi32(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_rorv_epi64(__m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sra_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_cvtph_ps(__m256 __W, __mmask8 __U, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_sub_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_broadcastq_epi64(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_storeu_epi32(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_load_epi32(__mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_fmsubadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_unpackhi_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_blend_pd(__mmask8 __U, __m256d __A, __m256d __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_max_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_add_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
#define _mm_mask_cmpneq_epi32_mask(k, A, B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srl_epi64(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu8_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvttps_epi32(__m256i __W, __mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_compress_epi32(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS256 _mm256_mask_cvtpd_ps(__m128 __W, __mmask8 __U, __m256d __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask2_permutex2var_pd(__m256d __A, __m256i __I, __mmask8 __U, __m256d __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
#define _mm_cmpeq_epi64_mask(A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvttpd_epi32(__mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_expand_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_rorv_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtsepi64_epi8(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_set1_epi64(__mmask8 __M, long long __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtsepi32_epi8(__m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_rcp14_ps(__mmask8 __U, __m128 __A)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_cvtps_pd(__m128 __a)
Converts a 128-bit vector of [4 x float] into a 256-bit vector of [4 x double].
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sll_epi32(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_andnot_si128(__m128i __a, __m128i __b)
Performs a bitwise AND of two 128-bit integer vectors, using the one's complement of the values contained in the first source operand.
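Example (illustrative sketch, SSE2): note the complement applies to the first operand, i.e. _mm_andnot_si128(a, b) computes (~a) & b.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128i a = _mm_set1_epi32(0x0F0F);
    __m128i b = _mm_set1_epi32(0xFFFF);
    int out[4];
    _mm_storeu_si128((__m128i *)out, _mm_andnot_si128(a, b));
    printf("0x%X\n", (unsigned)out[0]);  /* 0xF0F0: a's set bits cleared from b */
    return 0;
}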
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu8_epi32(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_add_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srav_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_store_epi64(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_getexp_ps(__m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_min_epu64(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu16_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_min_pd(__m256d __a, __m256d __b)
Compares two 256-bit vectors of [4 x double] and returns the lesser of each pair of values.
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_sqrt_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_broadcast_f32x4(__m256 __O, __mmask8 __M, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_expandloadu_epi32(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_andnot_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_xor_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_mul_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_store_pd(void *__P, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtusepi64_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_permutex2var_epi32(__m256i __A, __m256i __I, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_abs_epi64(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_add_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_cvtpd_ps(__m128 __W, __mmask8 __U, __m128d __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtusepi32_epi8(__mmask8 __M, __m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_permutex2var_pd(__m256d __A, __m256i __I, __m256d __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_expand_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsepi32_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_min_epi32(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_max_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtepi64_epi16(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_fmaddsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi16_epi64(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_mask_test_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_storeu_epi64(void *__P, __mmask8 __U, __m256i __A)