11 #error "Never use <avx512vlvbmi2intrin.h> directly; include <immintrin.h> instead." 14 #ifndef __AVX512VLVBMI2INTRIN_H 15 #define __AVX512VLVBMI2INTRIN_H 18 #define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vbmi2"), __min_vector_width__(128))) 19 #define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vbmi2"), __min_vector_width__(256))) 24 return (__m128i) __builtin_ia32_compresshi128_mask ((
__v8hi) __D,
32 return (__m128i) __builtin_ia32_compresshi128_mask ((
__v8hi) __D,
40 return (__m128i) __builtin_ia32_compressqi128_mask ((
__v16qi) __D,
48 return (__m128i) __builtin_ia32_compressqi128_mask ((
__v16qi) __D,
static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_compressstoreu_epi16(void *__P, __mmask8 __U, __m128i __D)
{
  __builtin_ia32_compressstorehi128_mask((__v8hi *)__P, (__v8hi)__D, __U);
}

static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_compressstoreu_epi8(void *__P, __mmask16 __U, __m128i __D)
{
  __builtin_ia32_compressstoreqi128_mask((__v16qi *)__P, (__v16qi)__D, __U);
}
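/* Masked expand: distribute the low-order elements of __D into the result
   positions selected by __U; unselected positions are taken from __S (mask
   form) or zeroed (maskz form). */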
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_expand_epi16(__m128i __S, __mmask8 __U, __m128i __D)
{
  return (__m128i)__builtin_ia32_expandhi128_mask((__v8hi)__D, (__v8hi)__S, __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_expand_epi16(__mmask8 __U, __m128i __D)
{
  return (__m128i)__builtin_ia32_expandhi128_mask((__v8hi)__D, (__v8hi)_mm_setzero_si128(), __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_expand_epi8(__m128i __S, __mmask16 __U, __m128i __D)
{
  return (__m128i)__builtin_ia32_expandqi128_mask((__v16qi)__D, (__v16qi)__S, __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_expand_epi8(__mmask16 __U, __m128i __D)
{
  return (__m128i)__builtin_ia32_expandqi128_mask((__v16qi)__D, (__v16qi)_mm_setzero_si128(), __U);
}
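/* Masked expand-load: contiguous elements are read from unaligned memory at
   __P and distributed into the result positions selected by __U. */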
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_expandloadu_epi16(__m128i __S, __mmask8 __U, void const *__P)
{
  return (__m128i)__builtin_ia32_expandloadhi128_mask((const __v8hi *)__P, (__v8hi)__S, __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_expandloadu_epi16(__mmask8 __U, void const *__P)
{
  return (__m128i)__builtin_ia32_expandloadhi128_mask((const __v8hi *)__P, (__v8hi)_mm_setzero_si128(), __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_expandloadu_epi8(__m128i __S, __mmask16 __U, void const *__P)
{
  return (__m128i)__builtin_ia32_expandloadqi128_mask((const __v16qi *)__P, (__v16qi)__S, __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_expandloadu_epi8(__mmask16 __U, void const *__P)
{
  return (__m128i)__builtin_ia32_expandloadqi128_mask((const __v16qi *)__P, (__v16qi)_mm_setzero_si128(), __U);
}
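/* 256-bit forms of the compress, compress-store, expand, and expand-load
   intrinsics above; only the element counts and mask widths change. */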
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_compress_epi16(__m256i __S, __mmask16 __U, __m256i __D)
{
  return (__m256i)__builtin_ia32_compresshi256_mask((__v16hi)__D, (__v16hi)__S, __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_compress_epi16(__mmask16 __U, __m256i __D)
{
  return (__m256i)__builtin_ia32_compresshi256_mask((__v16hi)__D, (__v16hi)_mm256_setzero_si256(), __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_compress_epi8(__m256i __S, __mmask32 __U, __m256i __D)
{
  return (__m256i)__builtin_ia32_compressqi256_mask((__v32qi)__D, (__v32qi)__S, __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_compress_epi8(__mmask32 __U, __m256i __D)
{
  return (__m256i)__builtin_ia32_compressqi256_mask((__v32qi)__D, (__v32qi)_mm256_setzero_si256(), __U);
}
static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_compressstoreu_epi16(void *__P, __mmask16 __U, __m256i __D)
{
  __builtin_ia32_compressstorehi256_mask((__v16hi *)__P, (__v16hi)__D, __U);
}

static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_compressstoreu_epi8(void *__P, __mmask32 __U, __m256i __D)
{
  __builtin_ia32_compressstoreqi256_mask((__v32qi *)__P, (__v32qi)__D, __U);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_expand_epi16(__m256i __S, __mmask16 __U, __m256i __D)
{
  return (__m256i)__builtin_ia32_expandhi256_mask((__v16hi)__D, (__v16hi)__S, __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_expand_epi16(__mmask16 __U, __m256i __D)
{
  return (__m256i)__builtin_ia32_expandhi256_mask((__v16hi)__D, (__v16hi)_mm256_setzero_si256(), __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_expand_epi8(__m256i __S, __mmask32 __U, __m256i __D)
{
  return (__m256i)__builtin_ia32_expandqi256_mask((__v32qi)__D, (__v32qi)__S, __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_expand_epi8(__mmask32 __U, __m256i __D)
{
  return (__m256i)__builtin_ia32_expandqi256_mask((__v32qi)__D, (__v32qi)_mm256_setzero_si256(), __U);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_expandloadu_epi16(__m256i __S, __mmask16 __U, void const *__P)
{
  return (__m256i)__builtin_ia32_expandloadhi256_mask((const __v16hi *)__P, (__v16hi)__S, __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_expandloadu_epi16(__mmask16 __U, void const *__P)
{
  return (__m256i)__builtin_ia32_expandloadhi256_mask((const __v16hi *)__P, (__v16hi)_mm256_setzero_si256(), __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_expandloadu_epi8(__m256i __S, __mmask32 __U, void const *__P)
{
  return (__m256i)__builtin_ia32_expandloadqi256_mask((const __v32qi *)__P, (__v32qi)__S, __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_expandloadu_epi8(__mmask32 __U, void const *__P)
{
  return (__m256i)__builtin_ia32_expandloadqi256_mask((const __v32qi *)__P, (__v32qi)_mm256_setzero_si256(), __U);
}
#define _mm256_shldi_epi64(A, B, I) \
  (__m256i)__builtin_ia32_vpshldq256((__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (int)(I))
#define _mm256_mask_shldi_epi64(S, U, A, B, I) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), (__v4di)_mm256_shldi_epi64((A), (B), (I)), (__v4di)(__m256i)(S))
#define _mm256_maskz_shldi_epi64(U, A, B, I) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), (__v4di)_mm256_shldi_epi64((A), (B), (I)), (__v4di)_mm256_setzero_si256())

#define _mm_shldi_epi64(A, B, I) \
  (__m128i)__builtin_ia32_vpshldq128((__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (int)(I))
#define _mm_mask_shldi_epi64(S, U, A, B, I) \
  (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), (__v2di)_mm_shldi_epi64((A), (B), (I)), (__v2di)(__m128i)(S))
#define _mm_maskz_shldi_epi64(U, A, B, I) \
  (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), (__v2di)_mm_shldi_epi64((A), (B), (I)), (__v2di)_mm_setzero_si128())

#define _mm256_shldi_epi32(A, B, I) \
  (__m256i)__builtin_ia32_vpshldd256((__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (int)(I))
#define _mm256_mask_shldi_epi32(S, U, A, B, I) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), (__v8si)_mm256_shldi_epi32((A), (B), (I)), (__v8si)(__m256i)(S))
#define _mm256_maskz_shldi_epi32(U, A, B, I) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), (__v8si)_mm256_shldi_epi32((A), (B), (I)), (__v8si)_mm256_setzero_si256())

#define _mm_shldi_epi32(A, B, I) \
  (__m128i)__builtin_ia32_vpshldd128((__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (int)(I))
#define _mm_mask_shldi_epi32(S, U, A, B, I) \
  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), (__v4si)_mm_shldi_epi32((A), (B), (I)), (__v4si)(__m128i)(S))
#define _mm_maskz_shldi_epi32(U, A, B, I) \
  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), (__v4si)_mm_shldi_epi32((A), (B), (I)), (__v4si)_mm_setzero_si128())

#define _mm256_shldi_epi16(A, B, I) \
  (__m256i)__builtin_ia32_vpshldw256((__v16hi)(__m256i)(A), (__v16hi)(__m256i)(B), (int)(I))
#define _mm256_mask_shldi_epi16(S, U, A, B, I) \
  (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), (__v16hi)_mm256_shldi_epi16((A), (B), (I)), (__v16hi)(__m256i)(S))
#define _mm256_maskz_shldi_epi16(U, A, B, I) \
  (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), (__v16hi)_mm256_shldi_epi16((A), (B), (I)), (__v16hi)_mm256_setzero_si256())

#define _mm_shldi_epi16(A, B, I) \
  (__m128i)__builtin_ia32_vpshldw128((__v8hi)(__m128i)(A), (__v8hi)(__m128i)(B), (int)(I))
#define _mm_mask_shldi_epi16(S, U, A, B, I) \
  (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), (__v8hi)_mm_shldi_epi16((A), (B), (I)), (__v8hi)(__m128i)(S))
#define _mm_maskz_shldi_epi16(U, A, B, I) \
  (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), (__v8hi)_mm_shldi_epi16((A), (B), (I)), (__v8hi)_mm_setzero_si128())

#define _mm256_shrdi_epi64(A, B, I) \
  (__m256i)__builtin_ia32_vpshrdq256((__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (int)(I))
#define _mm256_mask_shrdi_epi64(S, U, A, B, I) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), (__v4di)_mm256_shrdi_epi64((A), (B), (I)), (__v4di)(__m256i)(S))
#define _mm256_maskz_shrdi_epi64(U, A, B, I) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), (__v4di)_mm256_shrdi_epi64((A), (B), (I)), (__v4di)_mm256_setzero_si256())

#define _mm_shrdi_epi64(A, B, I) \
  (__m128i)__builtin_ia32_vpshrdq128((__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (int)(I))
#define _mm_mask_shrdi_epi64(S, U, A, B, I) \
  (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), (__v2di)_mm_shrdi_epi64((A), (B), (I)), (__v2di)(__m128i)(S))
#define _mm_maskz_shrdi_epi64(U, A, B, I) \
  (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), (__v2di)_mm_shrdi_epi64((A), (B), (I)), (__v2di)_mm_setzero_si128())

#define _mm256_shrdi_epi32(A, B, I) \
  (__m256i)__builtin_ia32_vpshrdd256((__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (int)(I))
#define _mm256_mask_shrdi_epi32(S, U, A, B, I) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), (__v8si)_mm256_shrdi_epi32((A), (B), (I)), (__v8si)(__m256i)(S))
#define _mm256_maskz_shrdi_epi32(U, A, B, I) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), (__v8si)_mm256_shrdi_epi32((A), (B), (I)), (__v8si)_mm256_setzero_si256())

#define _mm_shrdi_epi32(A, B, I) \
  (__m128i)__builtin_ia32_vpshrdd128((__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (int)(I))
#define _mm_mask_shrdi_epi32(S, U, A, B, I) \
  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), (__v4si)_mm_shrdi_epi32((A), (B), (I)), (__v4si)(__m128i)(S))
#define _mm_maskz_shrdi_epi32(U, A, B, I) \
  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), (__v4si)_mm_shrdi_epi32((A), (B), (I)), (__v4si)_mm_setzero_si128())

#define _mm256_shrdi_epi16(A, B, I) \
  (__m256i)__builtin_ia32_vpshrdw256((__v16hi)(__m256i)(A), (__v16hi)(__m256i)(B), (int)(I))
#define _mm256_mask_shrdi_epi16(S, U, A, B, I) \
  (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), (__v16hi)_mm256_shrdi_epi16((A), (B), (I)), (__v16hi)(__m256i)(S))
#define _mm256_maskz_shrdi_epi16(U, A, B, I) \
  (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), (__v16hi)_mm256_shrdi_epi16((A), (B), (I)), (__v16hi)_mm256_setzero_si256())

#define _mm_shrdi_epi16(A, B, I) \
  (__m128i)__builtin_ia32_vpshrdw128((__v8hi)(__m128i)(A), (__v8hi)(__m128i)(B), (int)(I))
#define _mm_mask_shrdi_epi16(S, U, A, B, I) \
  (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), (__v8hi)_mm_shrdi_epi16((A), (B), (I)), (__v8hi)(__m128i)(S))
#define _mm_maskz_shrdi_epi16(U, A, B, I) \
  (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), (__v8hi)_mm_shrdi_epi16((A), (B), (I)), (__v8hi)_mm_setzero_si128())
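/* VPSHLDV/VPSHRDV variable forms: the same concatenate-and-shift operation,
   but the per-element shift counts are taken from the third vector operand
   __C. The mask form merges unselected elements from __A; the maskz form
   zeroes them. */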
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shldv_epi64(__m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i)__builtin_ia32_vpshldvq256((__v4di)__A, (__v4di)__B, (__v4di)__C);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_shldv_epi64(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C)
{
  return (__m256i)__builtin_ia32_selectq_256(__U, (__v4di)_mm256_shldv_epi64(__A, __B, __C), (__v4di)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_shldv_epi64(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i)__builtin_ia32_selectq_256(__U, (__v4di)_mm256_shldv_epi64(__A, __B, __C), (__v4di)_mm256_setzero_si256());
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_shldv_epi64(__m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i)__builtin_ia32_vpshldvq128((__v2di)__A, (__v2di)__B, (__v2di)__C);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_shldv_epi64(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C)
{
  return (__m128i)__builtin_ia32_selectq_128(__U, (__v2di)_mm_shldv_epi64(__A, __B, __C), (__v2di)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_shldv_epi64(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i)__builtin_ia32_selectq_128(__U, (__v2di)_mm_shldv_epi64(__A, __B, __C), (__v2di)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shldv_epi32(__m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i)__builtin_ia32_vpshldvd256((__v8si)__A, (__v8si)__B, (__v8si)__C);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_shldv_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C)
{
  return (__m256i)__builtin_ia32_selectd_256(__U, (__v8si)_mm256_shldv_epi32(__A, __B, __C), (__v8si)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_shldv_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i)__builtin_ia32_selectd_256(__U, (__v8si)_mm256_shldv_epi32(__A, __B, __C), (__v8si)_mm256_setzero_si256());
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_shldv_epi32(__m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i)__builtin_ia32_vpshldvd128((__v4si)__A, (__v4si)__B, (__v4si)__C);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_shldv_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C)
{
  return (__m128i)__builtin_ia32_selectd_128(__U, (__v4si)_mm_shldv_epi32(__A, __B, __C), (__v4si)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_shldv_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i)__builtin_ia32_selectd_128(__U, (__v4si)_mm_shldv_epi32(__A, __B, __C), (__v4si)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shldv_epi16(__m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i)__builtin_ia32_vpshldvw256((__v16hi)__A, (__v16hi)__B, (__v16hi)__C);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_shldv_epi16(__m256i __A, __mmask16 __U, __m256i __B, __m256i __C)
{
  return (__m256i)__builtin_ia32_selectw_256(__U, (__v16hi)_mm256_shldv_epi16(__A, __B, __C), (__v16hi)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_shldv_epi16(__mmask16 __U, __m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i)__builtin_ia32_selectw_256(__U, (__v16hi)_mm256_shldv_epi16(__A, __B, __C), (__v16hi)_mm256_setzero_si256());
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_shldv_epi16(__m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i)__builtin_ia32_vpshldvw128((__v8hi)__A, (__v8hi)__B, (__v8hi)__C);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_shldv_epi16(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C)
{
  return (__m128i)__builtin_ia32_selectw_128(__U, (__v8hi)_mm_shldv_epi16(__A, __B, __C), (__v8hi)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_shldv_epi16(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i)__builtin_ia32_selectw_128(__U, (__v8hi)_mm_shldv_epi16(__A, __B, __C), (__v8hi)_mm_setzero_si128());
}
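/* VPSHRDV: right funnel shifts; the double-width concatenation is shifted
   right by the counts in __C and the low half of each result is kept. */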
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shrdv_epi64(__m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i)__builtin_ia32_vpshrdvq256((__v4di)__A, (__v4di)__B, (__v4di)__C);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_shrdv_epi64(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C)
{
  return (__m256i)__builtin_ia32_selectq_256(__U, (__v4di)_mm256_shrdv_epi64(__A, __B, __C), (__v4di)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_shrdv_epi64(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i)__builtin_ia32_selectq_256(__U, (__v4di)_mm256_shrdv_epi64(__A, __B, __C), (__v4di)_mm256_setzero_si256());
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_shrdv_epi64(__m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i)__builtin_ia32_vpshrdvq128((__v2di)__A, (__v2di)__B, (__v2di)__C);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_shrdv_epi64(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C)
{
  return (__m128i)__builtin_ia32_selectq_128(__U, (__v2di)_mm_shrdv_epi64(__A, __B, __C), (__v2di)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_shrdv_epi64(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i)__builtin_ia32_selectq_128(__U, (__v2di)_mm_shrdv_epi64(__A, __B, __C), (__v2di)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shrdv_epi32(__m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i)__builtin_ia32_vpshrdvd256((__v8si)__A, (__v8si)__B, (__v8si)__C);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_shrdv_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C)
{
  return (__m256i)__builtin_ia32_selectd_256(__U, (__v8si)_mm256_shrdv_epi32(__A, __B, __C), (__v8si)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_shrdv_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i)__builtin_ia32_selectd_256(__U, (__v8si)_mm256_shrdv_epi32(__A, __B, __C), (__v8si)_mm256_setzero_si256());
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_shrdv_epi32(__m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i)__builtin_ia32_vpshrdvd128((__v4si)__A, (__v4si)__B, (__v4si)__C);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_shrdv_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C)
{
  return (__m128i)__builtin_ia32_selectd_128(__U, (__v4si)_mm_shrdv_epi32(__A, __B, __C), (__v4si)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_shrdv_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i)__builtin_ia32_selectd_128(__U, (__v4si)_mm_shrdv_epi32(__A, __B, __C), (__v4si)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shrdv_epi16(__m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i)__builtin_ia32_vpshrdvw256((__v16hi)__A, (__v16hi)__B, (__v16hi)__C);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_shrdv_epi16(__m256i __A, __mmask16 __U, __m256i __B, __m256i __C)
{
  return (__m256i)__builtin_ia32_selectw_256(__U, (__v16hi)_mm256_shrdv_epi16(__A, __B, __C), (__v16hi)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_shrdv_epi16(__mmask16 __U, __m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i)__builtin_ia32_selectw_256(__U, (__v16hi)_mm256_shrdv_epi16(__A, __B, __C), (__v16hi)_mm256_setzero_si256());
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_shrdv_epi16(__m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i)__builtin_ia32_vpshrdvw128((__v8hi)__A, (__v8hi)__B, (__v8hi)__C);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_shrdv_epi16(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C)
{
  return (__m128i)__builtin_ia32_selectw_128(__U, (__v8hi)_mm_shrdv_epi16(__A, __B, __C), (__v8hi)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_shrdv_epi16(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i)__builtin_ia32_selectw_128(__U, (__v8hi)_mm_shrdv_epi16(__A, __B, __C), (__v8hi)_mm_setzero_si128());
}
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256

#endif /* __AVX512VLVBMI2INTRIN_H */
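/*
 * Illustrative usage sketch (an editorial addition, not part of the upstream
 * header): left-pack the 16-bit elements of a vector selected by a mask.
 * Assumes the translation unit is compiled with AVX512VL and AVX512VBMI2
 * enabled (e.g. -mavx512vl -mavx512vbmi2) and includes <immintrin.h> rather
 * than this file directly.
 *
 *   #include <immintrin.h>
 *
 *   static __m128i keep_selected_words(__m128i v, __mmask8 keep)
 *   {
 *     // Selected words move to the low positions; the rest become zero.
 *     return _mm_maskz_compress_epi16(keep, v);
 *   }
 */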