#ifndef included_vector_avx512_h
#define included_vector_avx512_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

#define foreach_avx512_vec512i \
  _(i,8,64,epi8) _(i,16,32,epi16) _(i,32,16,epi32) _(i,64,8,epi64)
#define foreach_avx512_vec512u \
  _(u,8,64,epi8) _(u,16,32,epi16) _(u,32,16,epi32) _(u,64,8,epi64)
#define foreach_avx512_vec512f \
  _(f,32,8,ps) _(f,64,4,pd)

/* splat, aligned/unaligned load and store, zero/equality tests, zero mask
   and interleave operations, generated for each 512-bit vector type. */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm512_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_aligned (void *p) \
{ return (t##s##x##c) _mm512_load_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_aligned (t##s##x##c v, void *p) \
{ _mm512_store_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm512_loadu_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm512_storeu_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c v) \
{ return (_mm512_test_epi64_mask ((__m512i) v, (__m512i) v) == 0); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline u##c \
t##s##x##c##_is_zero_mask (t##s##x##c v) \
{ return _mm512_test_##i##_mask ((__m512i) v, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpacklo_##i ((__m512i) a, (__m512i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpackhi_##i ((__m512i) a, (__m512i) b); }

foreach_avx512_vec512i foreach_avx512_vec512u
#undef _

static_always_inline u32
u16x32_msb_mask (u16x32 v)
{
  return (u32) _mm512_movepi16_mask ((__m512i) v);
}
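/* Usage sketch (illustrative only, not part of the header): the macro above
   instantiates the same helpers for every listed type, e.g. for u32x16:

     u32x16 v = u32x16_load_unaligned (p);
     if (u32x16_is_all_zero (v))
       return;
     u32x16_store_unaligned (u32x16_splat (0x5a5a5a5a), p);
*/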
static_always_inline u32x16
u32x16_byte_swap (u32x16 v)
{
  u8x64 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x16) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}
static_always_inline u16x32
u16x32_byte_swap (u16x32 v)
{
  u8x64 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x32) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}
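/* The helpers below move data between 512-bit vectors and their 256-bit
   halves: index 0 of the underlying insert/extract intrinsic selects the
   low half, index 1 the high half. */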
static_always_inline u32x8
u32x16_extract_lo (u32x16 v)
{
  return (u32x8) _mm512_extracti64x4_epi64 ((__m512i) v, 0);
}

static_always_inline u32x8
u32x16_extract_hi (u32x16 v)
{
  return (u32x8) _mm512_extracti64x4_epi64 ((__m512i) v, 1);
}

static_always_inline u8x32
u8x64_extract_lo (u8x64 v)
{
  return (u8x32) _mm512_extracti64x4_epi64 ((__m512i) v, 0);
}

static_always_inline u8x32
u8x64_extract_hi (u8x64 v)
{
  return (u8x32) _mm512_extracti64x4_epi64 ((__m512i) v, 1);
}
/* horizontal minimum across all 16 lanes, built from the 256-bit helpers */
static_always_inline u32
u32x16_min_scalar (u32x16 v)
{
  return u32x8_min_scalar (u32x8_min (u32x16_extract_lo (v),
				      u32x16_extract_hi (v)));
}
static_always_inline u32x16
u32x16_insert_lo (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 0);
}

static_always_inline u32x16
u32x16_insert_hi (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 1);
}

static_always_inline u64x8
u64x8_permute (u64x8 a, u64x8 b, u64x8 mask)
{
  return (u64x8) _mm512_permutex2var_epi64 ((__m512i) a, (__m512i) mask,
					    (__m512i) b);
}
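/* The helpers below are macros rather than inline functions because the
   last argument maps to an immediate operand of the underlying intrinsic
   and therefore must be a compile-time constant. */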
#define u32x16_ternary_logic(a, b, c, d) \
  (u32x16) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b, (__m512i) c, d)

#define u8x64_insert_u8x16(a, b, n) \
  (u8x64) _mm512_inserti64x2 ((__m512i) (a), (__m128i) (b), n)

#define u8x64_extract_u8x16(a, n) \
  (u8x16) _mm512_extracti64x2_epi64 ((__m512i) (a), n)

#define u8x64_word_shift_left(a,n)  (u8x64) _mm512_bslli_epi128((__m512i) a, n)
#define u8x64_word_shift_right(a,n) (u8x64) _mm512_bsrli_epi128((__m512i) a, n)

static_always_inline u8x64
u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
{
  /* 0x96 is the 3-input truth table for a ^ b ^ c */
  return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
					    (__m512i) c, 0x96);
}
static_always_inline u8x64
u8x64_reflect_u8x16 (u8x64 x)
{
  static const u8x64 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
  };
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) x, (__m512i) mask);
}
static_always_inline u8x64
u8x64_shuffle (u8x64 v, u8x64 m)
{
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) m);
}
#define u8x64_align_right(a, b, imm) \
  (u8x64) _mm512_alignr_epi8 ((__m512i) a, (__m512i) b, imm)

static_always_inline u32
u32x16_sum_elts (u32x16 sum16)
{
  u32x8 sum8;
  /* fold each 128-bit lane so its first element holds that lane's sum,
     then combine the four lane sums */
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
  return sum8[0] + sum8[4];
}
static_always_inline u8x64
u8x64_mask_load (u8x64 a, void *p, u64 mask)
{
  return (u8x64) _mm512_mask_loadu_epi8 ((__m512i) a, mask, p);
}

static_always_inline void
u8x64_mask_store (u8x64 a, void *p, u64 mask)
{
  _mm512_mask_storeu_epi8 (p, mask, (__m512i) a);
}
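/* Usage sketch (illustrative only): copy n (0 < n < 64) bytes using the
   masked load/store helpers, e.g. to handle a buffer tail:

     u64 mask = ((u64) 1 << n) - 1;          // n least significant bits set
     u8x64 v = u8x64_mask_load (u8x64_splat (0), src, mask);
     u8x64_mask_store (v, dst, mask);
*/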
static_always_inline u8x64
u8x64_splat_u8x16 (u8x16 a)
{
  return (u8x64) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_splat_u32x4 (u32x4 a)
{
  return (u32x16) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_mask_blend (u32x16 a, u32x16 b, u16 mask)
{
  return (u32x16) _mm512_mask_blend_epi32 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
{
  return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8
u64x8_mask_is_equal (u64x8 a, u64x8 b)
{
  return _mm512_cmpeq_epu64_mask ((__m512i) a, (__m512i) b);
}
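/* In-place transpose of a 16x16 matrix of u32 held in m[0..15] (row i in
   m[i]): 32-bit unpacks build 2x2 blocks, 64-bit unpacks build 4x4 blocks,
   and the permutex2var steps move 128- and 256-bit blocks into place. */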
static_always_inline void
u32x16_transpose (u32x16 m[16])
{
  __m512i r[16], a, b, c, d, x, y;

  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13 };
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15 };
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11 };
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15 };
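  /* The pmN index vectors above drive _mm512_permutex2var_epi64: pm1/pm2
     interleave 128-bit blocks taken from two source registers, pm3/pm4
     combine their low and high 256-bit halves. */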
  r[0] = _mm512_unpacklo_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpacklo_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[5] = _mm512_unpacklo_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[6] = _mm512_unpacklo_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[7] = _mm512_unpacklo_epi32 ((__m512i) m[14], (__m512i) m[15]);

  r[8] = _mm512_unpackhi_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[9] = _mm512_unpackhi_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[10] = _mm512_unpackhi_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[11] = _mm512_unpackhi_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[12] = _mm512_unpackhi_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[13] = _mm512_unpackhi_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[14] = _mm512_unpackhi_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[15] = _mm512_unpackhi_epi32 ((__m512i) m[14], (__m512i) m[15]);

  a = _mm512_unpacklo_epi64 (r[0], r[1]);
  b = _mm512_unpacklo_epi64 (r[2], r[3]);
  c = _mm512_unpacklo_epi64 (r[4], r[5]);
  d = _mm512_unpacklo_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[0] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[8] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[4] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[12] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpacklo_epi64 (r[8], r[9]);
  b = _mm512_unpacklo_epi64 (r[10], r[11]);
  c = _mm512_unpacklo_epi64 (r[12], r[13]);
  d = _mm512_unpacklo_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[2] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[10] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[6] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[14] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[0], r[1]);
  b = _mm512_unpackhi_epi64 (r[2], r[3]);
  c = _mm512_unpackhi_epi64 (r[4], r[5]);
  d = _mm512_unpackhi_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[1] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[9] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[5] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[13] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[8], r[9]);
  b = _mm512_unpackhi_epi64 (r[10], r[11]);
  c = _mm512_unpackhi_epi64 (r[12], r[13]);
  d = _mm512_unpackhi_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[3] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[11] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[7] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[15] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
}

/* In-place transpose of an 8x8 matrix of u64 held in m[0..7] (row i in
   m[i]), using the same 128- and 256-bit block permutations as above. */
static_always_inline void
u64x8_transpose (u64x8 m[8])
{
  __m512i r[8], x, y;

  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13 };
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15 };
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11 };
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15 };
  r[0] = _mm512_unpacklo_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi64 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpackhi_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[5] = _mm512_unpackhi_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[6] = _mm512_unpackhi_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[7] = _mm512_unpackhi_epi64 ((__m512i) m[6], (__m512i) m[7]);

  x = _mm512_permutex2var_epi64 (r[0], pm1, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm1, r[3]);
  m[0] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[4] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[0], pm2, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm2, r[3]);
  m[2] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[6] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);

  x = _mm512_permutex2var_epi64 (r[4], pm1, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm1, r[7]);
  m[1] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[5] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[4], pm2, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm2, r[7]);
  m[3] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[7] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
}
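/* Usage sketch (illustrative only): transpose 8 rows of 8 u64 elements
   gathered from 8 separate buffers, so that column j of the original data
   ends up in rows[j]:

     u64x8 rows[8];
     for (int i = 0; i < 8; i++)
       rows[i] = u64x8_load_unaligned (src[i]);   // src is hypothetical
     u64x8_transpose (rows);
*/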
#endif /* included_vector_avx512_h */