FD.io VPP v21.06-3-gbb25fbf28 (Vector Packet Processing)
#ifndef included_vector_sse2_h
#define included_vector_sse2_h

#include <x86intrin.h>
#define foreach_sse42_vec128i \
  _(i,8,16,epi8) _(i,16,8,epi16) _(i,32,4,epi32) _(i,64,2,epi64x)
#define foreach_sse42_vec128u \
  _(u,8,16,epi8) _(u,16,8,epi16) _(u,32,4,epi32) _(u,64,2,epi64x)
#define foreach_sse42_vec128f \
  _(f,32,4,ps) _(f,64,2,pd)
/* splat, load_unaligned, store_unaligned, is_all_zero, is_equal,
   is_all_equal */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm_loadu_si128 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm_storeu_si128 ((__m128i *) p, (__m128i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c x) \
{ return _mm_testz_si128 ((__m128i) x, (__m128i) x); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); };

foreach_sse42_vec128i foreach_sse42_vec128u
#undef _
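The block above generates splat/load/store/compare helpers for every integer vector type listed in the foreach macros. A minimal usage sketch (the helper name all_lanes_equal is hypothetical; the fixed-width clib types are assumed from vppinfra):

static_always_inline int
all_lanes_equal (void *p, u32 val)
{
  /* load 16 unaligned bytes, then compare all four u32 lanes to val */
  u32x4 v = u32x4_load_unaligned (p);
  return u32x4_is_all_equal (v, val);
}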
/* min, max */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_min (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm_min_##i ((__m128i) a, (__m128i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_max (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm_max_##i ((__m128i) a, (__m128i) b); }

_(i,8,16,epi8) _(i,16,8,epi16) _(i,32,4,epi32) _(i,64,2,epi64)
_(u,8,16,epu8) _(u,16,8,epu16) _(u,32,4,epu32) _(u,64,2,epu64)
#undef _
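A sketch of the generated min/max helpers in use; clamp_u8 is a hypothetical name:

static_always_inline u8x16
clamp_u8 (u8x16 v, u8 lo, u8 hi)
{
  /* clamp every byte of v into [lo, hi] */
  return u8x16_max (u8x16_splat (lo), u8x16_min (v, u8x16_splat (hi)));
}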
#define CLIB_VEC128_SPLAT_DEFINED
#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
/* 128 bit interleaves. */
always_inline u8x16
u8x16_interleave_hi (u8x16 a, u8x16 b)
{
  return (u8x16) _mm_unpackhi_epi8 ((__m128i) a, (__m128i) b);
}

always_inline u8x16
u8x16_interleave_lo (u8x16 a, u8x16 b)
{
  return (u8x16) _mm_unpacklo_epi8 ((__m128i) a, (__m128i) b);
}

always_inline u16x8
u16x8_interleave_hi (u16x8 a, u16x8 b)
{
  return (u16x8) _mm_unpackhi_epi16 ((__m128i) a, (__m128i) b);
}

always_inline u16x8
u16x8_interleave_lo (u16x8 a, u16x8 b)
{
  return (u16x8) _mm_unpacklo_epi16 ((__m128i) a, (__m128i) b);
}

always_inline u32x4
u32x4_interleave_hi (u32x4 a, u32x4 b)
{
  return (u32x4) _mm_unpackhi_epi32 ((__m128i) a, (__m128i) b);
}

always_inline u32x4
u32x4_interleave_lo (u32x4 a, u32x4 b)
{
  return (u32x4) _mm_unpacklo_epi32 ((__m128i) a, (__m128i) b);
}

always_inline u64x2
u64x2_interleave_hi (u64x2 a, u64x2 b)
{
  return (u64x2) _mm_unpackhi_epi64 ((__m128i) a, (__m128i) b);
}

always_inline u64x2
u64x2_interleave_lo (u64x2 a, u64x2 b)
{
  return (u64x2) _mm_unpacklo_epi64 ((__m128i) a, (__m128i) b);
}

/* 64 bit interleaves. */
always_inline u8x8
u8x8_interleave_hi (u8x8 a, u8x8 b)
{
  return (u8x8) _m_punpckhbw ((__m64) a, (__m64) b);
}

always_inline u8x8
u8x8_interleave_lo (u8x8 a, u8x8 b)
{
  return (u8x8) _m_punpcklbw ((__m64) a, (__m64) b);
}

always_inline u16x4
u16x4_interleave_hi (u16x4 a, u16x4 b)
{
  return (u16x4) _m_punpckhwd ((__m64) a, (__m64) b);
}

always_inline u16x4
u16x4_interleave_lo (u16x4 a, u16x4 b)
{
  return (u16x4) _m_punpcklwd ((__m64) a, (__m64) b);
}

always_inline u32x2
u32x2_interleave_hi (u32x2 a, u32x2 b)
{
  return (u32x2) _m_punpckhdq ((__m64) a, (__m64) b);
}

always_inline u32x2
u32x2_interleave_lo (u32x2 a, u32x2 b)
{
  return (u32x2) _m_punpckldq ((__m64) a, (__m64) b);
}
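As a reading aid: with a = {a0, ..., a15} and b = {b0, ..., b15}, u8x16_interleave_lo yields {a0, b0, a1, b1, ..., a7, b7} and u8x16_interleave_hi does the same for the upper halves. One conventional use of unpack is widening; a hypothetical sketch:

static_always_inline u16x8
widen_lo_u8_to_u16 (u8x16 v)
{
  /* interleave with zeros: on little-endian this zero-extends the
     low 8 bytes of v into eight u16 lanes */
  u8x16 zero = { 0 };
  return (u16x8) u8x16_interleave_lo (v, zero);
}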
/* 128 bit packs. */
#define _(f, t, fn) \
always_inline t t##_pack (f lo, f hi) \
{ \
  return (t) fn ((__m128i) lo, (__m128i) hi); \
}

_ (i16x8, i8x16, _mm_packs_epi16)
#undef _
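i16x8_pack narrows two i16x8 vectors into one i8x16 with signed saturation (_mm_packs_epi16); the lo argument supplies the low eight bytes of the result. A hypothetical sketch:

static_always_inline i8x16
narrow_i16_to_i8 (i16x8 lo, i16x8 hi)
{
  /* values outside [-128, 127] saturate */
  return i16x8_pack (lo, hi);
}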
#define _signed_binop(n,m,f,g) \
always_inline u##n##x##m \
u##n##x##m##_##f (u##n##x##m x, u##n##x##m y) \
{ return (u##n##x##m) _mm_##g##n ((__m128i) x, (__m128i) y); } \
\
always_inline i##n##x##m \
i##n##x##m##_##f (i##n##x##m x, i##n##x##m y) \
{ return (i##n##x##m) _mm_##g##n ((__m128i) x, (__m128i) y); }

/* Addition/subtraction with saturation. */
_signed_binop (8, 16, add_saturate, adds_epu)
_signed_binop (16, 8, add_saturate, adds_epu)
_signed_binop (8, 16, sub_saturate, subs_epu)
_signed_binop (16, 8, sub_saturate, subs_epu)
always_inline i16x8
i16x8_mul_lo (i16x8 x, i16x8 y)
{
  return (i16x8) _mm_mullo_epi16 ((__m128i) x, (__m128i) y);
}

always_inline u16x8
u16x8_mul_lo (u16x8 x, u16x8 y)
{
  return (u16x8) _mm_mullo_epi16 ((__m128i) x, (__m128i) y);
}

always_inline i16x8
i16x8_mul_hi (i16x8 x, i16x8 y)
{
  /* high 16 bits of each signed 16x16 product */
  return (i16x8) _mm_mulhi_epi16 ((__m128i) x, (__m128i) y);
}

always_inline u16x8
u16x8_mul_hi (u16x8 x, u16x8 y)
{
  /* high 16 bits of each unsigned 16x16 product */
  return (u16x8) _mm_mulhi_epu16 ((__m128i) x, (__m128i) y);
}
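mul_lo returns the low 16 bits of each 16 x 16 product and mul_hi the high 16 bits, so the pair reconstructs the full 32-bit product. A hypothetical sketch using GCC vector-extension lane indexing:

static_always_inline u32
full_product_lane0 (u16x8 x, u16x8 y)
{
  u16x8 lo = u16x8_mul_lo (x, y);
  u16x8 hi = u16x8_mul_hi (x, y);
  return (u32) lo[0] | ((u32) hi[0] << 16);
}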
/* 128 bit shifts. */
#define _(p,a,b,c,f) \
always_inline p##a##x##b p##a##x##b##_ishift_##c (p##a##x##b x, int i) \
{ return (p##a##x##b) _mm_##f##i_epi##a ((__m128i) x, i); } \
\
always_inline p##a##x##b p##a##x##b##_shift_##c (p##a##x##b x, p##a##x##b y) \
{ return (p##a##x##b) _mm_##f##_epi##a ((__m128i) x, (__m128i) y); }

_(u, 16, 8, left, sll)
_(u, 32, 4, left, sll)
_(u, 64, 2, left, sll)
_(u, 16, 8, right, srl)
_(u, 32, 4, right, srl)
_(u, 64, 2, right, srl)
_(i, 16, 8, left, sll)
_(i, 32, 4, left, sll)
_(i, 64, 2, left, sll)
_(i, 16, 8, right, sra)
_(i, 32, 4, right, sra)

#undef _
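Each expansion generates an immediate form (ishift, scalar count) and a vector form (shift, count taken from the low 64 bits of the second operand). A hypothetical sketch:

static_always_inline u32x4
times8 (u32x4 v)
{
  /* shift every u32 lane left by 3 bits, i.e. multiply by 8 */
  return u32x4_ishift_left (v, 3);
}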
/* 64 bit shifts. */
always_inline u16x4
u16x4_shift_left (u16x4 x, u16x4 i)
{
  return (u16x4) _m_psllw ((__m64) x, (__m64) i);
}

always_inline u32x2
u32x2_shift_left (u32x2 x, u32x2 i)
{
  return (u32x2) _m_pslld ((__m64) x, (__m64) i);
}

always_inline u16x4
u16x4_shift_right (u16x4 x, u16x4 i)
{
  return (u16x4) _m_psrlw ((__m64) x, (__m64) i);
}

always_inline u32x2
u32x2_shift_right (u32x2 x, u32x2 i)
{
  return (u32x2) _m_psrld ((__m64) x, (__m64) i);
}

always_inline i16x4
i16x4_shift_left (i16x4 x, i16x4 i)
{
  return (i16x4) _m_psllw ((__m64) x, (__m64) i);
}

always_inline i32x2
i32x2_shift_left (i32x2 x, i32x2 i)
{
  return (i32x2) _m_pslld ((__m64) x, (__m64) i);
}

always_inline i16x4
i16x4_shift_right (i16x4 x, i16x4 i)
{
  return (i16x4) _m_psraw ((__m64) x, (__m64) i);
}

always_inline i32x2
i32x2_shift_right (i32x2 x, i32x2 i)
{
  return (i32x2) _m_psrad ((__m64) x, (__m64) i);
}
#define u8x16_word_shift_left(a,n)  (u8x16) _mm_slli_si128((__m128i) a, n)
#define u8x16_word_shift_right(a,n) (u8x16) _mm_srli_si128((__m128i) a, n)

#define i8x16_word_shift_left(a,n) \
  ((i8x16) u8x16_word_shift_left((u8x16) (a), (n)))
#define i8x16_word_shift_right(a,n) \
  ((i8x16) u8x16_word_shift_right((u8x16) (a), (n)))

#define u16x8_word_shift_left(a,n) \
  ((u16x8) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u16)))
#define i16x8_word_shift_left(a,n) \
  ((i16x8) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u16)))
#define u16x8_word_shift_right(a,n) \
  ((u16x8) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u16)))
#define i16x8_word_shift_right(a,n) \
  ((i16x8) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u16)))

#define u32x4_word_shift_left(a,n) \
  ((u32x4) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u32)))
#define i32x4_word_shift_left(a,n) \
  ((i32x4) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u32)))
#define u32x4_word_shift_right(a,n) \
  ((u32x4) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u32)))
#define i32x4_word_shift_right(a,n) \
  ((i32x4) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u32)))

#define u64x2_word_shift_left(a,n) \
  ((u64x2) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u64)))
#define i64x2_word_shift_left(a,n) \
  ((i64x2) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u64)))
#define u64x2_word_shift_right(a,n) \
  ((u64x2) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u64)))
#define i64x2_word_shift_right(a,n) \
  ((i64x2) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u64)))
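These macros shift whole elements (not bits) toward lower or higher lanes with zero fill; n is an element count scaled to bytes for _mm_slli_si128/_mm_srli_si128, so it must be a compile-time constant. A hypothetical sketch:

static_always_inline u32x4
drop_lowest_lane (u32x4 v)
{
  /* {v0, v1, v2, v3} becomes {v1, v2, v3, 0} */
  return u32x4_word_shift_right (v, 1);
}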
/* Rotate shifts: rotate each lane by a bit count. */
#define _(t,n,lr1,lr2) \
always_inline t##x##n \
t##x##n##_irotate_##lr1 (t##x##n w, int i) \
{ \
  ASSERT (i >= 0 && i <= BITS (t)); \
  return (t##x##n##_ishift_##lr1 (w, i) \
	  | t##x##n##_ishift_##lr2 (w, BITS (t) - i)); \
} \
\
always_inline t##x##n \
t##x##n##_rotate_##lr1 (t##x##n w, t##x##n i) \
{ \
  t##x##n j = t##x##n##_splat (BITS (t)); \
  return (t##x##n##_shift_##lr1 (w, i) \
	  | t##x##n##_shift_##lr2 (w, j - i)); \
}

_(u16, 8, left, right);
_(u16, 8, right, left);
_(u32, 4, left, right);
_(u32, 4, right, left);
_(u64, 2, left, right);
_(u64, 2, right, left);

#undef _
/* Rotate by a whole-element (word) count across two vectors. */
#define _(t,n,lr1,lr2) \
always_inline t##x##n \
t##x##n##_word_rotate2_##lr1 (t##x##n w0, t##x##n w1, int i) \
{ \
  int m = sizeof (t##x##n) / sizeof (t); \
  ASSERT (i >= 0 && i < m); \
  return (t##x##n##_word_shift_##lr1 (w0, i) \
	  | t##x##n##_word_shift_##lr2 (w1, m - i)); \
} \
\
always_inline t##x##n \
t##x##n##_word_rotate_##lr1 (t##x##n w0, int i) \
{ return t##x##n##_word_rotate2_##lr1 (w0, w0, i); }

_(u8, 16, left, right);
_(u8, 16, right, left);
_(u16, 8, left, right);
_(u16, 8, right, left);
_(u32, 4, left, right);
_(u32, 4, right, left);
_(u64, 2, left, right);
_(u64, 2, right, left);

#undef _
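irotate rotates the bits of each lane by a scalar count; word_rotate2 rotates whole elements across the concatenation of two vectors. A hypothetical sketch, assuming the u32 expansion above:

static_always_inline u32x4
rotl8 (u32x4 v)
{
  /* rotate each u32 lane left by 8 bits */
  return u32x4_irotate_left (v, 8);
}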
#define u32x4_select(A,MASK) \
({ \
  u32x4 _x, _y; \
  _x = (A); \
  asm volatile ("pshufd %[mask], %[x], %[y]" \
		: /* outputs */ [y] "=x" (_y) \
		: /* inputs */ [x] "x" (_x), [mask] "i" (MASK)); \
  _y; \
})

#define u32x4_splat_word(x,i) \
  u32x4_select ((x), (((i) << (2*0)) \
		      | ((i) << (2*1)) \
		      | ((i) << (2*2)) \
		      | ((i) << (2*3))))
always_inline u32
u32x4_get0 (u32x4 x)
{
  u32 result;
  asm volatile ("movd %[x], %[result]"
		: /* outputs */ [result] "=r" (result)
		: /* inputs */ [x] "x" (x));
  return result;
}

always_inline u32x4
u32x4_set0 (u32 x)
{
  u32x4 result;
  asm volatile ("movd %[x], %[result]"
		: /* outputs */ [result] "=x" (result)
		: /* inputs */ [x] "r" (x));
  return result;
}

always_inline i32x4
i32x4_set0 (i32 x)
{
  return (i32x4) u32x4_set0 ((u32) x);
}

always_inline i32
i32x4_get0 (i32x4 x)
{
  return (i32) u32x4_get0 ((u32x4) x);
}
always_inline u32
u8x16_compare_byte_mask (u8x16 x)
{
  return _mm_movemask_epi8 ((__m128i) x);
}
always_inline u8
u8x16_min_scalar (u8x16 x)
{
  x = u8x16_min (x, u8x16_word_shift_right (x, 8));
  x = u8x16_min (x, u8x16_word_shift_right (x, 4));
  x = u8x16_min (x, u8x16_word_shift_right (x, 2));
  x = u8x16_min (x, u8x16_word_shift_right (x, 1));
  return _mm_extract_epi16 ((__m128i) x, 0) & 0xff;
}

always_inline u8
u8x16_max_scalar (u8x16 x)
{
  x = u8x16_max (x, u8x16_word_shift_right (x, 8));
  x = u8x16_max (x, u8x16_word_shift_right (x, 4));
  x = u8x16_max (x, u8x16_word_shift_right (x, 2));
  x = u8x16_max (x, u8x16_word_shift_right (x, 1));
  return _mm_extract_epi16 ((__m128i) x, 0) & 0xff;
}

always_inline i16
i16x8_min_scalar (i16x8 x)
{
  x = i16x8_min (x, i16x8_word_shift_right (x, 4));
  x = i16x8_min (x, i16x8_word_shift_right (x, 2));
  x = i16x8_min (x, i16x8_word_shift_right (x, 1));
  return _mm_extract_epi16 ((__m128i) x, 0);
}

always_inline i16
i16x8_max_scalar (i16x8 x)
{
  x = i16x8_max (x, i16x8_word_shift_right (x, 4));
  x = i16x8_max (x, i16x8_word_shift_right (x, 2));
  x = i16x8_max (x, i16x8_word_shift_right (x, 1));
  return _mm_extract_epi16 ((__m128i) x, 0);
}
#define u8x16_align_right(a, b, imm) \
  (u8x16) _mm_alignr_epi8 ((__m128i) a, (__m128i) b, imm)
static_always_inline u16
u8x16_msb_mask (u8x16 v)
{
  return _mm_movemask_epi8 ((__m128i) v);
}

static_always_inline u16
i8x16_msb_mask (i8x16 v)
{
  return _mm_movemask_epi8 ((__m128i) v);
}

#define CLIB_HAVE_VEC128_MSB_MASK
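msb_mask packs the most significant bit of each byte into a 16-bit scalar (pmovmskb), which turns a lane-wise compare into a searchable bitmap. A hypothetical sketch; count_trailing_zeros is assumed from vppinfra:

static_always_inline int
first_match (u8x16 v, u8 c)
{
  /* bytes equal to c compare to 0xff, so their MSBs land in mask */
  u16 mask = u8x16_msb_mask ((u8x16) (v == u8x16_splat (c)));
  return mask ? count_trailing_zeros (mask) : -1;
}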
static_always_inline u32x4
u32x4_byte_swap (u32x4 v)
{
  u8x16 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x4) _mm_shuffle_epi8 ((__m128i) v, (__m128i) swap);
}

static_always_inline u16x8
u16x8_byte_swap (u16x8 v)
{
  u8x16 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x8) _mm_shuffle_epi8 ((__m128i) v, (__m128i) swap);
}

static_always_inline u8x16
u8x16_reflect (u8x16 v)
{
  u8x16 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
  };
  return (u8x16) _mm_shuffle_epi8 ((__m128i) v, (__m128i) mask);
}
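u16x8_byte_swap swaps the two bytes of every u16 lane, i.e. a vectorized ntohs/htons over eight values. A hypothetical sketch:

static_always_inline u16x8
ntohs_x8 (u16x8 v)
{
  /* big-endian (network order) to little-endian host order */
  return u16x8_byte_swap (v);
}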
static_always_inline u32x4
u32x4_hadd (u32x4 v1, u32x4 v2)
{
  return (u32x4) _mm_hadd_epi32 ((__m128i) v1, (__m128i) v2);
}
static_always_inline u8x16
u8x16_shuffle (u8x16 v, u8x16 m)
{
  return (u8x16) _mm_shuffle_epi8 ((__m128i) v, (__m128i) m);
}
static_always_inline u32x4
u32x4_shuffle (u32x4 v, const int a, const int b, const int c, const int d)
{
#if defined(__clang__) || !__OPTIMIZE__
  u32x4 r = { v[a], v[b], v[c], v[d] };
  return r;
#else
  return (u32x4) _mm_shuffle_epi32 ((__m128i) v,
				    a | b << 2 | c << 4 | d << 6);
#endif
}
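u32x4_shuffle selects lanes of v by index; on the _mm_shuffle_epi32 path the indices must be compile-time constants. A hypothetical sketch:

static_always_inline u32x4
reverse_lanes (u32x4 v)
{
  /* {v0, v1, v2, v3} becomes {v3, v2, v1, v0} */
  return u32x4_shuffle (v, 3, 2, 1, 0);
}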
/* Zero- and sign-extending conversions to wider element types. */
#define _(f,t,i) \
static_always_inline t \
f##_extend_to_##t (f x) \
{ return (t) _mm_cvt##i ((__m128i) x); }

_(u8x16, u16x8, epu8_epi16)
_(u8x16, u32x4, epu8_epi32)
_(u8x16, u64x2, epu8_epi64)
_(u16x8, u32x4, epu16_epi32)
_(u16x8, u64x2, epu16_epi64)
_(u32x4, u64x2, epu32_epi64)
_(i8x16, i16x8, epi8_epi16)
_(i8x16, i32x4, epi8_epi32)
_(i8x16, i64x2, epi8_epi64)
_(i16x8, i32x4, epi16_epi32)
_(i16x8, i64x2, epi16_epi64)
_(i32x4, i64x2, epi32_epi64)
#undef _
static_always_inline u8x16
u8x16_is_greater (u8x16 v1, u8x16 v2)
{
  return (u8x16) _mm_cmpgt_epi8 ((__m128i) v1, (__m128i) v2);
}
static_always_inline u8x16
u8x16_blend (u8x16 v1, u8x16 v2, u8x16 mask)
{
  return (u8x16) _mm_blendv_epi8 ((__m128i) v1, (__m128i) v2, (__m128i) mask);
}
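u8x16_blend selects per byte: where the mask byte's MSB is set the result byte comes from v2, otherwise from v1. Combined with u8x16_is_greater this gives a branch-free select; a hypothetical sketch (u8x16_max above does the same in one instruction, this only illustrates the pattern):

static_always_inline i8x16
select_greater (i8x16 a, i8x16 b)
{
  /* _mm_cmpgt_epi8 compares signed bytes, so the casts only satisfy
     the u8x16 parameter types */
  u8x16 gt = u8x16_is_greater ((u8x16) a, (u8x16) b);
  /* lanes where a > b take a (v2), the rest take b (v1) */
  return (i8x16) u8x16_blend ((u8x16) b, (u8x16) a, gt);
}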
static_always_inline u8x16
u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
{
#if __AVX512F__
  return (u8x16) _mm_ternarylogic_epi32 ((__m128i) a, (__m128i) b,
					 (__m128i) c, 0x96);
#endif
  return a ^ b ^ c;
}

#endif /* included_vector_sse2_h */