FD.io VPP v21.10.1-2-g0a485f517 — Vector Packet Processing
vector_sse42.h
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2005 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef included_vector_sse2_h
#define included_vector_sse2_h

#include <vppinfra/error_bootstrap.h> /* for ASSERT */
#include <x86intrin.h>

/* *INDENT-OFF* */
#define foreach_sse42_vec128i \
  _(i,8,16,epi8) _(i,16,8,epi16) _(i,32,4,epi32) _(i,64,2,epi64x)
#define foreach_sse42_vec128u \
  _(u,8,16,epi8) _(u,16,8,epi16) _(u,32,4,epi32) _(u,64,2,epi64x)
#define foreach_sse42_vec128f \
  _(f,32,4,ps) _(f,64,2,pd)

/* splat, load_unaligned, store_unaligned, is_all_zero, is_equal,
   is_all_equal */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm_loadu_si128 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm_storeu_si128 ((__m128i *) p, (__m128i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c x) \
{ return _mm_testz_si128 ((__m128i) x, (__m128i) x); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); }; \

foreach_sse42_vec128i foreach_sse42_vec128u
#undef _
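
/* Example (illustrative; editor's sketch with hypothetical values):
     u32x4 v = u32x4_splat (7);            // {7, 7, 7, 7}
     u32x4 w = u32x4_load_unaligned (p);   // p needs no alignment
     u32x4_is_all_zero (v ^ v);            // 1: XOR with self is all zeroes
     u32x4_is_all_equal (v, 7);            // 1: every lane equals 7
 */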

/* min, max; note that the 64-bit variants expand to _mm_{min,max}_ep[iu]64,
   which require AVX-512VL. */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_min (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm_min_##i ((__m128i) a, (__m128i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_max (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm_max_##i ((__m128i) a, (__m128i) b); } \

_(i,8,16,epi8) _(i,16,8,epi16) _(i,32,4,epi32) _(i,64,2,epi64)
_(u,8,16,epu8) _(u,16,8,epu16) _(u,32,4,epu32) _(u,64,2,epu64)
#undef _
/* *INDENT-ON* */
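
/* Example (illustrative): min/max respect each type's signedness, e.g. a
   lane holding 200 compares as 200 for u8x16_max but as -56 for i8x16_max. */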

#define CLIB_VEC128_SPLAT_DEFINED
#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE

/* 128 bit interleaves. */
always_inline u8x16
u8x16_interleave_hi (u8x16 a, u8x16 b)
{
  return (u8x16) _mm_unpackhi_epi8 ((__m128i) a, (__m128i) b);
}

always_inline u8x16
u8x16_interleave_lo (u8x16 a, u8x16 b)
{
  return (u8x16) _mm_unpacklo_epi8 ((__m128i) a, (__m128i) b);
}

always_inline u16x8
u16x8_interleave_hi (u16x8 a, u16x8 b)
{
  return (u16x8) _mm_unpackhi_epi16 ((__m128i) a, (__m128i) b);
}

always_inline u16x8
u16x8_interleave_lo (u16x8 a, u16x8 b)
{
  return (u16x8) _mm_unpacklo_epi16 ((__m128i) a, (__m128i) b);
}

always_inline u32x4
u32x4_interleave_hi (u32x4 a, u32x4 b)
{
  return (u32x4) _mm_unpackhi_epi32 ((__m128i) a, (__m128i) b);
}

always_inline u32x4
u32x4_interleave_lo (u32x4 a, u32x4 b)
{
  return (u32x4) _mm_unpacklo_epi32 ((__m128i) a, (__m128i) b);
}

always_inline u64x2
u64x2_interleave_hi (u64x2 a, u64x2 b)
{
  return (u64x2) _mm_unpackhi_epi64 ((__m128i) a, (__m128i) b);
}

always_inline u64x2
u64x2_interleave_lo (u64x2 a, u64x2 b)
{
  return (u64x2) _mm_unpacklo_epi64 ((__m128i) a, (__m128i) b);
}

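/* Worked example (illustrative): with a = {0,1,...,15}, b = {16,17,...,31},
     u8x16_interleave_lo (a, b) => {0,16, 1,17, 2,18, 3,19, 4,20, 5,21, 6,22, 7,23}
     u8x16_interleave_hi (a, b) => {8,24, 9,25, 10,26, 11,27, 12,28, 13,29, 14,30, 15,31}
 */
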
/* 128 bit packs. */
#define _(f, t, fn) \
  always_inline t t##_pack (f lo, f hi) \
  { \
    return (t) fn ((__m128i) lo, (__m128i) hi); \
  }

_ (i16x8, i8x16, _mm_packs_epi16)
_ (i16x8, u8x16, _mm_packus_epi16)
_ (i32x4, i16x8, _mm_packs_epi32)
_ (i32x4, u16x8, _mm_packus_epi32)

#undef _
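
/* Example (illustrative): packs narrow with saturation; 'lo' supplies the
   low half of the result and 'hi' the high half.  With
     i16x8 v = {-300, -1, 0, 127, 128, 300, 5, 6};
     i8x16_pack (v, v) clamps to [-128,127]: low half {-128, -1, 0, 127, 127, 127, 5, 6}
     u8x16_pack (v, v) clamps to [0,255]:    low half {0, 0, 0, 127, 128, 255, 5, 6}
 */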

#define _signed_binop(n,m,f,g) \
  /* Unsigned */ \
  always_inline u##n##x##m \
  u##n##x##m##_##f (u##n##x##m x, u##n##x##m y) \
  { return (u##n##x##m) _mm_##g##_epu##n ((__m128i) x, (__m128i) y); } \
  \
  /* Signed */ \
  always_inline i##n##x##m \
  i##n##x##m##_##f (i##n##x##m x, i##n##x##m y) \
  { return (i##n##x##m) _mm_##g##_epi##n ((__m128i) x, (__m128i) y); }
/* Addition/subtraction with saturation; the unsigned forms use the epu
   intrinsics and the signed forms the epi ones. */
_signed_binop (8, 16, add_saturate, adds)
_signed_binop (16, 8, add_saturate, adds)
_signed_binop (8, 16, sub_saturate, subs)
_signed_binop (16, 8, sub_saturate, subs)
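
/* Example (illustrative): saturating arithmetic clamps instead of wrapping:
     u8x16_add_saturate (u8x16_splat (250), u8x16_splat (10))  => all 255
     u8x16_sub_saturate (u8x16_splat (5), u8x16_splat (10))    => all 0
     i8x16_add_saturate (i8x16_splat (120), i8x16_splat (20))  => all 127
 */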
/* Multiplication. */
always_inline i16x8
i16x8_mul_lo (i16x8 x, i16x8 y)
{
  return (i16x8) _mm_mullo_epi16 ((__m128i) x, (__m128i) y);
}

always_inline u16x8
u16x8_mul_lo (u16x8 x, u16x8 y)
{
  return (u16x8) _mm_mullo_epi16 ((__m128i) x, (__m128i) y);
}

always_inline i16x8
i16x8_mul_hi (i16x8 x, i16x8 y)
{
  return (i16x8) _mm_mulhi_epi16 ((__m128i) x, (__m128i) y);
}

always_inline u16x8
u16x8_mul_hi (u16x8 x, u16x8 y)
{
  return (u16x8) _mm_mulhi_epu16 ((__m128i) x, (__m128i) y);
}
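
/* Example (illustrative): each 16x16 -> 32 bit product is split across the
   two variants, e.g. for 300 * 300 = 90000 (0x15f90):
     u16x8_mul_lo (u16x8_splat (300), u16x8_splat (300)) => all 0x5f90
     u16x8_mul_hi (u16x8_splat (300), u16x8_splat (300)) => all 0x0001
 */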

/* 128 bit shifts. */

#define _(p,a,b,c,f) \
  always_inline p##a##x##b p##a##x##b##_ishift_##c (p##a##x##b x, int i) \
  { return (p##a##x##b) _mm_##f##i_epi##a ((__m128i) x, i); } \
  \
  always_inline p##a##x##b p##a##x##b##_shift_##c (p##a##x##b x, p##a##x##b y) \
  { return (p##a##x##b) _mm_##f##_epi##a ((__m128i) x, (__m128i) y); }

_(u, 16, 8, left, sll)
_(u, 32, 4, left, sll)
_(u, 64, 2, left, sll)
_(u, 16, 8, right, srl)
_(u, 32, 4, right, srl)
_(u, 64, 2, right, srl)
_(i, 16, 8, left, sll)
_(i, 32, 4, left, sll)
_(i, 64, 2, left, sll)
_(i, 16, 8, right, sra)
_(i, 32, 4, right, sra)
#undef _
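
/* Example (illustrative): _ishift_ takes an immediate count; _shift_ takes
   the count from the low 64 bits of a vector operand.  Signed right shifts
   are arithmetic (sra), unsigned ones logical (srl):
     u16x8_ishift_left (u16x8_splat (3), 4)     => all 48
     i16x8_ishift_right (i16x8_splat (-16), 2)  => all -4
 */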

#define u8x16_word_shift_left(a,n)  (u8x16) _mm_slli_si128((__m128i) a, n)
#define u8x16_word_shift_right(a,n) (u8x16) _mm_srli_si128((__m128i) a, n)

#define i8x16_word_shift_left(a,n) \
  ((i8x16) u8x16_word_shift_left((u8x16) (a), (n)))
#define i8x16_word_shift_right(a,n) \
  ((i8x16) u8x16_word_shift_right((u8x16) (a), (n)))

#define u16x8_word_shift_left(a,n) \
  ((u16x8) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u16)))
#define i16x8_word_shift_left(a,n) \
  ((i16x8) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u16)))
#define u16x8_word_shift_right(a,n) \
  ((u16x8) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u16)))
#define i16x8_word_shift_right(a,n) \
  ((i16x8) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u16)))

#define u32x4_word_shift_left(a,n) \
  ((u32x4) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u32)))
#define i32x4_word_shift_left(a,n) \
  ((i32x4) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u32)))
#define u32x4_word_shift_right(a,n) \
  ((u32x4) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u32)))
#define i32x4_word_shift_right(a,n) \
  ((i32x4) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u32)))

#define u64x2_word_shift_left(a,n) \
  ((u64x2) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u64)))
#define i64x2_word_shift_left(a,n) \
  ((i64x2) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u64)))
#define u64x2_word_shift_right(a,n) \
  ((u64x2) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u64)))
#define i64x2_word_shift_right(a,n) \
  ((i64x2) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u64)))
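
/* Example (illustrative): word shifts move whole lanes across the register;
   n is in lanes (bytes for the u8x16/i8x16 forms), and "left" moves data
   toward higher lane indices:
     u32x4 v = {1, 2, 3, 4};
     u32x4_word_shift_left (v, 1)   => {0, 1, 2, 3}
     u32x4_word_shift_right (v, 1)  => {2, 3, 4, 0}
 */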

/* SSE2 has no rotate instructions: use shifts to simulate them. */
#define _(t,n,lr1,lr2) \
  always_inline t##x##n \
  t##x##n##_irotate_##lr1 (t##x##n w, int i) \
  { \
    ASSERT (i >= 0 && i <= BITS (t)); \
    return (t##x##n##_ishift_##lr1 (w, i) \
            | t##x##n##_ishift_##lr2 (w, BITS (t) - i)); \
  } \
  \
  always_inline t##x##n \
  t##x##n##_rotate_##lr1 (t##x##n w, t##x##n i) \
  { \
    t##x##n j = t##x##n##_splat (BITS (t)); \
    return (t##x##n##_shift_##lr1 (w, i) \
            | t##x##n##_shift_##lr2 (w, j - i)); \
  }

_(u16, 8, left, right);
_(u16, 8, right, left);
_(u32, 4, left, right);
_(u32, 4, right, left);
_(u64, 2, left, right);
_(u64, 2, right, left);

#undef _
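
/* Example (illustrative): a rotate is composed as
   (w << i) | (w >> (BITS (t) - i)), so
     u16x8_irotate_left (u16x8_splat (0x8001), 1) => all 0x0003
 */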

always_inline u32
u8x16_max_scalar (u8x16 x)
{
  x = u8x16_max (x, u8x16_word_shift_right (x, 8));
  x = u8x16_max (x, u8x16_word_shift_right (x, 4));
  x = u8x16_max (x, u8x16_word_shift_right (x, 2));
  x = u8x16_max (x, u8x16_word_shift_right (x, 1));
  return _mm_extract_epi16 ((__m128i) x, 0) & 0xff;
}

always_inline u8
u8x16_min_scalar (u8x16 x)
{
  x = u8x16_min (x, u8x16_word_shift_right (x, 8));
  x = u8x16_min (x, u8x16_word_shift_right (x, 4));
  x = u8x16_min (x, u8x16_word_shift_right (x, 2));
  x = u8x16_min (x, u8x16_word_shift_right (x, 1));
  return _mm_extract_epi16 ((__m128i) x, 0) & 0xff;
}

always_inline i16
i16x8_max_scalar (i16x8 x)
{
  x = i16x8_max (x, i16x8_word_shift_right (x, 4));
  x = i16x8_max (x, i16x8_word_shift_right (x, 2));
  x = i16x8_max (x, i16x8_word_shift_right (x, 1));
  return _mm_extract_epi16 ((__m128i) x, 0);
}

always_inline i16
i16x8_min_scalar (i16x8 x)
{
  x = i16x8_min (x, i16x8_word_shift_right (x, 4));
  x = i16x8_min (x, i16x8_word_shift_right (x, 2));
  x = i16x8_min (x, i16x8_word_shift_right (x, 1));
  return _mm_extract_epi16 ((__m128i) x, 0);
}

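/* Note (editorial): the reductions above fold the vector onto itself in
   log2 steps, halving the live width each time, so after the final fold
   lane 0 holds the min/max of all 16 (or 8) lanes. */
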
#define u8x16_align_right(a, b, imm) \
  (u8x16) _mm_alignr_epi8 ((__m128i) a, (__m128i) b, imm)

static_always_inline u32
u32x4_min_scalar (u32x4 v)
{
  v = u32x4_min (v, (u32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 8));
  v = u32x4_min (v, (u32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 4));
  return v[0];
}

static_always_inline u32
u32x4_max_scalar (u32x4 v)
{
  v = u32x4_max (v, (u32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 8));
  v = u32x4_max (v, (u32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 4));
  return v[0];
}

static_always_inline i32
i32x4_min_scalar (i32x4 v)
{
  v = i32x4_min (v, (i32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 8));
  v = i32x4_min (v, (i32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 4));
  return v[0];
}

static_always_inline i32
i32x4_max_scalar (i32x4 v)
{
  v = i32x4_max (v, (i32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 8));
  v = i32x4_max (v, (i32x4) u8x16_align_right ((u8x16) v, (u8x16) v, 4));
  return v[0];
}
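
/* Worked example (illustrative) for u32x4_min_scalar:
     v = {4, 1, 3, 2}
     8-byte rotate + min: {min(4,3), min(1,2), ...} = {3, 1, ...}
     4-byte rotate + min: lane 0 = min(3, 1) = 1
     => u32x4_min_scalar (v) == 1
 */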

static_always_inline u16
u8x16_msb_mask (u8x16 v)
{
  return _mm_movemask_epi8 ((__m128i) v);
}

static_always_inline u16
i8x16_msb_mask (i8x16 v)
{
  return _mm_movemask_epi8 ((__m128i) v);
}

#define CLIB_HAVE_VEC128_MSB_MASK
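
/* Example (illustrative): msb_mask gathers each lane's top bit into a
   16-bit scalar, lane 0 mapping to bit 0:
     u8x16_msb_mask (u8x16_splat (0x80)) => 0xffff
     u8x16_msb_mask (u8x16_splat (0x7f)) => 0x0000
 */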

#undef _signed_binop

static_always_inline u32x4
u32x4_byte_swap (u32x4 v)
{
  u8x16 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x4) _mm_shuffle_epi8 ((__m128i) v, (__m128i) swap);
}

static_always_inline u16x8
u16x8_byte_swap (u16x8 v)
{
  u8x16 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x8) _mm_shuffle_epi8 ((__m128i) v, (__m128i) swap);
}

static_always_inline u8x16
u8x16_reflect (u8x16 v)
{
  u8x16 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
  };
  return (u8x16) _mm_shuffle_epi8 ((__m128i) v, (__m128i) mask);
}
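
/* Example (illustrative): the byte swaps reverse endianness within each
   lane, while reflect reverses the whole register:
     u16x8_byte_swap: each lane 0x1234 -> 0x3412
     u8x16_reflect ({0,1,...,15}) => {15,14,...,0}
 */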

static_always_inline u32x4
u32x4_hadd (u32x4 v1, u32x4 v2)
{
  return (u32x4) _mm_hadd_epi32 ((__m128i) v1, (__m128i) v2);
}

static_always_inline u32 __clib_unused
u32x4_sum_elts (u32x4 sum4)
{
  sum4 += (u32x4) u8x16_align_right (sum4, sum4, 8);
  sum4 += (u32x4) u8x16_align_right (sum4, sum4, 4);
  return sum4[0];
}
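
/* Worked example (illustrative) for u32x4_sum_elts ({1, 2, 3, 4}):
     after the 8-byte rotate-and-add: {4, 6, 4, 6}
     after the 4-byte rotate-and-add: {10, 10, 10, 10}
     => returns 10
 */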

static_always_inline u8x16
u8x16_shuffle (u8x16 v, u8x16 m)
{
  return (u8x16) _mm_shuffle_epi8 ((__m128i) v, (__m128i) m);
}

static_always_inline u32x4
u32x4_shuffle (u32x4 v, const int a, const int b, const int c, const int d)
{
#if defined(__clang__) || !__OPTIMIZE__
  u32x4 r = { v[a], v[b], v[c], v[d] };
  return r;
#else
  return (u32x4) _mm_shuffle_epi32 ((__m128i) v,
                                    a | b << 2 | c << 4 | d << 6);
#endif
}
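
/* Example (illustrative): u32x4_shuffle selects lanes by index, so
     u32x4_shuffle ({10, 20, 30, 40}, 3, 2, 1, 0) => {40, 30, 20, 10}
   For u8x16_shuffle, each byte of m picks the source byte of v with that
   index (pshufb); a mask byte with its top bit set produces 0. */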

/* _from_ */
/* *INDENT-OFF* */
#define _(f,t,i) \
static_always_inline t \
t##_from_##f (f x) \
{ return (t) _mm_cvt##i ((__m128i) x); }

_(u8x16, u16x8, epu8_epi16)
_(u8x16, u32x4, epu8_epi32)
_(u8x16, u64x2, epu8_epi64)
_(u16x8, u32x4, epu16_epi32)
_(u16x8, u64x2, epu16_epi64)
_(u32x4, u64x2, epu32_epi64)

_(i8x16, i16x8, epi8_epi16)
_(i8x16, i32x4, epi8_epi32)
_(i8x16, i64x2, epi8_epi64)
_(i16x8, i32x4, epi16_epi32)
_(i16x8, i64x2, epi16_epi64)
_(i32x4, i64x2, epi32_epi64)
#undef _
/* *INDENT-ON* */
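
/* Example (illustrative): the _from_ conversions widen the low lanes of the
   source, zero-extending the u variants and sign-extending the i ones:
     u16x8_from_u8x16 ({1, 2, ..., 16})   => {1, 2, 3, 4, 5, 6, 7, 8}
     i16x8_from_i8x16 (i8x16_splat (-1))  => all -1 (sign-extended)
 */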

static_always_inline u64x2
u64x2_gather (void *p0, void *p1)
{
  u64x2 r = { *(u64 *) p0, *(u64 *) p1 };
  return r;
}

static_always_inline u32x4
u32x4_gather (void *p0, void *p1, void *p2, void *p3)
{
  u32x4 r = { *(u32 *) p0, *(u32 *) p1, *(u32 *) p2, *(u32 *) p3 };
  return r;
}

static_always_inline void
u64x2_scatter (u64x2 r, void *p0, void *p1)
{
  *(u64 *) p0 = r[0];
  *(u64 *) p1 = r[1];
}

static_always_inline void
u32x4_scatter (u32x4 r, void *p0, void *p1, void *p2, void *p3)
{
  *(u32 *) p0 = r[0];
  *(u32 *) p1 = r[1];
  *(u32 *) p2 = r[2];
  *(u32 *) p3 = r[3];
}

static_always_inline void
u64x2_scatter_one (u64x2 r, int index, void *p)
{
  *(u64 *) p = r[index];
}

static_always_inline void
u32x4_scatter_one (u32x4 r, int index, void *p)
{
  *(u32 *) p = r[index];
}
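
/* Example (illustrative; hypothetical variables): gathers build a vector
   from arbitrary pointers and scatters write lanes back out:
     u32 a = 1, b = 2, c = 3, d = 4;
     u32x4 v = u32x4_gather (&a, &b, &c, &d);  // {1, 2, 3, 4}
     u32x4_scatter_one (v, 3, &a);             // a = 4
 */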

static_always_inline u8x16
u8x16_is_greater (u8x16 v1, u8x16 v2)
{
  return (u8x16) _mm_cmpgt_epi8 ((__m128i) v1, (__m128i) v2);
}

static_always_inline u8x16
u8x16_blend (u8x16 v1, u8x16 v2, u8x16 mask)
{
  return (u8x16) _mm_blendv_epi8 ((__m128i) v1, (__m128i) v2, (__m128i) mask);
}
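
/* Example (illustrative): is_greater uses a *signed* byte compare, and
   blend picks v2 where the mask byte's top bit is set, giving a
   branch-free select:
     u8x16 m = u8x16_is_greater (a, b);  // 0xff where (i8) a > (i8) b
     u8x16 r = u8x16_blend (b, a, m);    // per-lane signed max of a, b
 */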

static_always_inline u8x16
u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
{
#if __AVX512F__
  return (u8x16) _mm_ternarylogic_epi32 ((__m128i) a, (__m128i) b,
                                         (__m128i) c, 0x96);
#else
  return a ^ b ^ c;
#endif
}
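
/* Note (editorial): 0x96 is the 3-input truth table of a ^ b ^ c, so with
   AVX-512F a single vpternlog instruction replaces the two-XOR fallback. */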

#endif /* included_vector_sse2_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */