FD.io VPP  v21.10.1-2-g0a485f517
Vector Packet Processing
vector_avx2.h
/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx2_h
#define included_vector_avx2_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

/* *INDENT-OFF* */
#define foreach_avx2_vec256i \
  _(i,8,32,epi8) _(i,16,16,epi16) _(i,32,8,epi32) _(i,64,4,epi64)
#define foreach_avx2_vec256u \
  _(u,8,32,epi8) _(u,16,16,epi16) _(u,32,8,epi32) _(u,64,4,epi64)
#define foreach_avx2_vec256f \
  _(f,32,8,ps) _(f,64,4,pd)

#define _mm256_set1_epi64 _mm256_set1_epi64x
/* splat, load_unaligned, store_unaligned, is_all_zero, is_equal,
   is_all_equal, interleave_lo, interleave_hi */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm256_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm256_loadu_si256 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm256_storeu_si256 ((__m256i *) p, (__m256i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c x) \
{ return _mm256_testz_si256 ((__m256i) x, (__m256i) x); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm256_unpacklo_##i ((__m256i) a, (__m256i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm256_unpackhi_##i ((__m256i) a, (__m256i) b); } \


foreach_avx2_vec256i foreach_avx2_vec256u
#undef _
/* *INDENT-ON* */

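/* Permute the eight 32-bit elements of v across the full 256-bit register
   using the per-element indices in idx (VPERMD). */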
always_inline u32x8
u32x8_permute (u32x8 v, u32x8 idx)
{
  return (u32x8) _mm256_permutevar8x32_epi32 ((__m256i) v, (__m256i) idx);
}

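/* Permute the four 64-bit elements of v by the compile-time constant
   indices m0..m3 (VPERMQ). */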
#define u64x4_permute(v, m0, m1, m2, m3) \
  (u64x4) _mm256_permute4x64_epi64 ( \
    (__m256i) v, ((m0) | (m1) << 2 | (m2) << 4 | (m3) << 6))

/* _extract_lo, _extract_hi, _insert_lo, _insert_hi */
/* *INDENT-OFF* */
#define _(t1,t2) \
always_inline t1 \
t2##_extract_lo (t2 v) \
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 0); } \
\
always_inline t1 \
t2##_extract_hi (t2 v) \
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 1); } \
\
always_inline t2 \
t2##_insert_lo (t2 v1, t1 v2) \
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 0); } \
\
always_inline t2 \
t2##_insert_hi (t2 v1, t1 v2) \
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 1); } \

_(u8x16, u8x32)
_(u16x8, u16x16)
_(u32x4, u32x8)
_(u64x2, u64x4)
#undef _
/* *INDENT-ON* */

/* 256 bit packs. */
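/* Note: the AVX2 pack instructions saturate and pack within each 128-bit
   lane, so the result holds lo.lane0, hi.lane0, lo.lane1, hi.lane1. */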
#define _(f, t, fn) \
  always_inline t t##_pack (f lo, f hi) \
  { \
    return (t) fn ((__m256i) lo, (__m256i) hi); \
  }

_ (i16x16, i8x32, _mm256_packs_epi16)
_ (i16x16, u8x32, _mm256_packus_epi16)
_ (i32x8, i16x16, _mm256_packs_epi32)
_ (i32x8, u16x16, _mm256_packus_epi32)

#undef _

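/* Gather the most significant bit of each byte into a 32-bit mask
   (VPMOVMSKB). */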
static_always_inline u32
u8x32_msb_mask (u8x32 v)
{
  return _mm256_movemask_epi8 ((__m256i) v);
}

static_always_inline u32
i8x32_msb_mask (i8x32 v)
{
  return _mm256_movemask_epi8 ((__m256i) v);
}

/* _from_ */
/* *INDENT-OFF* */
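/* Widening conversions from the low 128 bits: zero-extend for unsigned
   (epu*) sources, sign-extend for signed (epi*) sources. */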
#define _(f,t,i) \
static_always_inline t \
t##_from_##f (f x) \
{ return (t) _mm256_cvt##i ((__m128i) x); }

_(u16x8, u32x8, epu16_epi32)
_(u16x8, u64x4, epu16_epi64)
_(u32x4, u64x4, epu32_epi64)
_(u8x16, u16x16, epu8_epi16)
_(u8x16, u32x8, epu8_epi32)
_(u8x16, u64x4, epu8_epi64)
_(i16x8, i32x8, epi16_epi32)
_(i16x8, i64x4, epi16_epi64)
_(i32x4, i64x4, epi32_epi64)
_(i8x16, i16x16, epi8_epi16)
_(i8x16, i32x8, epi8_epi32)
_(i8x16, i64x4, epi8_epi64)
#undef _
/* *INDENT-ON* */

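/* Byte-swap each element (64-, 32- and 16-bit variants below), implemented
   with a byte shuffle (VPSHUFB). */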
static_always_inline u64x4
u64x4_byte_swap (u64x4 v)
{
  u8x32 swap = {
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
  };
  return (u64x4) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}

static_always_inline u32x8
u32x8_byte_swap (u32x8 v)
{
  u8x32 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x8) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}

static_always_inline u16x16
u16x16_byte_swap (u16x16 v)
{
  u8x32 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x16) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}

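/* Byte shuffle of v by the indices in m; note that VPSHUFB selects bytes
   independently within each 128-bit lane. */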
static_always_inline u8x32
u8x32_shuffle (u8x32 v, u8x32 m)
{
  return (u8x32) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) m);
}

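/* Concatenate a (high) with b (low) and shift right by imm bytes,
   independently within each 128-bit lane (VPALIGNR). */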
#define u8x32_align_right(a, b, imm) \
  (u8x32) _mm256_alignr_epi8 ((__m256i) a, (__m256i) b, imm)

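/* Horizontal sum of all eight 32-bit elements of sum8. */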
static_always_inline u32
u32x8_sum_elts (u32x8 sum8)
{
  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 8);
  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 4);
  return sum8[0] + sum8[4];
}

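/* Pairwise horizontal add of adjacent 32-bit elements, performed per
   128-bit lane (VPHADDD). */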
static_always_inline u32x8
u32x8_hadd (u32x8 v1, u32x8 v2)
{
  return (u32x8) _mm256_hadd_epi32 ((__m256i) v1, (__m256i) v2);
}

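/* Zero the last n_last 16-bit elements of v; n_last must be in 0..16. */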
static_always_inline u16x16
u16x16_mask_last (u16x16 v, u8 n_last)
{
  const u16x16 masks[17] = {
    {0},
    {-1},
    {-1, -1},
    {-1, -1, -1},
    {-1, -1, -1, -1},
    {-1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
  };

  ASSERT (n_last < 17);

  return v & masks[16 - n_last];
}

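/* Integer/float conversions; note that VCVTDQ2PS treats its input as signed
   32-bit integers, and the float-to-int conversion truncates toward zero
   (VCVTTPS2DQ). */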
static_always_inline f32x8
f32x8_from_u32x8 (u32x8 v)
{
  return (f32x8) _mm256_cvtepi32_ps ((__m256i) v);
}

static_always_inline u32x8
u32x8_from_f32x8 (f32x8 v)
{
  return (u32x8) _mm256_cvttps_epi32 ((__m256) v);
}

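/* Immediate blends of 32-bit / 16-bit elements: a set mask bit selects the
   element from the second operand, otherwise from the first
   (VPBLENDD, VPBLENDW). */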
#define u32x8_blend(a,b,m) \
  (u32x8) _mm256_blend_epi32 ((__m256i) a, (__m256i) b, m)

#define u16x16_blend(v1, v2, mask) \
  (u16x16) _mm256_blend_epi16 ((__m256i) (v1), (__m256i) (v2), mask)

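/* Gather from four (eight) arbitrary pointers using scalar loads; the
   scatter variants below store element by element. */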
static_always_inline u64x4
u64x4_gather (void *p0, void *p1, void *p2, void *p3)
{
  u64x4 r = {
    *(u64 *) p0, *(u64 *) p1, *(u64 *) p2, *(u64 *) p3
  };
  return r;
}

static_always_inline u32x8
u32x8_gather (void *p0, void *p1, void *p2, void *p3, void *p4, void *p5,
	      void *p6, void *p7)
{
  u32x8 r = {
    *(u32 *) p0, *(u32 *) p1, *(u32 *) p2, *(u32 *) p3,
    *(u32 *) p4, *(u32 *) p5, *(u32 *) p6, *(u32 *) p7,
  };
  return r;
}

static_always_inline void
u64x4_scatter (u64x4 r, void *p0, void *p1, void *p2, void *p3)
{
  *(u64 *) p0 = r[0];
  *(u64 *) p1 = r[1];
  *(u64 *) p2 = r[2];
  *(u64 *) p3 = r[3];
}

static_always_inline void
u32x8_scatter (u32x8 r, void *p0, void *p1, void *p2, void *p3, void *p4,
	       void *p5, void *p6, void *p7)
{
  *(u32 *) p0 = r[0];
  *(u32 *) p1 = r[1];
  *(u32 *) p2 = r[2];
  *(u32 *) p3 = r[3];
  *(u32 *) p4 = r[4];
  *(u32 *) p5 = r[5];
  *(u32 *) p6 = r[6];
  *(u32 *) p7 = r[7];
}

static_always_inline void
u64x4_scatter_one (u64x4 r, int index, void *p)
{
  *(u64 *) p = r[index];
}

static_always_inline void
u32x8_scatter_one (u32x8 r, int index, void *p)
{
  *(u32 *) p = r[index];
}

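/* Per-byte greater-than compare; note that VPCMPGTB performs a signed
   comparison even though the operands are typed u8x32. */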
static_always_inline u8x32
u8x32_is_greater (u8x32 v1, u8x32 v2)
{
  return (u8x32) _mm256_cmpgt_epi8 ((__m256i) v1, (__m256i) v2);
}

static_always_inline u8x32
u8x32_blend (u8x32 v1, u8x32 v2, u8x32 mask)
{
  return (u8x32) _mm256_blendv_epi8 ((__m256i) v1, (__m256i) v2,
				     (__m256i) mask);
}

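/* Select two 128-bit lanes from the concatenation of a and b according to
   the immediate m (VPERM2I128). */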
#define u32x8_permute_lanes(a, b, m) \
  (u32x8) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)
#define u64x4_permute_lanes(a, b, m) \
  (u64x4) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)

static_always_inline u32x8
u32x8_min (u32x8 a, u32x8 b)
{
  return (u32x8) _mm256_min_epu32 ((__m256i) a, (__m256i) b);
}

static_always_inline u32
u32x8_min_scalar (u32x8 v)
{
  return u32x4_min_scalar (u32x4_min (u32x8_extract_lo (v),
				      u32x8_extract_hi (v)));
}

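/* In-place transpose of an 8x8 matrix of 32-bit elements held in eight
   u32x8 rows. */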
static_always_inline void
u32x8_transpose (u32x8 a[8])
{
  u64x4 r[8], x, y;

  r[0] = (u64x4) u32x8_interleave_lo (a[0], a[1]);
  r[1] = (u64x4) u32x8_interleave_hi (a[0], a[1]);
  r[2] = (u64x4) u32x8_interleave_lo (a[2], a[3]);
  r[3] = (u64x4) u32x8_interleave_hi (a[2], a[3]);
  r[4] = (u64x4) u32x8_interleave_lo (a[4], a[5]);
  r[5] = (u64x4) u32x8_interleave_hi (a[4], a[5]);
  r[6] = (u64x4) u32x8_interleave_lo (a[6], a[7]);
  r[7] = (u64x4) u32x8_interleave_hi (a[6], a[7]);

  x = u64x4_interleave_lo (r[0], r[2]);
  y = u64x4_interleave_lo (r[4], r[6]);
  a[0] = u32x8_permute_lanes (x, y, 0x20);
  a[4] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_hi (r[0], r[2]);
  y = u64x4_interleave_hi (r[4], r[6]);
  a[1] = u32x8_permute_lanes (x, y, 0x20);
  a[5] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_lo (r[1], r[3]);
  y = u64x4_interleave_lo (r[5], r[7]);
  a[2] = u32x8_permute_lanes (x, y, 0x20);
  a[6] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_hi (r[1], r[3]);
  y = u64x4_interleave_hi (r[5], r[7]);
  a[3] = u32x8_permute_lanes (x, y, 0x20);
  a[7] = u32x8_permute_lanes (x, y, 0x31);
}

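/* In-place transpose of a 4x4 matrix of 64-bit elements; only a[0]..a[3]
   are used. */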
static_always_inline void
u64x4_transpose (u64x4 a[8])
{
  u64x4 r[4];

  r[0] = u64x4_interleave_lo (a[0], a[1]);
  r[1] = u64x4_interleave_hi (a[0], a[1]);
  r[2] = u64x4_interleave_lo (a[2], a[3]);
  r[3] = u64x4_interleave_hi (a[2], a[3]);

  a[0] = u64x4_permute_lanes (r[0], r[2], 0x20);
  a[1] = u64x4_permute_lanes (r[1], r[3], 0x20);
  a[2] = u64x4_permute_lanes (r[0], r[2], 0x31);
  a[3] = u64x4_permute_lanes (r[1], r[3], 0x31);
}

#endif /* included_vector_avx2_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */