FD.io VPP v21.01.1 (Vector Packet Processing)
vector_avx2.h
/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx2_h
#define included_vector_avx2_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

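/* Per-type instantiation lists for the generic 256-bit wrappers defined
   below: each _() entry is (sign prefix, element bits, element count,
   intrinsic suffix). */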
/* *INDENT-OFF* */
#define foreach_avx2_vec256i \
  _(i,8,32,epi8) _(i,16,16,epi16) _(i,32,8,epi32) _(i,64,4,epi64)
#define foreach_avx2_vec256u \
  _(u,8,32,epi8) _(u,16,16,epi16) _(u,32,8,epi32) _(u,64,4,epi64)
#define foreach_avx2_vec256f \
  _(f,32,8,ps) _(f,64,4,pd)

#define _mm256_set1_epi64 _mm256_set1_epi64x

/* splat, load_unaligned, store_unaligned, is_all_zero, is_equal,
   is_all_equal, interleave_lo, interleave_hi */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm256_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm256_loadu_si256 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm256_storeu_si256 ((__m256i *) p, (__m256i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c x) \
{ return _mm256_testz_si256 ((__m256i) x, (__m256i) x); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm256_unpacklo_##i ((__m256i) a, (__m256i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm256_unpackhi_##i ((__m256i) a, (__m256i) b); }

foreach_avx2_vec256i foreach_avx2_vec256u
#undef _
/* *INDENT-ON* */
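/* The instantiation above generates, e.g., u32x8_splat, u32x8_is_all_equal,
   u32x8_store_unaligned, and so on for every listed type.  Illustrative
   sketch only (`dst' stands for any writable, possibly unaligned u32[8]
   buffer, not a name from this file):

     u32x8 v = u32x8_splat (7);
     if (u32x8_is_all_equal (v, 7))
       u32x8_store_unaligned (v, dst);
*/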

always_inline u32x8
u32x8_permute (u32x8 v, u32x8 idx)
{
  return (u32x8) _mm256_permutevar8x32_epi32 ((__m256i) v, (__m256i) idx);
}

/* _extract_lo, _extract_hi, _insert_lo, _insert_hi */
/* *INDENT-OFF* */
#define _(t1,t2) \
always_inline t1 \
t2##_extract_lo (t2 v) \
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 0); } \
\
always_inline t1 \
t2##_extract_hi (t2 v) \
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 1); } \
\
always_inline t2 \
t2##_insert_lo (t2 v1, t1 v2) \
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 0); } \
\
always_inline t2 \
t2##_insert_hi (t2 v1, t1 v2) \
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 1); }

_(u8x16, u8x32)
_(u16x8, u16x16)
_(u32x4, u32x8)
_(u64x2, u64x4)
#undef _
/* *INDENT-ON* */

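/* Gather the most significant bit of each of the 32 bytes of v into a
   32-bit mask (VPMOVMSKB). */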
static_always_inline u32
u8x32_msb_mask (u8x32 v)
{
  return _mm256_movemask_epi8 ((__m256i) v);
}

/* _from_ */
/* *INDENT-OFF* */
#define _(f,t,i) \
static_always_inline t \
t##_from_##f (f x) \
{ return (t) _mm256_cvt##i ((__m128i) x); }

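/* Widening conversions: t_from_f() zero-extends (epu* suffixes) or
   sign-extends (epi* suffixes) each element of the 128-bit source vector
   into the wider 256-bit destination type. */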
_(u16x8, u32x8, epu16_epi32)
_(u16x8, u64x4, epu16_epi64)
_(u32x4, u64x4, epu32_epi64)
_(u8x16, u16x16, epu8_epi16)
_(u8x16, u32x8, epu8_epi32)
_(u8x16, u64x4, epu8_epi64)
_(i16x8, i32x8, epi16_epi32)
_(i16x8, i64x4, epi16_epi64)
_(i32x4, i64x4, epi32_epi64)
_(i8x16, i16x16, epi8_epi16)
_(i8x16, i32x8, epi8_epi32)
_(i8x16, i64x4, epi8_epi64)
#undef _
/* *INDENT-ON* */

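/* Per-element byte swaps (endianness conversion), implemented as in-lane
   byte shuffles (VPSHUFB). */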
static_always_inline u64x4
u64x4_byte_swap (u64x4 v)
{
  u8x32 swap = {
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
  };
  return (u64x4) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}

static_always_inline u32x8
u32x8_byte_swap (u32x8 v)
{
  u8x32 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x8) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}

static_always_inline u16x16
u16x16_byte_swap (u16x16 v)
{
  u8x32 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x16) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}

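/* Arbitrary byte shuffle: each result byte is selected from the same
   128-bit lane of v by the low 4 bits of the corresponding byte of m; a
   byte of m with its MSB set yields 0 (VPSHUFB semantics). */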
static_always_inline u8x32
u8x32_shuffle (u8x32 v, u8x32 m)
{
  return (u8x32) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) m);
}

#define u8x32_align_right(a, b, imm) \
  (u8x32) _mm256_alignr_epi8 ((__m256i) a, (__m256i) b, imm)

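/* Horizontal sum of the eight 32-bit elements of sum8, computed with two
   in-lane byte rotations followed by a cross-lane scalar add. */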
static_always_inline u32
u32x8_sum_elts (u32x8 sum8)
{
  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 8);
  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 4);
  return sum8[0] + sum8[4];
}

static_always_inline u32x8
u32x8_hadd (u32x8 v1, u32x8 v2)
{
  return (u32x8) _mm256_hadd_epi32 ((__m256i) v1, (__m256i) v2);
}

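/* Zero the last n_last elements of v, keeping the first 16 - n_last.
   n_last must be in the range [0, 16]. */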
static_always_inline u16x16
u16x16_mask_last (u16x16 v, u8 n_last)
{
  const u16x16 masks[17] = {
    {0},
    {-1},
    {-1, -1},
    {-1, -1, -1},
    {-1, -1, -1, -1},
    {-1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
  };

  ASSERT (n_last < 17);

  return v & masks[16 - n_last];
}

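/* Byte-granular masked load: result bytes whose mask bit is clear keep the
   corresponding byte of a.  _mm256_mask_loadu_epi8 itself requires AVX512BW
   and AVX512VL support in the target. */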
#ifdef __AVX512F__
static_always_inline u8x32
u8x32_mask_load (u8x32 a, void *p, u32 mask)
{
  return (u8x32) _mm256_mask_loadu_epi8 ((__m256i) a, mask, p);
}
#endif

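/* int <-> float conversions: f32x8_from_u32x8 uses VCVTDQ2PS (which treats
   the input as signed 32-bit integers), u32x8_from_f32x8 truncates with
   VCVTTPS2DQ. */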
static_always_inline f32x8
f32x8_from_u32x8 (u32x8 v)
{
  return (f32x8) _mm256_cvtepi32_ps ((__m256i) v);
}

static_always_inline u32x8
u32x8_from_f32x8 (f32x8 v)
{
  return (u32x8) _mm256_cvttps_epi32 ((__m256) v);
}

#define u32x8_blend(a,b,m) \
  (u32x8) _mm256_blend_epi32 ((__m256i) a, (__m256i) b, m)

#define u16x16_blend(v1, v2, mask) \
  (u16x16) _mm256_blend_epi16 ((__m256i) (v1), (__m256i) (v2), mask)

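/* Software gather/scatter: elements are loaded and stored one at a time
   through the supplied pointers; no hardware gather instruction is used.
   Illustrative sketch only (b0..b3 stand for any four valid u64 locations,
   not names from this file):

     u64x4 v = u64x4_gather (b0, b1, b2, b3);
     u64x4_scatter (v, b0, b1, b2, b3);
*/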
static_always_inline u64x4
u64x4_gather (void *p0, void *p1, void *p2, void *p3)
{
  u64x4 r = {
    *(u64 *) p0, *(u64 *) p1, *(u64 *) p2, *(u64 *) p3
  };
  return r;
}

static_always_inline u32x8
u32x8_gather (void *p0, void *p1, void *p2, void *p3, void *p4, void *p5,
              void *p6, void *p7)
{
  u32x8 r = {
    *(u32 *) p0, *(u32 *) p1, *(u32 *) p2, *(u32 *) p3,
    *(u32 *) p4, *(u32 *) p5, *(u32 *) p6, *(u32 *) p7,
  };
  return r;
}

static_always_inline void
u64x4_scatter (u64x4 r, void *p0, void *p1, void *p2, void *p3)
{
  *(u64 *) p0 = r[0];
  *(u64 *) p1 = r[1];
  *(u64 *) p2 = r[2];
  *(u64 *) p3 = r[3];
}

static_always_inline void
u32x8_scatter (u32x8 r, void *p0, void *p1, void *p2, void *p3, void *p4,
               void *p5, void *p6, void *p7)
{
  *(u32 *) p0 = r[0];
  *(u32 *) p1 = r[1];
  *(u32 *) p2 = r[2];
  *(u32 *) p3 = r[3];
  *(u32 *) p4 = r[4];
  *(u32 *) p5 = r[5];
  *(u32 *) p6 = r[6];
  *(u32 *) p7 = r[7];
}

static_always_inline void
u64x4_scatter_one (u64x4 r, int index, void *p)
{
  *(u64 *) p = r[index];
}

static_always_inline void
u32x8_scatter_one (u32x8 r, int index, void *p)
{
  *(u32 *) p = r[index];
}

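/* Per-byte greater-than compare: each result byte is 0xff where the byte of
   v1 is greater than the byte of v2, else 0.  Note that VPCMPGTB compares
   bytes as signed values, even though the operands are declared u8x32. */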
static_always_inline u8x32
u8x32_is_greater (u8x32 v1, u8x32 v2)
{
  return (u8x32) _mm256_cmpgt_epi8 ((__m256i) v1, (__m256i) v2);
}

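/* Byte-wise select: returns v2 where the MSB of the mask byte is set, v1
   otherwise (VPBLENDVB). */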
static_always_inline u8x32
u8x32_blend (u8x32 v1, u8x32 v2, u8x32 mask)
{
  return (u8x32) _mm256_blendv_epi8 ((__m256i) v1, (__m256i) v2,
                                     (__m256i) mask);
}

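/* Select 128-bit lanes from a and b according to the immediate m
   (VPERM2I128); used below to recombine lanes during the transposes. */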
#define u32x8_permute_lanes(a, b, m) \
  (u32x8) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)
#define u64x4_permute_lanes(a, b, m) \
  (u64x4) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)

static_always_inline u32x8
u32x8_min (u32x8 a, u32x8 b)
{
  return (u32x8) _mm256_min_epu32 ((__m256i) a, (__m256i) b);
}

static_always_inline u32
u32x8_min_scalar (u32x8 v)
{
  return u32x4_min_scalar (u32x4_min (u32x8_extract_lo (v),
                                      u32x8_extract_hi (v)));
}

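/* In-place transpose of an 8x8 matrix of u32 stored as eight u32x8 rows:
   interleave 32-bit then 64-bit pairs, then recombine 128-bit lanes. */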
static_always_inline void
u32x8_transpose (u32x8 a[8])
{
  u64x4 r[8], x, y;

  r[0] = (u64x4) u32x8_interleave_lo (a[0], a[1]);
  r[1] = (u64x4) u32x8_interleave_hi (a[0], a[1]);
  r[2] = (u64x4) u32x8_interleave_lo (a[2], a[3]);
  r[3] = (u64x4) u32x8_interleave_hi (a[2], a[3]);
  r[4] = (u64x4) u32x8_interleave_lo (a[4], a[5]);
  r[5] = (u64x4) u32x8_interleave_hi (a[4], a[5]);
  r[6] = (u64x4) u32x8_interleave_lo (a[6], a[7]);
  r[7] = (u64x4) u32x8_interleave_hi (a[6], a[7]);

  x = u64x4_interleave_lo (r[0], r[2]);
  y = u64x4_interleave_lo (r[4], r[6]);
  a[0] = u32x8_permute_lanes (x, y, 0x20);
  a[4] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_hi (r[0], r[2]);
  y = u64x4_interleave_hi (r[4], r[6]);
  a[1] = u32x8_permute_lanes (x, y, 0x20);
  a[5] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_lo (r[1], r[3]);
  y = u64x4_interleave_lo (r[5], r[7]);
  a[2] = u32x8_permute_lanes (x, y, 0x20);
  a[6] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_hi (r[1], r[3]);
  y = u64x4_interleave_hi (r[5], r[7]);
  a[3] = u32x8_permute_lanes (x, y, 0x20);
  a[7] = u32x8_permute_lanes (x, y, 0x31);
}

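/* In-place transpose of a 4x4 matrix of u64 stored in rows a[0..3]; only
   the first four elements of the array are touched. */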
static_always_inline void
u64x4_transpose (u64x4 a[8])
{
  u64x4 r[4];

  r[0] = u64x4_interleave_lo (a[0], a[1]);
  r[1] = u64x4_interleave_hi (a[0], a[1]);
  r[2] = u64x4_interleave_lo (a[2], a[3]);
  r[3] = u64x4_interleave_hi (a[2], a[3]);

  a[0] = u64x4_permute_lanes (r[0], r[2], 0x20);
  a[1] = u64x4_permute_lanes (r[1], r[3], 0x20);
  a[2] = u64x4_permute_lanes (r[0], r[2], 0x31);
  a[3] = u64x4_permute_lanes (r[1], r[3], 0x31);
}

#endif /* included_vector_avx2_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */