FD.io VPP v21.01.1
Vector Packet Processing
vector_avx512.h
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx512_h
#define included_vector_avx512_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

/* *INDENT-OFF* */
#define foreach_avx512_vec512i \
  _(i,8,64,epi8) _(i,16,32,epi16) _(i,32,16,epi32) _(i,64,8,epi64)
#define foreach_avx512_vec512u \
  _(u,8,64,epi8) _(u,16,32,epi16) _(u,32,16,epi32) _(u,64,8,epi64)
#define foreach_avx512_vec512f \
  _(f,32,8,ps) _(f,64,4,pd)

/* Generated for every type listed above: splat, load_aligned, store_aligned,
   load_unaligned, store_unaligned, is_all_zero, is_equal, is_all_equal,
   is_zero_mask, interleave_lo, interleave_hi */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm512_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_aligned (void *p) \
{ return (t##s##x##c) _mm512_load_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_aligned (t##s##x##c v, void *p) \
{ _mm512_store_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm512_loadu_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm512_storeu_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c v) \
{ return (_mm512_test_epi64_mask ((__m512i) v, (__m512i) v) == 0); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline u##c \
t##s##x##c##_is_zero_mask (t##s##x##c v) \
{ return _mm512_test_##i##_mask ((__m512i) v, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpacklo_##i ((__m512i) a, (__m512i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpackhi_##i ((__m512i) a, (__m512i) b); }

foreach_avx512_vec512i foreach_avx512_vec512u
#undef _
/* *INDENT-ON* */
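
/* Example expansion (illustrative sketch, not part of the original header):
 * for the _(u,32,16,epi32) tuple the macro above generates helpers such as
 *
 *   static_always_inline u32x16
 *   u32x16_splat (u32 x)
 *   { return (u32x16) _mm512_set1_epi32 (x); }
 *
 *   static_always_inline u16
 *   u32x16_is_zero_mask (u32x16 v)
 *   { return _mm512_test_epi32_mask ((__m512i) v, (__m512i) v); }
 *
 * so every 512-bit vector type gets the same family of load/store, compare
 * and interleave helpers. */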

static_always_inline u32
u16x32_msb_mask (u16x32 v)
{
  return (u32) _mm512_movepi16_mask ((__m512i) v);
}
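
/* Illustrative example: the result has one bit per 16-bit lane, set when
 * that lane's most significant bit is set; a vector whose lanes are all
 * 0x8000 yields 0xffffffff. */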

static_always_inline u32x16
u32x16_byte_swap (u32x16 v)
{
  u8x64 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x16) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

static_always_inline u16x32
u16x32_byte_swap (u16x32 v)
{
  u8x64 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x32) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}
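
/* Both byte-swap helpers permute bytes with _mm512_shuffle_epi8 using a
 * pattern that repeats per 128-bit lane: u32x16_byte_swap reverses the four
 * bytes of every 32-bit lane (0x11223344 becomes 0x44332211) and
 * u16x32_byte_swap swaps the two bytes of every 16-bit lane, i.e. the usual
 * host/network byte-order conversion for vectors of u32 or u16. */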

static_always_inline u32x8
u32x16_extract_lo (u32x16 v)
{
  return (u32x8) _mm512_extracti64x4_epi64 ((__m512i) v, 0);
}

static_always_inline u32x8
u32x16_extract_hi (u32x16 v)
{
  return (u32x8) _mm512_extracti64x4_epi64 ((__m512i) v, 1);
}

static_always_inline u8x32
u8x64_extract_lo (u8x64 v)
{
  return (u8x32) _mm512_extracti64x4_epi64 ((__m512i) v, 0);
}

static_always_inline u8x32
u8x64_extract_hi (u8x64 v)
{
  return (u8x32) _mm512_extracti64x4_epi64 ((__m512i) v, 1);
}

static_always_inline u32
u32x16_min_scalar (u32x16 v)
{
  return u32x8_min_scalar (u32x8_min (u32x16_extract_lo (v),
                                      u32x16_extract_hi (v)));
}

static_always_inline u32x16
u32x16_insert_lo (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 0);
}

static_always_inline u32x16
u32x16_insert_hi (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 1);
}

static_always_inline u64x8
u64x8_permute (u64x8 a, u64x8 b, u64x8 mask)
{
  return (u64x8) _mm512_permutex2var_epi64 ((__m512i) a, (__m512i) mask,
                                            (__m512i) b);
}
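
/* Illustrative note: _mm512_permutex2var_epi64 uses the low four bits of
 * each index in `mask`; values 0-7 select a qword from a, values 8-15 select
 * from b, so e.g. mask = { 0, 8, 1, 9, 2, 10, 3, 11 } interleaves the low
 * halves of a and b. */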


#define u32x16_ternary_logic(a, b, c, d) \
  (u32x16) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b, (__m512i) c, d)
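
/* The trailing immediate of _mm512_ternarylogic_epi32 is an 8-bit truth
 * table over the three inputs; for example 0x96 encodes a ^ b ^ c, which is
 * how u8x64_xor3 below folds a three-way XOR into a single instruction. */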

#define u8x64_insert_u8x16(a, b, n) \
  (u8x64) _mm512_inserti64x2 ((__m512i) (a), (__m128i) (b), n)

#define u8x64_extract_u8x16(a, n) \
  (u8x16) _mm512_extracti64x2_epi64 ((__m512i) (a), n)

#define u8x64_word_shift_left(a,n)  (u8x64) _mm512_bslli_epi128((__m512i) a, n)
#define u8x64_word_shift_right(a,n) (u8x64) _mm512_bsrli_epi128((__m512i) a, n)
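
/* Note: _mm512_bslli_epi128 / _mm512_bsrli_epi128 shift by whole bytes
 * within each 128-bit lane independently; bytes never cross the lane
 * boundaries of the 512-bit register. */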

static_always_inline u8x64
u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
{
  return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
                                            (__m512i) c, 0x96);
}

static_always_inline u8x64
u8x64_reflect_u8x16 (u8x64 x)
{
  static const u8x64 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
  };
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) x, (__m512i) mask);
}
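
/* u8x64_reflect_u8x16 reverses the byte order of each 16-byte lane
 * independently (byte 0 swaps with byte 15, 1 with 14, and so on), since
 * _mm512_shuffle_epi8 indexes bytes within 128-bit lanes only. */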

static_always_inline u8x64
u8x64_shuffle (u8x64 v, u8x64 m)
{
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) m);
}

#define u8x64_align_right(a, b, imm) \
  (u8x64) _mm512_alignr_epi8 ((__m512i) a, (__m512i) b, imm)

static_always_inline u32
u32x16_sum_elts (u32x16 sum16)
{
  u32x8 sum8;
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
  return sum8[0] + sum8[4];
}
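
/* Informal sketch of the reduction above: the two align_right steps rotate
 * each 128-bit lane by two and then one u32, so every element of a lane ends
 * up holding that lane's sum; adding the high and low 256-bit halves and then
 * sum8[0] + sum8[4] combines the four per-lane sums into the total of all
 * sixteen elements. */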

static_always_inline u8x64
u8x64_mask_load (u8x64 a, void *p, u64 mask)
{
  return (u8x64) _mm512_mask_loadu_epi8 ((__m512i) a, mask, p);
}

static_always_inline void
u8x64_mask_store (u8x64 a, void *p, u64 mask)
{
  _mm512_mask_storeu_epi8 (p, mask, (__m512i) a);
}
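
/* Typical usage (illustrative sketch, not from the original header): copy a
 * partial buffer of n <= 64 bytes without touching memory past the end:
 *
 *   u64 m = (n == 64) ? ~0ULL : ((1ULL << n) - 1);
 *   u8x64 v = u8x64_mask_load (u8x64_splat (0), src, m);
 *   u8x64_mask_store (v, dst, m);
 *
 * Bytes whose mask bit is clear come from the first argument on load and are
 * left untouched in memory on store. */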

static_always_inline u8x64
u8x64_splat_u8x16 (u8x16 a)
{
  return (u8x64) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_splat_u32x4 (u32x4 a)
{
  return (u32x16) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_mask_blend (u32x16 a, u32x16 b, u16 mask)
{
  return (u32x16) _mm512_mask_blend_epi32 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
{
  return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
}
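
/* In both blend helpers a set mask bit selects the element from b and a
 * clear bit selects it from a; e.g. u32x16_mask_blend (a, b, 0x00ff) takes
 * the low eight u32 elements from b and the high eight from a. */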

static_always_inline u8
u64x8_mask_is_equal (u64x8 a, u64x8 b)
{
  return _mm512_cmpeq_epu64_mask ((__m512i) a, (__m512i) b);
}

static_always_inline void
u32x16_transpose (u32x16 m[16])
{
  __m512i r[16], a, b, c, d, x, y;

  /* *INDENT-OFF* */
  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  /* *INDENT-ON* */

  r[0] = _mm512_unpacklo_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpacklo_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[5] = _mm512_unpacklo_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[6] = _mm512_unpacklo_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[7] = _mm512_unpacklo_epi32 ((__m512i) m[14], (__m512i) m[15]);

  r[8] = _mm512_unpackhi_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[9] = _mm512_unpackhi_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[10] = _mm512_unpackhi_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[11] = _mm512_unpackhi_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[12] = _mm512_unpackhi_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[13] = _mm512_unpackhi_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[14] = _mm512_unpackhi_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[15] = _mm512_unpackhi_epi32 ((__m512i) m[14], (__m512i) m[15]);

  a = _mm512_unpacklo_epi64 (r[0], r[1]);
  b = _mm512_unpacklo_epi64 (r[2], r[3]);
  c = _mm512_unpacklo_epi64 (r[4], r[5]);
  d = _mm512_unpacklo_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[0] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[8] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[4] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[12] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpacklo_epi64 (r[8], r[9]);
  b = _mm512_unpacklo_epi64 (r[10], r[11]);
  c = _mm512_unpacklo_epi64 (r[12], r[13]);
  d = _mm512_unpacklo_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[2] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[10] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[6] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[14] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[0], r[1]);
  b = _mm512_unpackhi_epi64 (r[2], r[3]);
  c = _mm512_unpackhi_epi64 (r[4], r[5]);
  d = _mm512_unpackhi_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[1] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[9] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[5] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[13] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[8], r[9]);
  b = _mm512_unpackhi_epi64 (r[10], r[11]);
  c = _mm512_unpackhi_epi64 (r[12], r[13]);
  d = _mm512_unpackhi_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[3] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[11] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[7] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[15] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
}
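
/* Informal sketch: u32x16_transpose treats m as a 16x16 matrix of u32 with
 * one row per vector. The epi32/epi64 unpack steps transpose small blocks
 * inside each 128-bit lane, and the permutex2var passes (pm1/pm2, then
 * pm3/pm4) move those blocks across lanes so that row i of the result holds
 * what was column i of the input. u64x8_transpose below applies the same
 * idea to an 8x8 matrix of u64. */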


static_always_inline void
u64x8_transpose (u64x8 m[8])
{
  __m512i r[8], x, y;

  /* *INDENT-OFF* */
  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  /* *INDENT-ON* */

  r[0] = _mm512_unpacklo_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi64 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpackhi_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[5] = _mm512_unpackhi_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[6] = _mm512_unpackhi_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[7] = _mm512_unpackhi_epi64 ((__m512i) m[6], (__m512i) m[7]);

  x = _mm512_permutex2var_epi64 (r[0], pm1, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm1, r[3]);
  m[0] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[4] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[0], pm2, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm2, r[3]);
  m[2] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[6] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);

  x = _mm512_permutex2var_epi64 (r[4], pm1, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm1, r[7]);
  m[1] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[5] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[4], pm2, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm2, r[7]);
  m[3] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[7] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
}

#endif /* included_vector_avx512_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */