/* FD.io VPP v16.09 — Vector Packet Processing — vnet_classify.h */
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __included_vnet_classify_h__
#define __included_vnet_classify_h__

#include <stdarg.h>

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ethernet/packet.h>
#include <vnet/ip/ip_packet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vlib/cli.h>
#include <vnet/l2/l2_input.h>
#include <vnet/l2/feat_bitmap.h>
#include <vnet/api_errno.h> /* for API error numbers */

#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vppinfra/cache.h>
#include <vppinfra/xxhash.h>

40 
41 #define CLASSIFY_TRACE 0
42 
43 #if !defined( __aarch64__) && !defined(__arm__)
44 #define CLASSIFY_USE_SSE //Allow usage of SSE operations
45 #endif
46 
47 #define U32X4_ALIGNED(p) PREDICT_TRUE((((intptr_t)p) & 0xf) == 0)
48 
49 struct _vnet_classify_main;
50 typedef struct _vnet_classify_main vnet_classify_main_t;
51 
52 #define foreach_size_in_u32x4 \
53 _(1) \
54 _(2) \
55 _(3) \
56 _(4) \
57 _(5)
58 
59 typedef CLIB_PACKED(struct _vnet_classify_entry {
60  /* Graph node next index */
61  u32 next_index;
62 
63  /* put into vnet_buffer(b)->l2_classfy.opaque_index */
64  union {
65  struct {
66  u32 opaque_index;
67  /* advance on hit, note it's a signed quantity... */
68  i32 advance;
69  };
70  u64 opaque_count;
71  };
72 
73  /* Really only need 1 bit */
74  u32 flags;
75 #define VNET_CLASSIFY_ENTRY_FREE (1<<0)
76 
77  /* Hit counter, last heard time */
78  union {
79  u64 hits;
80  struct _vnet_classify_entry * next_free;
81  };
82 
83  f64 last_heard;
84 
85  /* Must be aligned to a 16-octet boundary */
86  u32x4 key[0];
87 }) vnet_classify_entry_t;
88 
89 static inline int vnet_classify_entry_is_free (vnet_classify_entry_t * e)
90 {
91  return e->flags & VNET_CLASSIFY_ENTRY_FREE;
92 }
93 
94 static inline int vnet_classify_entry_is_busy (vnet_classify_entry_t * e)
95 {
96  return ((e->flags & VNET_CLASSIFY_ENTRY_FREE) == 0);
97 }
98 
99 /* Need these to con the vector allocator */
100 #define _(size) \
101 typedef CLIB_PACKED(struct { \
102  u32 pad0[4]; \
103  u64 pad1[2]; \
104  u32x4 key[size]; \
105 }) vnet_classify_entry_##size##_t;
107 #undef _
108 
109 typedef struct {
110  union {
111  struct {
113  u8 pad[3];
115  };
117  };
119 
120 typedef struct {
121  /* Mask to apply after skipping N vectors */
123  /* Buckets and entries */
125  vnet_classify_entry_t * entries;
126 
127  /* Config parameters */
134  /* Index of next table to try */
136 
137  /* Miss next index, return if next_table_index = 0 */
139 
140  /* Per-bucket working copies, one per thread */
141  vnet_classify_entry_t ** working_copies;
143 
144  /* Free entry freelists */
145  vnet_classify_entry_t **freelists;
146 
147  u8 * name;
148 
149  /* Private allocation arena, protected by the writer lock */
150  void * mheap;
151 
152  /* Writer (only) lock for this table */
153  volatile u32 * writer_lock;
154 
156 
157 struct _vnet_classify_main {
158  /* Table pool */
159  vnet_classify_table_t * tables;
160 
161  /* Registered next-index, opaque unformat fcns */
162  unformat_function_t ** unformat_l2_next_index_fns;
163  unformat_function_t ** unformat_ip_next_index_fns;
164  unformat_function_t ** unformat_acl_next_index_fns;
165  unformat_function_t ** unformat_policer_next_index_fns;
166  unformat_function_t ** unformat_opaque_index_fns;
167 
168  /* convenience variables */
171 };
172 
174 
175 u8 * format_classify_table (u8 * s, va_list * args);
176 
178 
179 static inline u64
181  u8 * h)
182 {
183  u32x4 *mask;
184 
185  union {
186  u32x4 as_u32x4;
187  u64 as_u64[2];
188  } xor_sum __attribute__((aligned(sizeof(u32x4))));
189 
190  ASSERT(t);
191  mask = t->mask;
192 #ifdef CLASSIFY_USE_SSE
193  if (U32X4_ALIGNED(h)) { //SSE can't handle unaligned data
194  u32x4 *data = (u32x4 *)h;
195  xor_sum.as_u32x4 = data[0 + t->skip_n_vectors] & mask[0];
196  switch (t->match_n_vectors)
197  {
198  case 5:
199  xor_sum.as_u32x4 ^= data[4 + t->skip_n_vectors] & mask[4];
200  /* FALLTHROUGH */
201  case 4:
202  xor_sum.as_u32x4 ^= data[3 + t->skip_n_vectors] & mask[3];
203  /* FALLTHROUGH */
204  case 3:
205  xor_sum.as_u32x4 ^= data[2 + t->skip_n_vectors] & mask[2];
206  /* FALLTHROUGH */
207  case 2:
208  xor_sum.as_u32x4 ^= data[1 + t->skip_n_vectors] & mask[1];
209  /* FALLTHROUGH */
210  case 1:
211  break;
212  default:
213  abort();
214  }
215  } else
216 #endif /* CLASSIFY_USE_SSE */
217  {
218  u32 skip_u64 = t->skip_n_vectors * 2;
219  u64 *data64 = (u64 *)h;
220  xor_sum.as_u64[0] = data64[0 + skip_u64] & ((u64 *)mask)[0];
221  xor_sum.as_u64[1] = data64[1 + skip_u64] & ((u64 *)mask)[1];
222  switch (t->match_n_vectors)
223  {
224  case 5:
225  xor_sum.as_u64[0] ^= data64[8 + skip_u64] & ((u64 *)mask)[8];
226  xor_sum.as_u64[1] ^= data64[9 + skip_u64] & ((u64 *)mask)[9];
227  /* FALLTHROUGH */
228  case 4:
229  xor_sum.as_u64[0] ^= data64[6 + skip_u64] & ((u64 *)mask)[6];
230  xor_sum.as_u64[1] ^= data64[7 + skip_u64] & ((u64 *)mask)[7];
231  /* FALLTHROUGH */
232  case 3:
233  xor_sum.as_u64[0] ^= data64[4 + skip_u64] & ((u64 *)mask)[4];
234  xor_sum.as_u64[1] ^= data64[5 + skip_u64] & ((u64 *)mask)[5];
235  /* FALLTHROUGH */
236  case 2:
237  xor_sum.as_u64[0] ^= data64[2 + skip_u64] & ((u64 *)mask)[2];
238  xor_sum.as_u64[1] ^= data64[3 + skip_u64] & ((u64 *)mask)[3];
239  /* FALLTHROUGH */
240  case 1:
241  break;
242 
243  default:
244  abort();
245  }
246  }
247 
248  return clib_xxhash (xor_sum.as_u64[0] ^ xor_sum.as_u64[1]);
249 }
250 
251 static inline void
253 {
254  u32 bucket_index;
255 
256  ASSERT (is_pow2(t->nbuckets));
257 
258  bucket_index = hash & (t->nbuckets - 1);
259 
260  CLIB_PREFETCH(&t->buckets[bucket_index], CLIB_CACHE_LINE_BYTES, LOAD);
261 }
262 
263 static inline vnet_classify_entry_t *
265 {
266  u8 * hp = t->mheap;
267  u8 * vp = hp + offset;
268 
269  return (void *) vp;
270 }
271 
273  vnet_classify_entry_t * v)
274 {
275  u8 * hp, * vp;
276 
277  hp = (u8 *) t->mheap;
278  vp = (u8 *) v;
279 
280  ASSERT((vp - hp) < 0x100000000ULL);
281  return vp - hp;
282 }
283 
284 static inline vnet_classify_entry_t *
286  vnet_classify_entry_t * e,
287  u32 index)
288 {
289  u8 * eu8;
290 
291  eu8 = (u8 *)e;
292 
293  eu8 += index * (sizeof (vnet_classify_entry_t) +
294  (t->match_n_vectors * sizeof (u32x4)));
295 
296  return (vnet_classify_entry_t *) eu8;
297 }
298 
299 static inline void
301  u64 hash)
302 {
303  u32 bucket_index;
304  u32 value_index;
306  vnet_classify_entry_t * e;
307 
308  bucket_index = hash & (t->nbuckets - 1);
309 
310  b = &t->buckets[bucket_index];
311 
312  if (b->offset == 0)
313  return;
314 
315  hash >>= t->log2_nbuckets;
316 
317  e = vnet_classify_get_entry (t, b->offset);
318  value_index = hash & ((1<<b->log2_pages)-1);
319 
320  e = vnet_classify_entry_at_index (t, e, value_index);
321 
323 }
324 
325 vnet_classify_entry_t *
327  u8 * h, u64 hash, f64 now);
328 
329 static inline vnet_classify_entry_t *
331  u8 * h, u64 hash, f64 now)
332  {
333  vnet_classify_entry_t * v;
334  u32x4 *mask, *key;
335  union {
336  u32x4 as_u32x4;
337  u64 as_u64[2];
338  } result __attribute__((aligned(sizeof(u32x4))));
340  u32 value_index;
341  u32 bucket_index;
342  int i;
343 
344  bucket_index = hash & (t->nbuckets-1);
345  b = &t->buckets[bucket_index];
346  mask = t->mask;
347 
348  if (b->offset == 0)
349  return 0;
350 
351  hash >>= t->log2_nbuckets;
352 
353  v = vnet_classify_get_entry (t, b->offset);
354  value_index = hash & ((1<<b->log2_pages)-1);
355  v = vnet_classify_entry_at_index (t, v, value_index);
356 
357 #ifdef CLASSIFY_USE_SSE
358  if (U32X4_ALIGNED(h)) {
359  u32x4 *data = (u32x4 *) h;
360  for (i = 0; i < t->entries_per_page; i++) {
361  key = v->key;
362  result.as_u32x4 = (data[0 + t->skip_n_vectors] & mask[0]) ^ key[0];
363  switch (t->match_n_vectors)
364  {
365  case 5:
366  result.as_u32x4 |= (data[4 + t->skip_n_vectors] & mask[4]) ^ key[4];
367  /* FALLTHROUGH */
368  case 4:
369  result.as_u32x4 |= (data[3 + t->skip_n_vectors] & mask[3]) ^ key[3];
370  /* FALLTHROUGH */
371  case 3:
372  result.as_u32x4 |= (data[2 + t->skip_n_vectors] & mask[2]) ^ key[2];
373  /* FALLTHROUGH */
374  case 2:
375  result.as_u32x4 |= (data[1 + t->skip_n_vectors] & mask[1]) ^ key[1];
376  /* FALLTHROUGH */
377  case 1:
378  break;
379  default:
380  abort();
381  }
382 
383  if (u32x4_zero_byte_mask (result.as_u32x4) == 0xffff) {
384  if (PREDICT_TRUE(now)) {
385  v->hits++;
386  v->last_heard = now;
387  }
388  return (v);
389  }
390  v = vnet_classify_entry_at_index (t, v, 1);
391  }
392  } else
393 #endif /* CLASSIFY_USE_SSE */
394  {
395  u32 skip_u64 = t->skip_n_vectors * 2;
396  u64 *data64 = (u64 *)h;
397  for (i = 0; i < t->entries_per_page; i++) {
398  key = v->key;
399 
400  result.as_u64[0] = (data64[0 + skip_u64] & ((u64 *)mask)[0]) ^ ((u64 *)key)[0];
401  result.as_u64[1] = (data64[1 + skip_u64] & ((u64 *)mask)[1]) ^ ((u64 *)key)[1];
402  switch (t->match_n_vectors)
403  {
404  case 5:
405  result.as_u64[0] |= (data64[8 + skip_u64] & ((u64 *)mask)[8]) ^ ((u64 *)key)[8];
406  result.as_u64[1] |= (data64[9 + skip_u64] & ((u64 *)mask)[9]) ^ ((u64 *)key)[9];
407  /* FALLTHROUGH */
408  case 4:
409  result.as_u64[0] |= (data64[6 + skip_u64] & ((u64 *)mask)[6]) ^ ((u64 *)key)[6];
410  result.as_u64[1] |= (data64[7 + skip_u64] & ((u64 *)mask)[7]) ^ ((u64 *)key)[7];
411  /* FALLTHROUGH */
412  case 3:
413  result.as_u64[0] |= (data64[4 + skip_u64] & ((u64 *)mask)[4]) ^ ((u64 *)key)[4];
414  result.as_u64[1] |= (data64[5 + skip_u64] & ((u64 *)mask)[5]) ^ ((u64 *)key)[5];
415  /* FALLTHROUGH */
416  case 2:
417  result.as_u64[0] |= (data64[2 + skip_u64] & ((u64 *)mask)[2]) ^ ((u64 *)key)[2];
418  result.as_u64[1] |= (data64[3 + skip_u64] & ((u64 *)mask)[3]) ^ ((u64 *)key)[3];
419  /* FALLTHROUGH */
420  case 1:
421  break;
422  default:
423  abort();
424  }
425 
426  if (result.as_u64[0] == 0 && result.as_u64[1] == 0) {
427  if (PREDICT_TRUE(now)) {
428  v->hits++;
429  v->last_heard = now;
430  }
431  return (v);
432  }
433 
434  v = vnet_classify_entry_at_index (t, v, 1);
435  }
436  }
437  return 0;
438  }
439 
442  u8 * mask, u32 nbuckets, u32 memory_size,
443  u32 skip_n_vectors,
444  u32 match_n_vectors);
445 
447  u32 table_index,
448  u8 * match,
449  u32 hit_next_index,
450  u32 opaque_index,
451  i32 advance,
452  int is_add);
453 
455  u8 * mask,
456  u32 nbuckets,
457  u32 memory_size,
458  u32 skip,
459  u32 match,
460  u32 next_table_index,
461  u32 miss_next_index,
462  u32 * table_index,
463  int is_add);
464 
478 
480 (unformat_function_t * fn);
481 
483 (unformat_function_t * fn);
484 
486 (unformat_function_t * fn);
487 
489 (unformat_function_t * fn);
490 
492 
493 #endif /* __included_vnet_classify_h__ */