FD.io VPP  v18.07-rc0-415-g6c78436
Vector Packet Processing
hash_lookup.c
Go to the documentation of this file.
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2017 Cisco and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <stddef.h>
19 #include <netinet/in.h>
20 
21 #include <vlibapi/api.h>
22 #include <vlibmemory/api.h>
23 
24 #include <vlib/vlib.h>
25 #include <vnet/vnet.h>
26 #include <vnet/pg/pg.h>
27 #include <vppinfra/error.h>
28 #include <vnet/plugin/plugin.h>
29 #include <acl/acl.h>
30 #include <vppinfra/bihash_48_8.h>
31 
32 #include "hash_lookup.h"
33 #include "hash_lookup_private.h"
34 
35 
37 {
/* Return the vector of applied hash ACE entries for one lookup context.
 * NOTE(review): the signature line is missing from this extraction —
 * presumably (acl_main_t *am, u32 lc_index) given the fields read below;
 * confirm against the repository. */
38  applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, lc_index);
39 
 /* NOTE(review): commented-out remnant of the older per-sw_if_index
  * input/output lookup layout — candidate for deletion. */
40 /*is_input ? vec_elt_at_index(am->input_hash_entry_vec_by_sw_if_index, sw_if_index)
41  : vec_elt_at_index(am->output_hash_entry_vec_by_sw_if_index, sw_if_index);
42 */
43  return applied_hash_aces;
44 }
45 
46 
47 static void
49 {
50  DBG("HASH ADD/DEL: %016llx %016llx %016llx %016llx %016llx %016llx %016llx add %d",
51  kv->key[0], kv->key[1], kv->key[2],
52  kv->key[3], kv->key[4], kv->key[5], kv->value, is_add);
53  BV (clib_bihash_add_del) (&am->acl_lookup_hash, kv, is_add);
54 }
55 
56 static void
58  applied_hash_ace_entry_t **applied_hash_aces,
59  u32 lc_index,
60  u32 new_index, clib_bihash_kv_48_8_t *kv)
61 {
62  fa_5tuple_t *kv_key = (fa_5tuple_t *)kv->key;
64  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
66 
67  /* apply the mask to ace key */
70 
71  u64 *pmatch = (u64 *) &ace_info->match;
72  u64 *pmask = (u64 *)&mte->mask;
73  u64 *pkey = (u64 *)kv->key;
74 
75  *pkey++ = *pmatch++ & *pmask++;
76  *pkey++ = *pmatch++ & *pmask++;
77  *pkey++ = *pmatch++ & *pmask++;
78  *pkey++ = *pmatch++ & *pmask++;
79  *pkey++ = *pmatch++ & *pmask++;
80  *pkey++ = *pmatch++ & *pmask++;
81 
83  kv_key->pkt.lc_index = lc_index;
84  kv_val->as_u64 = 0;
85  kv_val->applied_entry_index = new_index;
86 }
87 
88 static void
90  u32 lc_index,
91  applied_hash_ace_entry_t **applied_hash_aces,
92  u32 index, int is_add)
93 {
95 
96  fill_applied_hash_ace_kv(am, applied_hash_aces, lc_index, index, &kv);
97  hashtable_add_del(am, &kv, is_add);
98 }
99 
100 
101 static u32
103 {
105  /* *INDENT-OFF* */
107  ({
108  if(memcmp(&mte->mask, mask, sizeof(*mask)) == 0)
109  return (mte - am->ace_mask_type_pool);
110  }));
111  /* *INDENT-ON* */
112  return ~0;
113 }
114 
115 static u32
117 {
118  u32 mask_type_index = find_mask_type_index(am, mask);
120  if(~0 == mask_type_index) {
122  mask_type_index = mte - am->ace_mask_type_pool;
123  clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
124  mte->refcount = 0;
125  /*
126  * We can use only 16 bits, since in the match there is only u16 field.
127  * Realistically, once you go to 64K of mask types, it is a huge
128  * problem anyway, so we might as well stop half way.
129  */
130  ASSERT(mask_type_index < 32768);
131  }
132  mte = am->ace_mask_type_pool + mask_type_index;
133  mte->refcount++;
134  return mask_type_index;
135 }
136 
137 static void
/* NOTE(review): the function-name line was lost in extraction; this is the
 * release counterpart of assign_mask_type_index() above (which does
 * mte->refcount++): drop one reference on a mask-type pool entry and
 * return the entry to the pool when the last reference goes away. */
139 {
140  ace_mask_type_entry_t *mte = pool_elt_at_index(am->ace_mask_type_pool, mask_type_index);
141  mte->refcount--;
142  if (mte->refcount == 0) {
143  /* we are not using this entry anymore */
144  pool_put(am->ace_mask_type_pool, mte);
145  }
146 }
147 
148 static void
151  applied_hash_aces, u32 lc_index)
152 {
153  hash_applied_mask_info_t *new_hash_applied_mask_info_vec =
155 
157  int i;
158  for (i = 0; i < vec_len ((*applied_hash_aces)); i++)
159  {
161  vec_elt_at_index ((*applied_hash_aces), i);
162 
163  /* check if mask_type_index is already there */
164  u32 new_pointer = vec_len (new_hash_applied_mask_info_vec);
165  int search;
166  for (search = 0; search < vec_len (new_hash_applied_mask_info_vec);
167  search++)
168  {
169  minfo = vec_elt_at_index (new_hash_applied_mask_info_vec, search);
170  if (minfo->mask_type_index == pae->mask_type_index)
171  break;
172  }
173 
174  vec_validate ((new_hash_applied_mask_info_vec), search);
175  minfo = vec_elt_at_index ((new_hash_applied_mask_info_vec), search);
176  if (search == new_pointer)
177  {
178  minfo->mask_type_index = pae->mask_type_index;
179  minfo->num_entries = 0;
180  minfo->max_collisions = 0;
181  minfo->first_rule_index = ~0;
182  }
183 
184  minfo->num_entries = minfo->num_entries + 1;
185 
186  if (vec_len (pae->colliding_rules) > minfo->max_collisions)
187  minfo->max_collisions = vec_len (pae->colliding_rules);
188 
189  if (minfo->first_rule_index > i)
190  minfo->first_rule_index = i;
191  }
192 
193  hash_applied_mask_info_t **hash_applied_mask_info_vec =
195 
196  vec_free ((*hash_applied_mask_info_vec));
197  (*hash_applied_mask_info_vec) = new_hash_applied_mask_info_vec;
198 }
199 
200 static void
202  u32 applied_entry_index)
203 {
204  u32 i;
205  for (i = 0; i < vec_len ((*pvec)); i++)
206  {
207  collision_match_rule_t *cr = vec_elt_at_index ((*pvec), i);
208  if (cr->applied_entry_index == applied_entry_index)
209  {
210  vec_del1 ((*pvec), i);
211  }
212  }
213 }
214 
215 static void
/* NOTE(review): first signature line lost in extraction; the remaining
 * parameters continue below. Removes the given applied entry index from
 * the colliding-rules list kept on the collision chain's head entry
 * (ownership of colliding_rules stays with the head — see the transfer
 * in deactivate_applied_ace_hash_entry). */
217  u32 head_index, u32 applied_entry_index)
218 {
219  applied_hash_ace_entry_t *head_pae =
220  vec_elt_at_index ((*applied_hash_aces), head_index);
221  vec_del_collision_rule (&head_pae->colliding_rules, applied_entry_index);
222 }
223 
224 static void
226  applied_hash_ace_entry_t ** applied_hash_aces,
227  u32 head_index, u32 applied_entry_index)
228 {
229  applied_hash_ace_entry_t *head_pae =
230  vec_elt_at_index ((*applied_hash_aces), head_index);
232  vec_elt_at_index ((*applied_hash_aces), applied_entry_index);
233 
235 
236  cr.acl_index = pae->acl_index;
237  cr.ace_index = pae->ace_index;
238  cr.acl_position = pae->acl_position;
239  cr.applied_entry_index = applied_entry_index;
240  cr.rule = am->acls[pae->acl_index].rules[pae->ace_index];
241  vec_add1 (head_pae->colliding_rules, cr);
242 }
243 
244 static void
246  u32 lc_index,
247  applied_hash_ace_entry_t **applied_hash_aces,
248  u32 new_index)
249 {
251  ASSERT(new_index != ~0);
252  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
253  DBG("activate_applied_ace_hash_entry lc_index %d new_index %d", lc_index, new_index);
254 
255  fill_applied_hash_ace_kv(am, applied_hash_aces, lc_index, new_index, &kv);
256 
257  DBG("APPLY ADD KY: %016llx %016llx %016llx %016llx %016llx %016llx",
258  kv.key[0], kv.key[1], kv.key[2],
259  kv.key[3], kv.key[4], kv.key[5]);
260 
261  clib_bihash_kv_48_8_t result;
262  hash_acl_lookup_value_t *result_val = (hash_acl_lookup_value_t *)&result.value;
263  int res = BV (clib_bihash_search) (&am->acl_lookup_hash, &kv, &result);
264  ASSERT(new_index != ~0);
265  ASSERT(new_index < vec_len((*applied_hash_aces)));
266  if (res == 0) {
267  /* There already exists an entry or more. Append at the end. */
268  u32 first_index = result_val->applied_entry_index;
269  ASSERT(first_index != ~0);
270  DBG("A key already exists, with applied entry index: %d", first_index);
271  applied_hash_ace_entry_t *first_pae = vec_elt_at_index((*applied_hash_aces), first_index);
272  u32 last_index = first_pae->tail_applied_entry_index;
273  ASSERT(last_index != ~0);
274  applied_hash_ace_entry_t *last_pae = vec_elt_at_index((*applied_hash_aces), last_index);
275  DBG("...advance to chained entry index: %d", last_index);
276  /* link ourseves in */
277  last_pae->next_applied_entry_index = new_index;
278  pae->prev_applied_entry_index = last_index;
279  /* adjust the pointer to the new tail */
280  first_pae->tail_applied_entry_index = new_index;
281  add_colliding_rule(am, applied_hash_aces, first_index, new_index);
282  } else {
283  /* It's the very first entry */
284  hashtable_add_del(am, &kv, 1);
285  ASSERT(new_index != ~0);
286  pae->tail_applied_entry_index = new_index;
287  add_colliding_rule(am, applied_hash_aces, new_index, new_index);
288  }
289 }
290 
291 
292 static void *
294 {
295  if (0 == am->hash_lookup_mheap) {
296  am->hash_lookup_mheap = mheap_alloc (0 /* use VM */ , am->hash_lookup_mheap_size);
297  if (0 == am->hash_lookup_mheap) {
298  clib_error("ACL plugin failed to allocate hash lookup heap of %U bytes, abort", format_memory_size, am->hash_lookup_mheap_size);
299  }
302  }
303  void *oldheap = clib_mem_set_heap(am->hash_lookup_mheap);
304  return oldheap;
305 }
306 
307 void
309 {
310  acl_main_t *am = &acl_main;
313  if (on) {
316  mheap_validate(h);
317  } else {
318  h->flags &= ~MHEAP_FLAG_VALIDATE;
320  }
321 }
322 
323 void
325 {
326  acl_main_t *am = &acl_main;
329  if (on) {
330  h->flags |= MHEAP_FLAG_TRACE;
331  } else {
332  h->flags &= ~MHEAP_FLAG_TRACE;
333  }
334 }
335 
336 static void
338 {
341 
343  fa_5tuple_t *mask;
344  /*
345  * Start taking base_mask associated to ace, and essentially copy it.
346  * With TupleMerge we will assign a relaxed mask here.
347  */
349  mask = &mte->mask;
350  pae->mask_type_index = assign_mask_type_index(am, mask);
351 }
352 
353 void
354 hash_acl_apply(acl_main_t *am, u32 lc_index, int acl_index, u32 acl_position)
355 {
356  int i;
357 
358  DBG0("HASH ACL apply: lc_index %d acl %d", lc_index, acl_index);
359  if (!am->acl_lookup_hash_initialized) {
360  BV (clib_bihash_init) (&am->acl_lookup_hash, "ACL plugin rule lookup bihash",
363  }
364 
365  void *oldheap = hash_acl_set_heap(am);
367  vec_validate(am->hash_acl_infos, acl_index);
368  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
369 
370  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
371  u32 **hash_acl_applied_lc_index = &ha->lc_index_list;
372 
373  int base_offset = vec_len(*applied_hash_aces);
374 
375  /* Update the bitmap of the mask types with which the lookup
376  needs to happen for the ACLs applied to this lc_index */
378  vec_validate((*applied_hash_acls), lc_index);
379  applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);
380 
381  /* ensure the list of applied hash acls is initialized and add this acl# to it */
382  u32 index = vec_search(pal->applied_acls, acl_index);
383  if (index != ~0) {
384  clib_warning("BUG: trying to apply twice acl_index %d on lc_index %d, according to lc",
385  acl_index, lc_index);
386  goto done;
387  }
388  vec_add1(pal->applied_acls, acl_index);
389  u32 index2 = vec_search((*hash_acl_applied_lc_index), lc_index);
390  if (index2 != ~0) {
391  clib_warning("BUG: trying to apply twice acl_index %d on lc_index %d, according to hash h-acl info",
392  acl_index, lc_index);
393  goto done;
394  }
395  vec_add1((*hash_acl_applied_lc_index), lc_index);
396 
397  /*
398  * if the applied ACL is empty, the current code will cause a
399  * different behavior compared to current linear search: an empty ACL will
400  * simply fallthrough to the next ACL, or the default deny in the end.
401  *
402  * This is not a problem, because after vpp-dev discussion,
403  * the consensus was it should not be possible to apply the non-existent
404  * ACL, so the change adding this code also takes care of that.
405  */
406 
407  /* expand the applied aces vector by the necessary amount */
408  vec_resize((*applied_hash_aces), vec_len(ha->rules));
409 
411  /* add the rules from the ACL to the hash table for lookup and append to the vector*/
412  for(i=0; i < vec_len(ha->rules); i++) {
413  u32 new_index = base_offset + i;
414  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
415  pae->acl_index = acl_index;
416  pae->ace_index = ha->rules[i].ace_index;
417  pae->acl_position = acl_position;
418  pae->action = ha->rules[i].action;
419  pae->hitcount = 0;
420  pae->hash_ace_info_index = i;
421  /* we might link it in later */
422  pae->next_applied_entry_index = ~0;
423  pae->prev_applied_entry_index = ~0;
424  pae->tail_applied_entry_index = ~0;
425  pae->colliding_rules = NULL;
426  pae->mask_type_index = ~0;
428  activate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, new_index);
429  }
430  remake_hash_applied_mask_info_vec(am, applied_hash_aces, lc_index);
431 done:
432  clib_mem_set_heap (oldheap);
433 }
434 
435 static u32
/* NOTE(review): signature line lost in extraction; per this dump's own
 * cross-reference index the prototype is
 * find_head_applied_ace_index(applied_hash_ace_entry_t **applied_hash_aces,
 * u32 curr_index). Walks the prev links from curr_index back to the first
 * entry of the collision chain and returns that head index. */
437 {
438  /*
439  * find back the first entry. Inefficient so might need to be a bit cleverer
440  * if this proves to be a problem..
441  */
442  u32 an_index = curr_index;
443  ASSERT(an_index != ~0);
444  applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), an_index);
 /* the chain head is the only entry whose prev link is ~0 */
445  while(head_pae->prev_applied_entry_index != ~0) {
446  an_index = head_pae->prev_applied_entry_index;
447  ASSERT(an_index != ~0);
448  head_pae = vec_elt_at_index((*applied_hash_aces), an_index);
449  }
450  return an_index;
451 }
452 
453 static void
/* NOTE(review): the first signature line was lost in extraction; per this
 * dump's cross-reference index the prototype is
 * move_applied_ace_hash_entry(acl_main_t *am, u32 lc_index,
 * applied_hash_ace_entry_t **applied_hash_aces, u32 old_index,
 * u32 new_index). Relocates an applied ACE from old_index to new_index,
 * patching the doubly-linked collision chain and, when the entry is a
 * chain head, the hash-table value that points at it. */
455  u32 lc_index,
456  applied_hash_ace_entry_t **applied_hash_aces,
457  u32 old_index, u32 new_index)
458 {
459  ASSERT(old_index != ~0);
460  ASSERT(new_index != ~0);
461  /* move the entry */
462  *vec_elt_at_index((*applied_hash_aces), new_index) = *vec_elt_at_index((*applied_hash_aces), old_index);
463 
464  /* update the linkage and hash table if necessary */
465  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), old_index);
466 
467  if (pae->prev_applied_entry_index != ~0) {
 /* mid-chain or tail: just repoint the predecessor's next link */
468  applied_hash_ace_entry_t *prev_pae = vec_elt_at_index((*applied_hash_aces), pae->prev_applied_entry_index);
469  ASSERT(prev_pae->next_applied_entry_index == old_index);
470  prev_pae->next_applied_entry_index = new_index;
471  } else {
472  /* first entry - so the hash points to it, update */
473  add_del_hashtable_entry(am, lc_index,
474  applied_hash_aces, new_index, 1);
475  ASSERT(pae->tail_applied_entry_index != ~0);
476  }
477  if (pae->next_applied_entry_index != ~0) {
478  applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
479  ASSERT(next_pae->prev_applied_entry_index == old_index);
480  next_pae->prev_applied_entry_index = new_index;
481  } else {
482  /*
483  * Moving the very last entry, so we need to update the tail pointer in the first one.
484  */
485  u32 head_index = find_head_applied_ace_index(applied_hash_aces, old_index);
486  ASSERT(head_index != ~0);
487  applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index);
488 
489  ASSERT(head_pae->tail_applied_entry_index == old_index);
490  head_pae->tail_applied_entry_index = new_index;
491  }
492  /* invalidate the old entry */
 /* (~0 links mark the old slot as unchained; stale reads trip the ASSERTs above) */
493  pae->prev_applied_entry_index = ~0;
494  pae->next_applied_entry_index = ~0;
495  pae->tail_applied_entry_index = ~0;
496 }
497 
498 static void
500  u32 lc_index,
501  applied_hash_ace_entry_t **applied_hash_aces,
502  u32 old_index)
503 {
504  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), old_index);
505  DBG("UNAPPLY DEACTIVATE: lc_index %d applied index %d", lc_index, old_index);
506 
507  if (pae->prev_applied_entry_index != ~0) {
508  DBG("UNAPPLY = index %d has prev_applied_entry_index %d", old_index, pae->prev_applied_entry_index);
509  applied_hash_ace_entry_t *prev_pae = vec_elt_at_index((*applied_hash_aces), pae->prev_applied_entry_index);
510  ASSERT(prev_pae->next_applied_entry_index == old_index);
512 
513  u32 head_index = find_head_applied_ace_index(applied_hash_aces, old_index);
514  ASSERT(head_index != ~0);
515  applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index);
516  del_colliding_rule(applied_hash_aces, head_index, old_index);
517 
518  if (pae->next_applied_entry_index == ~0) {
519  /* it was a last entry we removed, update the pointer on the first one */
520  ASSERT(head_pae->tail_applied_entry_index == old_index);
522  } else {
523  applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
525  }
526  } else {
527  /* It was the first entry. We need either to reset the hash entry or delete it */
528  if (pae->next_applied_entry_index != ~0) {
529  /* the next element becomes the new first one, so needs the tail pointer to be set */
530  applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
531  ASSERT(pae->tail_applied_entry_index != ~0);
533  /* Remove ourselves and transfer the ownership of the colliding rules vector */
534  del_colliding_rule(applied_hash_aces, old_index, old_index);
535  next_pae->colliding_rules = pae->colliding_rules;
536  /* unlink from the next element */
537  next_pae->prev_applied_entry_index = ~0;
538  add_del_hashtable_entry(am, lc_index,
539  applied_hash_aces, pae->next_applied_entry_index, 1);
540  } else {
541  /* no next entry, so just delete the entry in the hash table */
542  add_del_hashtable_entry(am, lc_index,
543  applied_hash_aces, old_index, 0);
544  }
545  }
546 
548  /* invalidate the old entry */
549  pae->mask_type_index = ~0;
550  pae->prev_applied_entry_index = ~0;
551  pae->next_applied_entry_index = ~0;
552  pae->tail_applied_entry_index = ~0;
553  /* always has to be 0 */
554  pae->colliding_rules = NULL;
555 }
556 
557 
558 void
559 hash_acl_unapply(acl_main_t *am, u32 lc_index, int acl_index)
560 {
561  int i;
562 
563  DBG0("HASH ACL unapply: lc_index %d acl %d", lc_index, acl_index);
565  applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);
566 
567  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
568  u32 **hash_acl_applied_lc_index = &ha->lc_index_list;
569 
570  /* remove this acl# from the list of applied hash acls */
571  u32 index = vec_search(pal->applied_acls, acl_index);
572  if (index == ~0) {
573  clib_warning("BUG: trying to unapply unapplied acl_index %d on lc_index %d, according to lc",
574  acl_index, lc_index);
575  return;
576  }
577  vec_del1(pal->applied_acls, index);
578 
579  u32 index2 = vec_search((*hash_acl_applied_lc_index), lc_index);
580  if (index2 == ~0) {
581  clib_warning("BUG: trying to unapply twice acl_index %d on lc_index %d, according to h-acl info",
582  acl_index, lc_index);
583  return;
584  }
585  vec_del1((*hash_acl_applied_lc_index), index2);
586 
587  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
588 
589  for(i=0; i < vec_len((*applied_hash_aces)); i++) {
590  if (vec_elt_at_index(*applied_hash_aces,i)->acl_index == acl_index) {
591  DBG("Found applied ACL#%d at applied index %d", acl_index, i);
592  break;
593  }
594  }
595  if (vec_len((*applied_hash_aces)) <= i) {
596  DBG("Did not find applied ACL#%d at lc_index %d", acl_index, lc_index);
597  /* we went all the way without finding any entries. Probably a list was empty. */
598  return;
599  }
600 
601  void *oldheap = hash_acl_set_heap(am);
602  int base_offset = i;
603  int tail_offset = base_offset + vec_len(ha->rules);
604  int tail_len = vec_len((*applied_hash_aces)) - tail_offset;
605  DBG("base_offset: %d, tail_offset: %d, tail_len: %d", base_offset, tail_offset, tail_len);
606 
607  for(i=0; i < vec_len(ha->rules); i ++) {
609  applied_hash_aces, base_offset + i);
610  }
611  for(i=0; i < tail_len; i ++) {
612  /* move the entry at tail offset to base offset */
613  /* that is, from (tail_offset+i) -> (base_offset+i) */
614  DBG("UNAPPLY MOVE: lc_index %d, applied index %d -> %d", lc_index, tail_offset+i, base_offset + i);
615  move_applied_ace_hash_entry(am, lc_index, applied_hash_aces, tail_offset + i, base_offset + i);
616  }
617  /* trim the end of the vector */
618  _vec_len((*applied_hash_aces)) -= vec_len(ha->rules);
619 
620  remake_hash_applied_mask_info_vec(am, applied_hash_aces, lc_index);
621 
622  clib_mem_set_heap (oldheap);
623 }
624 
625 /*
626  * Create the applied ACEs and update the hash table,
627  * taking into account that the ACL may not be the last
628  * in the vector of applied ACLs.
629  *
630  * For now, walk from the end of the vector and unapply the ACLs,
631  * then apply the one in question and reapply the rest.
632  */
633 
634 void
635 hash_acl_reapply(acl_main_t *am, u32 lc_index, int acl_index)
636 {
638  u32 **applied_acls = &acontext->acl_indices;
639  int i;
640  int start_index = vec_search((*applied_acls), acl_index);
641 
642  DBG0("Start index for acl %d in lc_index %d is %d", acl_index, lc_index, start_index);
643  /*
644  * This function is called after we find out the lc_index where ACL is applied.
645  * If the by-lc_index vector does not have the ACL#, then it's a bug.
646  */
647  ASSERT(start_index < vec_len(*applied_acls));
648 
649  /* unapply all the ACLs at the tail side, up to the current one */
650  for(i = vec_len(*applied_acls) - 1; i > start_index; i--) {
651  hash_acl_unapply(am, lc_index, *vec_elt_at_index(*applied_acls, i));
652  }
653  for(i = start_index; i < vec_len(*applied_acls); i++) {
654  hash_acl_apply(am, lc_index, *vec_elt_at_index(*applied_acls, i), i);
655  }
656 }
657 
658 static void
660 {
661  ip6_address_mask_from_width(addr, prefix_len);
662 }
663 
664 
665 /* Maybe should be moved into the core somewhere */
666 always_inline void
/* NOTE(review): signature line lost in extraction. The wrapper below
 * (make_ip4_address_mask) calls ip4_address_mask_from_width(addr,
 * prefix_len), so this is ip4_address_mask_from_width(ip4_address_t *a,
 * u32 width): zero the address, then set `width` leading one-bits,
 * MSB-first within each successive byte. */
668 {
669  int i, byte, bit, bitnum;
670  ASSERT (width <= 32);
671  memset (a, 0, sizeof (a[0]));
672  for (i = 0; i < width; i++)
673  {
 /* bit positions run from the most significant end of each byte */
674  bitnum = (7 - (i & 7));
675  byte = i / 8;
676  bit = 1 << bitnum;
677  a->as_u8[byte] |= bit;
678  }
679 }
680 
681 
682 static void
684 {
685  ip4_address_mask_from_width(addr, prefix_len);
686 }
687 
688 static void
689 make_port_mask(u16 *portmask, u16 port_first, u16 port_last)
690 {
691  if (port_first == port_last) {
692  *portmask = 0xffff;
693  /* single port is representable by masked value */
694  return;
695  }
696 
697  *portmask = 0;
698  return;
699 }
700 
701 static void
703 {
704  memset(mask, 0, sizeof(*mask));
705  memset(&hi->match, 0, sizeof(hi->match));
706  hi->action = r->is_permit;
707 
708  /* we will need to be matching based on lc_index and mask_type_index when applied */
709  mask->pkt.lc_index = ~0;
710  /* we will assign the match of mask_type_index later when we find it*/
711  mask->pkt.mask_type_index_lsb = ~0;
712 
713  mask->pkt.is_ip6 = 1;
714  hi->match.pkt.is_ip6 = r->is_ipv6;
715  if (r->is_ipv6) {
717  hi->match.ip6_addr[0] = r->src.ip6;
719  hi->match.ip6_addr[1] = r->dst.ip6;
720  } else {
721  memset(hi->match.l3_zero_pad, 0, sizeof(hi->match.l3_zero_pad));
723  hi->match.ip4_addr[0] = r->src.ip4;
725  hi->match.ip4_addr[1] = r->dst.ip4;
726  }
727 
728  if (r->proto != 0) {
729  mask->l4.proto = ~0; /* L4 proto needs to be matched */
730  hi->match.l4.proto = r->proto;
731 
732  /* Calculate the src/dst port masks and make the src/dst port matches accordingly */
734  hi->match.l4.port[0] = r->src_port_or_type_first & mask->l4.port[0];
735 
737  hi->match.l4.port[1] = r->dst_port_or_code_first & mask->l4.port[1];
738  /* L4 info must be valid in order to match */
739  mask->pkt.l4_valid = 1;
740  hi->match.pkt.l4_valid = 1;
741  /* And we must set the mask to check that it is an initial fragment */
742  mask->pkt.is_nonfirst_fragment = 1;
744  if ((r->proto == IPPROTO_TCP) && (r->tcp_flags_mask != 0)) {
745  /* if we want to match on TCP flags, they must be masked off as well */
746  mask->pkt.tcp_flags = r->tcp_flags_mask;
748  /* and the flags need to be present within the packet being matched */
749  mask->pkt.tcp_flags_valid = 1;
750  hi->match.pkt.tcp_flags_valid = 1;
751  }
752  }
753  /* Sanitize the mask and the match */
754  u64 *pmask = (u64 *)mask;
755  u64 *pmatch = (u64 *)&hi->match;
756  int j;
757  for(j=0; j<6; j++) {
758  pmatch[j] = pmatch[j] & pmask[j];
759  }
760 }
761 
762 
763 int hash_acl_exists(acl_main_t *am, int acl_index)
764 {
765  if (acl_index >= vec_len(am->hash_acl_infos))
766  return 0;
767 
768  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
769  return ha->hash_acl_exists;
770 }
771 
772 void hash_acl_add(acl_main_t *am, int acl_index)
773 {
774  void *oldheap = hash_acl_set_heap(am);
775  DBG("HASH ACL add : %d", acl_index);
776  int i;
777  acl_list_t *a = &am->acls[acl_index];
778  vec_validate(am->hash_acl_infos, acl_index);
779  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
780  memset(ha, 0, sizeof(*ha));
781  ha->hash_acl_exists = 1;
782 
783  /* walk the newly added ACL entries and ensure that for each of them there
784  is a mask type, increment a reference count for that mask type */
785  for(i=0; i < a->count; i++) {
786  hash_ace_info_t ace_info;
787  fa_5tuple_t mask;
788  memset(&ace_info, 0, sizeof(ace_info));
789  ace_info.acl_index = acl_index;
790  ace_info.ace_index = i;
791 
792  make_mask_and_match_from_rule(&mask, &a->rules[i], &ace_info);
793  mask.pkt.flags_reserved = 0b000;
794  ace_info.base_mask_type_index = assign_mask_type_index(am, &mask);
795  /* assign the mask type index for matching itself */
797  DBG("ACE: %d mask_type_index: %d", i, ace_info.base_mask_type_index);
798  vec_add1(ha->rules, ace_info);
799  }
800  /*
801  * if an ACL is applied somewhere, fill the corresponding lookup data structures.
802  * We need to take care if the ACL is not the last one in the vector of ACLs applied to the interface.
803  */
804  if (acl_index < vec_len(am->lc_index_vec_by_acl)) {
805  u32 *lc_index;
806  vec_foreach(lc_index, am->lc_index_vec_by_acl[acl_index]) {
807  hash_acl_reapply(am, *lc_index, acl_index);
808  }
809  }
810  clib_mem_set_heap (oldheap);
811 }
812 
813 void hash_acl_delete(acl_main_t *am, int acl_index)
814 {
815  void *oldheap = hash_acl_set_heap(am);
816  DBG0("HASH ACL delete : %d", acl_index);
817  /*
818  * If the ACL is applied somewhere, remove the references of it (call hash_acl_unapply)
819  * this is a different behavior from the linear lookup where an empty ACL is "deny all",
820  *
821  * However, following vpp-dev discussion the ACL that is referenced elsewhere
822  * should not be possible to delete, and the change adding this also adds
823  * the safeguards to that respect, so this is not a problem.
824  *
825  * The part to rememeber is that this routine is called in process of reapplication
826  * during the acl_add_replace() API call - the old acl ruleset is deleted, then
827  * the new one is added, without the change in the applied ACLs - so this case
828  * has to be handled.
829  */
830  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
831  u32 *lc_list_copy = 0;
832  {
833  u32 *lc_index;
834  lc_list_copy = vec_dup(ha->lc_index_list);
835  vec_foreach(lc_index, lc_list_copy) {
836  hash_acl_unapply(am, *lc_index, acl_index);
837  }
838  vec_free(lc_list_copy);
839  }
840 
841  /* walk the mask types for the ACL about-to-be-deleted, and decrease
842  * the reference count, possibly freeing up some of them */
843  int i;
844  for(i=0; i < vec_len(ha->rules); i++) {
846  }
847  ha->hash_acl_exists = 0;
848  vec_free(ha->rules);
849  clib_mem_set_heap (oldheap);
850 }
851 
852 
853 void
855 {
856  vlib_cli_output(vm, "\nACL lookup hash table:\n%U\n",
857  BV (format_bihash), &am->acl_lookup_hash, verbose);
858 }
859 
860 void
862 {
863  acl_main_t *am = &acl_main;
864  vlib_main_t *vm = am->vlib_main;
866 
867  vlib_cli_output (vm, "Mask-type entries:");
868  /* *INDENT-OFF* */
870  ({
871  vlib_cli_output(vm, " %3d: %016llx %016llx %016llx %016llx %016llx %016llx refcount %d",
872  mte - am->ace_mask_type_pool,
873  mte->mask.kv_40_8.key[0], mte->mask.kv_40_8.key[1], mte->mask.kv_40_8.key[2],
874  mte->mask.kv_40_8.key[3], mte->mask.kv_40_8.key[4], mte->mask.kv_40_8.value, mte->refcount);
875  }));
876  /* *INDENT-ON* */
877 }
878 
879 void
881 {
882  acl_main_t *am = &acl_main;
883  vlib_main_t *vm = am->vlib_main;
884  u32 i, j;
885  u64 *m;
886  vlib_cli_output (vm, "Mask-ready ACL representations\n");
887  for (i = 0; i < vec_len (am->hash_acl_infos); i++)
888  {
889  if ((acl_index != ~0) && (acl_index != i))
890  {
891  continue;
892  }
893  hash_acl_info_t *ha = &am->hash_acl_infos[i];
894  vlib_cli_output (vm, "acl-index %u bitmask-ready layout\n", i);
895  vlib_cli_output (vm, " applied lc_index list: %U\n",
896  format_vec32, ha->lc_index_list, "%d");
897  for (j = 0; j < vec_len (ha->rules); j++)
898  {
899  hash_ace_info_t *pa = &ha->rules[j];
900  m = (u64 *) & pa->match;
901  vlib_cli_output (vm,
902  " %4d: %016llx %016llx %016llx %016llx %016llx %016llx base mask index %d acl %d rule %d action %d\n",
903  j, m[0], m[1], m[2], m[3], m[4], m[5],
905  pa->action);
906  }
907  }
908 }
909 
910 static void
912  vlib_cli_output(vm,
913  " %4d: acl %d ace %d acl pos %d pae index: %d",
914  j, cr->acl_index, cr->ace_index, cr->acl_position, cr->applied_entry_index);
915 }
916 
917 static void
919 {
920  vlib_cli_output (vm,
921  " %4d: acl %d rule %d action %d bitmask-ready rule %d colliding_rules: %d next %d prev %d tail %d hitcount %lld acl_pos: %d",
922  j, pae->acl_index, pae->ace_index, pae->action,
926  int jj;
927  for(jj=0; jj<vec_len(pae->colliding_rules); jj++)
929 }
930 
931 static void
933 {
934  vlib_cli_output (vm,
935  " %4d: mask type index %d first rule index %d num_entries %d max_collisions %d",
937 }
938 
939 void
941 {
942  acl_main_t *am = &acl_main;
943  vlib_main_t *vm = am->vlib_main;
944  u32 lci, j;
945  vlib_cli_output (vm, "Applied lookup entries for lookup contexts");
946 
947  for (lci = 0;
948  (lci < vec_len(am->applied_hash_acl_info_by_lc_index)); lci++)
949  {
950  if ((lc_index != ~0) && (lc_index != lci))
951  {
952  continue;
953  }
954  vlib_cli_output (vm, "lc_index %d:", lci);
956  {
959  vlib_cli_output (vm, " applied acls: %U", format_vec32,
960  pal->applied_acls, "%d");
961  }
963  {
964  vlib_cli_output (vm, " applied mask info entries:");
965  for (j = 0;
967  j++)
968  {
971  [lci][j]);
972  }
973  }
974  if (lci < vec_len (am->hash_entry_vec_by_lc_index))
975  {
976  vlib_cli_output (vm, " lookup applied entries:");
977  for (j = 0;
978  j < vec_len (am->hash_entry_vec_by_lc_index[lci]);
979  j++)
980  {
981  acl_plugin_print_pae (vm, j,
983  [lci][j]);
984  }
985  }
986  }
987 }
988 
989 void
990 acl_plugin_show_tables_bihash (u32 show_bihash_verbose)
991 {
992  acl_main_t *am = &acl_main;
993  vlib_main_t *vm = am->vlib_main;
994  show_hash_acl_hash (vm, am, show_bihash_verbose);
995 }
996 
static void move_applied_ace_hash_entry(acl_main_t *am, u32 lc_index, applied_hash_ace_entry_t **applied_hash_aces, u32 old_index, u32 new_index)
Definition: hash_lookup.c:454
acl_rule_t * rules
Definition: acl.h:95
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:437
static void deactivate_applied_ace_hash_entry(acl_main_t *am, u32 lc_index, applied_hash_ace_entry_t **applied_hash_aces, u32 old_index)
Definition: hash_lookup.c:499
vmrglw vmrglh hi
u8 is_ipv6
Definition: types.h:24
fa_5tuple_t mask
Definition: acl.h:123
void acl_plugin_show_tables_mask_type(void)
Definition: hash_lookup.c:861
Definition: acl.h:119
a
Definition: bitmap.h:537
u32 acl_index
static void * hash_acl_set_heap(acl_main_t *am)
Definition: hash_lookup.c:293
fa_session_l4_key_t l4
Definition: fa_node.h:71
fa_packet_info_t pkt
Definition: fa_node.h:73
void hash_acl_unapply(acl_main_t *am, u32 lc_index, int acl_index)
Definition: hash_lookup.c:559
void acl_plugin_hash_acl_set_trace_heap(int on)
Definition: hash_lookup.c:324
void acl_plugin_show_tables_applied_info(u32 lc_index)
Definition: hash_lookup.c:940
void hash_acl_reapply(acl_main_t *am, u32 lc_index, int acl_index)
Definition: hash_lookup.c:635
#define clib_error(format, args...)
Definition: error.h:62
unsigned long u64
Definition: types.h:89
void * mheap_alloc(void *memory, uword size)
Definition: mheap.c:947
#define NULL
Definition: clib.h:55
u8 dst_prefixlen
Definition: types.h:28
u8 action
u32 hash_ace_info_index
u32 count
Definition: acl.h:94
static void add_colliding_rule(acl_main_t *am, applied_hash_ace_entry_t **applied_hash_aces, u32 head_index, u32 applied_entry_index)
Definition: hash_lookup.c:225
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
Definition: vec.h:523
int i
static mheap_t * mheap_header(u8 *v)
acl_main_t acl_main
Definition: jvpp_acl.h:39
#define MHEAP_FLAG_THREAD_SAFE
unsigned char u8
Definition: types.h:56
static u32 find_head_applied_ace_index(applied_hash_ace_entry_t **applied_hash_aces, u32 curr_index)
Definition: hash_lookup.c:436
void hash_acl_add(acl_main_t *am, int acl_index)
Definition: hash_lookup.c:772
u32 ** lc_index_vec_by_acl
Definition: acl.h:180
u16 dst_port_or_code_last
Definition: types.h:33
u8 src_prefixlen
Definition: types.h:26
static void make_port_mask(u16 *portmask, u16 port_first, u16 port_last)
Definition: hash_lookup.c:689
u32 acl_position
u32 next_applied_entry_index
#define pool_foreach(VAR, POOL, BODY)
Iterate through pool.
Definition: pool.h:440
int clib_bihash_add_del(clib_bihash *h, clib_bihash_kv *add_v, int is_add)
Add or delete a (key,value) pair from a bi-hash table.
#define always_inline
Definition: clib.h:92
#define vec_new(T, N)
Create new vector of given type and length (unspecified alignment, no header).
Definition: vec.h:309
static void activate_applied_ace_hash_entry(acl_main_t *am, u32 lc_index, applied_hash_ace_entry_t **applied_hash_aces, u32 new_index)
Definition: hash_lookup.c:245
ip46_address_t src
Definition: types.h:25
u8 * format_memory_size(u8 *s, va_list *va)
Definition: std-formats.c:193
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
u8 is_permit
Definition: types.h:23
#define vec_resize(V, N)
Resize a vector (no header, unspecified alignment) Add N elements to end of given vector V...
Definition: vec.h:240
unsigned int u32
Definition: types.h:88
#define vec_search(v, E)
Search a vector for the index of the entry that matches.
Definition: vec.h:942
static void release_mask_type_index(acl_main_t *am, u32 mask_type_index)
Definition: hash_lookup.c:138
void hash_acl_apply(acl_main_t *am, u32 lc_index, int acl_index, u32 acl_position)
Definition: hash_lookup.c:354
static u32 assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
Definition: hash_lookup.c:116
int hash_acl_exists(acl_main_t *am, int acl_index)
Definition: hash_lookup.c:763
ip46_address_t dst
Definition: types.h:27
static void add_del_hashtable_entry(acl_main_t *am, u32 lc_index, applied_hash_ace_entry_t **applied_hash_aces, u32 index, int is_add)
Definition: hash_lookup.c:89
collision_match_rule_t * colliding_rules
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:461
u32 l3_zero_pad[6]
Definition: fa_node.h:66
static void ip4_address_mask_from_width(ip4_address_t *a, u32 width)
Definition: hash_lookup.c:667
u32 mask_type_index
u16 dst_port_or_code_first
Definition: types.h:32
hash_applied_mask_info_t ** hash_applied_mask_info_vec_by_lc_index
Definition: acl.h:201
unsigned short u16
Definition: types.h:57
hash_acl_info_t * hash_acl_infos
Definition: acl.h:141
#define pool_put(P, E)
Free an object E in pool P.
Definition: pool.h:273
#define vec_dup(V)
Return copy of vector (no header, no alignment)
Definition: vec.h:373
static void make_mask_and_match_from_rule(fa_5tuple_t *mask, acl_rule_t *r, hash_ace_info_t *hi)
Definition: hash_lookup.c:702
u64 hitcount
hash_ace_info_t * rules
#define vec_del1(v, i)
Delete the element at index I.
Definition: vec.h:806
clib_bihash_48_8_t acl_lookup_hash
Definition: acl.h:142
void show_hash_acl_hash(vlib_main_t *vm, acl_main_t *am, u32 verbose)
Definition: hash_lookup.c:854
void acl_plugin_hash_acl_set_validate_heap(int on)
Definition: hash_lookup.c:308
u8 proto
Definition: types.h:29
void clib_bihash_init(clib_bihash *h, char *name, u32 nbuckets, uword memory_size)
initialize a bounded index extensible hash table
u16 src_port_or_type_first
Definition: types.h:30
void acl_plugin_show_tables_bihash(u32 show_bihash_verbose)
Definition: hash_lookup.c:990
#define pool_get_aligned(P, E, A)
Allocate an object E from a pool P (general version).
Definition: pool.h:188
static void vec_del_collision_rule(collision_match_rule_t **pvec, u32 applied_entry_index)
Definition: hash_lookup.c:201
u32 refcount
Definition: acl.h:124
vlib_main_t * vm
Definition: buffer.c:294
applied_hash_acl_info_t * applied_hash_acl_info_by_lc_index
Definition: acl.h:157
#define vec_free(V)
Free vector&#39;s memory (no header).
Definition: vec.h:339
static void make_ip4_address_mask(ip4_address_t *addr, u8 prefix_len)
Definition: hash_lookup.c:683
int acl_lookup_hash_initialized
Definition: acl.h:149
static void * clib_mem_set_heap(void *heap)
Definition: mem.h:226
void hash_acl_delete(acl_main_t *am, int acl_index)
Definition: hash_lookup.c:813
#define clib_warning(format, args...)
Definition: error.h:59
#define clib_memcpy(a, b, c)
Definition: string.h:75
u8 * format_vec32(u8 *s, va_list *va)
Definition: std-formats.c:43
#define DBG(...)
applied_hash_ace_entry_t ** hash_entry_vec_by_lc_index
Definition: acl.h:156
static void acl_plugin_print_colliding_rule(vlib_main_t *vm, int j, collision_match_rule_t *cr)
Definition: hash_lookup.c:911
u32 hash_lookup_hash_buckets
Definition: acl.h:143
static void assign_mask_type_index_to_pae(acl_main_t *am, applied_hash_ace_entry_t *pae)
Definition: hash_lookup.c:337
#define ASSERT(truth)
u8 tcp_flags_valid
Definition: fa_node.h:33
u16 src_port_or_type_last
Definition: types.h:31
static void acl_plugin_print_pae(vlib_main_t *vm, int j, applied_hash_ace_entry_t *pae)
Definition: hash_lookup.c:918
#define MHEAP_FLAG_VALIDATE
static void acl_plugin_print_applied_mask_info(vlib_main_t *vm, int j, hash_applied_mask_info_t *mi)
Definition: hash_lookup.c:932
#define DBG0(...)
#define MHEAP_FLAG_TRACE
static applied_hash_ace_entry_t ** get_applied_hash_aces(acl_main_t *am, u32 lc_index)
Definition: hash_lookup.c:36
#define MHEAP_FLAG_SMALL_OBJECT_CACHE
ace_mask_type_entry_t * ace_mask_type_pool
Definition: acl.h:198
static u32 find_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
Definition: hash_lookup.c:102
u32 prev_applied_entry_index
static void fill_applied_hash_ace_kv(acl_main_t *am, applied_hash_ace_entry_t **applied_hash_aces, u32 lc_index, u32 new_index, clib_bihash_kv_48_8_t *kv)
Definition: hash_lookup.c:57
u8 is_nonfirst_fragment
Definition: fa_node.h:35
acl_lookup_context_t * acl_lookup_contexts
Definition: acl.h:138
uword hash_lookup_mheap_size
Definition: acl.h:148
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
u8 tcp_flags_mask
Definition: types.h:35
static void hashtable_add_del(acl_main_t *am, clib_bihash_kv_48_8_t *kv, int is_add)
Definition: hash_lookup.c:48
static void ip6_address_mask_from_width(ip6_address_t *a, u32 width)
Definition: ip6_packet.h:257
u8 tcp_flags_value
Definition: types.h:34
void mheap_validate(void *v)
Definition: mheap.c:1339
#define vec_foreach(var, vec)
Vector iterator.
void * hash_lookup_mheap
Definition: acl.h:147
static void del_colliding_rule(applied_hash_ace_entry_t **applied_hash_aces, u32 head_index, u32 applied_entry_index)
Definition: hash_lookup.c:216
u32 tail_applied_entry_index
ip4_address_t ip4_addr[2]
Definition: fa_node.h:67
vhost_vring_addr_t addr
Definition: vhost-user.h:83
u16 mask_type_index_lsb
Definition: fa_node.h:31
static void remake_hash_applied_mask_info_vec(acl_main_t *am, applied_hash_ace_entry_t **applied_hash_aces, u32 lc_index)
Definition: hash_lookup.c:149
Definition: acl.h:89
static void make_ip6_address_mask(ip6_address_t *addr, u8 prefix_len)
Definition: hash_lookup.c:659
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:62
void acl_plugin_show_tables_acl_hash_info(u32 acl_index)
Definition: hash_lookup.c:880
void vlib_cli_output(vlib_main_t *vm, char *fmt,...)
Definition: cli.c:681
acl_list_t * acls
Definition: acl.h:140
u32 hash_lookup_hash_memory
Definition: acl.h:144
u32 ace_index
ip6_address_t ip6_addr[2]
Definition: fa_node.h:69
foreach_fa_cleaner_counter vlib_main_t * vlib_main
Definition: acl.h:319