FD.io VPP v18.07.1-19-g511ce25: hash_lookup.c
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2017 Cisco and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <stddef.h>
19 #include <netinet/in.h>
20 
21 #include <vlibapi/api.h>
22 #include <vlibmemory/api.h>
23 
24 #include <vlib/vlib.h>
25 #include <vnet/vnet.h>
26 #include <vnet/pg/pg.h>
27 #include <vppinfra/error.h>
28 #include <vnet/plugin/plugin.h>
29 #include <acl/acl.h>
30 #include <vppinfra/bihash_48_8.h>
31 
32 #include "hash_lookup.h"
33 #include "hash_lookup_private.h"
34 
35 
36 static applied_hash_ace_entry_t **get_applied_hash_aces(acl_main_t *am, u32 lc_index)
37 {
38  applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, lc_index);
39 
40 /*is_input ? vec_elt_at_index(am->input_hash_entry_vec_by_sw_if_index, sw_if_index)
41  : vec_elt_at_index(am->output_hash_entry_vec_by_sw_if_index, sw_if_index);
42 */
43  return applied_hash_aces;
44 }
45 
46 
47 static void
48 hashtable_add_del(acl_main_t *am, clib_bihash_kv_48_8_t *kv, int is_add)
49 {
50  DBG("HASH ADD/DEL: %016llx %016llx %016llx %016llx %016llx %016llx %016llx add %d",
51  kv->key[0], kv->key[1], kv->key[2],
52  kv->key[3], kv->key[4], kv->key[5], kv->value, is_add);
53  BV (clib_bihash_add_del) (&am->acl_lookup_hash, kv, is_add);
54 }
55 
56 /*
57  * TupleMerge
58  *
59  * Initial adaptation by Valerio Bruschi (valerio.bruschi@telecom-paristech.fr)
60  * based on the TupleMerge [1] simulator kindly made available
61  * by James Daly (dalyjamese@gmail.com) and Eric Torng (torng@cse.msu.edu)
62  * ( http://www.cse.msu.edu/~dalyjame/ or http://www.cse.msu.edu/~torng/ ),
63  * refactoring by Andrew Yourtchenko.
64  *
65  * [1] James Daly, Eric Torng "TupleMerge: Building Online Packet Classifiers
66  * by Omitting Bits", In Proc. IEEE ICCCN 2017, pp. 1-10
67  *
68  */
69 
70 static int
71 count_bits(u64 word)
72 {
73  int counter = 0;
74  while (word)
75  {
76  counter += word & 1;
77  word >>= 1;
78  }
79  return counter;
80 }
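/*
 * A quick illustration (with assumed values): count_bits() is a plain
 * population count of the 64-bit word, e.g.
 *   count_bits(0x00000000000000f0ULL) == 4
 *   count_bits(clib_host_to_net_u64(0xffffffff00000000ULL)) == 32
 * so applied to an address mask it yields the prefix length irrespective
 * of the byte order the mask is stored in.
 */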
81 
82 /* check if mask2 can be contained by mask1 */
83 static u8
84 first_mask_contains_second_mask(int is_ip6, fa_5tuple_t *mask1, fa_5tuple_t *mask2)
85 {
86  int i;
87  if (is_ip6)
88  {
89  for (i = 0; i < 2; i++)
90  {
91  if ((mask1->ip6_addr[0].as_u64[i] & mask2->ip6_addr[0].as_u64[i]) !=
92  mask1->ip6_addr[0].as_u64[i])
93  return 0;
94  if ((mask1->ip6_addr[1].as_u64[i] & mask2->ip6_addr[1].as_u64[i]) !=
95  mask1->ip6_addr[1].as_u64[i])
96  return 0;
97  }
98  }
99  else
100  {
101  /* check the pads, both masks must have them as 0 */
102  u32 padcheck = 0;
103  int i;
104  for (i=0; i<6; i++) {
105  padcheck |= mask1->l3_zero_pad[i];
106  padcheck |= mask2->l3_zero_pad[i];
107  }
108  if (padcheck != 0)
109  return 0;
110  if ((mask1->ip4_addr[0].as_u32 & mask2->ip4_addr[0].as_u32) !=
111  mask1->ip4_addr[0].as_u32)
112  return 0;
113  if ((mask1->ip4_addr[1].as_u32 & mask2->ip4_addr[1].as_u32) !=
114  mask1->ip4_addr[1].as_u32)
115  return 0;
116  }
117 
118  /* take care if ports are not exact-match */
119  if ((mask1->l4.as_u64 & mask2->l4.as_u64) != mask1->l4.as_u64)
120  return 0;
121 
122  if ((mask1->pkt.as_u64 & mask2->pkt.as_u64) != mask1->pkt.as_u64)
123  return 0;
124 
125  return 1;
126 }
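/*
 * Containment example (illustrative values): with IPv4 source masks
 * mask1 = 255.255.255.0 (/24) and mask2 = 255.255.255.240 (/28),
 * (mask1 & mask2) == mask1 holds, so the wider /24 table mask can hold
 * the more specific /28 rule; with the operands swapped the check fails,
 * since a /28 mask cannot represent a /24 rule without losing bits.
 */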
127 
128 
129 
130 /*
131  * TupleMerge:
132  *
133  * Consider the situation when we have to create a new table
134  * T for a given rule R. This occurs for the first rule inserted and
135  * for later rules if it is incompatible with all existing tables.
136  * In this event, we need to determine mT for a new table.
137  * Setting mT = mR is not a good strategy; if another similar,
138  * but slightly less specific, rule appears we will be unable to
139  * add it to T and will thus have to create another new table. We
140  * thus consider two factors: is the rule more strongly aligned
141  * with source or destination addresses (usually the two most
142  * important fields) and how much slack needs to be given to
143  * allow for other rules. If the source and destination addresses
144  * are close together (within 4 bits for our experiments), we use
145  * both of them. Otherwise, we drop the smaller (less specific)
146  * address and its associated port field from consideration; R is
147  * predominantly aligned with one of the two fields and should
148  * be grouped with other similar rules. This is similar to TSS
149  * dropping port fields, but since it is based on observable rule
150  * characteristics it is more likely to keep important fields and
151  * discard less useful ones.
152  * We then look at the absolute lengths of the addresses. If
153  * the address is long, we are more likely to try to add shorter
154  * lengths and likewise the reverse. We thus remove a few bits
155  * from both address fields with more bits removed from longer
156  * addresses. For 32 bit addresses, we remove 4 bits, 3 for more
157  * than 24, 2 for more than 16, and so on (so 8 and fewer bits
158  * don’t have any removed). We only do this for prefix fields like
159  * addresses; both range fields (like ports) and exact match fields
160  * (like protocol) should remain as they are.
161  */
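/*
 * A worked example of the source/destination alignment rule above
 * (illustrative values): a rule with a /32 source and a /8 destination has
 * counter_s = 32 and counter_d = 8; the difference of 24 exceeds the 4-bit
 * threshold, so relax_tuple() below drops the weaker destination address
 * and the destination port from the table mask and keeps only the
 * strongly aligned source side.
 */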
162 
163 
164 static u32
165 shift_ip4_if(u32 mask, u32 thresh, int numshifts, u32 else_val)
166 {
167  if (mask > thresh)
168  return clib_host_to_net_u32((clib_net_to_host_u32(mask) << numshifts) & 0xFFFFFFFF);
169  else
170  return else_val;
171 }
172 
173 static void
174 relax_ip4_addr(ip4_address_t *ip4_mask, int relax2) {
175  int shifts_per_relax[2][4] = { { 6, 5, 4, 2 }, { 3, 2, 1, 1 } };
176 
177  int *shifts = shifts_per_relax[relax2];
178  if(ip4_mask->as_u32 == 0xffffffff)
179  ip4_mask->as_u32 = clib_host_to_net_u32((clib_net_to_host_u32(ip4_mask->as_u32) << shifts[0])&0xFFFFFFFF);
180  else
181  ip4_mask->as_u32 = shift_ip4_if(ip4_mask->as_u32, 0xffffff00, shifts[1],
182  shift_ip4_if(ip4_mask->as_u32, 0xffff0000, shifts[2],
183  shift_ip4_if(ip4_mask->as_u32, 0xff000000, shifts[3], ip4_mask->as_u32)));
184 }
185 
186 static void
187 relax_ip6_addr(ip6_address_t *ip6_mask, int relax2) {
188  /*
189  * This "better than nothing" relax logic is based on heuristics
190  * from IPv6 knowledge, and may not be optimal.
191  * Some further tuning may be needed in the future.
192  */
193  if (ip6_mask->as_u64[0] == 0xffffffffffffffffULL) {
194  if (ip6_mask->as_u64[1] == 0xffffffffffffffffULL) {
195  /* relax a /128 down to /64 - likely to have more hosts */
196  ip6_mask->as_u64[1] = 0;
197  } else if (ip6_mask->as_u64[1] == 0) {
198  /* relax a /64 down to /56 - likely to have more subnets */
199  ip6_mask->as_u64[0] = clib_host_to_net_u64(0xffffffffffffff00ULL);
200  }
201  }
202 }
203 
204 static void
205 relax_tuple(fa_5tuple_t *mask, int is_ip6, int relax2){
206  fa_5tuple_t save_mask = *mask;
207 
208  int counter_s = 0, counter_d = 0;
209  if (is_ip6) {
210  int i;
211  for(i=0; i<2; i++){
212  counter_s += count_bits(mask->ip6_addr[0].as_u64[i]);
213  counter_d += count_bits(mask->ip6_addr[1].as_u64[i]);
214  }
215  } else {
216  counter_s += count_bits(mask->ip4_addr[0].as_u32);
217  counter_d += count_bits(mask->ip4_addr[1].as_u32);
218  }
219 
220 /*
221  * is the rule more strongly aligned with source or destination addresses
222  * (usually the two most important fields) and how much slack needs to be
223  * given to allow for other rules. If the source and destination addresses
224  * are close together (within 4 bits for our experiments), we use both of them.
225  * Otherwise, we drop the smaller (less specific) address and its associated
226  * port field from consideration
227  */
228  const int deltaThreshold = 4;
229  /* const int deltaThreshold = 8; if IPV6? */
230  int delta = counter_s - counter_d;
231  if (-delta > deltaThreshold) {
232  if (is_ip6)
233  mask->ip6_addr[0].as_u64[1] = mask->ip6_addr[0].as_u64[0] = 0;
234  else
235  mask->ip4_addr[0].as_u32 = 0;
236  mask->l4.port[0] = 0;
237  } else if (delta > deltaThreshold) {
238  if (is_ip6)
239  mask->ip6_addr[1].as_u64[1] = mask->ip6_addr[1].as_u64[0] = 0;
240  else
241  mask->ip4_addr[1].as_u32 = 0;
242  mask->l4.port[1] = 0;
243  }
244 
245  if (is_ip6) {
246  relax_ip6_addr(&mask->ip6_addr[0], relax2);
247  relax_ip6_addr(&mask->ip6_addr[1], relax2);
248  } else {
249  relax_ip4_addr(&mask->ip4_addr[0], relax2);
250  relax_ip4_addr(&mask->ip4_addr[1], relax2);
251  }
252  mask->pkt.is_nonfirst_fragment = 0;
253  mask->pkt.l4_valid = 0;
254  if(!first_mask_contains_second_mask(is_ip6, mask, &save_mask)){
255  DBG( "TM-relaxing-ERROR");
256  *mask = save_mask;
257  }
258  DBG( "TM-relaxing-end");
259 }
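/*
 * End-to-end relaxation example (illustrative values): at relax level 0 an
 * exact /32 IPv4 source mask is shifted left by 6 bits and becomes /26
 * (255.255.255.192); at relax level 1 the shift is 3 bits, giving /29.
 * Port and protocol fields are never shifted, and the containment check
 * above restores the original mask if the relaxed one no longer covers it.
 */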
260 
261 static u32
262 find_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
263 {
264  ace_mask_type_entry_t *mte;
265  /* *INDENT-OFF* */
266  pool_foreach(mte, am->ace_mask_type_pool,
267  ({
268  if(memcmp(&mte->mask, mask, sizeof(*mask)) == 0)
269  return (mte - am->ace_mask_type_pool);
270  }));
271  /* *INDENT-ON* */
272  return ~0;
273 }
274 
275 static u32
276 assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
277 {
278  u32 mask_type_index = find_mask_type_index(am, mask);
279  ace_mask_type_entry_t *mte;
280  if(~0 == mask_type_index) {
281  pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
282  mask_type_index = mte - am->ace_mask_type_pool;
283  clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
284  mte->refcount = 0;
285 
286  /*
287  * We can use only 16 bits, since in the match there is only u16 field.
288  * Realistically, once you go to 64K of mask types, it is a huge
289  * problem anyway, so we might as well stop half way.
290  */
291  ASSERT(mask_type_index < 32768);
292  }
293  mte = am->ace_mask_type_pool + mask_type_index;
294  mte->refcount++;
295  DBG0("ASSIGN MTE index %d new refcount %d", mask_type_index, mte->refcount);
296  return mask_type_index;
297 }
298 
299 static void
300 lock_mask_type_index(acl_main_t *am, u32 mask_type_index)
301 {
302  DBG0("LOCK MTE index %d", mask_type_index);
303  ace_mask_type_entry_t *mte = pool_elt_at_index(am->ace_mask_type_pool, mask_type_index);
304  mte->refcount++;
305  DBG0("LOCK MTE index %d new refcount %d", mask_type_index, mte->refcount);
306 }
307 
308 
309 static void
310 release_mask_type_index(acl_main_t *am, u32 mask_type_index)
311 {
312  DBG0("RELEAS MTE index %d", mask_type_index);
313  ace_mask_type_entry_t *mte = pool_elt_at_index(am->ace_mask_type_pool, mask_type_index);
314  mte->refcount--;
315  DBG0("RELEAS MTE index %d new refcount %d", mask_type_index, mte->refcount);
316  if (mte->refcount == 0) {
317  /* we are not using this entry anymore */
318  memset(mte, 0xae, sizeof(*mte));
319  pool_put(am->ace_mask_type_pool, mte);
320  }
321 }
322 
323 
324 static u32
325 tm_assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask, int is_ip6, u32 lc_index)
326 {
327  u32 mask_type_index = ~0;
328  u32 for_mask_type_index = ~0;
329  ace_mask_type_entry_t *mte = 0;
330  int order_index;
331  /* look for existing mask comparable with the one in input */
332 
333  hash_applied_mask_info_t **hash_applied_mask_info_vec = vec_elt_at_index(am->hash_applied_mask_info_vec_by_lc_index, lc_index);
334  hash_applied_mask_info_t *minfo;
335 
336  if (vec_len(*hash_applied_mask_info_vec) > 0) {
337  for(order_index = vec_len((*hash_applied_mask_info_vec)) -1; order_index >= 0; order_index--) {
338  minfo = vec_elt_at_index((*hash_applied_mask_info_vec), order_index);
339  for_mask_type_index = minfo->mask_type_index;
340  mte = vec_elt_at_index(am->ace_mask_type_pool, for_mask_type_index);
341  if(first_mask_contains_second_mask(is_ip6, &mte->mask, mask)){
342  mask_type_index = (mte - am->ace_mask_type_pool);
343  lock_mask_type_index(am, mask_type_index);
344  break;
345  }
346  }
347  }
348 
349  if(~0 == mask_type_index) {
350  /* if no mask is found, then let's use a relaxed version of the original one, in order to be used by new ace_entries */
351  DBG( "TM-assigning mask type index-new one");
352  fa_5tuple_t relaxed_mask = *mask;
353  relax_tuple(&relaxed_mask, is_ip6, 0);
354  mask_type_index = assign_mask_type_index(am, &relaxed_mask);
355 
356  hash_applied_mask_info_t **hash_applied_mask_info_vec = vec_elt_at_index(am->hash_applied_mask_info_vec_by_lc_index, lc_index);
357 
358  int spot = vec_len((*hash_applied_mask_info_vec));
359  vec_validate((*hash_applied_mask_info_vec), spot);
360  minfo = vec_elt_at_index((*hash_applied_mask_info_vec), spot);
361  minfo->mask_type_index = mask_type_index;
362  minfo->num_entries = 0;
363  minfo->max_collisions = 0;
364  minfo->first_rule_index = ~0;
365 
366  /*
367  * We can use only 16 bits, since in the match there is only u16 field.
368  * Realistically, once you go to 64K of mask types, it is a huge
369  * problem anyway, so we might as well stop half way.
370  */
371  ASSERT(mask_type_index < 32768);
372  }
373  mte = am->ace_mask_type_pool + mask_type_index;
374  DBG0("TM-ASSIGN MTE index %d new refcount %d", mask_type_index, mte->refcount);
375  return mask_type_index;
376 }
377 
378 
379 static void
380 fill_applied_hash_ace_kv(acl_main_t *am,
381  applied_hash_ace_entry_t **applied_hash_aces,
382  u32 lc_index,
383  u32 new_index, clib_bihash_kv_48_8_t *kv)
384 {
385  fa_5tuple_t *kv_key = (fa_5tuple_t *)kv->key;
386  hash_acl_lookup_value_t *kv_val = (hash_acl_lookup_value_t *)&kv->value;
387  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
388  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);
389 
390  /* apply the mask to ace key */
391  hash_ace_info_t *ace_info = vec_elt_at_index(ha->rules, pae->hash_ace_info_index);
392  ace_mask_type_entry_t *mte = vec_elt_at_index(am->ace_mask_type_pool, pae->mask_type_index);
393 
394  u64 *pmatch = (u64 *) &ace_info->match;
395  u64 *pmask = (u64 *)&mte->mask;
396  u64 *pkey = (u64 *)kv->key;
397 
398  *pkey++ = *pmatch++ & *pmask++;
399  *pkey++ = *pmatch++ & *pmask++;
400  *pkey++ = *pmatch++ & *pmask++;
401  *pkey++ = *pmatch++ & *pmask++;
402  *pkey++ = *pmatch++ & *pmask++;
403  *pkey++ = *pmatch++ & *pmask++;
404 
405  kv_key->pkt.mask_type_index_lsb = pae->mask_type_index;
406  kv_key->pkt.lc_index = lc_index;
407  kv_val->as_u64 = 0;
408  kv_val->applied_entry_index = new_index;
409 }
410 
411 static void
412 add_del_hashtable_entry(acl_main_t *am,
413  u32 lc_index,
414  applied_hash_ace_entry_t **applied_hash_aces,
415  u32 index, int is_add)
416 {
417  clib_bihash_kv_48_8_t kv;
418 
419  fill_applied_hash_ace_kv(am, applied_hash_aces, lc_index, index, &kv);
420  hashtable_add_del(am, &kv, is_add);
421 }
422 
423 
424 static void
425 remake_hash_applied_mask_info_vec (acl_main_t * am,
426  applied_hash_ace_entry_t **
427  applied_hash_aces, u32 lc_index)
428 {
429  DBG0("remake applied hash mask info lc_index %d", lc_index);
430  hash_applied_mask_info_t *new_hash_applied_mask_info_vec =
431  vec_new (hash_applied_mask_info_t, 0);
432 
433  hash_applied_mask_info_t *minfo;
434  int i;
435  for (i = 0; i < vec_len ((*applied_hash_aces)); i++)
436  {
437  applied_hash_ace_entry_t *pae =
438  vec_elt_at_index ((*applied_hash_aces), i);
439 
440  /* check if mask_type_index is already there */
441  u32 new_pointer = vec_len (new_hash_applied_mask_info_vec);
442  int search;
443  for (search = 0; search < vec_len (new_hash_applied_mask_info_vec);
444  search++)
445  {
446  minfo = vec_elt_at_index (new_hash_applied_mask_info_vec, search);
447  if (minfo->mask_type_index == pae->mask_type_index)
448  break;
449  }
450 
451  vec_validate ((new_hash_applied_mask_info_vec), search);
452  minfo = vec_elt_at_index ((new_hash_applied_mask_info_vec), search);
453  if (search == new_pointer)
454  {
455  DBG0("remaking index %d", search);
456  minfo->mask_type_index = pae->mask_type_index;
457  minfo->num_entries = 0;
458  minfo->max_collisions = 0;
459  minfo->first_rule_index = ~0;
460  }
461 
462  minfo->num_entries = minfo->num_entries + 1;
463 
464  if (vec_len (pae->colliding_rules) > minfo->max_collisions)
465  minfo->max_collisions = vec_len (pae->colliding_rules);
466 
467  if (minfo->first_rule_index > i)
468  minfo->first_rule_index = i;
469  }
470 
471  hash_applied_mask_info_t **hash_applied_mask_info_vec =
472  vec_elt_at_index (am->hash_applied_mask_info_vec_by_lc_index, lc_index);
473 
474  vec_free ((*hash_applied_mask_info_vec));
475  (*hash_applied_mask_info_vec) = new_hash_applied_mask_info_vec;
476 }
477 
478 static void
479 vec_del_collision_rule (collision_match_rule_t ** pvec,
480  u32 applied_entry_index)
481 {
482  u32 i = 0;
483  u32 deleted = 0;
484  while (i < _vec_len ((*pvec)))
485  {
486  collision_match_rule_t *cr = vec_elt_at_index ((*pvec), i);
487  if (cr->applied_entry_index == applied_entry_index)
488  {
489  /* vec_del1 ((*pvec), i) would be more efficient but would reorder the elements. */
490  vec_delete((*pvec), 1, i);
491  deleted++;
492  DBG0("vec_del_collision_rule deleting one at index %d", i);
493  }
494  else
495  {
496  i++;
497  }
498  }
499  ASSERT(deleted > 0);
500 }
501 
502 static void
503 acl_plugin_print_pae (vlib_main_t * vm, int j, applied_hash_ace_entry_t * pae);
504 
505 static void
506 del_colliding_rule (applied_hash_ace_entry_t ** applied_hash_aces,
507  u32 head_index, u32 applied_entry_index)
508 {
509  DBG0("DEL COLLIDING RULE: head_index %d applied index %d", head_index, applied_entry_index);
510 
511 
512  applied_hash_ace_entry_t *head_pae =
513  vec_elt_at_index ((*applied_hash_aces), head_index);
514  if (ACL_HASH_LOOKUP_DEBUG > 0)
515  acl_plugin_print_pae(acl_main.vlib_main, head_index, head_pae);
516  vec_del_collision_rule (&head_pae->colliding_rules, applied_entry_index);
517  if (vec_len(head_pae->colliding_rules) == 0) {
518  vec_free(head_pae->colliding_rules);
519  }
520  if (ACL_HASH_LOOKUP_DEBUG > 0)
521  acl_plugin_print_pae(acl_main.vlib_main, head_index, head_pae);
522 }
523 
524 static void
525 add_colliding_rule (acl_main_t * am,
526  applied_hash_ace_entry_t ** applied_hash_aces,
527  u32 head_index, u32 applied_entry_index)
528 {
529  applied_hash_ace_entry_t *head_pae =
530  vec_elt_at_index ((*applied_hash_aces), head_index);
531  applied_hash_ace_entry_t *pae =
532  vec_elt_at_index ((*applied_hash_aces), applied_entry_index);
533  DBG0("ADD COLLIDING RULE: head_index %d applied index %d", head_index, applied_entry_index);
534  if (ACL_HASH_LOOKUP_DEBUG > 0)
535  acl_plugin_print_pae(acl_main.vlib_main, head_index, head_pae);
536 
537  collision_match_rule_t cr;
538 
539  cr.acl_index = pae->acl_index;
540  cr.ace_index = pae->ace_index;
541  cr.acl_position = pae->acl_position;
542  cr.applied_entry_index = applied_entry_index;
543  cr.rule = am->acls[pae->acl_index].rules[pae->ace_index];
544  vec_add1 (head_pae->colliding_rules, cr);
545  if (ACL_HASH_LOOKUP_DEBUG > 0)
546  acl_plugin_print_pae(acl_main.vlib_main, head_index, head_pae);
547 }
548 
549 static u32
550 activate_applied_ace_hash_entry(acl_main_t *am,
551  u32 lc_index,
552  applied_hash_ace_entry_t **applied_hash_aces,
553  u32 new_index)
554 {
555  clib_bihash_kv_48_8_t kv;
556  ASSERT(new_index != ~0);
557  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
558  DBG("activate_applied_ace_hash_entry lc_index %d new_index %d", lc_index, new_index);
559 
560  fill_applied_hash_ace_kv(am, applied_hash_aces, lc_index, new_index, &kv);
561 
562  DBG("APPLY ADD KY: %016llx %016llx %016llx %016llx %016llx %016llx",
563  kv.key[0], kv.key[1], kv.key[2],
564  kv.key[3], kv.key[4], kv.key[5]);
565 
566  clib_bihash_kv_48_8_t result;
567  hash_acl_lookup_value_t *result_val = (hash_acl_lookup_value_t *)&result.value;
568  int res = BV (clib_bihash_search) (&am->acl_lookup_hash, &kv, &result);
569  ASSERT(new_index != ~0);
570  ASSERT(new_index < vec_len((*applied_hash_aces)));
571  if (res == 0) {
572  /* There already exists an entry or more. Append at the end. */
573  u32 first_index = result_val->applied_entry_index;
574  ASSERT(first_index != ~0);
575  DBG("A key already exists, with applied entry index: %d", first_index);
576  applied_hash_ace_entry_t *first_pae = vec_elt_at_index((*applied_hash_aces), first_index);
577  u32 last_index = first_pae->tail_applied_entry_index;
578  ASSERT(last_index != ~0);
579  applied_hash_ace_entry_t *last_pae = vec_elt_at_index((*applied_hash_aces), last_index);
580  DBG("...advance to chained entry index: %d", last_index);
581  /* link ourselves in */
582  last_pae->next_applied_entry_index = new_index;
583  pae->prev_applied_entry_index = last_index;
584  /* adjust the pointer to the new tail */
585  first_pae->tail_applied_entry_index = new_index;
586  add_colliding_rule(am, applied_hash_aces, first_index, new_index);
587  return first_index;
588  } else {
589  /* It's the very first entry */
590  hashtable_add_del(am, &kv, 1);
591  ASSERT(new_index != ~0);
592  pae->tail_applied_entry_index = new_index;
593  add_colliding_rule(am, applied_hash_aces, new_index, new_index);
594  return new_index;
595  }
596 }
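/*
 * Collision chain example (illustrative indices): if applied entries 5 and 9
 * hash to the same masked key, entry 5 is inserted first with
 * tail_applied_entry_index = 5 and itself as the only colliding rule; when
 * entry 9 arrives, the bihash search returns 5, entry 9 is linked behind the
 * current tail, the tail index on entry 5 becomes 9, and a colliding-rule
 * record for 9 is appended to entry 5's vector. The hash value itself keeps
 * pointing at the head entry 5.
 */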
597 
598 
599 static void *
600 hash_acl_set_heap(acl_main_t *am)
601 {
602  if (0 == am->hash_lookup_mheap) {
603  am->hash_lookup_mheap = mheap_alloc (0 /* use VM */ , am->hash_lookup_mheap_size);
604  if (0 == am->hash_lookup_mheap) {
605  clib_error("ACL plugin failed to allocate hash lookup heap of %U bytes, abort", format_memory_size, am->hash_lookup_mheap_size);
606  }
607  mheap_t *h = mheap_header (am->hash_lookup_mheap);
608  h->flags |= MHEAP_FLAG_THREAD_SAFE;
609  }
610  void *oldheap = clib_mem_set_heap(am->hash_lookup_mheap);
611  return oldheap;
612 }
613 
614 void
615 acl_plugin_hash_acl_set_validate_heap(int on)
616 {
617  acl_main_t *am = &acl_main;
618  clib_mem_set_heap(hash_acl_set_heap(am));
619  mheap_t *h = mheap_header (am->hash_lookup_mheap);
620  if (on) {
621  h->flags |= MHEAP_FLAG_VALIDATE;
622  h->flags &= ~MHEAP_FLAG_SMALL_OBJECT_CACHE;
623  mheap_validate(h);
624  } else {
625  h->flags &= ~MHEAP_FLAG_VALIDATE;
626  h->flags |= MHEAP_FLAG_SMALL_OBJECT_CACHE;
627  }
628 }
629 
630 void
631 acl_plugin_hash_acl_set_trace_heap(int on)
632 {
633  acl_main_t *am = &acl_main;
634  clib_mem_set_heap(hash_acl_set_heap(am));
635  mheap_t *h = mheap_header (am->hash_lookup_mheap);
636  if (on) {
637  h->flags |= MHEAP_FLAG_TRACE;
638  } else {
639  h->flags &= ~MHEAP_FLAG_TRACE;
640  }
641 }
642 
643 static void
643 assign_mask_type_index_to_pae(acl_main_t *am, u32 lc_index, int is_ip6,
644  applied_hash_ace_entry_t *pae)
645 {
646  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);
647  hash_ace_info_t *ace_info = vec_elt_at_index(ha->rules, pae->hash_ace_info_index);
648 
649  ace_mask_type_entry_t *mte;
650  fa_5tuple_t *mask;
651  /*
652  * Start taking base_mask associated to ace, and essentially copy it.
653  * With TupleMerge we will assign a relaxed mask here.
654  */
655  mte = vec_elt_at_index(am->ace_mask_type_pool, ace_info->base_mask_type_index);
656  mask = &mte->mask;
657  if (am->use_tuple_merge)
658  pae->mask_type_index = tm_assign_mask_type_index(am, mask, is_ip6, lc_index);
659  else
660  pae->mask_type_index = assign_mask_type_index(am, mask);
661 }
662 
663 static void
664 split_partition(acl_main_t *am, u32 first_index,
665  u32 lc_index, int is_ip6);
666 
667 
668 static void
669 check_collision_count_and_maybe_split(acl_main_t *am, u32 lc_index, int is_ip6, u32 first_index)
670 {
671  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
672  applied_hash_ace_entry_t *first_pae = vec_elt_at_index((*applied_hash_aces), first_index);
673  if (vec_len(first_pae->colliding_rules) > am->tuple_merge_split_threshold) {
674  split_partition(am, first_index, lc_index, is_ip6);
675  }
676 }
677 
678 void
679 hash_acl_apply(acl_main_t *am, u32 lc_index, int acl_index, u32 acl_position)
680 {
681  int i;
682 
683  DBG0("HASH ACL apply: lc_index %d acl %d", lc_index, acl_index);
684  if (!am->acl_lookup_hash_initialized) {
685  BV (clib_bihash_init) (&am->acl_lookup_hash, "ACL plugin rule lookup bihash",
686  am->hash_lookup_hash_buckets, am->hash_lookup_hash_memory);
687  am->acl_lookup_hash_initialized = 1;
688  }
689 
690  void *oldheap = hash_acl_set_heap(am);
691  vec_validate(am->hash_entry_vec_by_lc_index, lc_index);
692  vec_validate(am->hash_acl_infos, acl_index);
693  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
694 
695  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
696  u32 **hash_acl_applied_lc_index = &ha->lc_index_list;
697 
698  int base_offset = vec_len(*applied_hash_aces);
699 
700  /* Update the bitmap of the mask types with which the lookup
701  needs to happen for the ACLs applied to this lc_index */
702  applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
703  vec_validate((*applied_hash_acls), lc_index);
704  applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);
705 
706  /* ensure the list of applied hash acls is initialized and add this acl# to it */
707  u32 index = vec_search(pal->applied_acls, acl_index);
708  if (index != ~0) {
709  clib_warning("BUG: trying to apply twice acl_index %d on lc_index %d, according to lc",
710  acl_index, lc_index);
711  goto done;
712  }
713  vec_add1(pal->applied_acls, acl_index);
714  u32 index2 = vec_search((*hash_acl_applied_lc_index), lc_index);
715  if (index2 != ~0) {
716  clib_warning("BUG: trying to apply twice acl_index %d on lc_index %d, according to hash h-acl info",
717  acl_index, lc_index);
718  goto done;
719  }
720  vec_add1((*hash_acl_applied_lc_index), lc_index);
721 
722  /*
723  * if the applied ACL is empty, the current code will cause a
724  * different behavior compared to current linear search: an empty ACL will
725  * simply fallthrough to the next ACL, or the default deny in the end.
726  *
727  * This is not a problem, because after vpp-dev discussion,
728  * the consensus was it should not be possible to apply the non-existent
729  * ACL, so the change adding this code also takes care of that.
730  */
731 
732  /* expand the applied aces vector by the necessary amount */
733  vec_resize((*applied_hash_aces), vec_len(ha->rules));
734 
736  /* add the rules from the ACL to the hash table for lookup and append to the vector*/
737  for(i=0; i < vec_len(ha->rules); i++) {
738  int is_ip6 = ha->rules[i].match.pkt.is_ip6;
739  u32 new_index = base_offset + i;
740  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
741  pae->acl_index = acl_index;
742  pae->ace_index = ha->rules[i].ace_index;
743  pae->acl_position = acl_position;
744  pae->action = ha->rules[i].action;
745  pae->hitcount = 0;
746  pae->hash_ace_info_index = i;
747  /* we might link it in later */
748  pae->next_applied_entry_index = ~0;
749  pae->prev_applied_entry_index = ~0;
750  pae->tail_applied_entry_index = ~0;
751  pae->colliding_rules = NULL;
752  pae->mask_type_index = ~0;
753  assign_mask_type_index_to_pae(am, lc_index, is_ip6, pae);
754  u32 first_index = activate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, new_index);
755  if (am->use_tuple_merge)
756  check_collision_count_and_maybe_split(am, lc_index, is_ip6, first_index);
757  }
758  remake_hash_applied_mask_info_vec(am, applied_hash_aces, lc_index);
759 done:
760  clib_mem_set_heap (oldheap);
761 }
762 
763 static u32
763 find_head_applied_ace_index(applied_hash_ace_entry_t **applied_hash_aces,
764  u32 curr_index)
765 {
766  /*
767  * find back the first entry. Inefficient so might need to be a bit cleverer
768  * if this proves to be a problem..
769  */
770  u32 an_index = curr_index;
771  ASSERT(an_index != ~0);
772  applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), an_index);
773  while(head_pae->prev_applied_entry_index != ~0) {
774  an_index = head_pae->prev_applied_entry_index;
775  ASSERT(an_index != ~0);
776  head_pae = vec_elt_at_index((*applied_hash_aces), an_index);
777  }
778  return an_index;
779 }
780 
781 static void
782 move_applied_ace_hash_entry(acl_main_t *am,
783  u32 lc_index,
784  applied_hash_ace_entry_t **applied_hash_aces,
785  u32 old_index, u32 new_index)
786 {
787  ASSERT(old_index != ~0);
788  ASSERT(new_index != ~0);
789  /* move the entry */
790  *vec_elt_at_index((*applied_hash_aces), new_index) = *vec_elt_at_index((*applied_hash_aces), old_index);
791 
792  /* update the linkage and hash table if necessary */
793  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), old_index);
794  applied_hash_ace_entry_t *new_pae = vec_elt_at_index((*applied_hash_aces), new_index);
795 
796  if (ACL_HASH_LOOKUP_DEBUG > 0) {
797  clib_warning("Moving pae from %d to %d", old_index, new_index);
798  acl_plugin_print_pae(am->vlib_main, old_index, pae);
799  }
800 
801  if (new_pae->tail_applied_entry_index == old_index) {
802  /* fix-up the tail index if we are the tail and the start */
803  new_pae->tail_applied_entry_index = new_index;
804  }
805 
806  if (pae->prev_applied_entry_index != ~0) {
807  applied_hash_ace_entry_t *prev_pae = vec_elt_at_index((*applied_hash_aces), pae->prev_applied_entry_index);
808  ASSERT(prev_pae->next_applied_entry_index == old_index);
809  prev_pae->next_applied_entry_index = new_index;
810  } else {
811  /* first entry - so the hash points to it, update */
812  add_del_hashtable_entry(am, lc_index,
813  applied_hash_aces, new_index, 1);
814  ASSERT(pae->tail_applied_entry_index != ~0);
815  }
816  if (pae->next_applied_entry_index != ~0) {
817  applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
818  ASSERT(next_pae->prev_applied_entry_index == old_index);
819  next_pae->prev_applied_entry_index = new_index;
820  } else {
821  /*
822  * Moving the very last entry, so we need to update the tail pointer in the first one.
823  */
824  u32 head_index = find_head_applied_ace_index(applied_hash_aces, old_index);
825  ASSERT(head_index != ~0);
826  applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index);
827 
828  ASSERT(head_pae->tail_applied_entry_index == old_index);
829  head_pae->tail_applied_entry_index = new_index;
830  }
831  if (new_pae->colliding_rules) {
832  /* update the information within the collision rule entry */
833  ASSERT(vec_len(new_pae->colliding_rules) > 0);
834  collision_match_rule_t *cr = vec_elt_at_index (new_pae->colliding_rules, 0);
835  ASSERT(cr->applied_entry_index == old_index);
836  cr->applied_entry_index = new_index;
837  } else {
838  /* find the index in the collision rule entry on the head element */
839  u32 head_index = find_head_applied_ace_index(applied_hash_aces, new_index);
840  ASSERT(head_index != ~0);
841  applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index);
842  ASSERT(vec_len(head_pae->colliding_rules) > 0);
843  u32 i;
844  for (i=0; i<vec_len(head_pae->colliding_rules); i++) {
845  collision_match_rule_t *cr = vec_elt_at_index (head_pae->colliding_rules, i);
846  if (cr->applied_entry_index == old_index) {
847  cr->applied_entry_index = new_index;
848  }
849  }
850  if (ACL_HASH_LOOKUP_DEBUG > 0) {
851  clib_warning("Head pae at index %d after adjustment", head_index);
852  acl_plugin_print_pae(am->vlib_main, head_index, head_pae);
853  }
854  }
855  /* invalidate the old entry */
856  pae->prev_applied_entry_index = ~0;
857  pae->next_applied_entry_index = ~0;
858  pae->tail_applied_entry_index = ~0;
859  pae->colliding_rules = NULL;
860 }
861 
862 static void
863 deactivate_applied_ace_hash_entry(acl_main_t *am,
864  u32 lc_index,
865  applied_hash_ace_entry_t **applied_hash_aces,
866  u32 old_index)
867 {
868  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), old_index);
869  DBG("UNAPPLY DEACTIVATE: lc_index %d applied index %d", lc_index, old_index);
870  if (ACL_HASH_LOOKUP_DEBUG > 0) {
871  clib_warning("Deactivating pae at index %d", old_index);
872  acl_plugin_print_pae(am->vlib_main, old_index, pae);
873  }
874 
875  if (pae->prev_applied_entry_index != ~0) {
876  DBG("UNAPPLY = index %d has prev_applied_entry_index %d", old_index, pae->prev_applied_entry_index);
877  applied_hash_ace_entry_t *prev_pae = vec_elt_at_index((*applied_hash_aces), pae->prev_applied_entry_index);
878  ASSERT(prev_pae->next_applied_entry_index == old_index);
879  prev_pae->next_applied_entry_index = pae->next_applied_entry_index;
880 
881  u32 head_index = find_head_applied_ace_index(applied_hash_aces, old_index);
882  ASSERT(head_index != ~0);
883  applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index);
884  del_colliding_rule(applied_hash_aces, head_index, old_index);
885 
886  if (pae->next_applied_entry_index == ~0) {
887  /* it was a last entry we removed, update the pointer on the first one */
888  ASSERT(head_pae->tail_applied_entry_index == old_index);
889  head_pae->tail_applied_entry_index = pae->prev_applied_entry_index;
890  } else {
891  applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
892  next_pae->prev_applied_entry_index = pae->prev_applied_entry_index;
893  }
894  } else {
895  /* It was the first entry. We need either to reset the hash entry or delete it */
896  /* delete our entry from the collision vector first */
897  del_colliding_rule(applied_hash_aces, old_index, old_index);
898  if (pae->next_applied_entry_index != ~0) {
899  /* the next element becomes the new first one, so needs the tail pointer to be set */
900  applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
901  ASSERT(pae->tail_applied_entry_index != ~0);
902  next_pae->tail_applied_entry_index = pae->tail_applied_entry_index;
903  /* Remove ourselves and transfer the ownership of the colliding rules vector */
904  next_pae->colliding_rules = pae->colliding_rules;
905  /* unlink from the next element */
906  next_pae->prev_applied_entry_index = ~0;
907  add_del_hashtable_entry(am, lc_index,
908  applied_hash_aces, pae->next_applied_entry_index, 1);
909  } else {
910  /* no next entry, so just delete the entry in the hash table */
911  add_del_hashtable_entry(am, lc_index,
912  applied_hash_aces, old_index, 0);
913  }
914  }
915  DBG0("Releasing mask type index %d for pae index %d on lc_index %d", pae->mask_type_index, old_index, lc_index);
916  release_mask_type_index(am, pae->mask_type_index);
917  /* invalidate the old entry */
918  pae->mask_type_index = ~0;
919  pae->prev_applied_entry_index = ~0;
920  pae->next_applied_entry_index = ~0;
921  pae->tail_applied_entry_index = ~0;
922  /* always has to be 0 */
923  pae->colliding_rules = NULL;
924 }
925 
926 
927 void
928 hash_acl_unapply(acl_main_t *am, u32 lc_index, int acl_index)
929 {
930  int i;
931 
932  DBG0("HASH ACL unapply: lc_index %d acl %d", lc_index, acl_index);
933  applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
934  applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);
935 
936  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
937  u32 **hash_acl_applied_lc_index = &ha->lc_index_list;
938 
939  if (ACL_HASH_LOOKUP_DEBUG > 0) {
940  clib_warning("unapplying acl %d", acl_index);
941  acl_plugin_show_tables_mask_type();
942  acl_plugin_show_tables_acl_hash(acl_index);
943  acl_plugin_show_tables_applied_info(lc_index);
944  }
945 
946  /* remove this acl# from the list of applied hash acls */
947  u32 index = vec_search(pal->applied_acls, acl_index);
948  if (index == ~0) {
949  clib_warning("BUG: trying to unapply unapplied acl_index %d on lc_index %d, according to lc",
950  acl_index, lc_index);
951  return;
952  }
953  vec_del1(pal->applied_acls, index);
954 
955  u32 index2 = vec_search((*hash_acl_applied_lc_index), lc_index);
956  if (index2 == ~0) {
957  clib_warning("BUG: trying to unapply twice acl_index %d on lc_index %d, according to h-acl info",
958  acl_index, lc_index);
959  return;
960  }
961  vec_del1((*hash_acl_applied_lc_index), index2);
962 
963  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
964 
965  for(i=0; i < vec_len((*applied_hash_aces)); i++) {
966  if (vec_elt_at_index(*applied_hash_aces,i)->acl_index == acl_index) {
967  DBG("Found applied ACL#%d at applied index %d", acl_index, i);
968  break;
969  }
970  }
971  if (vec_len((*applied_hash_aces)) <= i) {
972  DBG("Did not find applied ACL#%d at lc_index %d", acl_index, lc_index);
973  /* we went all the way without finding any entries. Probably a list was empty. */
974  return;
975  }
976 
977  void *oldheap = hash_acl_set_heap(am);
978  int base_offset = i;
979  int tail_offset = base_offset + vec_len(ha->rules);
980  int tail_len = vec_len((*applied_hash_aces)) - tail_offset;
981  DBG("base_offset: %d, tail_offset: %d, tail_len: %d", base_offset, tail_offset, tail_len);
982 
983  for(i=0; i < vec_len(ha->rules); i ++) {
984  deactivate_applied_ace_hash_entry(am, lc_index,
985  applied_hash_aces, base_offset + i);
986  }
987  for(i=0; i < tail_len; i ++) {
988  /* move the entry at tail offset to base offset */
989  /* that is, from (tail_offset+i) -> (base_offset+i) */
990  DBG0("UNAPPLY MOVE: lc_index %d, applied index %d -> %d", lc_index, tail_offset+i, base_offset + i);
991  move_applied_ace_hash_entry(am, lc_index, applied_hash_aces, tail_offset + i, base_offset + i);
992  }
993  /* trim the end of the vector */
994  _vec_len((*applied_hash_aces)) -= vec_len(ha->rules);
995 
996  remake_hash_applied_mask_info_vec(am, applied_hash_aces, lc_index);
997 
998  if (vec_len((*applied_hash_aces)) == 0) {
999  vec_free((*applied_hash_aces));
1000  }
1001 
1002  clib_mem_set_heap (oldheap);
1003 }
1004 
1005 /*
1006  * Create the applied ACEs and update the hash table,
1007  * taking into account that the ACL may not be the last
1008  * in the vector of applied ACLs.
1009  *
1010  * For now, walk from the end of the vector and unapply the ACLs,
1011  * then apply the one in question and reapply the rest.
1012  */
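/*
 * For example (illustrative indices): with ACLs [3, 7, 12] applied to a
 * lookup context, reapplying ACL 7 (start_index 1) first unapplies ACL 12,
 * then calls hash_acl_apply() for 7 and 12 again with positions 1 and 2, so
 * the relative ordering of the applied ACLs is preserved.
 */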
1013 
1014 void
1015 hash_acl_reapply(acl_main_t *am, u32 lc_index, int acl_index)
1016 {
1017  acl_lookup_context_t *acontext = pool_elt_at_index(am->acl_lookup_contexts, lc_index);
1018  u32 **applied_acls = &acontext->acl_indices;
1019  int i;
1020  int start_index = vec_search((*applied_acls), acl_index);
1021 
1022  DBG0("Start index for acl %d in lc_index %d is %d", acl_index, lc_index, start_index);
1023  /*
1024  * This function is called after we find out the lc_index where ACL is applied.
1025  * If the by-lc_index vector does not have the ACL#, then it's a bug.
1026  */
1027  ASSERT(start_index < vec_len(*applied_acls));
1028 
1029  /* unapply all the ACLs at the tail side, up to the current one */
1030  for(i = vec_len(*applied_acls) - 1; i > start_index; i--) {
1031  hash_acl_unapply(am, lc_index, *vec_elt_at_index(*applied_acls, i));
1032  }
1033  for(i = start_index; i < vec_len(*applied_acls); i++) {
1034  hash_acl_apply(am, lc_index, *vec_elt_at_index(*applied_acls, i), i);
1035  }
1036 }
1037 
1038 static void
1039 make_ip6_address_mask(ip6_address_t *addr, u8 prefix_len)
1040 {
1041  ip6_address_mask_from_width(addr, prefix_len);
1042 }
1043 
1044 
1045 /* Maybe should be moved into the core somewhere */
1046 always_inline void
1047 ip4_address_mask_from_width (ip4_address_t * a, u32 width)
1048 {
1049  int i, byte, bit, bitnum;
1050  ASSERT (width <= 32);
1051  memset (a, 0, sizeof (a[0]));
1052  for (i = 0; i < width; i++)
1053  {
1054  bitnum = (7 - (i & 7));
1055  byte = i / 8;
1056  bit = 1 << bitnum;
1057  a->as_u8[byte] |= bit;
1058  }
1059 }
1060 
1061 
1062 static void
1063 make_ip4_address_mask(ip4_address_t *addr, u8 prefix_len)
1064 {
1065  ip4_address_mask_from_width(addr, prefix_len);
1066 }
1067 
1068 static void
1069 make_port_mask(u16 *portmask, u16 port_first, u16 port_last)
1070 {
1071  if (port_first == port_last) {
1072  *portmask = 0xffff;
1073  /* single port is representable by masked value */
1074  return;
1075  }
1076 
1077  *portmask = 0;
1078  return;
1079 }
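/*
 * Port mask example (illustrative values): a single-port rule such as
 * 80..80 gets portmask 0xffff, so the port takes part in the hashed key;
 * a wider range such as 1024..65535 gets portmask 0, i.e. the port is
 * wildcarded in the key and the range itself has to be verified against
 * the full rule kept alongside the colliding-rule entries.
 */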
1080 
1081 static void
1082 make_mask_and_match_from_rule(fa_5tuple_t *mask, acl_rule_t *r, hash_ace_info_t *hi)
1083 {
1084  memset(mask, 0, sizeof(*mask));
1085  memset(&hi->match, 0, sizeof(hi->match));
1086  hi->action = r->is_permit;
1087 
1088  /* we will need to be matching based on lc_index and mask_type_index when applied */
1089  mask->pkt.lc_index = ~0;
1090  /* we will assign the match of mask_type_index later when we find it*/
1091  mask->pkt.mask_type_index_lsb = ~0;
1092 
1093  mask->pkt.is_ip6 = 1;
1094  hi->match.pkt.is_ip6 = r->is_ipv6;
1095  if (r->is_ipv6) {
1096  make_ip6_address_mask(&mask->ip6_addr[0], r->src_prefixlen);
1097  hi->match.ip6_addr[0] = r->src.ip6;
1098  make_ip6_address_mask(&mask->ip6_addr[1], r->dst_prefixlen);
1099  hi->match.ip6_addr[1] = r->dst.ip6;
1100  } else {
1101  memset(hi->match.l3_zero_pad, 0, sizeof(hi->match.l3_zero_pad));
1102  make_ip4_address_mask(&mask->ip4_addr[0], r->src_prefixlen);
1103  hi->match.ip4_addr[0] = r->src.ip4;
1104  make_ip4_address_mask(&mask->ip4_addr[1], r->dst_prefixlen);
1105  hi->match.ip4_addr[1] = r->dst.ip4;
1106  }
1107 
1108  if (r->proto != 0) {
1109  mask->l4.proto = ~0; /* L4 proto needs to be matched */
1110  hi->match.l4.proto = r->proto;
1111 
1112  /* Calculate the src/dst port masks and make the src/dst port matches accordingly */
1113  make_port_mask(&mask->l4.port[0], r->src_port_or_type_first, r->src_port_or_type_last);
1114  hi->match.l4.port[0] = r->src_port_or_type_first & mask->l4.port[0];
1115 
1116  make_port_mask(&mask->l4.port[1], r->dst_port_or_code_first, r->dst_port_or_code_last);
1117  hi->match.l4.port[1] = r->dst_port_or_code_first & mask->l4.port[1];
1118  /* L4 info must be valid in order to match */
1119  mask->pkt.l4_valid = 1;
1120  hi->match.pkt.l4_valid = 1;
1121  /* And we must set the mask to check that it is an initial fragment */
1122  mask->pkt.is_nonfirst_fragment = 1;
1123  hi->match.pkt.is_nonfirst_fragment = 0;
1124  if ((r->proto == IPPROTO_TCP) && (r->tcp_flags_mask != 0)) {
1125  /* if we want to match on TCP flags, they must be masked off as well */
1126  mask->pkt.tcp_flags = r->tcp_flags_mask;
1127  hi->match.pkt.tcp_flags = r->tcp_flags_value;
1128  /* and the flags need to be present within the packet being matched */
1129  mask->pkt.tcp_flags_valid = 1;
1130  hi->match.pkt.tcp_flags_valid = 1;
1131  }
1132  }
1133  /* Sanitize the mask and the match */
1134  u64 *pmask = (u64 *)mask;
1135  u64 *pmatch = (u64 *)&hi->match;
1136  int j;
1137  for(j=0; j<6; j++) {
1138  pmatch[j] = pmatch[j] & pmask[j];
1139  }
1140 }
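/*
 * Example (illustrative rule): "permit tcp 192.168.1.0/24 -> any, dst port
 * 80" yields a mask with source address 255.255.255.0, destination address
 * 0.0.0.0, proto 0xff, source port mask 0, destination port mask 0xffff,
 * and l4_valid / is_nonfirst_fragment bits set; the match carries
 * 192.168.1.0, proto 6 and destination port 80, and the final loop ANDs
 * every match word with the mask so no match bit falls outside the mask.
 */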
1141 
1142 
1143 int hash_acl_exists(acl_main_t *am, int acl_index)
1144 {
1145  if (acl_index >= vec_len(am->hash_acl_infos))
1146  return 0;
1147 
1148  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
1149  return ha->hash_acl_exists;
1150 }
1151 
1152 void hash_acl_add(acl_main_t *am, int acl_index)
1153 {
1154  void *oldheap = hash_acl_set_heap(am);
1155  DBG("HASH ACL add : %d", acl_index);
1156  int i;
1157  acl_list_t *a = &am->acls[acl_index];
1158  vec_validate(am->hash_acl_infos, acl_index);
1159  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
1160  memset(ha, 0, sizeof(*ha));
1161  ha->hash_acl_exists = 1;
1162 
1163  /* walk the newly added ACL entries and ensure that for each of them there
1164  is a mask type, increment a reference count for that mask type */
1165  for(i=0; i < a->count; i++) {
1166  hash_ace_info_t ace_info;
1167  fa_5tuple_t mask;
1168  memset(&ace_info, 0, sizeof(ace_info));
1169  ace_info.acl_index = acl_index;
1170  ace_info.ace_index = i;
1171 
1172  make_mask_and_match_from_rule(&mask, &a->rules[i], &ace_info);
1173  mask.pkt.flags_reserved = 0b000;
1174  ace_info.base_mask_type_index = assign_mask_type_index(am, &mask);
1175  /* assign the mask type index for matching itself */
1176  ace_info.match.pkt.mask_type_index_lsb = ace_info.base_mask_type_index;
1177  DBG("ACE: %d mask_type_index: %d", i, ace_info.base_mask_type_index);
1178  vec_add1(ha->rules, ace_info);
1179  }
1180  /*
1181  * if an ACL is applied somewhere, fill the corresponding lookup data structures.
1182  * We need to take care if the ACL is not the last one in the vector of ACLs applied to the interface.
1183  */
1184  if (acl_index < vec_len(am->lc_index_vec_by_acl)) {
1185  u32 *lc_index;
1186  vec_foreach(lc_index, am->lc_index_vec_by_acl[acl_index]) {
1187  hash_acl_reapply(am, *lc_index, acl_index);
1188  }
1189  }
1190  clib_mem_set_heap (oldheap);
1191 }
1192 
1193 void hash_acl_delete(acl_main_t *am, int acl_index)
1194 {
1195  void *oldheap = hash_acl_set_heap(am);
1196  DBG0("HASH ACL delete : %d", acl_index);
1197  /*
1198  * If the ACL is applied somewhere, remove the references of it (call hash_acl_unapply)
1199  * this is a different behavior from the linear lookup where an empty ACL is "deny all",
1200  *
1201  * However, following vpp-dev discussion the ACL that is referenced elsewhere
1202  * should not be possible to delete, and the change adding this also adds
1203  * the safeguards to that respect, so this is not a problem.
1204  *
1205  * The part to remember is that this routine is called in the process of reapplication
1206  * during the acl_add_replace() API call - the old acl ruleset is deleted, then
1207  * the new one is added, without the change in the applied ACLs - so this case
1208  * has to be handled.
1209  */
1210  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
1211  u32 *lc_list_copy = 0;
1212  {
1213  u32 *lc_index;
1214  lc_list_copy = vec_dup(ha->lc_index_list);
1215  vec_foreach(lc_index, lc_list_copy) {
1216  hash_acl_unapply(am, *lc_index, acl_index);
1217  }
1218  vec_free(lc_list_copy);
1219  }
1220  vec_free(ha->lc_index_list);
1221 
1222  /* walk the mask types for the ACL about-to-be-deleted, and decrease
1223  * the reference count, possibly freeing up some of them */
1224  int i;
1225  for(i=0; i < vec_len(ha->rules); i++) {
1226  release_mask_type_index(am, ha->rules[i].base_mask_type_index);
1227  }
1228  ha->hash_acl_exists = 0;
1229  vec_free(ha->rules);
1230  clib_mem_set_heap (oldheap);
1231 }
1232 
1233 
1234 void
1235 show_hash_acl_hash (vlib_main_t * vm, acl_main_t * am, u32 verbose)
1236 {
1237  vlib_cli_output(vm, "\nACL lookup hash table:\n%U\n",
1238  BV (format_bihash), &am->acl_lookup_hash, verbose);
1239 }
1240 
1241 void
1242 acl_plugin_show_tables_mask_type (void)
1243 {
1244  acl_main_t *am = &acl_main;
1245  vlib_main_t *vm = am->vlib_main;
1246  ace_mask_type_entry_t *mte;
1247 
1248  vlib_cli_output (vm, "Mask-type entries:");
1249  /* *INDENT-OFF* */
1250  pool_foreach(mte, am->ace_mask_type_pool,
1251  ({
1252  vlib_cli_output(vm, " %3d: %016llx %016llx %016llx %016llx %016llx %016llx refcount %d",
1253  mte - am->ace_mask_type_pool,
1254  mte->mask.kv_40_8.key[0], mte->mask.kv_40_8.key[1], mte->mask.kv_40_8.key[2],
1255  mte->mask.kv_40_8.key[3], mte->mask.kv_40_8.key[4], mte->mask.kv_40_8.value, mte->refcount);
1256  }));
1257  /* *INDENT-ON* */
1258 }
1259 
1260 void
1261 acl_plugin_show_tables_acl_hash (u32 acl_index)
1262 {
1263  acl_main_t *am = &acl_main;
1264  vlib_main_t *vm = am->vlib_main;
1265  u32 i, j;
1266  u64 *m;
1267  vlib_cli_output (vm, "Mask-ready ACL representations\n");
1268  for (i = 0; i < vec_len (am->hash_acl_infos); i++)
1269  {
1270  if ((acl_index != ~0) && (acl_index != i))
1271  {
1272  continue;
1273  }
1274  hash_acl_info_t *ha = &am->hash_acl_infos[i];
1275  vlib_cli_output (vm, "acl-index %u bitmask-ready layout\n", i);
1276  vlib_cli_output (vm, " applied lc_index list: %U\n",
1277  format_vec32, ha->lc_index_list, "%d");
1278  for (j = 0; j < vec_len (ha->rules); j++)
1279  {
1280  hash_ace_info_t *pa = &ha->rules[j];
1281  m = (u64 *) & pa->match;
1282  vlib_cli_output (vm,
1283  " %4d: %016llx %016llx %016llx %016llx %016llx %016llx base mask index %d acl %d rule %d action %d\n",
1284  j, m[0], m[1], m[2], m[3], m[4], m[5],
1285  pa->base_mask_type_index, pa->acl_index, pa->ace_index,
1286  pa->action);
1287  }
1288  }
1289 }
1290 
1291 static void
1292 acl_plugin_print_colliding_rule (vlib_main_t * vm, int j, collision_match_rule_t *cr) {
1293  vlib_cli_output(vm,
1294  " %4d: acl %d ace %d acl pos %d pae index: %d",
1295  j, cr->acl_index, cr->ace_index, cr->acl_position, cr->applied_entry_index);
1296 }
1297 
1298 static void
1299 acl_plugin_print_pae (vlib_main_t * vm, int j, applied_hash_ace_entry_t * pae)
1300 {
1301  vlib_cli_output (vm,
1302  " %4d: acl %d rule %d action %d bitmask-ready rule %d mask type index: %d colliding_rules: %d next %d prev %d tail %d hitcount %lld acl_pos: %d",
1303  j, pae->acl_index, pae->ace_index, pae->action,
1304  pae->hash_ace_info_index, pae->mask_type_index, vec_len(pae->colliding_rules),
1305  pae->next_applied_entry_index, pae->prev_applied_entry_index,
1306  pae->tail_applied_entry_index, pae->hitcount, pae->acl_position);
1307  int jj;
1308  for(jj=0; jj<vec_len(pae->colliding_rules); jj++)
1309  acl_plugin_print_colliding_rule(vm, jj, vec_elt_at_index(pae->colliding_rules, jj));
1310 }
1311 
1312 static void
1313 acl_plugin_print_applied_mask_info (vlib_main_t * vm, int j, hash_applied_mask_info_t *mi)
1314 {
1315  vlib_cli_output (vm,
1316  " %4d: mask type index %d first rule index %d num_entries %d max_collisions %d",
1317  j, mi->mask_type_index, mi->first_rule_index, mi->num_entries, mi->max_collisions);
1318 }
1319 
1320 void
1321 acl_plugin_show_tables_applied_info (u32 lc_index)
1322 {
1323  acl_main_t *am = &acl_main;
1324  vlib_main_t *vm = am->vlib_main;
1325  u32 lci, j;
1326  vlib_cli_output (vm, "Applied lookup entries for lookup contexts");
1327 
1328  for (lci = 0;
1329  (lci < vec_len(am->applied_hash_acl_info_by_lc_index)); lci++)
1330  {
1331  if ((lc_index != ~0) && (lc_index != lci))
1332  {
1333  continue;
1334  }
1335  vlib_cli_output (vm, "lc_index %d:", lci);
1336  if (lci < vec_len (am->applied_hash_acl_info_by_lc_index))
1337  {
1338  applied_hash_acl_info_t *pal =
1339  &am->applied_hash_acl_info_by_lc_index[lci];
1340  vlib_cli_output (vm, " applied acls: %U", format_vec32,
1341  pal->applied_acls, "%d");
1342  }
1343  if (lci < vec_len (am->hash_applied_mask_info_vec_by_lc_index))
1344  {
1345  vlib_cli_output (vm, " applied mask info entries:");
1346  for (j = 0;
1347  j < vec_len (am->hash_applied_mask_info_vec_by_lc_index[lci]);
1348  j++)
1349  {
1350  acl_plugin_print_applied_mask_info (vm, j,
1351  &am->hash_applied_mask_info_vec_by_lc_index
1352  [lci][j]);
1353  }
1354  }
1355  if (lci < vec_len (am->hash_entry_vec_by_lc_index))
1356  {
1357  vlib_cli_output (vm, " lookup applied entries:");
1358  for (j = 0;
1359  j < vec_len (am->hash_entry_vec_by_lc_index[lci]);
1360  j++)
1361  {
1362  acl_plugin_print_pae (vm, j,
1363  &am->hash_entry_vec_by_lc_index
1364  [lci][j]);
1365  }
1366  }
1367  }
1368 }
1369 
1370 void
1371 acl_plugin_show_tables_bihash (u32 show_bihash_verbose)
1372 {
1373  acl_main_t *am = &acl_main;
1374  vlib_main_t *vm = am->vlib_main;
1375  show_hash_acl_hash (vm, am, show_bihash_verbose);
1376 }
1377 
1378 /*
1379  * Split of the partition needs to happen when the collision count
1380  * goes over a specified threshold.
1381  *
1382  * This is a signal that we ignored too many bits in
1383  * mT and we need to split the table into two tables. We select
1384  * all of the colliding rules L and find their maximum common
1385  * tuple mL. Normally mL is specific enough to hash L with few
1386  * or no collisions. We then create a new table T2 with tuple mL
1387  * and transfer all compatible rules from T to T2. If mL is not
1388  * specific enough, we find the field with the biggest difference
1389  * between the minimum and maximum tuple lengths for all of
1390  * the rules in L and set that field to be the average of those two
1391  * values. We then transfer all compatible rules as before. This
1392  * guarantees that some rules from L will move and that T2 will
1393  * have a smaller number of collisions than T did.
1394  */
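/*
 * Example (illustrative masks): if the colliding rules differ only in the
 * source address, with masks ranging from /16 to /32, the source address
 * dimension has the largest spread (best_delta = 16), so it is selected and
 * the new table mask takes the maximum shifted by best_delta/2, roughly the
 * average length (/24 here), before the compatible rules are moved over to
 * the new partition.
 */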
1395 
1396 
1397 static void
1398 ensure_ip6_min_addr (ip6_address_t * min_addr, ip6_address_t * mask_addr)
1399 {
1400  int update =
1401  (clib_net_to_host_u64 (mask_addr->as_u64[0]) <
1402  clib_net_to_host_u64 (min_addr->as_u64[0]))
1403  ||
1404  ((clib_net_to_host_u64 (mask_addr->as_u64[0]) ==
1405  clib_net_to_host_u64 (min_addr->as_u64[0]))
1406  && (clib_net_to_host_u64 (mask_addr->as_u64[1]) <
1407  clib_net_to_host_u64 (min_addr->as_u64[1])));
1408  if (update)
1409  {
1410  min_addr->as_u64[0] = mask_addr->as_u64[0];
1411  min_addr->as_u64[1] = mask_addr->as_u64[1];
1412  }
1413 }
1414 
1415 static void
1416 ensure_ip6_max_addr (ip6_address_t * max_addr, ip6_address_t * mask_addr)
1417 {
1418  int update =
1419  (clib_net_to_host_u64 (mask_addr->as_u64[0]) >
1420  clib_net_to_host_u64 (max_addr->as_u64[0]))
1421  ||
1422  ((clib_net_to_host_u64 (mask_addr->as_u64[0]) ==
1423  clib_net_to_host_u64 (max_addr->as_u64[0]))
1424  && (clib_net_to_host_u64 (mask_addr->as_u64[1]) >
1425  clib_net_to_host_u64 (max_addr->as_u64[1])));
1426  if (update)
1427  {
1428  max_addr->as_u64[0] = mask_addr->as_u64[0];
1429  max_addr->as_u64[1] = mask_addr->as_u64[1];
1430  }
1431 }
1432 
1433 static void
1434 ensure_ip4_min_addr (ip4_address_t * min_addr, ip4_address_t * mask_addr)
1435 {
1436  int update =
1437  (clib_net_to_host_u32 (mask_addr->as_u32) <
1438  clib_net_to_host_u32 (min_addr->as_u32));
1439  if (update)
1440  min_addr->as_u32 = mask_addr->as_u32;
1441 }
1442 
1443 static void
1444 ensure_ip4_max_addr (ip4_address_t * max_addr, ip4_address_t * mask_addr)
1445 {
1446  int update =
1447  (clib_net_to_host_u32 (mask_addr->as_u32) >
1448  clib_net_to_host_u32 (max_addr->as_u32));
1449  if (update)
1450  max_addr->as_u32 = mask_addr->as_u32;
1451 }
1452 
1453 enum {
1454  DIM_SRC_ADDR,
1455  DIM_DST_ADDR,
1456  DIM_SRC_PORT,
1457  DIM_DST_PORT,
1458  DIM_PROTO,
1459 };
1460 
1461 
1462 
1463 static void
1464 split_partition(acl_main_t *am, u32 first_index,
1465  u32 lc_index, int is_ip6){
1466  DBG( "TM-split_partition - first_entry:%d", first_index);
1467  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
1468  ace_mask_type_entry_t *mte;
1469  fa_5tuple_t the_min_tuple, *min_tuple = &the_min_tuple;
1470  fa_5tuple_t the_max_tuple, *max_tuple = &the_max_tuple;
1471  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), first_index);
1472  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);
1473  hash_ace_info_t *ace_info;
1474  u32 coll_mask_type_index = pae->mask_type_index;
1475  memset(&the_min_tuple, 0, sizeof(the_min_tuple));
1476  memset(&the_max_tuple, 0, sizeof(the_max_tuple));
1477 
1478  int i=0;
1479  u64 collisions = vec_len(pae->colliding_rules);
1480  for(i=0; i<collisions; i++){
1481  /* reload the hash acl info as it might be a different ACL# */
1482  ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);
1483 
1484  DBG( "TM-collision: base_ace:%d (ace_mask:%d, first_collision_mask:%d)",
1485  pae->ace_index, pae->mask_type_index, coll_mask_type_index);
1486 
1487  ace_info = vec_elt_at_index(ha->rules, pae->hash_ace_info_index);
1488  mte = vec_elt_at_index(am->ace_mask_type_pool, ace_info->base_mask_type_index);
1489  fa_5tuple_t *mask = &mte->mask;
1490 
1491  if(pae->mask_type_index != coll_mask_type_index) continue;
1492  /* Computing min_mask and max_mask for colliding rules */
1493  if(i==0){
1494  clib_memcpy(min_tuple, mask, sizeof(fa_5tuple_t));
1495  clib_memcpy(max_tuple, mask, sizeof(fa_5tuple_t));
1496  }else{
1497  int j;
1498  for(j=0; j<2; j++){
1499  if (is_ip6)
1500  ensure_ip6_min_addr(&min_tuple->ip6_addr[j], &mask->ip6_addr[j]);
1501  else
1502  ensure_ip4_min_addr(&min_tuple->ip4_addr[j], &mask->ip4_addr[j]);
1503 
1504  if ((mask->l4.port[j] < min_tuple->l4.port[j]))
1505  min_tuple->l4.port[j] = mask->l4.port[j];
1506  }
1507 
1508  if ((mask->l4.proto < min_tuple->l4.proto))
1509  min_tuple->l4.proto = mask->l4.proto;
1510 
1511  if(mask->pkt.as_u64 < min_tuple->pkt.as_u64)
1512  min_tuple->pkt.as_u64 = mask->pkt.as_u64;
1513 
1514 
1515  for(j=0; j<2; j++){
1516  if (is_ip6)
1517  ensure_ip6_max_addr(&max_tuple->ip6_addr[j], &mask->ip6_addr[j]);
1518  else
1519  ensure_ip4_max_addr(&max_tuple->ip4_addr[j], &mask->ip4_addr[j]);
1520 
1521  if ((mask->l4.port[j] > max_tuple->l4.port[j]))
1522  max_tuple->l4.port[j] = mask->l4.port[j];
1523  }
1524 
1525  if ((mask->l4.proto > max_tuple->l4.proto))
1526  max_tuple->l4.proto = mask->l4.proto;
1527 
1528  if(mask->pkt.as_u64 > max_tuple->pkt.as_u64)
1529  max_tuple->pkt.as_u64 = mask->pkt.as_u64;
1530  }
1531 
1532  pae = pae->next_applied_entry_index == ~0 ? 0 : vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
1533  }
1534 
1535  /* Computing field with max difference between (min/max)_mask */
1536  int best_dim=-1, best_delta=0, delta=0;
1537 
1538  /* SRC_addr dimension */
1539  if (is_ip6) {
1540  int i;
1541  for(i=0; i<2; i++){
1542  delta += count_bits(max_tuple->ip6_addr[0].as_u64[i]) - count_bits(min_tuple->ip6_addr[0].as_u64[i]);
1543  }
1544  } else {
1545  delta += count_bits(max_tuple->ip4_addr[0].as_u32) - count_bits(min_tuple->ip4_addr[0].as_u32);
1546  }
1547  if(delta > best_delta){
1548  best_delta = delta;
1549  best_dim = DIM_SRC_ADDR;
1550  }
1551 
1552  /* DST_addr dimension */
1553  delta = 0;
1554  if (is_ip6) {
1555  int i;
1556  for(i=0; i<2; i++){
1557  delta += count_bits(max_tuple->ip6_addr[1].as_u64[i]) - count_bits(min_tuple->ip6_addr[1].as_u64[i]);
1558  }
1559  } else {
1560  delta += count_bits(max_tuple->ip4_addr[1].as_u32) - count_bits(min_tuple->ip4_addr[1].as_u32);
1561  }
1562  if(delta > best_delta){
1563  best_delta = delta;
1564  best_dim = DIM_DST_ADDR;
1565  }
1566 
1567  /* SRC_port dimension */
1568  delta = count_bits(max_tuple->l4.port[0]) - count_bits(min_tuple->l4.port[0]);
1569  if(delta > best_delta){
1570  best_delta = delta;
1571  best_dim = DIM_SRC_PORT;
1572  }
1573 
1574  /* DST_port dimension */
1575  delta = count_bits(max_tuple->l4.port[1]) - count_bits(min_tuple->l4.port[1]);
1576  if(delta > best_delta){
1577  best_delta = delta;
1578  best_dim = DIM_DST_PORT;
1579  }
1580 
1581  /* Proto dimension */
1582  delta = count_bits(max_tuple->l4.proto) - count_bits(min_tuple->l4.proto);
1583  if(delta > best_delta){
1584  best_delta = delta;
1585  best_dim = DIM_PROTO;
1586  }
1587 
1588  int shifting = 0; //, ipv4_block = 0;
1589  switch(best_dim){
1590  case DIM_SRC_ADDR:
1591  shifting = (best_delta)/2; // FIXME IPV4-only
1592  // ipv4_block = count_bits(max_tuple->ip4_addr[0].as_u32);
1593  min_tuple->ip4_addr[0].as_u32 =
1594  clib_host_to_net_u32((clib_net_to_host_u32(max_tuple->ip4_addr[0].as_u32) << (shifting))&0xFFFFFFFF);
1595 
1596  break;
1597  case DIM_DST_ADDR:
1598  shifting = (best_delta)/2;
1599 /*
1600  ipv4_block = count_bits(max_tuple->addr[1].as_u64[1]);
1601  if(ipv4_block > shifting)
1602  min_tuple->addr[1].as_u64[1] =
1603  clib_host_to_net_u64((clib_net_to_host_u64(max_tuple->addr[1].as_u64[1]) << (shifting))&0xFFFFFFFF);
1604  else{
1605  shifting = shifting - ipv4_block;
1606  min_tuple->addr[1].as_u64[1] = 0;
1607  min_tuple->addr[1].as_u64[0] =
1608  clib_host_to_net_u64((clib_net_to_host_u64(max_tuple->addr[1].as_u64[0]) << (shifting))&0xFFFFFFFF);
1609  }
1610 */
1611  min_tuple->ip4_addr[1].as_u32 =
1612  clib_host_to_net_u32((clib_net_to_host_u32(max_tuple->ip4_addr[1].as_u32) << (shifting))&0xFFFFFFFF);
1613 
1614  break;
1615  case DIM_SRC_PORT: min_tuple->l4.port[0] = max_tuple->l4.port[0] << (best_delta)/2;
1616  break;
1617  case DIM_DST_PORT: min_tuple->l4.port[1] = max_tuple->l4.port[1] << (best_delta)/2;
1618  break;
1619  case DIM_PROTO: min_tuple->l4.proto = max_tuple->l4.proto << (best_delta)/2;
1620  break;
1621  default: relax_tuple(min_tuple, is_ip6, 1);
1622  break;
1623  }
1624 
1625  min_tuple->pkt.is_nonfirst_fragment = 0;
1626  u32 new_mask_type_index = assign_mask_type_index(am, min_tuple);
1627 
1628  hash_applied_mask_info_t **hash_applied_mask_info_vec = vec_elt_at_index(am->hash_applied_mask_info_vec_by_lc_index, lc_index);
1629 
1630  hash_applied_mask_info_t *minfo;
1631  //search in order pool if mask_type_index is already there
1632  int search;
1633  for (search=0; search < vec_len((*hash_applied_mask_info_vec)); search++){
1634  minfo = vec_elt_at_index((*hash_applied_mask_info_vec), search);
1635  if(minfo->mask_type_index == new_mask_type_index)
1636  break;
1637  }
1638 
1639  vec_validate((*hash_applied_mask_info_vec), search);
1640  minfo = vec_elt_at_index((*hash_applied_mask_info_vec), search);
1641  minfo->mask_type_index = new_mask_type_index;
1642  minfo->num_entries = 0;
1643  minfo->max_collisions = 0;
1644  minfo->first_rule_index = ~0;
1645 
1646  DBG( "TM-split_partition - mask type index-assigned!! -> %d", new_mask_type_index);
1647 
1648  if(coll_mask_type_index == new_mask_type_index){
1649  //vlib_cli_output(vm, "TM-There are collisions over threshold, but i'm not able to split! %d %d", coll_mask_type_index, new_mask_type_index);
1650  return;
1651  }
1652 
1653 
1654  /* populate new partition */
1655  DBG( "TM-Populate new partition");
1656  u32 r_ace_index = first_index;
1657  int repopulate_count = 0;
1658 
1659 // for(i=0; i<collisions; i++){
1660  for(r_ace_index=0; r_ace_index < vec_len((*applied_hash_aces)); r_ace_index++) {
1661 
1662  applied_hash_ace_entry_t *pop_pae = vec_elt_at_index((*applied_hash_aces), r_ace_index);
1663  DBG( "TM-Population-collision: base_ace:%d (ace_mask:%d, first_collision_mask:%d)",
1664  pop_pae->ace_index, pop_pae->mask_type_index, coll_mask_type_index);
1665 
1666  if(pop_pae->mask_type_index != coll_mask_type_index) continue;
1667  u32 next_index = pop_pae->next_applied_entry_index;
1668 
1669  ace_info = vec_elt_at_index(ha->rules, pop_pae->hash_ace_info_index);
1670  mte = vec_elt_at_index(am->ace_mask_type_pool, ace_info->base_mask_type_index);
1671  //can insert rule?
1672  //mte = vec_elt_at_index(am->ace_mask_type_pool, pop_pae->mask_type_index);
1673  fa_5tuple_t *pop_mask = &mte->mask;
1674 
1675  if(!first_mask_contains_second_mask(is_ip6, min_tuple, pop_mask)) continue;
1676  DBG( "TM-new partition can insert -> applied_ace:%d", r_ace_index);
1677 
1678  //delete and insert in new format
1679  deactivate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, r_ace_index);
1680 
1681  /* insert the new entry */
1682  pop_pae->mask_type_index = new_mask_type_index;
1683  /* The very first repopulation gets the lock by virtue of a new mask being created above */
1684  if (++repopulate_count > 1)
1685  lock_mask_type_index(am, new_mask_type_index);
1686 
1687  activate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, r_ace_index);
1688 
1689  r_ace_index = next_index;
1690  }
1691 
1692  DBG( "TM-Populate new partition-END");
1693  DBG( "TM-split_partition - END");
1694 
1695 }
1696 
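As a quick aside, the following standalone sketch (not part of the VPP sources; the helper name and the main() harness are illustrative only) shows the relaxation arithmetic used in the DIM_DST_IP4 case above: shifting a host-order IPv4 mask left by delta/2 bits clears that many low-order mask bits, e.g. turning a /24 mask into a /20 for a delta of 8.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Widen an IPv4 mask the same way split_partition() does for DIM_DST_IP4:
 * shift left by half of the prefix-length delta, dropping the shifted-out bits. */
static uint32_t
relax_ip4_mask (uint32_t mask_host_order, int delta)
{
  int shift = delta / 2;
  if (shift <= 0)
    return mask_host_order;
  if (shift >= 32)
    return 0;
  return (uint32_t) ((mask_host_order << shift) & 0xFFFFFFFFu);
}

int
main (void)
{
  uint32_t mask = 0xFFFFFF00u;                       /* a /24 mask, host byte order */
  printf ("relaxed mask: 0x%08" PRIX32 "\n",
          relax_ip4_mask (mask, 8));                 /* prints 0xFFFFF000, a /20 */
  return 0;
}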