FD.io VPP  v21.06-3-gbb25fbf28
Vector Packet Processing
ip4_mtrie.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /*
16  * ip/ip4_fib.h: ip4 mtrie fib
17  *
18  * Copyright (c) 2012 Eliot Dresselhaus
19  *
20  * Permission is hereby granted, free of charge, to any person obtaining
21  * a copy of this software and associated documentation files (the
22  * "Software"), to deal in the Software without restriction, including
23  * without limitation the rights to use, copy, modify, merge, publish,
24  * distribute, sublicense, and/or sell copies of the Software, and to
25  * permit persons to whom the Software is furnished to do so, subject to
26  * the following conditions:
27  *
28  * The above copyright notice and this permission notice shall be
29  * included in all copies or substantial portions of the Software.
30  *
31  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38  */
39 
40 #include <vnet/ip/ip.h>
41 #include <vnet/ip/ip4_mtrie.h>
42 #include <vnet/fib/ip4_fib.h>
43 
44 
45 /**
46  * Global pool of IPv4 8bit PLYs
47  */
48 ip4_fib_mtrie_8_ply_t *ip4_ply_pool;
49 
50 static u32
51 ip4_fib_mtrie_leaf_is_non_empty (ip4_fib_mtrie_8_ply_t * p, u8 dst_byte)
52 {
53  /*
54  * It's 'non-empty' if the length of the leaf stored is greater than the
55  * length of a leaf in the covering ply. i.e. the leaf is more specific
56  * than its would-be cover in the covering ply
57  */
58  if (p->dst_address_bits_of_leaves[dst_byte] > p->dst_address_bits_base)
59  return (1);
60  return (0);
61 }
62 
63 static ip4_fib_mtrie_leaf_t
64 ip4_fib_mtrie_leaf_set_adj_index (u32 adj_index)
65 {
66  ip4_fib_mtrie_leaf_t l;
67  l = 1 + 2 * adj_index;
68  ASSERT (ip4_fib_mtrie_leaf_get_adj_index (l) == adj_index);
69  return l;
70 }
71 
72 static u32
73 ip4_fib_mtrie_leaf_is_next_ply (ip4_fib_mtrie_leaf_t n)
74 {
75  return (n & 1) == 0;
76 }
77 
78 static u32
79 ip4_fib_mtrie_leaf_get_next_ply_index (ip4_fib_mtrie_leaf_t n)
80 {
81  ASSERT (ip4_fib_mtrie_leaf_is_next_ply (n));
82  return n >> 1;
83 }
84 
85 static ip4_fib_mtrie_leaf_t
86 ip4_fib_mtrie_leaf_set_next_ply_index (u32 i)
87 {
88  ip4_fib_mtrie_leaf_t l;
89  l = 0 + 2 * i;
90  ASSERT (ip4_fib_mtrie_leaf_get_next_ply_index (l) == i);
91  return l;
92 }
93 
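Taken together, the helpers above define the leaf encoding: a leaf is a single u32 in which bit 0 distinguishes terminal leaves (odd values, adjacency/load-balance index in the upper bits) from next-ply leaves (even values, ply pool index in the upper bits). A minimal standalone sketch of the same arithmetic, using illustrative names rather than the VPP types:

#include <assert.h>
#include <stdint.h>

typedef uint32_t leaf_t;            /* stands in for ip4_fib_mtrie_leaf_t */

static leaf_t   leaf_from_adj (uint32_t adj_index) { return 1 + 2 * adj_index; }
static leaf_t   leaf_from_ply (uint32_t ply_index) { return 0 + 2 * ply_index; }
static int      leaf_is_terminal (leaf_t l)        { return l & 1; }
static uint32_t leaf_payload (leaf_t l)            { return l >> 1; }

int
main (void)
{
  leaf_t t = leaf_from_adj (7);     /* terminal leaf: odd value 15 */
  leaf_t n = leaf_from_ply (3);     /* next-ply leaf: even value 6 */

  assert (leaf_is_terminal (t) && leaf_payload (t) == 7);
  assert (!leaf_is_terminal (n) && leaf_payload (n) == 3);
  return 0;
}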
94 #ifndef __ALTIVEC__
95 #define PLY_X4_SPLAT_INIT(init_x4, init) \
96  init_x4 = u32x4_splat (init);
97 #else
98 #define PLY_X4_SPLAT_INIT(init_x4, init) \
99 { \
100  u32x4_union_t y; \
101  y.as_u32[0] = init; \
102  y.as_u32[1] = init; \
103  y.as_u32[2] = init; \
104  y.as_u32[3] = init; \
105  init_x4 = y.as_u32x4; \
106 }
107 #endif
108 
109 #ifdef CLIB_HAVE_VEC128
110 #define PLY_INIT_LEAVES(p) \
111 { \
112  u32x4 *l, init_x4; \
113  \
114  PLY_X4_SPLAT_INIT(init_x4, init); \
115  for (l = p->leaves_as_u32x4; \
116  l < p->leaves_as_u32x4 + ARRAY_LEN (p->leaves_as_u32x4); \
117  l += 4) \
118  { \
119  l[0] = init_x4; \
120  l[1] = init_x4; \
121  l[2] = init_x4; \
122  l[3] = init_x4; \
123  } \
124 }
125 #else
126 #define PLY_INIT_LEAVES(p) \
127 { \
128  u32 *l; \
129  \
130  for (l = p->leaves; l < p->leaves + ARRAY_LEN (p->leaves); l += 4) \
131  { \
132  l[0] = init; \
133  l[1] = init; \
134  l[2] = init; \
135  l[3] = init; \
136  } \
137 }
138 #endif
139 
140 #define PLY_INIT(p, init, prefix_len, ply_base_len) \
141 { \
142  /* \
143  * A leaf is 'empty' if it represents a leaf from the covering PLY \
144  * i.e. if the prefix length of the leaf is less than or equal to \
145  * the prefix length of the PLY \
146  */ \
147  p->n_non_empty_leafs = (prefix_len > ply_base_len ? \
148  ARRAY_LEN (p->leaves) : 0); \
149  clib_memset (p->dst_address_bits_of_leaves, prefix_len, \
150  sizeof (p->dst_address_bits_of_leaves)); \
151  p->dst_address_bits_base = ply_base_len; \
152  \
153  /* Initialize leaves. */ \
154  PLY_INIT_LEAVES(p); \
155 }
156 
157 static void
158 ply_8_init (ip4_fib_mtrie_8_ply_t * p,
159  ip4_fib_mtrie_leaf_t init, uword prefix_len, u32 ply_base_len)
160 {
161  PLY_INIT (p, init, prefix_len, ply_base_len);
162 }
163 
164 static void
165 ply_16_init (ip4_fib_mtrie_16_ply_t * p,
166  ip4_fib_mtrie_leaf_t init, uword prefix_len)
167 {
168  clib_memset (p->dst_address_bits_of_leaves, prefix_len,
169  sizeof (p->dst_address_bits_of_leaves));
170  PLY_INIT_LEAVES (p);
171 }
172 
173 static ip4_fib_mtrie_leaf_t
174 ply_create (ip4_fib_mtrie_t * m,
175  ip4_fib_mtrie_leaf_t init_leaf,
176  u32 leaf_prefix_len, u32 ply_base_len)
177 {
178  ip4_fib_mtrie_8_ply_t *p;
179  /* Get cache aligned ply. */
180 
181  pool_get_aligned (ip4_ply_pool, p, CLIB_CACHE_LINE_BYTES);
182 
183  ply_8_init (p, init_leaf, leaf_prefix_len, ply_base_len);
184  return ip4_fib_mtrie_leaf_set_next_ply_index (p - ip4_ply_pool);
185 }
186 
187 static ip4_fib_mtrie_8_ply_t *
188 get_next_ply_for_leaf (ip4_fib_mtrie_t * m, ip4_fib_mtrie_leaf_t l)
189 {
190  uword n = ip4_fib_mtrie_leaf_get_next_ply_index (l);
191 
192  return pool_elt_at_index (ip4_ply_pool, n);
193 }
194 
195 void
196 ip4_mtrie_free (ip4_fib_mtrie_t * m)
197 {
198  /* the root ply is embedded so there is nothing to do,
199  * the assumption being that the IP4 FIB table has emptied the trie
200  * before deletion.
201  */
202 #if CLIB_DEBUG > 0
203  int i;
204  for (i = 0; i < ARRAY_LEN (m->root_ply.leaves); i++)
205  {
206  ASSERT (!ip4_fib_mtrie_leaf_is_next_ply (m->root_ply.leaves[i]));
207  }
208 #endif
209 }
210 
211 void
212 ip4_mtrie_init (ip4_fib_mtrie_t * m)
213 {
214  ply_16_init (&m->root_ply, IP4_FIB_MTRIE_LEAF_EMPTY, 0);
215 }
216 
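For orientation: the structure built here is a 16-8-8 trie. The root ply is indexed by the first two address bytes, and each further ply by one more byte. A hedged sketch of a full lookup using the inline helpers declared in ip4_mtrie.h (the signatures below are assumed from VPP 21.06; this function is not part of this file):

#include <vnet/ip/ip4_mtrie.h>

static u32
example_mtrie_lookup (const ip4_fib_mtrie_t * m, const ip4_address_t * dst)
{
  ip4_fib_mtrie_leaf_t leaf;

  leaf = ip4_fib_mtrie_lookup_step_one (m, dst);      /* consumes bytes 0-1 */
  leaf = ip4_fib_mtrie_lookup_step (m, leaf, dst, 2); /* consumes byte 2 */
  leaf = ip4_fib_mtrie_lookup_step (m, leaf, dst, 3); /* consumes byte 3 */

  /* after three steps the leaf is always terminal */
  return ip4_fib_mtrie_leaf_get_adj_index (leaf);     /* load-balance index */
}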
217 typedef struct
218 {
219  ip4_address_t dst_address;
220  u32 dst_address_length;
221  u32 adj_index;
222  u32 cover_address_length;
223  u32 cover_adj_index;
224 } ip4_fib_mtrie_set_unset_leaf_args_t;
225 
226 static void
227 set_ply_with_more_specific_leaf (ip4_fib_mtrie_t * m,
228  ip4_fib_mtrie_8_ply_t * ply,
229  ip4_fib_mtrie_leaf_t new_leaf,
230  uword new_leaf_dst_address_bits)
231 {
232  ip4_fib_mtrie_leaf_t old_leaf;
233  uword i;
234 
235  ASSERT (ip4_fib_mtrie_leaf_is_terminal (new_leaf));
236 
237  for (i = 0; i < ARRAY_LEN (ply->leaves); i++)
238  {
239  old_leaf = ply->leaves[i];
240 
241  /* Recurse into sub plies. */
242  if (!ip4_fib_mtrie_leaf_is_terminal (old_leaf))
243  {
244  ip4_fib_mtrie_8_ply_t *sub_ply =
245  get_next_ply_for_leaf (m, old_leaf);
246  set_ply_with_more_specific_leaf (m, sub_ply, new_leaf,
247  new_leaf_dst_address_bits);
248  }
249 
250  /* Replace less specific terminal leaves with new leaf. */
251  else if (new_leaf_dst_address_bits >=
252  ply->dst_address_bits_of_leaves[i])
253  {
254  clib_atomic_store_rel_n (&ply->leaves[i], new_leaf);
255  ply->dst_address_bits_of_leaves[i] = new_leaf_dst_address_bits;
256  ply->n_non_empty_leafs += ip4_fib_mtrie_leaf_is_non_empty (ply, i);
257  }
258  }
259 }
260 
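A self-contained sketch of when the function above comes into play (not part of the original file; the load-balance indices 42 and 7 are invented for the example):

#include <vnet/ip/ip4_mtrie.h>

static void
example_cover_after_more_specific (ip4_fib_mtrie_t * m)
{
  ip4_address_t p24 = {.as_u8 = {10, 1, 1, 0} };
  ip4_address_t p16 = {.as_u8 = {10, 1, 0, 0} };

  /* 10.1.1.0/24 first: the root slot for 10.1 now points at an 8-bit sub-ply */
  ip4_fib_mtrie_route_add (m, &p24, 24, 42);

  /* 10.1.0.0/16 next: the root slot is non-terminal and equally specific, so
   * set_ply_with_more_specific_leaf() walks the sub-ply and overwrites every
   * slot whose recorded prefix length is <= 16, leaving the /24 slot intact */
  ip4_fib_mtrie_route_add (m, &p16, 16, 7);
}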
261 static void
262 set_leaf (ip4_fib_mtrie_t * m,
263  const ip4_fib_mtrie_set_unset_leaf_args_t * a,
264  u32 old_ply_index, u32 dst_address_byte_index)
265 {
266  ip4_fib_mtrie_leaf_t old_leaf, new_leaf;
267  i32 n_dst_bits_next_plies;
268  u8 dst_byte;
269  ip4_fib_mtrie_8_ply_t *old_ply;
270 
271  old_ply = pool_elt_at_index (ip4_ply_pool, old_ply_index);
272 
273  ASSERT (a->dst_address_length <= 32);
274  ASSERT (dst_address_byte_index < ARRAY_LEN (a->dst_address.as_u8));
275 
276  /* how many bits of the destination address are in the next PLY */
277  n_dst_bits_next_plies =
278  a->dst_address_length - BITS (u8) * (dst_address_byte_index + 1);
279 
280  dst_byte = a->dst_address.as_u8[dst_address_byte_index];
281 
282  /* Number of bits next plies <= 0 => insert leaves this ply. */
283  if (n_dst_bits_next_plies <= 0)
284  {
285  /* The mask length of the address to insert maps to this ply */
286  uword old_leaf_is_terminal;
287  u32 i, n_dst_bits_this_ply;
288 
289  /* The number of bits, and hence slots/buckets, we will fill */
290  n_dst_bits_this_ply = clib_min (8, -n_dst_bits_next_plies);
291  ASSERT ((a->dst_address.as_u8[dst_address_byte_index] &
292  pow2_mask (n_dst_bits_this_ply)) == 0);
293 
294  /* Starting at the value of the byte at this section of the v4 address
295  * fill the buckets/slots of the ply */
296  for (i = dst_byte; i < dst_byte + (1 << n_dst_bits_this_ply); i++)
297  {
298  ip4_fib_mtrie_8_ply_t *new_ply;
299 
300  old_leaf = old_ply->leaves[i];
301  old_leaf_is_terminal = ip4_fib_mtrie_leaf_is_terminal (old_leaf);
302 
303  if (a->dst_address_length >= old_ply->dst_address_bits_of_leaves[i])
304  {
305  /* The new leaf is more or equally specific than the one currently
306  * occupying the slot */
307  new_leaf = ip4_fib_mtrie_leaf_set_adj_index (a->adj_index);
308 
309  if (old_leaf_is_terminal)
310  {
311  /* The current leaf is terminal, we can replace it with
312  * the new one */
313  old_ply->n_non_empty_leafs -=
314  ip4_fib_mtrie_leaf_is_non_empty (old_ply, i);
315 
316  old_ply->dst_address_bits_of_leaves[i] =
317  a->dst_address_length;
318  clib_atomic_store_rel_n (&old_ply->leaves[i], new_leaf);
319 
320  old_ply->n_non_empty_leafs +=
321  ip4_fib_mtrie_leaf_is_non_empty (old_ply, i);
322  ASSERT (old_ply->n_non_empty_leafs <=
323  ARRAY_LEN (old_ply->leaves));
324  }
325  else
326  {
327  /* Existing leaf points to another ply. We need to place
328  * new_leaf into all more specific slots. */
329  new_ply = get_next_ply_for_leaf (m, old_leaf);
330  set_ply_with_more_specific_leaf (m, new_ply, new_leaf,
331  a->dst_address_length);
332  }
333  }
334  else if (!old_leaf_is_terminal)
335  {
336  /* The current leaf is less specific and not terminal (i.e. a ply),
337  * recurse on down the trie */
338  new_ply = get_next_ply_for_leaf (m, old_leaf);
339  set_leaf (m, a, new_ply - ip4_ply_pool,
340  dst_address_byte_index + 1);
341  }
342  /*
343  * else
344  * the route we are adding is less specific than the leaf currently
345  * occupying this slot. leave it there
346  */
347  }
348  }
349  else
350  {
351  /* The address to insert requires us to move down to a lower level of
352  * the trie - recurse on down */
353  ip4_fib_mtrie_8_ply_t *new_ply;
354  u8 ply_base_len;
355 
356  ply_base_len = 8 * (dst_address_byte_index + 1);
357 
358  old_leaf = old_ply->leaves[dst_byte];
359 
360  if (ip4_fib_mtrie_leaf_is_terminal (old_leaf))
361  {
362  /* There is a leaf occupying the slot. Replace it with a new ply */
363  old_ply->n_non_empty_leafs -=
364  ip4_fib_mtrie_leaf_is_non_empty (old_ply, dst_byte);
365 
366  new_leaf =
367  ply_create (m, old_leaf,
368  old_ply->dst_address_bits_of_leaves[dst_byte],
369  ply_base_len);
370  new_ply = get_next_ply_for_leaf (m, new_leaf);
371 
372  /* Refetch since ply_create may move pool. */
373  old_ply = pool_elt_at_index (ip4_ply_pool, old_ply_index);
374 
375  clib_atomic_store_rel_n (&old_ply->leaves[dst_byte], new_leaf);
376  old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;
377 
378  old_ply->n_non_empty_leafs +=
379  ip4_fib_mtrie_leaf_is_non_empty (old_ply, dst_byte);
380  ASSERT (old_ply->n_non_empty_leafs >= 0);
381  }
382  else
383  new_ply = get_next_ply_for_leaf (m, old_leaf);
384 
385  set_leaf (m, a, new_ply - ip4_ply_pool, dst_address_byte_index + 1);
386  }
387 }
388 
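The slot arithmetic above is worth a worked example. A standalone sketch (plain C, illustrative only) of how many slots of an 8-bit ply a prefix occupies, using 10.10.64.0/18 as the input:

#include <stdio.h>

int
main (void)
{
  int prefix_len = 18;     /* 10.10.64.0/18                       */
  int byte_index = 2;      /* the /18 terminates in the third ply */
  int dst_byte = 64;       /* third address byte of 10.10.64.0    */

  /* mirrors set_leaf(): bits left for deeper plies, bits spent in this ply */
  int n_dst_bits_next_plies = prefix_len - 8 * (byte_index + 1);   /* -6 */
  int n_dst_bits_this_ply = -n_dst_bits_next_plies;                /*  6 */
  if (n_dst_bits_this_ply > 8)
    n_dst_bits_this_ply = 8;

  /* the /18 covers 2^6 = 64 consecutive /24s within 10.10.0.0/16 */
  printf ("fills ply slots %d..%d\n",
          dst_byte, dst_byte + (1 << n_dst_bits_this_ply) - 1);    /* 64..127 */
  return 0;
}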
389 static void
390 set_root_leaf (ip4_fib_mtrie_t * m,
391  const ip4_fib_mtrie_set_unset_leaf_args_t * a)
392 {
393  ip4_fib_mtrie_leaf_t old_leaf, new_leaf;
394  ip4_fib_mtrie_16_ply_t *old_ply;
395  i32 n_dst_bits_next_plies;
396  u16 dst_byte;
397 
398  old_ply = &m->root_ply;
399 
400  ASSERT (a->dst_address_length <= 32);
401 
402  /* how many bits of the destination address are in the next PLY */
403  n_dst_bits_next_plies = a->dst_address_length - BITS (u16);
404 
405  dst_byte = a->dst_address.as_u16[0];
406 
407  /* Number of bits next plies <= 0 => insert leaves this ply. */
408  if (n_dst_bits_next_plies <= 0)
409  {
410  /* The mask length of the address to insert maps to this ply */
411  uword old_leaf_is_terminal;
412  u32 i, n_dst_bits_this_ply;
413 
414  /* The number of bits, and hence slots/buckets, we will fill */
415  n_dst_bits_this_ply = 16 - a->dst_address_length;
416  ASSERT ((clib_host_to_net_u16 (a->dst_address.as_u16[0]) &
417  pow2_mask (n_dst_bits_this_ply)) == 0);
418 
419  /* Starting at the value of the byte at this section of the v4 address
420  * fill the buckets/slots of the ply */
421  for (i = 0; i < (1 << n_dst_bits_this_ply); i++)
422  {
423  ip4_fib_mtrie_8_ply_t *new_ply;
424  u16 slot;
425 
426  slot = clib_net_to_host_u16 (dst_byte);
427  slot += i;
428  slot = clib_host_to_net_u16 (slot);
429 
430  old_leaf = old_ply->leaves[slot];
431  old_leaf_is_terminal = ip4_fib_mtrie_leaf_is_terminal (old_leaf);
432 
433  if (a->dst_address_length >=
434  old_ply->dst_address_bits_of_leaves[slot])
435  {
436  /* The new leaf is more or equally specific than the one currently
437  * occupying the slot */
438  new_leaf = ip4_fib_mtrie_leaf_set_adj_index (a->adj_index);
439 
440  if (old_leaf_is_terminal)
441  {
442  /* The current leaf is terminal, we can replace it with
443  * the new one */
444  old_ply->dst_address_bits_of_leaves[slot] =
445  a->dst_address_length;
446  clib_atomic_store_rel_n (&old_ply->leaves[slot], new_leaf);
447  }
448  else
449  {
450  /* Existing leaf points to another ply. We need to place
451  * new_leaf into all more specific slots. */
452  new_ply = get_next_ply_for_leaf (m, old_leaf);
453  set_ply_with_more_specific_leaf (m, new_ply, new_leaf,
454  a->dst_address_length);
455  }
456  }
457  else if (!old_leaf_is_terminal)
458  {
459  /* The current leaf is less specific and not terminal (i.e. a ply),
460  * recurse on down the trie */
461  new_ply = get_next_ply_for_leaf (m, old_leaf);
462  set_leaf (m, a, new_ply - ip4_ply_pool, 2);
463  }
464  /*
465  * else
466  * the route we are adding is less specific than the leaf currently
467  * occupying this slot. leave it there
468  */
469  }
470  }
471  else
472  {
473  /* The address to insert requires us to move down to a lower level of
474  * the trie - recurse on down */
475  ip4_fib_mtrie_8_ply_t *new_ply;
476  u8 ply_base_len;
477 
478  ply_base_len = 16;
479 
480  old_leaf = old_ply->leaves[dst_byte];
481 
482  if (ip4_fib_mtrie_leaf_is_terminal (old_leaf))
483  {
484  /* There is a leaf occupying the slot. Replace it with a new ply */
485  new_leaf =
486  ply_create (m, old_leaf,
487  old_ply->dst_address_bits_of_leaves[dst_byte],
488  ply_base_len);
489  new_ply = get_next_ply_for_leaf (m, new_leaf);
490 
491  clib_atomic_store_rel_n (&old_ply->leaves[dst_byte], new_leaf);
492  old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;
493  }
494  else
495  new_ply = get_next_ply_for_leaf (m, old_leaf);
496 
497  set_leaf (m, a, new_ply - ip4_ply_pool, 2);
498  }
499 }
500 
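One detail of the root-ply fill loop deserves a note: the root is indexed by the first two address bytes in network byte order, so consecutive /16s are not consecutive array indices on a little-endian host, hence the ntohs/htons round trip around the offset. A plain-libc sketch of the same conversion (illustrative only):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  /* 10.0.0.0/14 covers the four /16s 10.0 .. 10.3 */
  uint16_t dst_u16 = htons (0x0a00);   /* first two bytes, network order */

  for (unsigned i = 0; i < 4; i++)
    {
      uint16_t slot = htons ((uint16_t) (ntohs (dst_u16) + i));
      printf ("10.%u.0.0/16 -> root slot 0x%04x\n", i, (unsigned) slot);
    }
  return 0;
}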
501 static uword
502 unset_leaf (ip4_fib_mtrie_t * m,
503  const ip4_fib_mtrie_set_unset_leaf_args_t * a,
504  ip4_fib_mtrie_8_ply_t * old_ply, u32 dst_address_byte_index)
505 {
506  ip4_fib_mtrie_leaf_t old_leaf, del_leaf;
507  i32 n_dst_bits_next_plies;
508  i32 i, n_dst_bits_this_ply, old_leaf_is_terminal;
509  u8 dst_byte;
510 
511  ASSERT (a->dst_address_length <= 32);
512  ASSERT (dst_address_byte_index < ARRAY_LEN (a->dst_address.as_u8));
513 
514  n_dst_bits_next_plies =
515  a->dst_address_length - BITS (u8) * (dst_address_byte_index + 1);
516 
517  dst_byte = a->dst_address.as_u8[dst_address_byte_index];
518  if (n_dst_bits_next_plies < 0)
519  dst_byte &= ~pow2_mask (-n_dst_bits_next_plies);
520 
521  n_dst_bits_this_ply =
522  n_dst_bits_next_plies <= 0 ? -n_dst_bits_next_plies : 0;
523  n_dst_bits_this_ply = clib_min (8, n_dst_bits_this_ply);
524 
525  del_leaf = ip4_fib_mtrie_leaf_set_adj_index (a->adj_index);
526 
527  for (i = dst_byte; i < dst_byte + (1 << n_dst_bits_this_ply); i++)
528  {
529  old_leaf = old_ply->leaves[i];
530  old_leaf_is_terminal = ip4_fib_mtrie_leaf_is_terminal (old_leaf);
531 
532  if (old_leaf == del_leaf
533  || (!old_leaf_is_terminal
534  && unset_leaf (m, a, get_next_ply_for_leaf (m, old_leaf),
535  dst_address_byte_index + 1)))
536  {
537  old_ply->n_non_empty_leafs -=
538  ip4_fib_mtrie_leaf_is_non_empty (old_ply, i);
539 
540  clib_atomic_store_rel_n (&old_ply->leaves[i],
541  ip4_fib_mtrie_leaf_set_adj_index
542  (a->cover_adj_index));
543  old_ply->dst_address_bits_of_leaves[i] = a->cover_address_length;
544 
545  old_ply->n_non_empty_leafs +=
546  ip4_fib_mtrie_leaf_is_non_empty (old_ply, i);
547 
548  ASSERT (old_ply->n_non_empty_leafs >= 0);
549  if (old_ply->n_non_empty_leafs == 0 && dst_address_byte_index > 0)
550  {
551  pool_put (ip4_ply_pool, old_ply);
552  /* Old ply was deleted. */
553  return 1;
554  }
555 #if CLIB_DEBUG > 0
556  else if (dst_address_byte_index)
557  {
558  int ii, count = 0;
559  for (ii = 0; ii < ARRAY_LEN (old_ply->leaves); ii++)
560  {
561  count += ip4_fib_mtrie_leaf_is_non_empty (old_ply, ii);
562  }
563  ASSERT (count);
564  }
565 #endif
566  }
567  }
568 
569  /* Old ply was not deleted. */
570  return 0;
571 }
572 
573 static void
574 unset_root_leaf (ip4_fib_mtrie_t * m,
575  const ip4_fib_mtrie_set_unset_leaf_args_t * a)
576 {
577  ip4_fib_mtrie_leaf_t old_leaf, del_leaf;
578  i32 n_dst_bits_next_plies;
579  i32 i, n_dst_bits_this_ply, old_leaf_is_terminal;
580  u16 dst_byte;
581  ip4_fib_mtrie_16_ply_t *old_ply;
582 
583  ASSERT (a->dst_address_length <= 32);
584 
585  old_ply = &m->root_ply;
586  n_dst_bits_next_plies = a->dst_address_length - BITS (u16);
587 
588  dst_byte = a->dst_address.as_u16[0];
589 
590  n_dst_bits_this_ply = (n_dst_bits_next_plies <= 0 ?
591  (16 - a->dst_address_length) : 0);
592 
593  del_leaf = ip4_fib_mtrie_leaf_set_adj_index (a->adj_index);
594 
595  /* Starting at the value of the byte at this section of the v4 address
596  * fill the buckets/slots of the ply */
597  for (i = 0; i < (1 << n_dst_bits_this_ply); i++)
598  {
599  u16 slot;
600 
601  slot = clib_net_to_host_u16 (dst_byte);
602  slot += i;
603  slot = clib_host_to_net_u16 (slot);
604 
605  old_leaf = old_ply->leaves[slot];
606  old_leaf_is_terminal = ip4_fib_mtrie_leaf_is_terminal (old_leaf);
607 
608  if (old_leaf == del_leaf
609  || (!old_leaf_is_terminal
610  && unset_leaf (m, a, get_next_ply_for_leaf (m, old_leaf), 2)))
611  {
612  clib_atomic_store_rel_n (&old_ply->leaves[slot],
613  ip4_fib_mtrie_leaf_set_adj_index
614  (a->cover_adj_index));
615  old_ply->dst_address_bits_of_leaves[slot] = a->cover_address_length;
616  }
617  }
618 }
619 
620 void
621 ip4_fib_mtrie_route_add (ip4_fib_mtrie_t * m,
622  const ip4_address_t * dst_address,
623  u32 dst_address_length, u32 adj_index)
624 {
625  ip4_fib_mtrie_set_unset_leaf_args_t a;
626  ip4_main_t *im = &ip4_main;
627 
628  /* Honor dst_address_length. Fib masks are in network byte order */
629  a.dst_address.as_u32 = (dst_address->as_u32 &
630  im->fib_masks[dst_address_length]);
631  a.dst_address_length = dst_address_length;
632  a.adj_index = adj_index;
633 
634  set_root_leaf (m, &a);
635 }
636 
637 void
638 ip4_fib_mtrie_route_del (ip4_fib_mtrie_t * m,
639  const ip4_address_t * dst_address,
640  u32 dst_address_length,
641  u32 adj_index,
642  u32 cover_address_length, u32 cover_adj_index)
643 {
644  ip4_fib_mtrie_set_unset_leaf_args_t a;
645  ip4_main_t *im = &ip4_main;
646 
647  /* Honor dst_address_length. Fib masks are in network byte order */
648  a.dst_address.as_u32 = (dst_address->as_u32 &
649  im->fib_masks[dst_address_length]);
650  a.dst_address_length = dst_address_length;
651  a.adj_index = adj_index;
652  a.cover_adj_index = cover_adj_index;
653  a.cover_address_length = cover_address_length;
654 
655  /* the top level ply is never removed */
656  unset_root_leaf (m, &a);
657 }
658 
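A hedged sketch of the delete-side calling convention (normally driven by ip4_fib.c rather than called directly; the indices are invented for the example). The cover's length and adjacency passed in are what the vacated slots fall back to:

#include <vnet/ip/ip4_mtrie.h>

static void
example_route_del (ip4_fib_mtrie_t * m)
{
  ip4_address_t p24 = {.as_u8 = {10, 1, 1, 0} };

  /* remove 10.1.1.0/24 (LB index 42); slots it owned revert to its cover,
   * here assumed to be 10.0.0.0/8 with LB index 13 */
  ip4_fib_mtrie_route_del (m, &p24, 24, 42, 8, 13);
}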
659 /* Returns number of bytes of memory used by mtrie. */
660 static uword
661 mtrie_ply_memory_usage (ip4_fib_mtrie_t * m, ip4_fib_mtrie_8_ply_t * p)
662 {
663  uword bytes, i;
664 
665  bytes = sizeof (p[0]);
666  for (i = 0; i < ARRAY_LEN (p->leaves); i++)
667  {
668  ip4_fib_mtrie_leaf_t l = p->leaves[i];
669  if (ip4_fib_mtrie_leaf_is_next_ply (l))
670  bytes += mtrie_ply_memory_usage (m, get_next_ply_for_leaf (m, l));
671  }
672 
673  return bytes;
674 }
675 
676 /* Returns number of bytes of memory used by mtrie. */
677 uword
678 ip4_fib_mtrie_memory_usage (ip4_fib_mtrie_t * m)
679 {
680  uword bytes, i;
681 
682  bytes = sizeof (*m);
683  for (i = 0; i < ARRAY_LEN (m->root_ply.leaves); i++)
684  {
685  ip4_fib_mtrie_leaf_t l = m->root_ply.leaves[i];
686  if (ip4_fib_mtrie_leaf_is_next_ply (l))
687  bytes += mtrie_ply_memory_usage (m, get_next_ply_for_leaf (m, l));
688  }
689 
690  return bytes;
691 }
692 
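For scale: sizeof (ip4_fib_mtrie_t) is dominated by the embedded 16-bit root ply, roughly 64 K four-byte leaves plus 64 K one-byte prefix lengths (about 320 KB in total), and each 8-bit ply reachable from it adds roughly another 1.3 KB (256 four-byte leaves plus 256 length bytes and a little metadata), which is the sum the two walkers above compute.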
693 static u8 *
694 format_ip4_fib_mtrie_leaf (u8 * s, va_list * va)
695 {
696  ip4_fib_mtrie_leaf_t l = va_arg (*va, ip4_fib_mtrie_leaf_t);
697 
698  if (ip4_fib_mtrie_leaf_is_terminal (l))
699  s = format (s, "lb-index %d", ip4_fib_mtrie_leaf_get_adj_index (l));
700  else
701  s = format (s, "next ply %d", ip4_fib_mtrie_leaf_get_next_ply_index (l));
702  return s;
703 }
704 
705 #define FORMAT_PLY(s, _p, _a, _i, _base_address, _ply_max_len, _indent) \
706 ({ \
707  u32 a, ia_length; \
708  ip4_address_t ia; \
709  ip4_fib_mtrie_leaf_t _l = p->leaves[(_i)]; \
710  \
711  a = (_base_address) + ((_a) << (32 - (_ply_max_len))); \
712  ia.as_u32 = clib_host_to_net_u32 (a); \
713  ia_length = (_p)->dst_address_bits_of_leaves[(_i)]; \
714  s = format (s, "\n%U%U %U", \
715  format_white_space, (_indent) + 4, \
716  format_ip4_address_and_length, &ia, ia_length, \
717  format_ip4_fib_mtrie_leaf, _l); \
718  \
719  if (ip4_fib_mtrie_leaf_is_next_ply (_l)) \
720  s = format (s, "\n%U", \
721  format_ip4_fib_mtrie_ply, m, a, (_indent) + 8, \
722  ip4_fib_mtrie_leaf_get_next_ply_index (_l)); \
723  s; \
724 })
725 
726 static u8 *
727 format_ip4_fib_mtrie_ply (u8 * s, va_list * va)
728 {
729  ip4_fib_mtrie_t *m = va_arg (*va, ip4_fib_mtrie_t *);
730  u32 base_address = va_arg (*va, u32);
731  u32 indent = va_arg (*va, u32);
732  u32 ply_index = va_arg (*va, u32);
733  ip4_fib_mtrie_8_ply_t *p;
734  int i;
735 
736  p = pool_elt_at_index (ip4_ply_pool, ply_index);
737  s = format (s, "%Uply index %d, %d non-empty leaves",
738  format_white_space, indent, ply_index, p->n_non_empty_leafs);
739 
740  for (i = 0; i < ARRAY_LEN (p->leaves); i++)
741  {
742  if (ip4_fib_mtrie_leaf_is_non_empty (p, i))
743  {
744  s = FORMAT_PLY (s, p, i, i, base_address,
745  p->dst_address_bits_base + 8, indent);
746  }
747  }
748 
749  return s;
750 }
751 
752 u8 *
753 format_ip4_fib_mtrie (u8 * s, va_list * va)
754 {
755  ip4_fib_mtrie_t *m = va_arg (*va, ip4_fib_mtrie_t *);
756  int verbose = va_arg (*va, int);
757  ip4_fib_mtrie_16_ply_t *p;
758  u32 base_address = 0;
759  int i;
760 
761  s = format (s, "%d plies, memory usage %U\n",
762  pool_elts (ip4_ply_pool),
763  format_memory_size, ip4_fib_mtrie_memory_usage (m));
764  s = format (s, "root-ply");
765  p = &m->root_ply;
766 
767  if (verbose)
768  {
769  s = format (s, "root-ply");
770  p = &m->root_ply;
771 
772  for (i = 0; i < ARRAY_LEN (p->leaves); i++)
773  {
774  u16 slot;
775 
776  slot = clib_host_to_net_u16 (i);
777 
778  if (p->dst_address_bits_of_leaves[slot] > 0)
779  {
780  s = FORMAT_PLY (s, p, i, slot, base_address, 16, 0);
781  }
782  }
783  }
784 
785  return s;
786 }
787 
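A hedged usage sketch for the formatter (not part of this file): dumping a table's trie from CLI code, assuming the ip4_fib_get() accessor and the FIB's embedded mtrie member as in VPP 21.06.

#include <vlib/vlib.h>
#include <vnet/fib/ip4_fib.h>

static void
example_show_mtrie (vlib_main_t * vm, u32 fib_index)
{
  ip4_fib_t *fib = ip4_fib_get (fib_index);

  /* verbose=1 walks every non-empty slot via FORMAT_PLY above */
  vlib_cli_output (vm, "%U", format_ip4_fib_mtrie, &fib->mtrie, 1);
}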
788 /** Default heap size for the IPv4 mtries */
789 #define IP4_FIB_DEFAULT_MTRIE_HEAP_SIZE (32<<20)
790 #ifndef MAP_HUGE_SHIFT
791 #define MAP_HUGE_SHIFT 26
792 #endif
793 
794 static clib_error_t *
795 ip4_mtrie_module_init (vlib_main_t * vm)
796 {
797  CLIB_UNUSED (ip4_fib_mtrie_8_ply_t * p);
798  clib_error_t *error = NULL;
799 
800  /* Burn one ply so index 0 is taken */
801  pool_get (ip4_ply_pool, p);
802 
803  return (error);
804 }
805 
806 VLIB_INIT_FUNCTION (ip4_mtrie_module_init);
807 
808 /*
809  * fd.io coding-style-patch-verification: ON
810  *
811  * Local Variables:
812  * eval: (c-set-style "gnu")
813  * End:
814  */