FD.io VPP v21.06-3-gbb25fbf28 (Vector Packet Processing)
67 l = 1 + 2 * adj_index;
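
Line 67 is the whole leaf encoding: a leaf is a 32-bit value whose least-significant bit marks it as terminal, with the payload index in the remaining 31 bits. Below is a hedged, stand-alone sketch of that encoding; the names are illustrative, while the file's own helpers (ip4_fib_mtrie_leaf_set_adj_index, ip4_fib_mtrie_leaf_get_adj_index, ip4_fib_mtrie_leaf_is_terminal, ip4_fib_mtrie_leaf_set_next_ply_index) are declared further down this page.

#include <stdint.h>

/* Hedged sketch of the leaf encoding implied by "l = 1 + 2 * adj_index":
 *   bit 0 set   -> terminal leaf; bits 1..31 carry the load-balance/adjacency index
 *   bit 0 clear -> non-terminal leaf; bits 1..31 carry the index of the next ply */
static inline uint32_t leaf_from_adj_index (uint32_t adj_index) { return 1 + 2 * adj_index; }
static inline uint32_t leaf_from_ply_index (uint32_t ply_index) { return 0 + 2 * ply_index; }
static inline int      leaf_is_terminal    (uint32_t leaf)      { return leaf & 1; }
static inline uint32_t leaf_payload_index  (uint32_t leaf)      { return leaf >> 1; }
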
95 #define PLY_X4_SPLAT_INIT(init_x4, init) \
96 init_x4 = u32x4_splat (init);
98 #define PLY_X4_SPLAT_INIT(init_x4, init) \
101 y.as_u32[0] = init; \
102 y.as_u32[1] = init; \
103 y.as_u32[2] = init; \
104 y.as_u32[3] = init; \
105 init_x4 = y.as_u32x4; \
109 #ifdef CLIB_HAVE_VEC128
110 #define PLY_INIT_LEAVES(p) \
114 PLY_X4_SPLAT_INIT(init_x4, init); \
115 for (l = p->leaves_as_u32x4; \
116 l < p->leaves_as_u32x4 + ARRAY_LEN (p->leaves_as_u32x4); \
126 #define PLY_INIT_LEAVES(p) \
130 for (l = p->leaves; l < p->leaves + ARRAY_LEN (p->leaves); l += 4) \
140 #define PLY_INIT(p, init, prefix_len, ply_base_len) \
147 p->n_non_empty_leafs = (prefix_len > ply_base_len ? \
148 ARRAY_LEN (p->leaves) : 0); \
149 clib_memset (p->dst_address_bits_of_leaves, prefix_len, \
150 sizeof (p->dst_address_bits_of_leaves)); \
151 p->dst_address_bits_base = ply_base_len; \
154 PLY_INIT_LEAVES(p); \
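
Both PLY_X4_SPLAT_INIT variants above build a 128-bit value whose four u32 lanes all equal init, so PLY_INIT_LEAVES can store it across the 256 leaves four at a time; the non-vector fallback at line 130 instead walks p->leaves in strides of four. A hedged, plain-C sketch of that fill pattern, using standard types instead of the clib vector types:

#include <stddef.h>
#include <stdint.h>

/* Hedged sketch of the PLY_INIT_LEAVES fill pattern: every leaf of a fresh
 * 8-bit ply starts out as the same "init" leaf (typically the covering
 * prefix's leaf, or the empty leaf). */
static void
ply_fill_leaves_sketch (uint32_t leaves[256], uint32_t init)
{
  for (size_t i = 0; i < 256; i += 4)   /* unrolled by 4, like the scalar path */
    {
      leaves[i + 0] = init;
      leaves[i + 1] = init;
      leaves[i + 2] = init;
      leaves[i + 3] = init;
    }
}
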
176 u32 leaf_prefix_len, u32 ply_base_len)
183 ply_8_init (p, init_leaf, leaf_prefix_len, ply_base_len);
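
Line 183 is the core of ply_create: take a fresh 8-bit ply from the global ip4_ply_pool, initialise every leaf to init_leaf, and return a non-terminal leaf naming the new ply. The following is a hedged sketch of that sequence, assuming the pool macros and leaf setter declared at the bottom of this page; it is an illustration, not a verbatim copy of the function.

/* Hedged sketch of the allocation pattern behind ply_create (see the
 * declarations of pool_get_aligned, ip4_ply_pool and
 * ip4_fib_mtrie_leaf_set_next_ply_index further down this page). */
static ip4_fib_mtrie_leaf_t
ply_create_sketch (ip4_fib_mtrie_t *m, ip4_fib_mtrie_leaf_t init_leaf,
                   u32 leaf_prefix_len, u32 ply_base_len)
{
  ip4_fib_mtrie_8_ply_t *p;

  (void) m;                    /* m kept only for signature parity */
  /* cache-line-aligned ply from the global pool */
  pool_get_aligned (ip4_ply_pool, p, CLIB_CACHE_LINE_BYTES);
  /* every leaf starts as init_leaf (usually the covering prefix's leaf) */
  ply_8_init (p, init_leaf, leaf_prefix_len, ply_base_len);
  /* return a non-terminal leaf that names the new ply by its pool index */
  return ip4_fib_mtrie_leaf_set_next_ply_index (p - ip4_ply_pool);
}
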
220 u32 dst_address_length;
222 u32 cover_address_length;
230 uword new_leaf_dst_address_bits)
247 new_leaf_dst_address_bits);
251 else if (new_leaf_dst_address_bits >=
264 u32 old_ply_index, u32 dst_address_byte_index)
267 i32 n_dst_bits_next_plies;
273 ASSERT (a->dst_address_length <= 32);
277 n_dst_bits_next_plies =
278 a->dst_address_length - BITS (u8) * (dst_address_byte_index + 1);
280 dst_byte = a->dst_address.as_u8[dst_address_byte_index];
283 if (n_dst_bits_next_plies <= 0)
286 uword old_leaf_is_terminal;
287 u32 i, n_dst_bits_this_ply;
290 n_dst_bits_this_ply = clib_min (8, -n_dst_bits_next_plies);
291 ASSERT ((a->dst_address.as_u8[dst_address_byte_index] &
296 for (i = dst_byte; i < dst_byte + (1 << n_dst_bits_this_ply); i++)
309 if (old_leaf_is_terminal)
317 a->dst_address_length;
331 a->dst_address_length);
334 else if (!old_leaf_is_terminal)
340 dst_address_byte_index + 1);
356 ply_base_len = 8 * (dst_address_byte_index + 1);
358 old_leaf = old_ply->leaves[dst_byte];
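
The arithmetic at lines 277-296 decides how many slots of an 8-bit ply a single route occupies. A hedged worked example with a made-up prefix:

/* Worked example (illustrative prefix): inserting 10.1.4.0/22, handled by
 * set_leaf at dst_address_byte_index = 2 (the third address byte).
 *
 *   n_dst_bits_next_plies = 22 - 8 * (2 + 1) = -2   -> <= 0, so the leaf lands here
 *   n_dst_bits_this_ply   = clib_min (8, 2)  =  2
 *   dst_byte              = third byte of 10.1.4.0  =  4
 *
 * The loop at line 296 therefore fills 1 << 2 = 4 slots, indices 4..7, of the
 * ply reached through 10.1 -- i.e. 10.1.4.x .. 10.1.7.x all map to the new
 * terminal leaf, except where a more specific leaf already occupies a slot. */
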
395 i32 n_dst_bits_next_plies;
400 ASSERT (a->dst_address_length <= 32);
403 n_dst_bits_next_plies = a->dst_address_length - BITS (u16);
405 dst_byte = a->dst_address.as_u16[0];
408 if (n_dst_bits_next_plies <= 0)
411 uword old_leaf_is_terminal;
412 u32 i, n_dst_bits_this_ply;
415 n_dst_bits_this_ply = 16 - a->dst_address_length;
416 ASSERT ((clib_host_to_net_u16 (a->dst_address.as_u16[0]) &
421 for (i = 0; i < (1 << n_dst_bits_this_ply); i++)
426 slot = clib_net_to_host_u16 (dst_byte);
433 if (a->dst_address_length >=
440 if (old_leaf_is_terminal)
445 a->dst_address_length;
454 a->dst_address_length);
457 else if (!old_leaf_is_terminal)
480 old_leaf = old_ply->leaves[dst_byte];
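
set_root_leaf applies the same idea to the 16-bit root ply: a prefix no longer than /16 is expanded into 1 << (16 - length) consecutive root slots. A hedged worked example with a made-up prefix:

/* Worked example (illustrative prefix): inserting 10.4.0.0/14 into the
 * 16-bit root ply.
 *
 *   n_dst_bits_next_plies = 14 - 16 = -2   -> <= 0, terminal at the root
 *   n_dst_bits_this_ply   = 16 - 14 =  2
 *
 * The loop at line 421 writes 1 << 2 = 4 slots.  The starting slot is the
 * host-order value of the first two address bytes, 0x0A04, so the run
 * 0x0A04 .. 0x0A07 covers exactly 10.4.0.0 .. 10.7.255.255 -- the whole /14. */
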
507 i32 n_dst_bits_next_plies;
508 i32 i, n_dst_bits_this_ply, old_leaf_is_terminal;
511 ASSERT (a->dst_address_length <= 32);
514 n_dst_bits_next_plies =
515 a->dst_address_length - BITS (u8) * (dst_address_byte_index + 1);
517 dst_byte = a->dst_address.as_u8[dst_address_byte_index];
518 if (n_dst_bits_next_plies < 0)
519 dst_byte &= ~pow2_mask (-n_dst_bits_next_plies);
521 n_dst_bits_this_ply =
522 n_dst_bits_next_plies <= 0 ? -n_dst_bits_next_plies : 0;
523 n_dst_bits_this_ply = clib_min (8, n_dst_bits_this_ply);
527 for (i = dst_byte; i < dst_byte + (1 << n_dst_bits_this_ply); i++)
532 if (old_leaf == del_leaf
533 || (!old_leaf_is_terminal
535 dst_address_byte_index + 1)))
542 (a->cover_adj_index));
556 else if (dst_address_byte_index)
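
Deletion mirrors insertion: the same slot-range arithmetic locates the slots the dead route occupied, and each slot that still holds the deleted leaf (or leads only to sub-plies that have just been emptied) is rewritten with the covering prefix's adjacency, the a->cover_adj_index at line 542. A hedged continuation of the illustrative /22 example above:

/* Worked example (illustrative prefixes): deleting 10.1.4.0/22 whose cover
 * is 10.0.0.0/8 with adjacency cover_adj.
 *
 *   dst_byte = 4, n_dst_bits_this_ply = 2   -> revisit slots 4..7
 *
 * Each of those slots that still holds the deleted leaf, or a sub-ply that
 * the recursive unset_leaf() call has just emptied, is overwritten with a
 * terminal leaf built from a->cover_adj_index (presumably via
 * ip4_fib_mtrie_leaf_set_adj_index), so lookups for 10.1.4.x .. 10.1.7.x
 * fall back to the /8's adjacency. */
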
578 i32 n_dst_bits_next_plies;
579 i32 i, n_dst_bits_this_ply, old_leaf_is_terminal;
583 ASSERT (a->dst_address_length <= 32);
586 n_dst_bits_next_plies = a->dst_address_length - BITS (u16);
588 dst_byte = a->dst_address.as_u16[0];
590 n_dst_bits_this_ply = (n_dst_bits_next_plies <= 0 ?
591 (16 - a->dst_address_length) : 0);
597 for (i = 0; i < (1 << n_dst_bits_this_ply); i++)
601 slot = clib_net_to_host_u16 (dst_byte);
608 if (old_leaf == del_leaf
609 || (!old_leaf_is_terminal
614 (a->cover_adj_index));
623 u32 dst_address_length, u32 adj_index)
629 a.dst_address.as_u32 = (dst_address->as_u32 &
630 im->fib_masks[dst_address_length]);
631 a.dst_address_length = dst_address_length;
632 a.adj_index = adj_index;
640 u32 dst_address_length,
642 u32 cover_address_length, u32 cover_adj_index)
648 a.dst_address.as_u32 = (dst_address->as_u32 &
649 im->fib_masks[dst_address_length]);
650 a.dst_address_length = dst_address_length;
651 a.adj_index = adj_index;
652 a.cover_adj_index = cover_adj_index;
653 a.cover_address_length = cover_address_length;
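
Lines 623-653 are the two public entry points: both mask the destination with im->fib_masks[dst_address_length], fill in an args structure, and then walk the plies from the root. A hedged usage sketch built only from the prototypes listed at the bottom of this page; the prefixes and adjacency indices are invented for illustration:

/* Hedged usage sketch of the mtrie API declared on this page. */
static void
mtrie_usage_sketch (void)
{
  ip4_fib_mtrie_t mtrie;
  ip4_address_t pfx = {.as_u8 = {10, 1, 4, 0} };

  ip4_mtrie_init (&mtrie);

  /* install 10.1.4.0/22 pointing at (made-up) load-balance index 13 */
  ip4_fib_mtrie_route_add (&mtrie, &pfx, 22, 13);

  /* remove it; lookups then fall back to its cover, here 10.0.0.0/8 -> 7 */
  ip4_fib_mtrie_route_del (&mtrie, &pfx, 22, 13,
                           8 /* cover_address_length */,
                           7 /* cover_adj_index */);

  ip4_mtrie_free (&mtrie);
}
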
665 bytes = sizeof (p[0]);
705 #define FORMAT_PLY(s, _p, _a, _i, _base_address, _ply_max_len, _indent) \
709 ip4_fib_mtrie_leaf_t _l = p->leaves[(_i)]; \
711 a = (_base_address) + ((_a) << (32 - (_ply_max_len))); \
712 ia.as_u32 = clib_host_to_net_u32 (a); \
713 ia_length = (_p)->dst_address_bits_of_leaves[(_i)]; \
714 s = format (s, "\n%U%U %U", \
715 format_white_space, (_indent) + 4, \
716 format_ip4_address_and_length, &ia, ia_length, \
717 format_ip4_fib_mtrie_leaf, _l); \
719 if (ip4_fib_mtrie_leaf_is_next_ply (_l)) \
720 s = format (s, "\n%U", \
721 format_ip4_fib_mtrie_ply, m, a, (_indent) + 8, \
722 ip4_fib_mtrie_leaf_get_next_ply_index (_l)); \
730 u32 base_address = va_arg (*va, u32);
731 u32 indent = va_arg (*va, u32);
732 u32 ply_index = va_arg (*va, u32);
737 s = format (s, "%Uply index %d, %d non-empty leaves",
756 int verbose = va_arg (*va, int);
758 u32 base_address = 0;
761 s = format (s, "%d plies, memory usage %U\n",
764 s = format (s, "root-ply");
769 s = format (s, "root-ply");
776 slot = clib_host_to_net_u16 (i);
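
format_ip4_fib_mtrie is a standard vppinfra %U formatter: it pulls its arguments off the va_list (the verbose flag at line 756) and, when verbose, recurses through FORMAT_PLY. Below is a hedged sketch of a typical call, assuming the formatter's va_list arguments are the mtrie pointer followed by the verbose flag, and that %wd prints a word-sized integer:

/* Hedged sketch: rendering an mtrie with the %U formatter declared on this
 * page, plus its footprint from ip4_fib_mtrie_memory_usage(). */
static u8 *
mtrie_format_sketch (ip4_fib_mtrie_t *m)
{
  u8 *s = 0;

  /* verbose = 1 also walks the plies via FORMAT_PLY; 0 prints the summary only */
  s = format (s, "%U", format_ip4_fib_mtrie, m, 1 /* verbose */);
  /* footprint: the embedded 16-bit root ply plus every pooled 8-bit ply */
  s = format (s, "\nmtrie memory: %wd bytes", ip4_fib_mtrie_memory_usage (m));
  return s;
}
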
789 #define IP4_FIB_DEFAULT_MTRIE_HEAP_SIZE (32<<20)
790 #ifndef MAP_HUGE_SHIFT
791 #define MAP_HUGE_SHIFT 26

Definitions referenced in this file:

ip4_fib_mtrie_leaf_t leaves[256]
vnet_interface_main_t * im
ip4_fib_mtrie_leaf_t leaves[PLY_16_SIZE]
i32 dst_address_bits_base
The length of the ply's covering prefix.
#define PLY_INIT_LEAVES(p)
static u32 ip4_fib_mtrie_leaf_get_adj_index(ip4_fib_mtrie_leaf_t n)
From the stored slot value extract the LB index value.
ip4_main_t ip4_main
Global ip4 main structure.
static uword pow2_mask(uword x)
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
ip4_fib_mtrie_8_ply_t * ip4_ply_pool
Global pool of IPv4 8bit PLYs.
static ip4_fib_mtrie_leaf_t ply_create(ip4_fib_mtrie_t *m, ip4_fib_mtrie_leaf_t init_leaf, u32 leaf_prefix_len, u32 ply_base_len)
#define pool_get_aligned(P, E, A)
Allocate an object E from a pool P with alignment A.
static u32 ip4_fib_mtrie_leaf_is_next_ply(ip4_fib_mtrie_leaf_t n)
#define pool_put(P, E)
Free an object E in pool P.
vlib_main_t * vm
static u8 * format_ip4_fib_mtrie_leaf(u8 *s, va_list *va)
static u8 * format_ip4_fib_mtrie_ply(u8 *s, va_list *va)
#define PLY_INIT(p, init, prefix_len, ply_base_len)
u8 dst_address_bits_of_leaves[256]
Prefix length for leaves/ply.
#define FORMAT_PLY(s, _p, _a, _i, _base_address, _ply_max_len, _indent)
static void ply_8_init(ip4_fib_mtrie_8_ply_t *p, ip4_fib_mtrie_leaf_t init, uword prefix_len, u32 ply_base_len)
uword ip4_fib_mtrie_memory_usage(ip4_fib_mtrie_t *m)
Return the memory used by the table.
static void set_ply_with_more_specific_leaf(ip4_fib_mtrie_t *m, ip4_fib_mtrie_8_ply_t *ply, ip4_fib_mtrie_leaf_t new_leaf, uword new_leaf_dst_address_bits)
static clib_error_t * ip4_mtrie_module_init(vlib_main_t *vm)
static void set_root_leaf(ip4_fib_mtrie_t *m, const ip4_fib_mtrie_set_unset_leaf_args_t *a)
static uword mtrie_ply_memory_usage(ip4_fib_mtrie_t *m, ip4_fib_mtrie_8_ply_t *p)
u8 * format_ip4_fib_mtrie(u8 *s, va_list *va)
u8 dst_address_bits_of_leaves[PLY_16_SIZE]
Prefix length for terminal leaves.
static ip4_fib_mtrie_8_ply_t * get_next_ply_for_leaf(ip4_fib_mtrie_t *m, ip4_fib_mtrie_leaf_t l)
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
#define IP4_FIB_MTRIE_LEAF_EMPTY
i32 n_non_empty_leafs
Number of non-empty leaves (whether terminal or not).
#define CLIB_CACHE_LINE_BYTES
void ip4_fib_mtrie_route_add(ip4_fib_mtrie_t *m, const ip4_address_t *dst_address, u32 dst_address_length, u32 adj_index)
Add a route/entry to the mtrie.
static void ply_16_init(ip4_fib_mtrie_16_ply_t *p, ip4_fib_mtrie_leaf_t init, uword prefix_len)
One ply of the 4 ply mtrie fib.
static void set_leaf(ip4_fib_mtrie_t *m, const ip4_fib_mtrie_set_unset_leaf_args_t *a, u32 old_ply_index, u32 dst_address_byte_index)
static u32 ip4_fib_mtrie_leaf_is_terminal(ip4_fib_mtrie_leaf_t n)
Is the leaf terminal (i.e. an LB index) or a pointer to another ply?
static u32 ip4_fib_mtrie_leaf_get_next_ply_index(ip4_fib_mtrie_leaf_t n)
static void unset_root_leaf(ip4_fib_mtrie_t *m, const ip4_fib_mtrie_set_unset_leaf_args_t *a)
void ip4_fib_mtrie_route_del(ip4_fib_mtrie_t *m, const ip4_address_t *dst_address, u32 dst_address_length, u32 adj_index, u32 cover_address_length, u32 cover_adj_index)
Remove a route/entry from the mtrie.
static u32 ip4_fib_mtrie_leaf_is_non_empty(ip4_fib_mtrie_8_ply_t *p, u8 dst_byte)
#define VLIB_INIT_FUNCTION(x)
void ip4_mtrie_free(ip4_fib_mtrie_t *m)
Free an mtrie. It must be empty when freed.
static ip4_fib_mtrie_leaf_t ip4_fib_mtrie_leaf_set_adj_index(u32 adj_index)
static uword pool_elts(void *v)
Number of active elements in a pool.
static uword unset_leaf(ip4_fib_mtrie_t *m, const ip4_fib_mtrie_set_unset_leaf_args_t *a, ip4_fib_mtrie_8_ply_t *old_ply, u32 dst_address_byte_index)
ip4_fib_mtrie_16_ply_t root_ply
Embed the PLY with the mtrie struct.
clib_error_t *(vlib_init_function_t)(struct vlib_main_t *vm)
void ip4_mtrie_init(ip4_fib_mtrie_t *m)
Initialise an mtrie.
#define clib_atomic_store_rel_n(a, b)
static ip4_fib_mtrie_leaf_t ip4_fib_mtrie_leaf_set_next_ply_index(u32 i)