(Annotated source listing omitted: only scattered, line-broken fragments of the NAT virtual-reassembly implementation survived extraction here. The recoverable pieces cover the power-of-two sizing of the reassembly hash buckets (with hash memory sized at nbuckets * 1024), the IPv4 and IPv6 find-or-create paths with their timeout refresh, LRU list maintenance, recycling of the oldest reassembly when the pool is full, and bihash key setup, the per-reassembly fragment lists, the lookup-time timeout checks, hash-table initialization in nat_reass_init, and the unformat parsing and output of the "nat virtual-reassembly" and "show nat virtual-reassembly" CLI commands. The member index below is intact.)
int nat_ip6_reass_add_fragment(nat_reass_ip6_t *reass, u32 bi)
Cache fragment.
u32 nat_reass_get_timeout(u8 is_ip6)
Get reassembly timeout.
static void clib_dlist_init(dlist_elt_t *pool, u32 index)
vnet_main_t * vnet_get_main(void)
clib_error_t * nat_reass_init(vlib_main_t *vm)
Initialize NAT virtual fragmentation reassembly.
#define pool_alloc(P, N)
Allocate N more free elements to pool (unspecified alignment).
static f64 vlib_time_now(vlib_main_t *vm)
int nat_ip4_reass_add_fragment(nat_reass_ip4_t *reass, u32 bi)
Cache fragment.
static int nat_ip6_reass_walk_cli(nat_reass_ip6_t *reass, void *ctx)
void nat_ip6_reass_get_frags(nat_reass_ip6_t *reass, u32 **bi)
Get cached fragments.
static_always_inline void clib_spinlock_unlock_if_init(clib_spinlock_t *p)
nat_reass_ip4_t * nat_ip4_reass_find(ip4_address_t src, ip4_address_t dst, u16 frag_id, u8 proto)
Find reassembly.
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
int(* nat_ip6_reass_walk_fn_t)(nat_reass_ip6_t *reass, void *ctx)
Callback invoked for each IPv6 reassembly during a walk; a non-zero return value stops the walk.
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
void nat_ip4_reass_walk(nat_ip4_reass_walk_fn_t fn, void *ctx)
Walk IPv4 reassemblies.
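As a usage sketch (the counter callback and wrapper below are hypothetical, not part of the plugin), a caller can count live IPv4 reassemblies by passing a callback matching the nat_ip4_reass_walk_fn_t typedef listed further down; returning non-zero from the callback stops the walk early:

static int
count_ip4_reass_cb (nat_reass_ip4_t * reass, void *ctx)
{
  u32 *count = ctx;
  *count += 1;
  return 0;			/* zero: keep walking */
}

static u32
count_ip4_reassemblies (void)
{
  u32 count = 0;
  nat_ip4_reass_walk (count_ip4_reass_cb, &count);
  return count;
}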
static clib_error_t * nat_reass_command_fn(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
#define static_always_inline
#define pool_foreach(VAR, POOL, BODY)
Iterate through pool.
dlist_elt_t * ip4_frags_list_pool
dlist_elt_t * ip6_reass_lru_list_pool
static_always_inline void nat_ip6_reass_get_frags_inline(nat_reass_ip6_t *reass, u32 **bi)
#define NAT_MAX_REASS_DEAFULT
#define clib_error_return(e, args...)
nat_reass_ip6_t * ip6_reass_pool
static void clib_spinlock_init(clib_spinlock_t *p)
#define NAT_MAX_FRAG_DEFAULT
u8 nat_reass_get_max_frag(u8 is_ip6)
Get maximum number of fragments per reassembly.
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
static int nat_ip4_reass_walk_cli(nat_reass_ip4_t *reass, void *ctx)
dlist_elt_t * ip4_reass_lru_list_pool
static_always_inline void nat_ip4_reass_get_frags_inline(nat_reass_ip4_t *reass, u32 **bi)
static void clib_dlist_addtail(dlist_elt_t *pool, u32 head_index, u32 new_index)
#define NAT_REASS_TIMEOUT_DEFAULT
static clib_error_t * show_nat_reass_command_fn(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
int(* nat_ip4_reass_walk_fn_t)(nat_reass_ip4_t *reass, void *ctx)
Callback invoked for each IPv4 reassembly during a walk; a non-zero return value stops the walk.
#define pool_free(p)
Free a pool.
u16 nat_reass_get_max_reass(u8 is_ip6)
Get maximum number of concurrent reassemblies.
static void clib_dlist_addhead(dlist_elt_t *pool, u32 head_index, u32 new_index)
int nat_reass_set(u32 timeout, u16 max_reass, u8 max_frag, u8 drop_frag, u8 is_ip6)
Set NAT virtual fragmentation reassembly configuration.
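A hypothetical configuration call based on the signature above; the parameter values are examples only, and a non-zero return is assumed to signal failure:

int rv = nat_reass_set (10 /* timeout, seconds */,
			1024 /* max_reass */,
			5 /* max_frag */,
			0 /* drop_frag */,
			0 /* is_ip6: IPv4 */);
if (rv)
  clib_warning ("nat_reass_set failed: %d", rv);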
u8 nat_reass_is_drop_frag(u8 is_ip6)
Get status of virtual fragmentation reassembly.
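Together with nat_reass_get_timeout, nat_reass_get_max_reass and nat_reass_get_max_frag listed elsewhere on this page, this getter lets a caller read back the active configuration. A small sketch, assuming vm is the vlib_main_t * available in a CLI handler:

u32 timeout = nat_reass_get_timeout (1 /* is_ip6 */);
u16 max_reass = nat_reass_get_max_reass (1);
u8 max_frag = nat_reass_get_max_frag (1);
u8 drop_frag = nat_reass_is_drop_frag (1);

vlib_cli_output (vm, "ip6 timeout %u max-reassemblies %u "
		 "max-fragments %u drop-fragments %u",
		 timeout, max_reass, max_frag, drop_frag);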
static u32 nat_reass_get_nbuckets(u8 is_ip6)
#define clib_warning(format, args...)
nat_reass_ip4_t * ip4_reass_pool
dlist_elt_t * ip6_frags_list_pool
clib_bihash_16_8_t ip4_reass_hash
#define VLIB_CLI_COMMAND(x,...)
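The surviving CLI fragments give the command paths and help strings; a registration sketch using this macro follows (the command variable names are illustrative, the handlers are the static functions listed on this page):

VLIB_CLI_COMMAND (nat_reass_command, static) = {
  .path = "nat virtual-reassembly",
  .short_help = "nat virtual-reassembly ip4|ip6 [max-reassemblies <n>] "
		"[max-fragments <n>] [timeout <sec>] [enable|disable]",
  .function = nat_reass_command_fn,
};

VLIB_CLI_COMMAND (show_nat_reass_command, static) = {
  .path = "show nat virtual-reassembly",
  .short_help = "show nat virtual-reassembly",
  .function = show_nat_reass_command_fn,
};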
static_always_inline nat_reass_ip4_t * nat_ip4_reass_lookup(nat_reass_ip4_key_t *k, f64 now)
#define pool_put_index(p, i)
Free pool element with given index.
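A short sketch of the pool lifecycle these macros imply, using the IPv4 reassembly pool from the member list (the surrounding snippet is illustrative, not taken from the source):

nat_reass_main_t *srm = &nat_reass_main;
nat_reass_ip4_t *reass;
u32 reass_index;

pool_get (srm->ip4_reass_pool, reass);		/* allocate an element */
reass_index = reass - srm->ip4_reass_pool;	/* stable index into the pool */
/* ... initialize *reass ... */

reass = pool_elt_at_index (srm->ip4_reass_pool, reass_index);	/* index -> pointer */
pool_put_index (srm->ip4_reass_pool, reass_index);	/* return it to the pool */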
nat_reass_main_t nat_reass_main
static void clib_dlist_remove(dlist_elt_t *pool, u32 index)
static_always_inline nat_reass_ip6_t * nat_ip6_reass_lookup(nat_reass_ip6_key_t *k, f64 now)
nat_reass_ip6_t * nat_ip6_reass_find_or_create(ip6_address_t src, ip6_address_t dst, u32 frag_id, u8 proto, u8 reset_timeout, u32 **bi_to_drop)
Find or create reassembly.
void nat_ip6_reass_walk(nat_ip6_reass_walk_fn_t fn, void *ctx)
Walk IPv6 reassemblies.
nat_reass_ip4_t * nat_ip4_reass_find_or_create(ip4_address_t src, ip4_address_t dst, u16 frag_id, u8 proto, u8 reset_timeout, u32 **bi_to_drop)
Find or create reassembly.
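A datapath sketch tying the IPv4 calls together, under the signatures listed on this page. The ip4 header pointer, the buffer index bi, and the drop/translate actions are placeholders supplied by the hypothetical caller; the sess_index check assumes the field stays at ~0 until a NAT session is created, as the surviving fragments suggest:

u32 *bi_to_drop = 0;
nat_reass_ip4_t *reass;

reass = nat_ip4_reass_find_or_create (ip4->src_address, ip4->dst_address,
				      ip4->fragment_id, ip4->protocol,
				      1 /* reset_timeout */, &bi_to_drop);
if (!reass)
  {
    /* no reassembly could be found, created or recycled: drop */
  }
else if (reass->sess_index == (u32) ~ 0)
  {
    /* no NAT session yet: cache this fragment for later */
    if (nat_ip4_reass_add_fragment (reass, bi) < 0)
      {
	/* per-reassembly fragment limit reached: drop */
      }
  }
else
  {
    /* session exists: drain fragments cached before it was created */
    u32 *cached = 0, *cbi;
    nat_ip4_reass_get_frags (reass, &cached);
    vec_foreach (cbi, cached)
      {
	/* translate and forward buffer index *cbi */
      }
    vec_free (cached);
  }
vec_free (bi_to_drop);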
static vlib_thread_main_t * vlib_get_thread_main()
#define NAT_REASS_HT_LOAD_FACTOR
void nat_ip4_reass_get_frags(nat_reass_ip4_t *reass, u32 **bi)
Get cached fragments.
NAT plugin virtual fragmentation reassembly.
clib_bihash_48_8_t ip6_reass_hash
clib_spinlock_t ip6_reass_lock
void vlib_cli_output(vlib_main_t *vm, char *fmt,...)
static_always_inline void clib_spinlock_lock_if_init(clib_spinlock_t *p)
clib_spinlock_t ip4_reass_lock
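The locks guard the pools and hash tables; the *_if_init variants are no-ops unless clib_spinlock_init was called, so single-worker setups stay lock-free. The pattern, sketched:

nat_reass_main_t *srm = &nat_reass_main;

clib_spinlock_lock_if_init (&srm->ip4_reass_lock);
/* ... access ip4_reass_pool / ip4_reass_hash / LRU lists ... */
clib_spinlock_unlock_if_init (&srm->ip4_reass_lock);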
static u32 clib_dlist_remove_head(dlist_elt_t *pool, u32 head_index)
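Taken together, the dlist helpers implement the LRU policy visible in the surviving fragments: move a reassembly to the tail of the LRU list when it is touched, and recycle the head (oldest) entry when the pool is exhausted. A sketch, assuming reass is a pool element obtained earlier and a head-element index field named ip4_reass_head_index in nat_reass_main_t (that field name is not in the member list above and is only illustrative):

nat_reass_main_t *srm = &nat_reass_main;

/* hit: refresh the reassembly's LRU position */
clib_dlist_remove (srm->ip4_reass_lru_list_pool, reass->lru_list_index);
clib_dlist_addtail (srm->ip4_reass_lru_list_pool,
		    srm->ip4_reass_head_index, reass->lru_list_index);

/* pool full: recycle the least recently used reassembly */
u32 oldest_index =
  clib_dlist_remove_head (srm->ip4_reass_lru_list_pool,
			  srm->ip4_reass_head_index);
ASSERT (oldest_index != ~0);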