38 for (i = 0; i < 31; i++)
39 if ((1 << i) >= nbuckets)
55 reass->frags_per_reass_list_head_index)) !=
74 reass->frags_per_reass_list_head_index)) !=
103 nbuckets, nbuckets * 1024);
123 nbuckets, nbuckets * 1024);
183 nat_reass_ip4_t *reass;
203 nat_reass_ip4_t *reass = 0;
224 nat_reass_ip4_t *reass = 0;
247 reass->frags_per_reass_list_head_index =
250 reass->frags_per_reass_list_head_index);
256 reass->key.as_u64[0] = kv.
key[0] = k.
as_u64[0];
257 reass->key.as_u64[1] = kv.
key[1] = k.
as_u64[1];
259 reass->sess_index = (
u32) ~ 0;
260 reass->thread_index = (
u32) ~ 0;
261 reass->last_heard = now;
275 u16 frag_id,
u8 proto,
u8 reset_timeout,
279 nat_reass_ip4_t *reass = 0;
284 u32 oldest_index, elt_index;
299 reass->last_heard = now;
301 reass->lru_list_index);
304 reass->lru_list_index);
321 ASSERT (oldest_index != ~0);
337 kv.
key[0] = reass->key.as_u64[0];
338 kv.
key[1] = reass->key.as_u64[1];
363 reass->frags_per_reass_list_head_index =
366 reass->frags_per_reass_list_head_index);
370 reass->key.as_u64[0] = kv.
key[0] = k.
as_u64[0];
371 reass->key.as_u64[1] = kv.
key[1] = k.
as_u64[1];
373 reass->sess_index = (
u32) ~ 0;
374 reass->thread_index = (
u32) ~ 0;
375 reass->last_heard = now;
393 u32 bi,
u32 ** bi_to_drop)
415 reass->frags_per_reass_list_head_index, elt_index);
438 nat_reass_ip4_t *reass;
445 if (now < reass->last_heard + (f64) srm->ip4_timeout)
459 nat_reass_ip6_t *reass;
481 u32 frag_id,
u8 proto,
u8 reset_timeout,
485 nat_reass_ip6_t *reass = 0;
490 u32 oldest_index, elt_index;
508 reass->last_heard = now;
510 reass->lru_list_index);
513 reass->lru_list_index);
530 ASSERT (oldest_index != ~0);
569 reass->frags_per_reass_list_head_index =
572 reass->frags_per_reass_list_head_index);
576 reass->key.as_u64[0] = kv.
key[0] = k.
as_u64[0];
577 reass->key.as_u64[1] = kv.
key[1] = k.
as_u64[1];
578 reass->key.as_u64[2] = kv.
key[2] = k.
as_u64[2];
579 reass->key.as_u64[3] = kv.
key[3] = k.
as_u64[3];
580 reass->key.as_u64[4] = kv.
key[4] = k.
as_u64[4];
581 reass->key.as_u64[5] = kv.
key[5] = k.
as_u64[5];
583 reass->sess_index = (
u32) ~ 0;
584 reass->last_heard = now;
599 u32 bi,
u32 ** bi_to_drop)
621 reass->frags_per_reass_list_head_index, elt_index);
644 nat_reass_ip6_t *reass;
651 if (now < reass->last_heard + (f64) srm->ip4_timeout)
667 u32 nbuckets, head_index;
685 clib_bihash_init_16_8 (&srm->
ip4_reass_hash,
"nat-ip4-reass", nbuckets,
706 clib_bihash_init_48_8 (&srm->
ip6_reass_hash,
"nat-ip6-reass", nbuckets,
723 u32 timeout = 0, max_reass = 0, max_frag = 0;
724 u8 drop_frag = (
u8) ~ 0, is_ip6 = 0;
733 if (
unformat (line_input,
"max-reassemblies %u", &max_reass))
735 else if (
unformat (line_input,
"max-fragments %u", &max_frag))
737 else if (
unformat (line_input,
"timeout %u", &timeout))
739 else if (
unformat (line_input,
"enable"))
741 else if (
unformat (line_input,
"disable"))
743 else if (
unformat (line_input,
"ip4"))
745 else if (
unformat (line_input,
"ip6"))
761 if (drop_frag == (
u8) ~ 0)
784 const char *classify_next_str;
787 flags_str =
format (flags_str,
"MAX_FRAG_DROP");
791 flags_str =
format (flags_str,
" | ");
792 flags_str =
format (flags_str,
"CLASSIFY_ED_CONTINUE");
797 flags_str =
format (flags_str,
" | ");
798 flags_str =
format (flags_str,
"CLASSIFY_ED_DONT_TRANSLATE");
801 flags_str =
format (flags_str,
"0");
802 flags_str =
format (flags_str,
"%c", 0);
804 switch (reass->classify_next)
807 classify_next_str =
"NONE";
810 classify_next_str =
"IN2OUT";
813 classify_next_str =
"OUT2IN";
816 classify_next_str =
"invalid value";
820 "flags %s classify_next %s",
824 clib_net_to_host_u16 (reass->key.frag_id), reass->frag_n,
825 flags_str, classify_next_str);
841 clib_net_to_host_u32 (reass->key.frag_id), reass->frag_n);
850 vlib_cli_output (vm,
"NAT IPv4 virtual fragmentation reassembly is %s",
858 vlib_cli_output (vm,
"NAT IPv6 virtual fragmentation reassembly is %s",
872 .path =
"nat virtual-reassembly",
873 .short_help =
"nat virtual-reassembly ip4|ip6 [max-reassemblies <n>] " 874 "[max-fragments <n>] [timeout <sec>] [enable|disable]",
880 .path =
"show nat virtual-reassembly",
881 .short_help =
"show nat virtual-reassembly",
u32 nat_reass_get_timeout(u8 is_ip6)
Get reassembly timeout.
int nat_ip4_reass_add_fragment(u32 thread_index, nat_reass_ip4_t *reass, u32 bi, u32 **bi_to_drop)
Cache fragment.
static void clib_dlist_init(dlist_elt_t *pool, u32 index)
vnet_main_t * vnet_get_main(void)
clib_error_t * nat_reass_init(vlib_main_t *vm)
Initialize NAT virtual fragmentation reassembly.
#define pool_alloc(P, N)
Allocate N more free elements to pool (unspecified alignment).
static f64 vlib_time_now(vlib_main_t *vm)
static int nat_ip6_reass_walk_cli(nat_reass_ip6_t *reass, void *ctx)
void nat_ip6_reass_get_frags(nat_reass_ip6_t *reass, u32 **bi)
Get cached fragments.
static_always_inline void clib_spinlock_unlock_if_init(clib_spinlock_t *p)
nat_reass_ip4_t * nat_ip4_reass_find(ip4_address_t src, ip4_address_t dst, u16 frag_id, u8 proto)
Find reassembly.
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
#define nat_log_warn(...)
int nat_ip6_reass_add_fragment(u32 thread_index, nat_reass_ip6_t *reass, u32 bi, u32 **bi_to_drop)
Cache fragment.
int(* nat_ip6_reass_walk_fn_t)(nat_reass_ip6_t *reass, void *ctx)
Callback function invoked when walking IPv6 reassemblies; a non-zero return value stops the walk.
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
void nat_ip4_reass_walk(nat_ip4_reass_walk_fn_t fn, void *ctx)
Walk IPv4 reassemblies.
static clib_error_t * nat_reass_command_fn(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
#define static_always_inline
#define pool_foreach(VAR, POOL, BODY)
Iterate through pool.
dlist_elt_t * ip4_frags_list_pool
dlist_elt_t * ip6_reass_lru_list_pool
static_always_inline void nat_ip6_reass_get_frags_inline(nat_reass_ip6_t *reass, u32 **bi)
#define NAT_MAX_REASS_DEAFULT
#define NAT_REASS_FLAG_MAX_FRAG_DROP
#define clib_error_return(e, args...)
#define NAT_REASS_FLAG_ED_DONT_TRANSLATE
void nat_ipfix_logging_max_fragments_ip4(u32 thread_index, u32 limit, ip4_address_t *src)
Generate maximum IPv4 fragments pending reassembly exceeded event.
#define NAT_REASS_FLAG_CLASSIFY_ED_CONTINUE
nat_reass_ip6_t * ip6_reass_pool
static void clib_spinlock_init(clib_spinlock_t *p)
#define NAT_MAX_FRAG_DEFAULT
u8 nat_reass_get_max_frag(u8 is_ip6)
Get maximum number of fragments per reassembly.
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
static int nat_ip4_reass_walk_cli(nat_reass_ip4_t *reass, void *ctx)
dlist_elt_t * ip4_reass_lru_list_pool
static_always_inline void nat_ip4_reass_get_frags_inline(nat_reass_ip4_t *reass, u32 **bi)
static void clib_dlist_addtail(dlist_elt_t *pool, u32 head_index, u32 new_index)
void nat_ipfix_logging_max_fragments_ip6(u32 thread_index, u32 limit, ip6_address_t *src)
Generate maximum IPv6 fragments pending reassembly exceeded event.
#define NAT_REASS_TIMEOUT_DEFAULT
static clib_error_t * show_nat_reass_command_fn(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
int(* nat_ip4_reass_walk_fn_t)(nat_reass_ip4_t *reass, void *ctx)
Callback function invoked when walking IPv4 reassemblies; a non-zero return value stops the walk.
#define pool_free(p)
Free a pool.
u16 nat_reass_get_max_reass(u8 is_ip6)
Get maximum number of concurrent reassemblies.
static void clib_dlist_addhead(dlist_elt_t *pool, u32 head_index, u32 new_index)
int nat_reass_set(u32 timeout, u16 max_reass, u8 max_frag, u8 drop_frag, u8 is_ip6)
Set NAT virtual fragmentation reassembly configuration.
u8 nat_reass_is_drop_frag(u8 is_ip6)
Get status of virtual fragmentation reassembly.
static u32 nat_reass_get_nbuckets(u8 is_ip6)
#define vec_free(V)
Free vector's memory (no header).
nat_reass_ip4_t * ip4_reass_pool
dlist_elt_t * ip6_frags_list_pool
clib_bihash_16_8_t ip4_reass_hash
#define VLIB_CLI_COMMAND(x,...)
static_always_inline nat_reass_ip4_t * nat_ip4_reass_lookup(nat_reass_ip4_key_t *k, f64 now)
#define pool_put_index(p, i)
Free pool element with given index.
nat_reass_main_t nat_reass_main
static void clib_dlist_remove(dlist_elt_t *pool, u32 index)
static_always_inline nat_reass_ip6_t * nat_ip6_reass_lookup(nat_reass_ip6_key_t *k, f64 now)
nat_reass_ip6_t * nat_ip6_reass_find_or_create(ip6_address_t src, ip6_address_t dst, u32 frag_id, u8 proto, u8 reset_timeout, u32 **bi_to_drop)
Find or create reassembly.
void nat_ip6_reass_walk(nat_ip6_reass_walk_fn_t fn, void *ctx)
Walk IPv6 reassemblies.
nat_reass_ip4_t * nat_ip4_reass_find_or_create(ip4_address_t src, ip4_address_t dst, u16 frag_id, u8 proto, u8 reset_timeout, u32 **bi_to_drop)
Find or create reassembly.
static vlib_thread_main_t * vlib_get_thread_main()
#define NAT_REASS_HT_LOAD_FACTOR
void nat_ip4_reass_get_frags(nat_reass_ip4_t *reass, u32 **bi)
Get cached fragments.
NAT plugin virtual fragmentation reassembly.
clib_bihash_48_8_t ip6_reass_hash
clib_spinlock_t ip6_reass_lock
void vlib_cli_output(vlib_main_t *vm, char *fmt,...)
static_always_inline void clib_spinlock_lock_if_init(clib_spinlock_t *p)
clib_spinlock_t ip4_reass_lock
nat_reass_ip4_t * nat_ip4_reass_create(ip4_address_t src, ip4_address_t dst, u16 frag_id, u8 proto)
Create reassembly.
static u32 clib_dlist_remove_head(dlist_elt_t *pool, u32 head_index)