38 for (i = 0; i < 31; i++)
39 if ((1 << i) >= nbuckets)
55 reass->frags_per_reass_list_head_index)) !=
74 reass->frags_per_reass_list_head_index)) !=
103 nbuckets, nbuckets * 1024);
123 nbuckets, nbuckets * 1024);
183 nat_reass_ip4_t *reass;
203 nat_reass_ip4_t *reass = 0;
221 u16 frag_id,
u8 proto,
u8 reset_timeout,
225 nat_reass_ip4_t *reass = 0;
230 u32 oldest_index, elt_index;
245 reass->last_heard = now;
247 reass->lru_list_index);
250 reass->lru_list_index);
267 ASSERT (oldest_index != ~0);
283 kv.
key[0] = reass->key.as_u64[0];
284 kv.
key[1] = reass->key.as_u64[1];
309 reass->frags_per_reass_list_head_index =
312 reass->frags_per_reass_list_head_index);
316 reass->key.as_u64[0] = kv.
key[0] = k.
as_u64[0];
317 reass->key.as_u64[1] = kv.
key[1] = k.
as_u64[1];
319 reass->sess_index = (
u32) ~ 0;
320 reass->thread_index = (
u32) ~ 0;
321 reass->last_heard = now;
359 reass->frags_per_reass_list_head_index, elt_index);
382 nat_reass_ip4_t *reass;
389 if (now < reass->last_heard + (f64) srm->ip4_timeout)
403 nat_reass_ip6_t *reass;
425 u32 frag_id,
u8 proto,
u8 reset_timeout,
429 nat_reass_ip6_t *reass = 0;
434 u32 oldest_index, elt_index;
452 reass->last_heard = now;
454 reass->lru_list_index);
457 reass->lru_list_index);
474 ASSERT (oldest_index != ~0);
513 reass->frags_per_reass_list_head_index =
516 reass->frags_per_reass_list_head_index);
520 reass->key.as_u64[0] = kv.
key[0] = k.
as_u64[0];
521 reass->key.as_u64[1] = kv.
key[1] = k.
as_u64[1];
522 reass->key.as_u64[2] = kv.
key[2] = k.
as_u64[2];
523 reass->key.as_u64[3] = kv.
key[3] = k.
as_u64[3];
524 reass->key.as_u64[4] = kv.
key[4] = k.
as_u64[4];
525 reass->key.as_u64[5] = kv.
key[5] = k.
as_u64[5];
527 reass->sess_index = (
u32) ~ 0;
528 reass->last_heard = now;
565 reass->frags_per_reass_list_head_index, elt_index);
588 nat_reass_ip6_t *reass;
595 if (now < reass->last_heard + (f64) srm->ip4_timeout)
611 u32 nbuckets, head_index;
629 clib_bihash_init_16_8 (&srm->
ip4_reass_hash,
"nat-ip4-reass", nbuckets,
650 clib_bihash_init_48_8 (&srm->
ip6_reass_hash,
"nat-ip6-reass", nbuckets,
667 u32 timeout = 0, max_reass = 0, max_frag = 0;
668 u8 drop_frag = (
u8) ~ 0, is_ip6 = 0;
677 if (
unformat (line_input,
"max-reassemblies %u", &max_reass))
679 else if (
unformat (line_input,
"max-fragments %u", &max_frag))
681 else if (
unformat (line_input,
"timeout %u", &timeout))
683 else if (
unformat (line_input,
"enable"))
685 else if (
unformat (line_input,
"disable"))
687 else if (
unformat (line_input,
"ip4"))
689 else if (
unformat (line_input,
"ip6"))
705 if (drop_frag == (
u8) ~ 0)
732 clib_net_to_host_u16 (reass->key.frag_id), reass->frag_n);
746 clib_net_to_host_u32 (reass->key.frag_id), reass->frag_n);
755 vlib_cli_output (vm,
"NAT IPv4 virtual fragmentation reassembly is %s",
763 vlib_cli_output (vm,
"NAT IPv6 virtual fragmentation reassembly is %s",
777 .path =
"nat virtual-reassembly",
778 .short_help =
"nat virtual-reassembly ip4|ip6 [max-reassemblies <n>] " 779 "[max-fragments <n>] [timeout <sec>] [enable|disable]",
785 .path =
"show nat virtual-reassembly",
786 .short_help =
"show nat virtual-reassembly",
u32 nat_reass_get_timeout(u8 is_ip6)
Get reassembly timeout.
static void clib_dlist_init(dlist_elt_t *pool, u32 index)
vnet_main_t * vnet_get_main(void)
clib_error_t * nat_reass_init(vlib_main_t *vm)
Initialize NAT virtual fragmentation reassembly.
#define pool_alloc(P, N)
Allocate N more free elements to pool (unspecified alignment).
static f64 vlib_time_now(vlib_main_t *vm)
static int nat_ip6_reass_walk_cli(nat_reass_ip6_t *reass, void *ctx)
void nat_ip6_reass_get_frags(nat_reass_ip6_t *reass, u32 **bi)
Get cached fragments.
static_always_inline void clib_spinlock_unlock_if_init(clib_spinlock_t *p)
nat_reass_ip4_t * nat_ip4_reass_find(ip4_address_t src, ip4_address_t dst, u16 frag_id, u8 proto)
Find reassembly.
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
int(* nat_ip6_reass_walk_fn_t)(nat_reass_ip6_t *reass, void *ctx)
Callback function invoked when walking IPv6 reassemblies; a non-zero return value stops the walk.
void nat_ipfix_logging_max_fragments_ip6(u32 limit, ip6_address_t *src)
Generate maximum IPv6 fragments pending reassembly exceeded event.
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
void nat_ip4_reass_walk(nat_ip4_reass_walk_fn_t fn, void *ctx)
Walk IPv4 reassemblies.
static clib_error_t * nat_reass_command_fn(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
#define static_always_inline
#define pool_foreach(VAR, POOL, BODY)
Iterate through pool.
dlist_elt_t * ip4_frags_list_pool
dlist_elt_t * ip6_reass_lru_list_pool
static_always_inline void nat_ip6_reass_get_frags_inline(nat_reass_ip6_t *reass, u32 **bi)
#define NAT_MAX_REASS_DEAFULT
#define NAT_REASS_FLAG_MAX_FRAG_DROP
#define clib_error_return(e, args...)
nat_reass_ip6_t * ip6_reass_pool
static void clib_spinlock_init(clib_spinlock_t *p)
#define NAT_MAX_FRAG_DEFAULT
u8 nat_reass_get_max_frag(u8 is_ip6)
Get maximum number of fragments per reassembly.
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
static int nat_ip4_reass_walk_cli(nat_reass_ip4_t *reass, void *ctx)
dlist_elt_t * ip4_reass_lru_list_pool
static_always_inline void nat_ip4_reass_get_frags_inline(nat_reass_ip4_t *reass, u32 **bi)
static void clib_dlist_addtail(dlist_elt_t *pool, u32 head_index, u32 new_index)
#define NAT_REASS_TIMEOUT_DEFAULT
static clib_error_t * show_nat_reass_command_fn(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
int(* nat_ip4_reass_walk_fn_t)(nat_reass_ip4_t *reass, void *ctx)
Callback function invoked when walking IPv4 reassemblies; a non-zero return value stops the walk.
#define pool_free(p)
Free a pool.
u16 nat_reass_get_max_reass(u8 is_ip6)
Get maximum number of concurrent reassemblies.
static void clib_dlist_addhead(dlist_elt_t *pool, u32 head_index, u32 new_index)
int nat_reass_set(u32 timeout, u16 max_reass, u8 max_frag, u8 drop_frag, u8 is_ip6)
Set NAT virtual fragmentation reassembly configuration.
u8 nat_reass_is_drop_frag(u8 is_ip6)
Get status of virtual fragmentation reassembly.
static u32 nat_reass_get_nbuckets(u8 is_ip6)
int nat_ip6_reass_add_fragment(nat_reass_ip6_t *reass, u32 bi, u32 **bi_to_drop)
Cache fragment.
#define clib_warning(format, args...)
nat_reass_ip4_t * ip4_reass_pool
dlist_elt_t * ip6_frags_list_pool
clib_bihash_16_8_t ip4_reass_hash
#define VLIB_CLI_COMMAND(x,...)
static_always_inline nat_reass_ip4_t * nat_ip4_reass_lookup(nat_reass_ip4_key_t *k, f64 now)
#define pool_put_index(p, i)
Free pool element with given index.
void nat_ipfix_logging_max_fragments_ip4(u32 limit, ip4_address_t *src)
Generate maximum IPv4 fragments pending reassembly exceeded event.
nat_reass_main_t nat_reass_main
static void clib_dlist_remove(dlist_elt_t *pool, u32 index)
static_always_inline nat_reass_ip6_t * nat_ip6_reass_lookup(nat_reass_ip6_key_t *k, f64 now)
nat_reass_ip6_t * nat_ip6_reass_find_or_create(ip6_address_t src, ip6_address_t dst, u32 frag_id, u8 proto, u8 reset_timeout, u32 **bi_to_drop)
Find or create reassembly.
void nat_ip6_reass_walk(nat_ip6_reass_walk_fn_t fn, void *ctx)
Walk IPv6 reassemblies.
nat_reass_ip4_t * nat_ip4_reass_find_or_create(ip4_address_t src, ip4_address_t dst, u16 frag_id, u8 proto, u8 reset_timeout, u32 **bi_to_drop)
Find or create reassembly.
static vlib_thread_main_t * vlib_get_thread_main()
int nat_ip4_reass_add_fragment(nat_reass_ip4_t *reass, u32 bi, u32 **bi_to_drop)
Cache fragment.
#define NAT_REASS_HT_LOAD_FACTOR
void nat_ip4_reass_get_frags(nat_reass_ip4_t *reass, u32 **bi)
Get cached fragments.
NAT plugin virtual fragmentation reassembly.
clib_bihash_48_8_t ip6_reass_hash
clib_spinlock_t ip6_reass_lock
void vlib_cli_output(vlib_main_t *vm, char *fmt,...)
static_always_inline void clib_spinlock_lock_if_init(clib_spinlock_t *p)
clib_spinlock_t ip4_reass_lock
static u32 clib_dlist_remove_head(dlist_elt_t *pool, u32 head_index)