#define MSEC_PER_SEC 1000
#define IP4_REASS_TIMEOUT_DEFAULT_MS 100
#define IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 10000 // 10 seconds default
#define IP4_REASS_MAX_REASSEMBLIES_DEFAULT 1024
#define IP4_REASS_HT_LOAD_FACTOR (0.75)

#define IP4_REASS_DEBUG_BUFFERS 0
#if IP4_REASS_DEBUG_BUFFERS
#define IP4_REASS_DEBUG_BUFFER(bi, what)               \
  do                                                   \
    {                                                  \
      u32 _bi = bi;                                    \
      printf (#what "buffer %u", _bi);                 \
      vlib_buffer_t *_b = vlib_get_buffer (vm, _bi);   \
      while (_b->flags & VLIB_BUFFER_NEXT_PRESENT)     \
        {                                              \
          _bi = _b->next_buffer;                       \
          printf ("[%u]", _bi);                        \
          _b = vlib_get_buffer (vm, _bi);              \
        }                                              \
    }                                                  \
  while (0)
#else
#define IP4_REASS_DEBUG_BUFFER(...)
#endif

return vnb->ip.reass.range_first - vnb->ip.reass.fragment_first;

ASSERT (vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first);

return clib_min (vnb->ip.reass.range_last, vnb->ip.reass.fragment_last) -

ASSERT (vnb->ip.reass.range_last > vnb->ip.reass.fragment_first);
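These buffer helpers reduce to interval arithmetic over inclusive byte ranges: a fragment contributes only the bytes inside its accepted range. Below is a minimal standalone sketch of the same computation; the helper names are illustrative (not VPP functions), and the "+ 1" completion of the truncated return statement above is an assumption.

#include <stdint.h>

/* offset of usable data inside the fragment: bytes trimmed off the front */
static inline uint32_t
range_data_offset (uint32_t range_first, uint32_t fragment_first)
{
  return range_first - fragment_first;
}

/* usable data length: clip the range end to the fragment end,
   then count inclusive bytes from the start of the accepted range */
static inline uint16_t
range_data_len (uint32_t range_first, uint32_t range_last,
                uint32_t fragment_last)
{
  uint32_t last = range_last < fragment_last ? range_last : fragment_last;
  return (uint16_t) (last - range_first + 1);
}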
s = format (s, "first bi: %u, data len: %u, ip/fragment[%u, %u]",

ip4_reass_trace_operation_e action, u32 size_diff)

b->flags &= ~VLIB_BUFFER_IS_TRACED;

printf ("%.*s\n", vec_len (s), s);
clib_bihash_add_del_24_8 (&rm->hash, &kv, 0);

while (~0 != range_bi)

if (b->flags & VLIB_BUFFER_NEXT_PRESENT)

b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;

range_bi = range_vnb->ip.reass.next_range_bi;

if (!clib_bihash_search_24_8 (&rm->hash, &kv, &value))

memset (reass, 0, sizeof (*reass));

if (clib_bihash_add_del_24_8 (&rm->hash, &kv, 1))
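The ip4_reass_find_or_create() fragments above follow the usual bihash search-then-insert pattern. A hedged sketch of that pattern in isolation; the helper name and the meaning of kv->value are illustrative, only the bihash calls themselves are taken from the listing.

#include <vppinfra/bihash_24_8.h>

static int
lookup_or_add (clib_bihash_24_8_t * hash, clib_bihash_kv_24_8_t * kv)
{
  clib_bihash_kv_24_8_t value;
  if (!clib_bihash_search_24_8 (hash, kv, &value))
    {
      /* hit: an entry for this key already exists */
      kv->value = value.value;
      return 0;
    }
  /* miss: insert the caller-prepared key/value; third argument 1 == add */
  return clib_bihash_add_del_24_8 (hash, kv, 1);
}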
u32 * error0, u32 ** vec_drop_compress, u32 ** vec_drop_overlap, bool is_feature)

u32 total_length = 0;

u32 tmp_bi = sub_chain_bi;

vec_add1 (*vec_drop_compress, tmp_bi);

tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;

last_b->flags |= VLIB_BUFFER_NEXT_PRESENT;

if (keep_data <= tmp->current_length)

vec_add1 (*vec_drop_overlap, tmp_bi);

if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)

while (~0 != sub_chain_bi);

last_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;

first_b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;

if (b->flags & VLIB_BUFFER_NEXT_PRESENT)

printf ("%.*s\n", vec_len (s), s);

*error0 = IP4_ERROR_NONE;
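ip4_reass_finalize() works on buffer chains linked through next_buffer and VLIB_BUFFER_NEXT_PRESENT. The sketch below shows the basic chain walk that a helper such as ip4_reass_get_buffer_chain_length() needs; the function name is illustrative, the APIs used are the ones listed further down.

#include <vlib/vlib.h>

/* follow the chain and sum the payload bytes of every buffer in it */
static u32
buffer_chain_length_sketch (vlib_main_t * vm, vlib_buffer_t * b)
{
  u32 len = 0;
  while (1)
    {
      len += b->current_length;
      if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;
      b = vlib_get_buffer (vm, b->next_buffer);
    }
  return len;
}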
u32 prev_range_bi, u32 new_next_bi)

if (~0 != prev_range_bi)

new_next_vnb->ip.reass.next_range_bi = prev_vnb->ip.reass.next_range_bi;
prev_vnb->ip.reass.next_range_bi = new_next_bi;

new_next_vnb->ip.reass.next_range_bi = reass->first_bi;
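ip4_reass_insert_range_in_chain() is a plain singly-linked-list insertion over buffer indices, with ~0 standing in for "no element". A generic sketch of the same splice; node_t and the array-based storage are illustrative, not VPP structures.

#include <stdint.h>

typedef struct { uint32_t next; } node_t;

static void
insert_after (node_t * nodes, uint32_t * head, uint32_t prev, uint32_t new_idx)
{
  if ((uint32_t) ~0 != prev)
    {
      /* splice into the middle of the chain, right after prev */
      nodes[new_idx].next = nodes[prev].next;
      nodes[prev].next = new_idx;
    }
  else
    {
      /* no predecessor: the new element becomes the head */
      nodes[new_idx].next = *head;
      *head = new_idx;
    }
}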
u32 ** vec_drop_overlap,

if (~0 != prev_range_bi)

ASSERT (prev_vnb->ip.reass.next_range_bi == discard_bi);
prev_vnb->ip.reass.next_range_bi = discard_vnb->ip.reass.next_range_bi;

reass->first_bi = discard_vnb->ip.reass.next_range_bi;

vec_add1 (*vec_drop_overlap, discard_bi);

if (discard_b->flags & VLIB_BUFFER_NEXT_PRESENT)

discard_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
u32 * error0, u32 ** vec_drop_overlap, u32 ** vec_drop_compress, bool is_feature)

u32 fragment_first = fvnb->ip.reass.fragment_first =

u32 fragment_length =

u32 fragment_last = fvnb->ip.reass.fragment_last =
  fragment_first + fragment_length - 1;

u32 prev_range_bi = ~0;

fvnb->ip.reass.range_first = fragment_first;
fvnb->ip.reass.range_last = fragment_last;
fvnb->ip.reass.next_range_bi = ~0;

fvnb->ip.reass.estimated_mtu);

while (~0 != candidate_range_bi)

if (fragment_first > candidate_vnb->ip.reass.range_last)

prev_range_bi = candidate_range_bi;
candidate_range_bi = candidate_vnb->ip.reass.next_range_bi;
if (candidate_vnb->ip.reass.range_last < fragment_last &&
    ~0 == candidate_range_bi)

prev_range_bi, *bi0);

if (fragment_last < candidate_vnb->ip.reass.range_first)

if (fragment_first >= candidate_vnb->ip.reass.range_first &&
    fragment_last <= candidate_vnb->ip.reass.range_last)

int discard_candidate = 0;
if (fragment_first < candidate_vnb->ip.reass.range_first)

fragment_last - candidate_vnb->ip.reass.range_first + 1;

candidate_vnb->ip.reass.range_first += overlap;

prev_range_bi, *bi0);

discard_candidate = 1;

else if (fragment_last > candidate_vnb->ip.reass.range_last)

candidate_vnb->ip.reass.range_last - fragment_first + 1;

fvnb->ip.reass.range_first += overlap;
if (~0 != candidate_vnb->ip.reass.next_range_bi)

prev_range_bi = candidate_range_bi;

candidate_vnb->ip.reass.next_range_bi;

discard_candidate = 1;

discard_candidate = 1;

if (discard_candidate)

u32 next_range_bi = candidate_vnb->ip.reass.next_range_bi;

vec_drop_overlap, reass,

if (~0 != next_range_bi)

candidate_range_bi = next_range_bi;

prev_range_bi, *bi0);

vec_drop_compress, vec_drop_overlap, is_feature);

*error0 = IP4_ERROR_REASS_DUPLICATE_FRAGMENT;
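The overlap handling in ip4_reass_update() is inclusive-interval arithmetic: when the new fragment starts before an existing range, the shared byte count is fragment_last - range_first + 1, and one side is trimmed by that amount. A small illustrative helper with worked numbers (not part of the VPP code):

#include <stdint.h>

/* bytes shared by two inclusive byte ranges, 0 if they are disjoint */
static uint32_t
overlap_bytes (uint32_t a_first, uint32_t a_last,
               uint32_t b_first, uint32_t b_last)
{
  uint32_t lo = a_first > b_first ? a_first : b_first;
  uint32_t hi = a_last < b_last ? a_last : b_last;
  return hi >= lo ? hi - lo + 1 : 0;
}

/* e.g. a new fragment [0, 1199] against an existing range [800, 1599]:
   overlap_bytes (0, 1199, 800, 1599) == 400, matching
   fragment_last - range_first + 1 above; range_first += overlap then
   shrinks the existing range to [1200, 1599]. */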
u32 n_left_from, n_left_to_next, *to_next, next_index;

static u32 *vec_drop_timeout = NULL;
static u32 *vec_drop_overlap = NULL;
static u32 *vec_drop_compress = NULL;
while (n_left_from > 0 || vec_len (vec_drop_timeout) > 0 ||
       vec_len (vec_drop_overlap) > 0 || vec_len (vec_drop_compress) > 0)

while (vec_len (vec_drop_timeout) > 0 && n_left_to_next > 0)

while (vec_len (vec_drop_overlap) > 0 && n_left_to_next > 0)

b->error = node->errors[IP4_ERROR_REASS_DUPLICATE_FRAGMENT];

while (vec_len (vec_drop_compress) > 0 && n_left_to_next > 0)

while (n_left_from > 0 && n_left_to_next > 0)

u32 error0 = IP4_ERROR_NONE;

&error0, &vec_drop_overlap, &vec_drop_compress, is_feature);

error0 = IP4_ERROR_REASS_LIMIT_REACHED;

if (is_feature && IP4_ERROR_NONE == error0)

n_left_to_next, bi0, next0);
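The drain loops above follow the standard vlib enqueue pattern built from vlib_get_next_frame, vlib_validate_buffer_enqueue_x1 and vlib_put_next_frame (all listed further down). A hedged sketch of one such loop, written as an in-context fragment that reuses the surrounding declarations; IP4_REASSEMBLY_NEXT_DROP and the choice of error counter are assumptions, not taken from the listing.

vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
while (vec_len (vec_drop_overlap) > 0 && n_left_to_next > 0)
  {
    u32 bi = vec_pop (vec_drop_overlap);
    vlib_buffer_t *b = vlib_get_buffer (vm, bi);
    b->error = node->errors[IP4_ERROR_REASS_DUPLICATE_FRAGMENT];
    to_next[0] = bi;
    to_next += 1;
    n_left_to_next -= 1;
    u32 next0 = IP4_REASSEMBLY_NEXT_DROP;       /* assumed drop next index */
    vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                     n_left_to_next, bi, next0);
  }
vlib_put_next_frame (vm, node, next_index, n_left_to_next);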
#define _(sym, string) string,

.name = "ip4-reassembly",
.vector_size = sizeof (u32),
.n_errors = ARRAY_LEN (ip4_reassembly_error_strings),

.name = "ip4-reassembly-feature",
.vector_size = sizeof (u32),
.n_errors = ARRAY_LEN (ip4_reassembly_error_strings),

.arc_name = "ip4-unicast",
.node_name = "ip4-reassembly-feature",
for (i = 0; i < 31; i++)
  if ((1 << i) >= nbuckets)

if (clib_bihash_add_del_24_8 (ctx->new_hash, kv, 1))

u32 expire_walk_interval_ms)

u32 expire_walk_interval_ms)

expire_walk_interval_ms);

if (ip4_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)

clib_bihash_24_8_t new_hash;
memset (&new_hash, 0, sizeof (new_hash));

clib_bihash_init_24_8 (&new_hash, "ip4-reass", new_nbuckets,
                       new_nbuckets * 1024);
clib_bihash_foreach_key_value_pair_24_8 (&ip4_reass_main.hash,

clib_bihash_free_24_8 (&new_hash);

clib_bihash_free_24_8 (&ip4_reass_main.hash);

sizeof (ip4_reass_main.hash));
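ip4_reass_get_nbuckets() presumably sizes the hash from the configured maximum and IP4_REASS_HT_LOAD_FACTOR, and the loop above rounds that up to a power of two. A standalone sketch with worked numbers; the helper name and the division by the load factor are assumptions, the rounding loop and the constant come from the listing.

static u32
nbuckets_sketch (u32 max_reassemblies)
{
  /* 1024 / 0.75 ~= 1365, rounded up to the next power of two: 2048 */
  u32 nbuckets = (u32) (max_reassemblies / IP4_REASS_HT_LOAD_FACTOR);
  int i;
  for (i = 0; i < 31; i++)
    if ((1u << i) >= nbuckets)
      return 1u << i;
  return 1u << 31;
}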
u32 * expire_walk_interval_ms)

clib_bihash_init_24_8 (&rm->hash, "ip4-reass", nbuckets, nbuckets * 1024);
uword event_type, *event_data = 0;

int *pool_indexes_to_free = NULL;

uword thread_index = 0;

for (thread_index = 0; thread_index < nthreads; ++thread_index)

reass = pool_elt_at_index (rt->pool, index);
if (now > reass->last_heard + rm->timeout)

vec_add1 (pool_indexes_to_free, index);

b->flags &= ~VLIB_BUFFER_IS_TRACED;

ASSERT (rt->buffers_n >= (after - before));
rt->buffers_n -= (after - before);

while (vec_len (vec_drop_timeout) > 0)

int trace_frame = 0;
while (vec_len (vec_drop_timeout) > 0 && n_left_to_next > 0)

b->flags &= ~VLIB_BUFFER_IS_TRACED;

n_left_to_next -= 1;

_vec_len (event_data) = 0;
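ip4_reass_walk_expired() is a vlib process node: it sleeps for expire_walk_interval_ms, drains any pending events, sweeps the per-thread pools for reassemblies whose last_heard + timeout has passed, and resets the event vector (the _vec_len (event_data) = 0 line above). A compressed sketch of that skeleton; the function name is illustrative and the sweep itself is elided.

static uword
expire_walk_sketch (vlib_main_t * vm, vlib_node_runtime_t * node,
                    vlib_frame_t * f)
{
  ip4_reass_main_t *rm = &ip4_reass_main;
  uword event_type, *event_data = 0;

  while (1)
    {
      vlib_process_wait_for_event_or_clock (vm,
                                            (f64) rm->expire_walk_interval_ms /
                                            MSEC_PER_SEC);
      event_type = vlib_process_get_events (vm, &event_data);
      (void) event_type;
      /* ... walk per-thread pools, collect and drop timed-out reassemblies ... */
      vec_reset_length (event_data);
    }
  return 0;
}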
.name = "ip4-reassembly-expire-walk",

s = format (s, "xx_id: %u, src: %U, dst: %U, frag_id: %u, proto: %u",

s = format (s, "ID: %lu, key: %U\n first_bi: %u, data_len: %u, "
            "last_packet_octet: %u, trace_op_counter: %u\n",

s = format (s, " #%03u: range: [%u, %u], bi: %u, off: %d, len: %u, "
            "fragment[%u, %u]\n",
            counter, vnb->ip.reass.range_first,
            vnb->ip.reass.range_last, bi,

vnb->ip.reass.fragment_first, vnb->ip.reass.fragment_last);

if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
bool details = false;

u32 sum_reass_n = 0;
u64 sum_buffers_n = 0;

for (thread_index = 0; thread_index < nthreads; ++thread_index)

vlib_cli_output (vm, "%U", format_ip4_reass, vm, reass);

(long unsigned) sum_reass_n);

"Maximum configured concurrent IP4 reassemblies per worker-thread: %lu\n",

(long unsigned) sum_buffers_n);

.path = "show ip4-reassembly",
.short_help = "show ip4-reassembly [details]",

sw_if_index, enable_disable, 0, 0);
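Per the feature registration above (.arc_name "ip4-unicast", .node_name "ip4-reassembly-feature") and the trailing sw_if_index, enable_disable, 0, 0 arguments, enabling reassembly on an interface is a single feature-arc call. A sketch of what ip4_reass_enable_disable() presumably reduces to (the _sketch name marks it as illustrative):

vnet_api_error_t
ip4_reass_enable_disable_sketch (u32 sw_if_index, u8 enable_disable)
{
  return vnet_feature_enable_disable ("ip4-unicast", "ip4-reassembly-feature",
                                      sw_if_index, enable_disable, 0, 0);
}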
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
static void ip4_reass_insert_range_in_chain(vlib_main_t *vm, ip4_reass_main_t *rm, ip4_reass_per_thread_t *rt, ip4_reass_t *reass, u32 prev_range_bi, u32 new_next_bi)
static vlib_cli_command_t trace
(constructor) VLIB_CLI_COMMAND (trace)
static uword ip4_reassembly_feature(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
static_always_inline void clib_spinlock_unlock(clib_spinlock_t *p)
static_always_inline void clib_spinlock_lock(clib_spinlock_t *p)
VLIB_NODE_FUNCTION_MULTIARCH(ip4_reass_node, ip4_reassembly)
static vlib_node_registration_t ip4_reass_expire_node
(constructor) VLIB_REGISTER_NODE (ip4_reass_expire_node)
static f64 vlib_process_wait_for_event_or_clock(vlib_main_t *vm, f64 dt)
Suspend a cooperative multi-tasking thread Waits for an event, or for the indicated number of seconds...
static uword ip4_reassembly(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
vnet_main_t * vnet_get_main(void)
u32 ip4_reass_expire_node_idx
u8 * format_ip4_reass_trace(u8 *s, va_list *args)
#define pool_alloc(P, N)
Allocate N more free elements to pool (unspecified alignment).
static void vlib_buffer_chain_compress(vlib_main_t *vm, vlib_buffer_t *first, u32 **discard_vector)
compress buffer chain in a way where the first buffer is at least VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SI...
static int ip4_header_bytes(ip4_header_t *i)
#define IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS
static char * ip4_reassembly_error_strings[]
static f64 vlib_time_now(vlib_main_t *vm)
static vlib_node_registration_t ip4_reass_node
(constructor) VLIB_REGISTER_NODE (ip4_reass_node)
ip4_reass_t * ip4_reass_find_or_create(vlib_main_t *vm, ip4_reass_main_t *rm, ip4_reass_per_thread_t *rt, ip4_reass_key_t *k, u32 **vec_drop_timeout)
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
static u32 ip4_reass_buffer_get_data_offset_no_check(vlib_buffer_t *b)
vlib_error_t * errors
Vector of errors for this node.
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
ip4_reass_per_thread_t * per_thread_data
static void ip4_reass_trace_details(vlib_main_t *vm, u32 bi, ip4_reass_range_trace_t *trace)
#define vec_pop(V)
Returns last element of a vector and decrements its length.
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
vlib_trace_header_t ** trace_buffer_pool
#define pool_foreach(VAR, POOL, BODY)
Iterate through pool.
#define VLIB_INIT_FUNCTION(x)
static uword vlib_process_get_events(vlib_main_t *vm, uword **data_vector)
Return the first event type which has occurred and a vector of per-event data of that type...
static u32 ip4_reass_get_buffer_chain_length(vlib_main_t *vm, vlib_buffer_t *b)
static int ip4_get_fragment_offset(ip4_header_t *i)
static void ip4_reass_free(ip4_reass_main_t *rm, ip4_reass_per_thread_t *rt, ip4_reass_t *reass)
vlib_frame_t * vlib_get_frame_to_node(vlib_main_t *vm, u32 to_node_index)
static u16 ip4_reass_buffer_get_data_len(vlib_buffer_t *b)
u32 expire_walk_interval_ms
static void ip4_reass_set_params(u32 timeout_ms, u32 max_reassemblies, u32 expire_walk_interval_ms)
struct vnet_buffer_opaque_t::@57::@59 ip
static uword ip4_reass_walk_expired(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *f)
static void clib_spinlock_init(clib_spinlock_t *p)
static void ip4_reass_on_timeout(vlib_main_t *vm, ip4_reass_main_t *rm, ip4_reass_t *reass, u32 **vec_drop_timeout)
static uword ip4_reassembly_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, bool is_feature)
static u8 * format_ip4_reass_key(u8 *s, va_list *args)
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
vnet_api_error_t ip4_reass_set(u32 timeout_ms, u32 max_reassemblies, u32 expire_walk_interval_ms)
set ip4 reassembly configuration
u16 current_length
Nbytes between current data and the end of this buffer.
static void vlib_process_signal_event(vlib_main_t *vm, uword node_index, uword type_opaque, uword data)
#define IP4_REASS_DEBUG_BUFFER(...)
void vlib_put_frame_to_node(vlib_main_t *vm, u32 to_node_index, vlib_frame_t *f)
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
vnet_api_error_t ip4_reass_get(u32 *timeout_ms, u32 *max_reassemblies, u32 *expire_walk_interval_ms)
get ip4 reassembly configuration
#define pool_put(P, E)
Free an object E in pool P.
static vlib_cli_command_t show_ip4_reassembly_cmd
(constructor) VLIB_CLI_COMMAND (show_ip4_reassembly_cmd)
ip4_reass_main_t ip4_reass_main
static_always_inline void vnet_feature_next(u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
static u16 ip4_reass_buffer_get_data_len_no_check(vlib_buffer_t *b)
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
static u8 * format_ip4_reass_range_trace(u8 *s, va_list *args)
vlib_error_t error
Error code for buffers to be enqueued to error handler.
static void ip4_rehash_cb(clib_bihash_kv_24_8_t *kv, void *_ctx)
ip4_reass_trace_operation_e
#define foreach_ip4_error
static clib_error_t * ip4_reass_init_function(vlib_main_t *vm)
#define VLIB_REGISTER_NODE(x,...)
#define vec_free(V)
Free vector's memory (no header).
ip4_reass_range_trace_t trace_range
static void ip4_reass_remove_range_from_chain(vlib_main_t *vm, vlib_node_runtime_t *node, ip4_reass_main_t *rm, u32 **vec_drop_overlap, ip4_reass_t *reass, u32 prev_range_bi, u32 discard_bi)
#define clib_warning(format, args...)
VNET_FEATURE_INIT(ip4_reassembly_feature, static)
#define clib_memcpy(a, b, c)
static void ip4_reass_add_trace(vlib_main_t *vm, vlib_node_runtime_t *node, ip4_reass_main_t *rm, ip4_reass_t *reass, u32 bi, ip4_reass_trace_operation_e action, u32 size_diff)
#define pool_is_free_index(P, I)
Use free bitmap to query whether given index is free.
static clib_error_t * show_ip4_reass(vlib_main_t *vm, unformat_input_t *input, CLIB_UNUSED(vlib_cli_command_t *lmd))
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
vlib_node_t * vlib_get_node_by_name(vlib_main_t *vm, u8 *name)
#define IP4_REASS_MAX_REASSEMBLIES_DEFAULT
#define IP4_REASS_HT_LOAD_FACTOR
vnet_api_error_t ip4_reass_enable_disable(u32 sw_if_index, u8 enable_disable)
#define VLIB_CLI_COMMAND(x,...)
clib_bihash_24_8_t * new_hash
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
static void ip4_reass_update(vlib_main_t *vm, vlib_node_runtime_t *node, ip4_reass_main_t *rm, ip4_reass_per_thread_t *rt, ip4_reass_t *reass, u32 *bi0, u32 *next0, u32 *error0, u32 **vec_drop_overlap, u32 **vec_drop_compress, bool is_feature)
static u32 ip4_reass_get_nbuckets()
static u32 ip4_reass_buffer_get_data_offset(vlib_buffer_t *b)
u32 next_buffer
Next buffer for this linked-list of buffers.
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
vlib_trace_main_t trace_main
static int ip4_get_fragment_more(ip4_header_t *i)
#define IP4_REASS_TIMEOUT_DEFAULT_MS
#define VNET_FEATURES(...)
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
static int ip4_get_fragment_offset_bytes(ip4_header_t *i)
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
struct _vlib_node_registration vlib_node_registration_t
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
static void ip4_reass_finalize(vlib_main_t *vm, vlib_node_runtime_t *node, ip4_reass_main_t *rm, ip4_reass_per_thread_t *rt, ip4_reass_t *reass, u32 *bi0, u32 *next0, u32 *error0, u32 **vec_drop_compress, u32 **vec_drop_overlap, bool is_feature)
uword os_get_nthreads(void)
ip4_reass_trace_operation_e action
static_always_inline uword os_get_thread_index(void)
static u8 * format_ip4_reass(u8 *s, va_list *args)
static u32 vlib_num_workers()
#define vec_foreach(var, vec)
Vector iterator.
#define pool_foreach_index(i, v, body)
Iterate pool by index.
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
static vlib_node_registration_t ip4_reass_node_feature
(constructor) VLIB_REGISTER_NODE (ip4_reass_node_feature)
void vlib_cli_output(vlib_main_t *vm, char *fmt,...)
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
u32 trace_index
Specifies index into trace buffer if VLIB_PACKET_IS_TRACED flag is set.
static u16 ip4_header_checksum(ip4_header_t *i)
int vnet_feature_enable_disable(const char *arc_name, const char *node_name, u32 sw_if_index, int enable_disable, void *feature_config, u32 n_feature_config_bytes)
CLIB vectors are ubiquitous dynamically resized arrays with user-defined "headers".
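For readers unfamiliar with the vppinfra containers referenced throughout (vec_add1, vec_len, vec_pop, vec_free, pool_get, pool_elt_at_index, pool_put), a minimal usage sketch; elt_t and the function are illustrative only.

#include <vppinfra/vec.h>
#include <vppinfra/pool.h>

typedef struct { u32 id; } elt_t;

static void
vec_pool_sketch (void)
{
  u32 *indexes = 0;             /* a NULL pointer is a valid empty vector */
  vec_add1 (indexes, 42);       /* append one element */
  while (vec_len (indexes) > 0)
    {
      u32 bi = vec_pop (indexes);       /* take the last element */
      (void) bi;
    }
  vec_free (indexes);

  elt_t *pool = 0, *e;
  pool_get (pool, e);           /* allocate an element, e points into the pool */
  e->id = 7;
  e = pool_elt_at_index (pool, e - pool);       /* look it up again by index */
  pool_put (pool, e);           /* return it to the pool's free list */
}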