#define MSEC_PER_SEC 1000
#define IP6_FULL_REASS_TIMEOUT_DEFAULT_MS 100
#define IP6_FULL_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 10000	// 10 seconds default
#define IP6_FULL_REASS_MAX_REASSEMBLIES_DEFAULT 1024
#define IP6_FULL_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
#define IP6_FULL_REASS_HT_LOAD_FACTOR (0.75)

  return vnb->ip.reass.range_first - vnb->ip.reass.fragment_first;

  return clib_min (vnb->ip.reass.range_last, vnb->ip.reass.fragment_last) -
    (vnb->ip.reass.fragment_first +
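/* Standalone sketch (plain C, no VPP types) of the offset/length arithmetic
 * used by ip6_full_reass_buffer_get_data_offset / _get_data_len above.  The
 * field names mirror vnet_buffer()->ip.reass, but the struct here is purely
 * illustrative: each fragment buffer records the byte range it carries
 * (fragment_first/last) and the sub-range still in use after overlap trimming
 * (range_first/last). */
#include <stdio.h>
#include <stdint.h>

struct reass_range
{
  uint32_t fragment_first, fragment_last;	/* bytes carried by the fragment */
  uint32_t range_first, range_last;		/* bytes actually used from it */
};

static uint32_t
data_offset (const struct reass_range *r)
{
  return r->range_first - r->fragment_first;	/* bytes trimmed from the front */
}

static uint32_t
data_len (const struct reass_range *r)
{
  uint32_t last = r->range_last < r->fragment_last ? r->range_last : r->fragment_last;
  return last - (r->fragment_first + data_offset (r)) + 1;
}

int
main (void)
{
  /* fragment carries bytes 1024..2047, but only 1280..2047 are still needed */
  struct reass_range r = { 1024, 2047, 1280, 2047 };
  printf ("offset=%u len=%u\n", data_offset (&r), data_len (&r));	/* offset=256 len=768 */
  return 0;
}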
#ifndef CLIB_MARCH_VARIANT

  s = format (s, "first bi: %u, data len: %u, ip/fragment[%u, %u]",

      s = format (s, "\n%Uicmp-error - frag_len > 65535 %U",

      s = format (s, "\n%Uicmp-error - frag_len mod 8 != 0 %U",

      s = format (s, "\n%Uicmp-error - reassembly time exceeded",

			  ip6_frag_hdr_t * ip6_frag_header,
			  ip6_full_reass_trace_operation_e action,

  bool is_after_handoff = false;

      is_after_handoff = true;

      printf ("%.*s\n", vec_len (s), s);
  clib_bihash_add_del_48_8 (&rm->hash, &kv, 0);

  while (~0 != range_bi)

      if (b->flags & VLIB_BUFFER_NEXT_PRESENT)

	  b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;

      range_bi = range_vnb->ip.reass.next_range_bi;

  u32 n_left_to_next, *to_next, next_index;

      while (vec_len (to_free) > 0 && n_left_to_next > 0)

	  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)

	      b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;

				   ICMP6_time_exceeded_fragment_reassembly_time_exceeded,
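/* Sketch of the ICMP error path used when a reassembly that holds its first
 * fragment times out; `b` is assumed to be that first fragment's
 * vlib_buffer_t and the type/code constants are the ICMPv6 "time exceeded /
 * fragment reassembly time exceeded" values from the icmp6 headers.  The
 * expire walk further below then hands the buffer to the icmp error next node. */
icmp6_error_set_vnet_buffer (b, ICMP6_time_exceeded,
			     ICMP6_time_exceeded_fragment_reassembly_time_exceeded,
			     0);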
  if (!clib_bihash_search_48_8 (&rm->hash, &kv->kv, &kv->kv))

  int rv = clib_bihash_add_del_48_8 (&rm->hash, &kv->kv, 2);
			 u32 * error0, bool is_custom_app)

  *error0 = IP6_ERROR_NONE;
  ip6_frag_hdr_t *frag_hdr;
  u32 total_length = 0;
  u32 *vec_drop_compress = NULL;

      u32 tmp_bi = sub_chain_bi;

      if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
	  !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))

	  goto free_buffers_and_return;

      u32 trim_front = vnet_buffer (tmp)->ip.reass.ip6_frag_hdr_offset +

	  if (0 != ip6_full_reass_buffer_get_data_offset (tmp))

	      goto free_buffers_and_return;

	      goto free_buffers_and_return;

	      vec_add1 (vec_drop_compress, tmp_bi);

	      if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))

		  goto free_buffers_and_return;

	      tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;

	      last_b->flags |= VLIB_BUFFER_NEXT_PRESENT;

	      if (keep_data <= tmp->current_length)

	      if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))

		  goto free_buffers_and_return;

	  vec_add1 (vec_drop_compress, tmp_bi);

	  goto free_buffers_and_return;

      if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)

  while (~0 != sub_chain_bi);

      goto free_buffers_and_return;

  last_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;

  if (total_length < first_b->current_length)

      goto free_buffers_and_return;

  first_b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;

  ip6_ext_header_t *prev_hdr;

      prev_hdr->next_hdr = frag_hdr->next_hdr;

      if (!((u8 *) frag_hdr - (u8 *) ip == ip6_frag_hdr_offset))

	  goto free_buffers_and_return;

      memmove (frag_hdr, (u8 *) frag_hdr + sizeof (*frag_hdr),
	       sizeof (ip6_frag_hdr_t));

	  goto free_buffers_and_return;

  first_b->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;

      if (b->flags & VLIB_BUFFER_NEXT_PRESENT)

      printf ("%.*s\n", vec_len (s), s);

free_buffers_and_return:
				     u32 prev_range_bi, u32 new_next_bi)

  if (~0 != prev_range_bi)

      new_next_vnb->ip.reass.next_range_bi = prev_vnb->ip.reass.next_range_bi;
      prev_vnb->ip.reass.next_range_bi = new_next_bi;

      new_next_vnb->ip.reass.next_range_bi = reass->first_bi;
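/* Standalone sketch (plain C, compilable outside VPP) of the splice done by
 * ip6_full_reass_insert_range_in_chain above: fragments are kept as a singly
 * linked list of ranges ordered by offset, threaded through
 * vnet_buffer()->ip.reass.next_range_bi.  Buffer indices are modelled here as
 * indices into next[]; ~0 marks end-of-chain, as in the VPP code.  The real
 * function additionally updates the per-reassembly data length and counters. */
#include <stdio.h>
#include <stdint.h>

#define INVALID (~0u)

static uint32_t next[16];		/* stand-in for ip.reass.next_range_bi */
static uint32_t first_bi = INVALID;	/* stand-in for reass->first_bi */

static void
insert_range (uint32_t prev_bi, uint32_t new_bi)
{
  if (prev_bi != INVALID)
    {
      /* splice after an existing range */
      next[new_bi] = next[prev_bi];
      next[prev_bi] = new_bi;
    }
  else
    {
      /* new head of the chain */
      next[new_bi] = first_bi;
      first_bi = new_bi;
    }
}

int
main (void)
{
  insert_range (INVALID, 3);	/* chain: 3 */
  insert_range (3, 7);		/* chain: 3 -> 7 */
  insert_range (INVALID, 1);	/* chain: 1 -> 3 -> 7 */
  for (uint32_t bi = first_bi; bi != INVALID; bi = next[bi])
    printf ("%u ", bi);
  printf ("\n");
  return 0;
}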
			u32 * error0, ip6_frag_hdr_t * frag_hdr,
			bool is_custom_app, u32 * handoff_thread_idx)

  fvnb->ip.reass.ip6_frag_hdr_offset =

      fvnb->ip.reass.ip6_frag_hdr_offset == 0 ||

  u32 fragment_first = fvnb->ip.reass.fragment_first =

  u32 fragment_length =

    (fvnb->ip.reass.ip6_frag_hdr_offset + sizeof (*frag_hdr));
  u32 fragment_last = fvnb->ip.reass.fragment_last =
    fragment_first + fragment_length - 1;

  u32 prev_range_bi = ~0;
  fvnb->ip.reass.range_first = fragment_first;
  fvnb->ip.reass.range_last = fragment_last;
  fvnb->ip.reass.next_range_bi = ~0;

      goto check_if_done_maybe;

			   fvnb->ip.reass.estimated_mtu);

  while (~0 != candidate_range_bi)

      if (fragment_first > candidate_vnb->ip.reass.range_last)

	  prev_range_bi = candidate_range_bi;
	  candidate_range_bi = candidate_vnb->ip.reass.next_range_bi;
	  if (candidate_vnb->ip.reass.range_last < fragment_last &&
	      ~0 == candidate_range_bi)

					      prev_range_bi, *bi0);

      if (fragment_last < candidate_vnb->ip.reass.range_first)

					      prev_range_bi, *bi0);

      else if (fragment_first == candidate_vnb->ip.reass.range_first &&
	       fragment_last == candidate_vnb->ip.reass.range_last)

	  *error0 = IP6_ERROR_REASS_OVERLAPPING_FRAGMENT;

  ip6_full_reass_rc_t rc =

      *error0 = IP6_ERROR_REASS_DUPLICATE_FRAGMENT;
					    ip6_frag_hdr_t * frag_hdr)

  ip6_ext_header_t *tmp = (ip6_ext_header_t *) frag_hdr;

      if (IP_PROTOCOL_IP6_NONXT == tmp->next_hdr)

				   ICMP6_parameter_problem_first_fragment_has_incomplete_header_chain,

	  b->error = node->errors[IP6_ERROR_REASS_MISSING_UPPER];

				      ip6_frag_hdr_t * frag_hdr)

  u32 fragment_length =

    (vnb->ip.reass.ip6_frag_hdr_offset + sizeof (*frag_hdr));
  if (more_fragments && 0 != fragment_length % 8)

				   ICMP6_parameter_problem_erroneous_header_field,

				       ip6_frag_hdr_t * frag_hdr)

  u32 fragment_length =

    (vnb->ip.reass.ip6_frag_hdr_offset + sizeof (*frag_hdr));
  if (fragment_first + fragment_length > 65535)

				   ICMP6_parameter_problem_erroneous_header_field,
				   (u8 *) & frag_hdr->fragment_offset_and_more
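/* Standalone sketch of the two fragment sanity checks above (RFC 8200 §4.5):
 * every non-final fragment must carry a multiple of 8 octets, and the offset
 * plus length of any fragment must not imply a packet larger than 65535
 * octets.  The real functions additionally set an ICMPv6 parameter-problem
 * error on the buffer, pointing at the offending header field. */
#include <stdbool.h>
#include <stdint.h>

static bool
fragment_multiple_of_8_ok (bool more_fragments, uint32_t fragment_length)
{
  return !(more_fragments && (fragment_length % 8) != 0);
}

static bool
packet_size_lt_64k_ok (uint32_t fragment_first, uint32_t fragment_length)
{
  return fragment_first + fragment_length <= 65535;
}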
  u32 n_left_from, n_left_to_next, *to_next, next_index;

  while (n_left_from > 0)

      while (n_left_from > 0 && n_left_to_next > 0)

	  u32 error0 = IP6_ERROR_NONE;

	  ip6_frag_hdr_t *frag_hdr = NULL;
	  ip6_ext_header_t *prev_hdr;

				     IP_PROTOCOL_IPV6_FRAGMENTATION,

	    (u8 *) frag_hdr - (u8 *) ip0;

	      (node, b0, frag_hdr))

	      (vm, node, b0, frag_hdr)

	    (u64) frag_hdr->identification;

	      if (0 == fragment_first)

	      u32 handoff_thread_idx;

		  (vm, node, rm, rt, reass, &bi0, &next0, &error0,
		   frag_hdr, is_custom_app, &handoff_thread_idx))

					       IP6_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,

					       IP6_ERROR_REASS_NO_BUF, 1);

					       IP6_ERROR_REASS_INTERNAL_ERROR,

	      next0 = fvnb->ip.reass.error_next_index;

	      error0 = IP6_ERROR_REASS_LIMIT_REACHED;

	      n_left_to_next -= 1;

	      if (IP6_ERROR_NONE != error0)

					     reass.owner_thread_index);

	      else if (is_feature && IP6_ERROR_NONE == error0)

					       n_left_to_next, bi0, next0);

	      to_next[0] = icmp_bi;

	      n_left_to_next -= 1;

					       n_left_to_next, icmp_bi,
#define _(sym, string) string,

  .name = "ip6-full-reassembly",
  .vector_size = sizeof (u32),
  .n_errors = ARRAY_LEN (ip6_full_reassembly_error_strings),

  .name = "ip6-full-reassembly-feature",
  .vector_size = sizeof (u32),
  .n_errors = ARRAY_LEN (ip6_full_reassembly_error_strings),

  .arc_name = "ip6-unicast",
  .node_name = "ip6-full-reassembly-feature",
				  "ipsec6-input-feature"),
#ifndef CLIB_MARCH_VARIANT

  for (i = 0; i < 31; i++)
    if ((1 << i) >= nbuckets)

#ifndef CLIB_MARCH_VARIANT

  if (clib_bihash_add_del_48_8 (ctx->new_hash, kv, 1))

  return (BIHASH_WALK_CONTINUE);

			   u32 max_reassembly_length,
			   u32 expire_walk_interval_ms)

  ip6_full_reass_main.max_reass_n = max_reassemblies;

		     u32 max_reassembly_length, u32 expire_walk_interval_ms)

			      max_reassembly_length, expire_walk_interval_ms);

  if (ip6_full_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)

      clib_bihash_48_8_t new_hash;

      clib_bihash_init_48_8 (&new_hash, "ip6-full-reass", new_nbuckets,
			     new_nbuckets * 1024);
      clib_bihash_foreach_key_value_pair_48_8 (&ip6_full_reass_main.hash,

	  clib_bihash_free_48_8 (&new_hash);

	  clib_bihash_free_48_8 (&ip6_full_reass_main.hash);

			    sizeof (ip6_full_reass_main.hash));
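/* Standalone sketch of the bucket sizing done by ip6_full_reass_get_nbuckets
 * above: divide the configured maximum number of reassemblies by the hash
 * table load factor and round up to the next power of two. */
#include <stdio.h>
#include <stdint.h>

#define HT_LOAD_FACTOR 0.75	/* IP6_FULL_REASS_HT_LOAD_FACTOR */

static uint32_t
get_nbuckets (uint32_t max_reassemblies)
{
  uint32_t nbuckets = (uint32_t) (max_reassemblies / HT_LOAD_FACTOR);
  for (int i = 0; i < 31; i++)
    if ((1u << i) >= nbuckets)
      return 1u << i;
  return nbuckets;
}

int
main (void)
{
  /* 1024 / 0.75 = 1365 -> rounded up to 2048 */
  printf ("%u\n", get_nbuckets (1024));
  return 0;
}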
		    u32 * max_reassembly_length,
		    u32 * expire_walk_interval_ms)

  *timeout_ms = ip6_full_reass_main.timeout_ms;
  *max_reassemblies = ip6_full_reass_main.max_reass_n;

  clib_bihash_init_48_8 (&rm->hash, "ip6-full-reass", nbuckets,
  uword event_type, *event_data = 0;

  int *pool_indexes_to_free = NULL;

  uword thread_index = 0;

  u32 *vec_icmp_bi = NULL;
  for (thread_index = 0; thread_index < nthreads; ++thread_index)

	reass = pool_elt_at_index (rt->pool, index);
	if (now > reass->last_heard + rm->timeout)

	    vec_add1 (pool_indexes_to_free, index);

  while (vec_len (vec_icmp_bi) > 0)

      int trace_frame = 0;
      while (vec_len (vec_icmp_bi) > 0 && n_left_to_next > 0)

	  n_left_to_next -= 1;

  _vec_len (event_data) = 0;

  .name = "ip6-full-reassembly-expire-walk",
  s = format (s, "xx_id: %u, src: %U, dst: %U, frag_id: %u, proto: %u",

  s = format (s, "ID: %lu, key: %U\n first_bi: %u, data_len: %u, "
	      "last_packet_octet: %u, trace_op_counter: %u\n",

      s = format (s, " #%03u: range: [%u, %u], bi: %u, off: %d, len: %u, "
		  "fragment[%u, %u]\n",
		  counter, vnb->ip.reass.range_first,
		  vnb->ip.reass.range_last, bi,

		  vnb->ip.reass.fragment_first, vnb->ip.reass.fragment_last);
      if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
  bool details = false;

  u32 sum_reass_n = 0;
  u64 sum_buffers_n = 0;

  for (thread_index = 0; thread_index < nthreads; ++thread_index)

	    vlib_cli_output (vm, "%U", format_ip6_full_reass, vm, reass);

		   (long unsigned) sum_reass_n);

		   "Maximum configured concurrent full IP6 reassemblies per worker-thread: %lu\n",

		   "Maximum configured full IP6 reassembly timeout: %lums\n",

		   "Maximum configured full IP6 reassembly expire walk interval: %lums\n",

		   (long unsigned) sum_buffers_n);

  .path = "show ip6-full-reassembly",
  .short_help = "show ip6-full-reassembly [details]",

#ifndef CLIB_MARCH_VARIANT

				       "ip6-full-reassembly-feature",
				       sw_if_index, enable_disable, 0, 0);
#define foreach_ip6_full_reassembly_handoff_error \
_(CONGESTION_DROP, "congestion drop")

#define _(sym,str) IP6_FULL_REASSEMBLY_HANDOFF_ERROR_##sym,

#define _(sym,string) string,

  format (s, "ip6-full-reassembly-handoff: next-worker %d",

  u32 n_enq, n_left_from, *from;

  ti = thread_indices;

  while (n_left_from > 0)

      ti[0] = vnet_buffer (b[0])->ip.reass.owner_thread_index;

			 && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))

  if (n_enq < frame->n_vectors)

			       IP6_FULL_REASSEMBLY_HANDOFF_ERROR_CONGESTION_DROP,

  .name = "ip6-full-reassembly-handoff",
  .vector_size = sizeof (u32),
  .n_errors = ARRAY_LEN(ip6_full_reassembly_handoff_error_strings),

  .name = "ip6-full-reass-feature-hoff",
  .vector_size = sizeof (u32),
  .n_errors = ARRAY_LEN(ip6_full_reassembly_handoff_error_strings),

#ifndef CLIB_MARCH_VARIANT

				       "ip6-full-reassembly-feature",
				       sw_if_index, 1, 0, 0);

				       "ip6-full-reassembly-feature",
				       sw_if_index, 0, 0, 0);
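/* Simplified sketch of the refcounted enable/disable wrapper whose two
 * vnet_feature_enable_disable calls appear just above.  The real function
 * keeps one counter per sw_if_index in rm->feature_use_refcount_per_intf;
 * here a single caller-provided counter stands in for it. */
static int
enable_disable_with_refcnt (u32 * refcount, u32 sw_if_index, int is_enable)
{
  if (is_enable)
    {
      if ((*refcount)++ == 0)
	return vnet_feature_enable_disable ("ip6-unicast",
					    "ip6-full-reassembly-feature",
					    sw_if_index, 1, 0, 0);
    }
  else
    {
      if (--(*refcount) == 0)
	return vnet_feature_enable_disable ("ip6-unicast",
					    "ip6-full-reassembly-feature",
					    sw_if_index, 0, 0, 0);
    }
  return 0;
}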
#define IP6_FULL_REASS_MAX_REASSEMBLIES_DEFAULT
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
vnet_api_error_t ip6_full_reass_get(u32 *timeout_ms, u32 *max_reassemblies, u32 *max_reassembly_length, u32 *expire_walk_interval_ms)
get ip6 reassembly configuration
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
static bool ip6_full_reass_verify_fragment_multiple_8(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_buffer_t *b, ip6_frag_hdr_t *frag_hdr)
static vlib_cli_command_t trace
(constructor) VLIB_CLI_COMMAND (trace)
vlib_node_registration_t ip6_full_reassembly_handoff_node
(constructor) VLIB_REGISTER_NODE (ip6_full_reassembly_handoff_node)
static_always_inline void clib_spinlock_unlock(clib_spinlock_t *p)
static_always_inline void clib_spinlock_lock(clib_spinlock_t *p)
static u32 ip6_full_reass_buffer_get_data_offset(vlib_buffer_t *b)
ip6_full_reass_trace_operation_e action
static f64 vlib_process_wait_for_event_or_clock(vlib_main_t *vm, f64 dt)
Suspend a cooperative multi-tasking thread. Waits for an event, or for the indicated number of seconds...
void ip6_register_protocol(u32 protocol, u32 node_index)
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers. Frees the entire buffer chain for each buffer.
ip6_full_reass_main_t ip6_full_reass_main
static void ip6_full_reass_insert_range_in_chain(vlib_main_t *vm, ip6_full_reass_main_t *rm, ip6_full_reass_per_thread_t *rt, ip6_full_reass_t *reass, u32 prev_range_bi, u32 new_next_bi)
#define pool_alloc(P, N)
Allocate N more free elements to pool (unspecified alignment).
static ip6_full_reass_rc_t ip6_full_reass_update(vlib_main_t *vm, vlib_node_runtime_t *node, ip6_full_reass_main_t *rm, ip6_full_reass_per_thread_t *rt, ip6_full_reass_t *reass, u32 *bi0, u32 *next0, u32 *error0, ip6_frag_hdr_t *frag_hdr, bool is_custom_app, u32 *handoff_thread_idx)
#define clib_memcpy_fast(a, b, c)
clib_bihash_48_8_t * new_hash
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
static f64 vlib_time_now(vlib_main_t *vm)
u32 * feature_use_refcount_per_intf
u32 memory_owner_thread_index
u32 vlib_frame_queue_main_init(u32 node_index, u32 frame_queue_nelts)
u16 current_length
Nbytes between current data and the end of this buffer.
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
static ip6_full_reass_t * ip6_full_reass_find_or_create(vlib_main_t *vm, vlib_node_runtime_t *node, ip6_full_reass_main_t *rm, ip6_full_reass_per_thread_t *rt, ip6_full_reass_kv_t *kv, u32 *icmp_bi, u8 *do_handoff)
static void * ip6_ext_next_header(ip6_ext_header_t *ext_hdr)
static uword ip6_full_reassembly_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, bool is_feature, bool is_custom_app)
static void ip6_full_reass_free_ctx(ip6_full_reass_per_thread_t *rt, ip6_full_reass_t *reass)
#define VLIB_NODE_FN(node)
u32 ip6_full_reass_expire_node_idx
u32 memory_owner_thread_index
vlib_error_t * errors
Vector of errors for this node.
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
#define vec_pop(V)
Returns last element of a vector and decrements its length.
vlib_node_registration_t ip6_full_reass_expire_node
(constructor) VLIB_REGISTER_NODE (ip6_full_reass_expire_node)
static bool ip6_full_reass_verify_upper_layer_present(vlib_node_runtime_t *node, vlib_buffer_t *b, ip6_frag_hdr_t *frag_hdr)
#define vec_reset_length(v)
Reset vector length to zero. NULL-pointer tolerant.
#define clib_memcpy(d, s, n)
static u8 * format_ip6_full_reass_trace(u8 *s, va_list *args)
#define pool_foreach(VAR, POOL, BODY)
Iterate through pool.
vl_api_interface_index_t sw_if_index
#define VLIB_INIT_FUNCTION(x)
static uword vlib_process_get_events(vlib_main_t *vm, uword **data_vector)
Return the first event type which has occurred and a vector of per-event data of that type...
static char * ip6_full_reassembly_error_strings[]
vlib_frame_t * vlib_get_frame_to_node(vlib_main_t *vm, u32 to_node_index)
u32 expire_walk_interval_ms
vlib_node_registration_t ip6_full_reass_node
(constructor) VLIB_REGISTER_NODE (ip6_full_reass_node)
static void ip6_full_reass_free(ip6_full_reass_main_t *rm, ip6_full_reass_per_thread_t *rt, ip6_full_reass_t *reass)
#define vlib_call_init_function(vm, x)
static void * ip6_ext_header_find(vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip6_header, u8 header_type, ip6_ext_header_t **prev_ext_header)
static u32 ip6_full_reass_get_nbuckets()
void icmp6_error_set_vnet_buffer(vlib_buffer_t *b, u8 type, u8 code, u32 data)
static void clib_spinlock_init(clib_spinlock_t *p)
#define ip6_frag_hdr_more(hdr)
vlib_error_t error
Error code for buffers to be enqueued to error handler.
static vlib_cli_command_t show_ip6_full_reassembly_cmd
(constructor) VLIB_CLI_COMMAND (show_ip6_full_reassembly_cmd)
#define IP6_FULL_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT
static u32 vlib_buffer_chain_linearize(vlib_main_t *vm, vlib_buffer_t *b)
static ip6_full_reass_rc_t ip6_full_reass_finalize(vlib_main_t *vm, vlib_node_runtime_t *node, ip6_full_reass_main_t *rm, ip6_full_reass_per_thread_t *rt, ip6_full_reass_t *reass, u32 *bi0, u32 *next0, u32 *error0, bool is_custom_app)
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
static clib_error_t * show_ip6_full_reass(vlib_main_t *vm, unformat_input_t *input, CLIB_UNUSED(vlib_cli_command_t *lmd))
vlib_node_t * vlib_get_node_by_name(vlib_main_t *vm, u8 *name)
ip6_full_reassembly_handoff_error_t
static void vlib_process_signal_event(vlib_main_t *vm, uword node_index, uword type_opaque, uword data)
static u8 * format_ip6_full_reassembly_handoff_trace(u8 *s, va_list *args)
void vlib_put_frame_to_node(vlib_main_t *vm, u32 to_node_index, vlib_frame_t *f)
ip6_full_reass_trace_operation_e
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
#define pool_put(P, E)
Free an object E in pool P.
static int ip6_rehash_cb(clib_bihash_kv_48_8_t *kv, void *_ctx)
static void ip6_full_reass_on_timeout(vlib_main_t *vm, vlib_node_runtime_t *node, ip6_full_reass_main_t *rm, ip6_full_reass_t *reass, u32 *icmp_bi)
static char * ip6_full_reassembly_handoff_error_strings[]
u32 node_index
Node index.
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
#define VLIB_REGISTER_NODE(x,...)
clib_error_t * ip_main_init(vlib_main_t *vm)
VNET_FEATURE_INIT(ip6_full_reassembly_feature, static)
#define IP6_FULL_REASS_HT_LOAD_FACTOR
ip6_full_reass_range_trace_t trace_range
#define vec_free(V)
Free vector's memory (no header).
static_always_inline void vnet_feature_next(u32 *next0, vlib_buffer_t *b0)
#define clib_warning(format, args...)
ip6_full_reass_per_thread_t * per_thread_data
ip6_frag_hdr_t ip6_frag_header
static u32 vlib_buffer_get_trace_thread(vlib_buffer_t *b)
Extract the thread id from a trace handle.
#define ip6_frag_hdr_offset_bytes(hdr)
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
vlib_main_t vlib_node_runtime_t * node
static u8 ip6_ext_hdr(u8 nexthdr)
vlib_node_registration_t ip6_full_reass_node_feature
(constructor) VLIB_REGISTER_NODE (ip6_full_reass_node_feature)
#define VLIB_CLI_COMMAND(x,...)
u32 fq_index
Worker handoff.
#define ip6_frag_hdr_offset(hdr)
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
#define IP6_FULL_REASS_TIMEOUT_DEFAULT_MS
void vlib_cli_output(vlib_main_t *vm, char *fmt,...)
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
static u16 ip6_full_reass_buffer_get_data_len(vlib_buffer_t *b)
static uword ip6_full_reassembly_handoff_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, bool is_feature)
#define VNET_FEATURES(...)
struct vnet_buffer_opaque_t::@150::@152 ip
static void ip6_full_reass_add_trace(vlib_main_t *vm, vlib_node_runtime_t *node, ip6_full_reass_main_t *rm, ip6_full_reass_t *reass, u32 bi, ip6_frag_hdr_t *ip6_frag_header, ip6_full_reass_trace_operation_e action, u32 thread_id_to)
static u8 * format_ip6_full_reass(u8 *s, va_list *args)
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
#define vec_elt(v, i)
Get vector value at index i.
void clib_bihash_copied(void *dst, void *src)
static void ip6_full_reass_drop_all(vlib_main_t *vm, vlib_node_runtime_t *node, ip6_full_reass_main_t *rm, ip6_full_reass_t *reass)
static void ip6_full_reass_trace_details(vlib_main_t *vm, u32 bi, ip6_full_reass_range_trace_t *trace)
vl_api_mac_event_action_t action
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
u32 next_buffer
Next buffer for this linked-list of buffers.
#define foreach_ip6_full_reassembly_handoff_error
VLIB buffer representation.
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
vnet_api_error_t ip6_full_reass_set(u32 timeout_ms, u32 max_reassemblies, u32 max_reassembly_length, u32 expire_walk_interval_ms)
set ip6 reassembly configuration
#define IP6_FULL_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS
#define foreach_ip6_error
vlib_node_registration_t ip6_full_reassembly_feature_handoff_node
(constructor) VLIB_REGISTER_NODE (ip6_full_reassembly_feature_handoff_node)
static_always_inline u32 vlib_buffer_enqueue_to_thread(vlib_main_t *vm, u32 frame_queue_index, u32 *buffer_indices, u16 *thread_indices, u32 n_packets, int drop_on_congestion)
static void ip6_full_reass_set_params(u32 timeout_ms, u32 max_reassemblies, u32 max_reassembly_length, u32 expire_walk_interval_ms)
static bool ip6_full_reass_verify_packet_size_lt_64k(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_buffer_t *b, ip6_frag_hdr_t *frag_hdr)
static u32 vlib_num_workers()
#define vec_foreach(var, vec)
Vector iterator.
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
vnet_api_error_t ip6_full_reass_enable_disable(u32 sw_if_index, u8 enable_disable)
u16 flags
Copy of main node flags.
static u8 * format_ip6_full_reass_key(u8 *s, va_list *args)
int ip6_full_reass_enable_disable_with_refcnt(u32 sw_if_index, int is_enable)
#define pool_foreach_index(i, v, body)
Iterate pool by index.
static_always_inline void vlib_get_buffers(vlib_main_t *vm, u32 *bi, vlib_buffer_t **b, int count)
Translate array of buffer indices into buffer pointers.
#define VLIB_NODE_FLAG_TRACE
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
static clib_error_t * ip6_full_reass_init_function(vlib_main_t *vm)
static uword ip6_full_reass_walk_expired(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *f)
u32 * fib_index_by_sw_if_index
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
int vnet_feature_enable_disable(const char *arc_name, const char *node_name, u32 sw_if_index, int enable_disable, void *feature_config, u32 n_feature_config_bytes)
CLIB vectors are ubiquitous dynamically resized arrays with user-defined "headers".
static u8 * format_ip6_full_reass_range_trace(u8 *s, va_list *args)