#ifndef __included_ioam_cache_h__
#define __included_ioam_cache_h__

#define MAX_CACHE_ENTRIES 4096
#define MAX_CACHE_TS_ENTRIES 1048576

#define IOAM_CACHE_TABLE_DEFAULT_HASH_NUM_BUCKETS (4 * 1024)
#define IOAM_CACHE_TABLE_DEFAULT_HASH_MEMORY_SIZE (2 << 20)

/* from ip6_compute_flow_hash_ext (): fold the ports into the hash,
 * honouring the reverse-src-dst flag */
  c = (flow_hash_config & IP_FLOW_HASH_REVERSE_SRC_DST) ?
    ((t1 << 16) | t2) : ((t2 << 16) | t1);
#define HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE_ID 30
#define HBH_OPTION_TYPE_IOAM_E2E_CACHE_ID 31

/* E2E ID option: carries the IP6 address of the caching node. */
typedef CLIB_PACKED (struct
		     {
		       ip6_hop_by_hop_option_t hdr; u8 e2e_type;
		       u8 reserved[5];
		       ip6_address_t id;
		     }) ioam_e2e_id_option_t;

/* E2E cache option: carries the pool id/index of the cached entry. */
typedef CLIB_PACKED (struct
		     {
		       ip6_hop_by_hop_option_t hdr; u8 e2e_type;
		       u8 pool_id;
		       u32 pool_index;
		     }) ioam_e2e_cache_option_t;
#define IOAM_E2E_ID_OPTION_RND ((sizeof(ioam_e2e_id_option_t) + 7) & ~7)
#define IOAM_E2E_ID_HBH_EXT_LEN (IOAM_E2E_ID_OPTION_RND >> 3)
#define IOAM_E2E_CACHE_OPTION_RND ((sizeof(ioam_e2e_cache_option_t) + 7) & ~7)
#define IOAM_E2E_CACHE_HBH_EXT_LEN (IOAM_E2E_CACHE_OPTION_RND >> 3)

/* ioam_e2e_id_rewrite_handler (): write the caching node's address into
 * the E2E ID option. */
static inline void
ioam_e2e_id_rewrite_handler (ioam_e2e_id_option_t * e2e_option,
			     ip6_address_t * address)
{
  e2e_option->id.as_u64[0] = address->as_u64[0];
  e2e_option->id.as_u64[1] = address->as_u64[1];
}
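The _RND macros pad each option to an 8-octet multiple and the _HBH_EXT_LEN macros express that padding in the 8-octet units the hop-by-hop header length field counts in. A minimal, library-free sketch of that arithmetic; the 24-octet size is what the packed layout above implies and is an assumption here:

#include <stdio.h>

/* Stand-in size: with the packed layout above, the E2E ID option is
 * 2 (hdr) + 1 (e2e_type) + 5 (reserved) + 16 (ip6 address) = 24 octets. */
#define SIZEOF_E2E_ID_OPTION 24u

#define OPTION_RND(sz)  (((sz) + 7) & ~7u)	/* pad to an 8-octet multiple */
#define HBH_EXT_LEN(sz) (OPTION_RND (sz) >> 3)	/* same, in 8-octet units */

int
main (void)
{
  printf ("padded option size = %u octets\n", OPTION_RND (SIZEOF_E2E_ID_OPTION));
  printf ("hbh length increment = %u (8-octet units)\n",
	  HBH_EXT_LEN (SIZEOF_E2E_ID_OPTION));
  return 0;
}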
  /* 8-byte bihash key: flow hash in the upper 32 bits, TCP sequence
   * number in the lower 32 bits. */
  kv.key = (u64) flow_hash << 32 | seq_no;
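Both the caching and lookup paths build the same 8-byte key, so a quick stand-alone illustration of the packing; make_cache_key is a hypothetical helper, not part of the plugin:

#include <stdint.h>
#include <stdio.h>

/* Illustration only: the cache key packs a 32-bit flow hash and the 32-bit
 * TCP sequence number into the one u64 key a clib_bihash_8_8 table uses. */
static uint64_t
make_cache_key (uint32_t flow_hash, uint32_t seq_no)
{
  return ((uint64_t) flow_hash << 32) | seq_no;
}

int
main (void)
{
  uint64_t key = make_cache_key (0xabcd1234u, 7u);
  printf ("key = 0x%016llx (flow %08x, seq %u)\n",
	  (unsigned long long) key, (uint32_t) (key >> 32), (uint32_t) key);
  return 0;
}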
/* ioam_cache_add (): cache the iOAM hop-by-hop header of a request so it
 * can be reattached to the response. */
  u32 rewrite_len = 0, e2e_id_offset = 0;
  ioam_e2e_id_option_t *e2e = 0;
  /* ... */
  clib_memcpy_fast (entry->dst_address.as_u64, ip0->dst_address.as_u64,
		    sizeof (ip6_address_t));
  clib_memcpy_fast (entry->src_address.as_u64, ip0->src_address.as_u64,
		    sizeof (ip6_address_t));
  /* ... */
  rewrite_len = ((hbh0->length + 1) << 3);
  /* ... */
  entry->next_hop.as_u64[0] = e2e->id.as_u64[0];
  entry->next_hop.as_u64[1] = e2e->id.as_u64[1];
  /* ... */
  e2e_id_offset = (u8 *) e2e - (u8 *) hbh0;
  /* ... */
  kv.key = (u64) flow_hash << 32 | seq_no;
  /* ... */
  kv.value = pool_index;
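The request is cached under its flow hash while the response is looked up with the source/destination roles swapped (IP_FLOW_HASH_REVERSE_SRC_DST appears in the symbol list further down), which only works if both directions hash to the same value. The toy hash below illustrates that symmetry only; it is not VPP's hash_mix64 mixing, and every name in it is hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the flow hash: XOR of the addresses is order-insensitive
 * and the port pair is canonicalised, so the forward tuple and the reversed
 * (response) tuple hash identically. */
static uint32_t
toy_symmetric_hash (uint64_t src, uint64_t dst, uint16_t sp, uint16_t dp)
{
  uint32_t p1 = ((uint32_t) sp << 16) | dp;
  uint32_t p2 = ((uint32_t) dp << 16) | sp;
  uint32_t ports = p1 < p2 ? p1 : p2;
  return (uint32_t) (src ^ dst) ^ ports;
}

int
main (void)
{
  uint32_t fwd = toy_symmetric_hash (0x1111, 0x2222, 49152, 80);
  uint32_t rev = toy_symmetric_hash (0x2222, 0x1111, 80, 49152);
  printf ("forward %08x, reverse %08x, equal = %d\n", fwd, rev, fwd == rev);
  return 0;
}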
/* ioam_cache_sr_rewrite_template_create (): the template's one segment is
 * filled with 0xfe as a placeholder. */
  ip6_address_t *segments = 0;
  ip6_address_t *this_seg = 0;
  /* ... */
  clib_memset (this_seg, 0xfe, sizeof (ip6_address_t));

/* ioam_cache_table_init (): create the rewrite cache bihash. */
  clib_bihash_init_8_8 (&cm->ioam_rewrite_cache_table,
			"ioam rewrite cache table",
			cm->lookup_table_nbuckets, cm->lookup_table_size);
/* format_ioam_cache_entry (): pool index, src/dst address:port, seq_no */
  s = format (s, "%d: %U:%d to %U:%d seq_no %lu\n",
	      /* ... */);
#define IOAM_CACHE_TS_TIMEOUT 1.0	//SYN timeout 1 sec
#define IOAM_CACHE_TS_TICK 100e-3
#define IOAM_CACHE_TS_TIMEOUT_TICKS (IOAM_CACHE_TS_TICK * 9)
#define TIMER_HANDLE_INVALID ((u32) ~0)

/* ioam_cache_ts_table_init (): per-thread pools, stats and timer wheels */
  for (i = 0; i < no_of_threads; i++)
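IOAM_CACHE_TS_TIMEOUT and IOAM_CACHE_TS_TICK relate a 1-second SYN timeout to a 100 ms wheel granularity. The snippet below is only the seconds-to-ticks arithmetic those macros imply; how the plugin actually programs its tw_timer wheel is not shown in these fragments:

#include <stdio.h>

#define IOAM_CACHE_TS_TIMEOUT 1.0	/* SYN timeout, seconds */
#define IOAM_CACHE_TS_TICK 100e-3	/* wheel granularity, seconds */

int
main (void)
{
  /* A 1 s timeout on a 100 ms wheel corresponds to 10 ticks. */
  unsigned ticks =
    (unsigned) (IOAM_CACHE_TS_TIMEOUT / IOAM_CACHE_TS_TICK + 0.5);
  printf ("%.1f s / %.0f ms tick = %u ticks\n",
	  IOAM_CACHE_TS_TIMEOUT, IOAM_CACHE_TS_TICK * 1e3, ticks);
  return 0;
}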
/* ioam_cache_ts_table_destroy (): free per-thread pools and timer wheels */
  for (i = 0; i < no_of_threads; i++)
    {
      /* ... */
      tw_timer_wheel_free_16t_2w_512sl (&cm->timer_wheels[i]);
    }
static int
ioam_cache_ts_add (ip6_header_t * ip0, u16 src_port, u16 dst_port,
		   u32 seq_no, u8 max_responses, u64 now, u32 thread_id,
		   u32 * pool_index)
{
  /* ... */
  clib_memcpy_fast (entry->dst_address.as_u64, ip0->dst_address.as_u64,
		    sizeof (ip6_address_t));
  clib_memcpy_fast (entry->src_address.as_u64, ip0->src_address.as_u64,
		    sizeof (ip6_address_t));
  /* ... */
  if (entry && entry->hbh)
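This test gates the send path on a cached hop-by-hop header being present. A plausible flush rule, reconstructed from the timeout macro and the response counters visible in the format strings below, is sketched here; the field names are assumptions, not the plugin's exact entry layout:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical, simplified entry. */
typedef struct
{
  uint8_t max_responses;
  uint8_t response_received;
  double created_at;		/* seconds */
} ts_entry_t;

#define SYN_TIMEOUT 1.0		/* IOAM_CACHE_TS_TIMEOUT above */

/* Flush when every expected response has arrived or the entry timed out. */
static int
should_send (const ts_entry_t * e, double now)
{
  return e->response_received == e->max_responses
    || (e->created_at + SYN_TIMEOUT) <= now;
}

int
main (void)
{
  ts_entry_t e = { .max_responses = 3, .response_received = 1,
		   .created_at = 10.0 };
  printf ("at t=10.5: %d, at t=11.5: %d\n",
	  should_send (&e, 10.5), should_send (&e, 11.5));
  return 0;
}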
static int
ioam_cache_ts_lookup (ip6_header_t * ip0, u8 protocol, u16 src_port,
		      u16 dst_port, u32 seq_no,
		      ip6_hop_by_hop_header_t ** hbh,
		      u32 * pool_index, u8 * thread_id, u8 response_seen)
{
  /* ... */
  ioam_e2e_cache_option_t *e2e = 0;
  /* ... */
  if ((u8 *) e2e < ((u8 *) hbh0 + ((hbh0->length + 1) << 3))
      && e2e->hdr.type == HBH_OPTION_TYPE_IOAM_E2E_CACHE_ID)
    {
      /* ... */
      *thread_id = e2e->pool_id;
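Before the pool id and index are read back from the packet, the option pointer is bounds-checked against the hop-by-hop header's declared size, (length + 1) << 3 octets. A self-contained sketch of that check with stand-in byte offsets mirroring the packed cache option above (the offsets and helper are assumptions, not the plugin's code):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Only dereference the option if it still lies inside the hop-by-hop
 * header; assumed layout: type, length, e2e_type, pool_id, 4-byte index. */
static int
read_cache_option (const uint8_t * hbh0, size_t offset,
		   uint8_t * pool_id, uint32_t * pool_index)
{
  size_t hbh_octets = ((size_t) hbh0[1] + 1) << 3;	/* hbh0[1] = length */

  if (offset + 8 > hbh_octets)
    return 0;			/* option would overrun the header */
  *pool_id = hbh0[offset + 3];
  memcpy (pool_index, hbh0 + offset + 4, sizeof (*pool_index));
  return 1;
}

int
main (void)
{
  /* 16-octet hop-by-hop header (length = 1) with the option at offset 8 */
  uint8_t hbh[16] = { 59, 1 };
  uint8_t opt[8] = { 31, 6, 2, 5 };	/* type, len, e2e_type, pool_id 5 */
  uint32_t idx42 = 42;
  uint8_t pool_id;
  uint32_t pool_index;

  memcpy (opt + 4, &idx42, sizeof (idx42));
  memcpy (hbh + 8, opt, sizeof (opt));
  if (read_cache_option (hbh, 8, &pool_id, &pool_index))
    printf ("pool_id %u pool_index %u\n", (unsigned) pool_id, pool_index);
  return 0;
}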
/* format_ioam_cache_ts_entry () */
  u32 thread_id = va_arg (*args, u32);
  /* ... */
  ioam_e2e_id_option_t *e2e = 0;
813 "%d: %U:%d to %U:%d seq_no %u buffer %u %U \n\t\tCreated at %U Received %d\n",
827 "%d: %U:%d to %U:%d seq_no %u Buffer %u \n\t\tCreated at %U Received %d\n",
/* ip6_ioam_ts_cache_set_rewrite (): append the E2E cache option and the
 * E2E ID option to the iOAM rewrite string. */
  ioam_e2e_cache_option_t *e2e = 0;
  ioam_e2e_id_option_t *e2e_id = 0;
  /* ... */
  rewrite_len = ((hbh->length + 1) << 3);
  /* ... */
  e2e = (ioam_e2e_cache_option_t *) (cm->rewrite + rewrite_len);
  /* ... */
  e2e->hdr.length =
    sizeof (ioam_e2e_cache_option_t) - sizeof (ip6_hop_by_hop_option_t);
  /* ... */
  e2e_id =
    (ioam_e2e_id_option_t *) ((u8 *) e2e + sizeof (ioam_e2e_cache_option_t));
  /* ... */
  e2e_id->e2e_type = 1;
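The rewrite string grows by the two padded options, and hbh->length, which counts 8-octet units beyond the first 8 octets, grows by the corresponding _HBH_EXT_LEN values. A small arithmetic sketch, using the packed sizes the typedefs above imply (treated as assumptions):

#include <stdio.h>

/* Packed option sizes implied by the typedefs earlier in this header
 * (assumption): cache option = 8 octets, id option = 24 octets. */
#define CACHE_OPT_SIZE 8u
#define ID_OPT_SIZE 24u
#define RND(sz) (((sz) + 7) & ~7u)	/* IOAM_E2E_*_OPTION_RND */

int
main (void)
{
  unsigned hbh_length = 2;	/* example header: (2 + 1) * 8 = 24 octets */
  unsigned old_octets = (hbh_length + 1) << 3;
  unsigned grow = RND (CACHE_OPT_SIZE) + RND (ID_OPT_SIZE);

  /* length grows by the padded option sizes expressed in 8-octet units */
  hbh_length += (RND (CACHE_OPT_SIZE) >> 3) + (RND (ID_OPT_SIZE) >> 3);
  printf ("rewrite: %u -> %u octets, hbh->length: 2 -> %u\n",
	  old_octets, old_octets + grow, hbh_length);
  return 0;
}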
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
u8 rewrite_pool_index_offset
ip6_address_t sr_localsid_ts
#define MAX_CACHE_TS_ENTRIES
ioam_cache_entry_t * ioam_rewrite_pool
#define CLIB_CACHE_LINE_ALIGN_MARK(mark)
static int ioam_cache_ts_table_init(vlib_main_t *vm)
static ioam_cache_entry_t * ioam_cache_lookup(ip6_header_t *ip0, u16 src_port, u16 dst_port, u32 seq_no)
static void ioam_cache_ts_entry_free(u32 thread_id, ioam_cache_ts_entry_t *entry, u32 node_index)
#define MAX_CACHE_ENTRIES
#define IOAM_E2E_CACHE_HBH_EXT_LEN
typedef CLIB_PACKED(struct { ip6_hop_by_hop_option_t hdr; u8 e2e_type; u8 reserved[5]; ip6_address_t id; }) ioam_e2e_id_option_t
#define pool_foreach(VAR, POOL)
Iterate through pool.
#define TIMER_HANDLE_INVALID
#define IOAM_CACHE_TABLE_DEFAULT_HASH_NUM_BUCKETS
vl_api_ip_port_and_mask_t dst_port
Fixed length block allocator.
vl_api_ip_proto_t protocol
#define clib_memcpy_fast(a, b, c)
static f64 vlib_time_now(vlib_main_t *vm)
static int ioam_cache_table_init(vlib_main_t *vm)
static int ioam_cache_table_destroy(vlib_main_t *vm)
ip6_address_t src_address
#define vec_add2(V, P, N)
Add N elements to end of vector V, return pointer to new elements in P.
ip6_address_t dst_address
#define pool_is_free(P, E)
Use free bitmap to query whether given element is free.
#define vec_validate_aligned(V, I, A)
Make sure vector is long enough for given index (no header, specified alignment)
static void ioam_cache_ts_timer_set(ioam_cache_main_t *cm, ioam_cache_ts_entry_t *entry, u32 interval)
static int ioam_cache_ts_update(u32 thread_id, i32 pool_index, u32 buffer_index, ip6_hop_by_hop_header_t *hbh)
#define IP_FLOW_HASH_SRC_PORT
static u8 * format_ioam_cache_ts_entry(u8 *s, va_list *args)
ip6_address_t sr_localsid_cache
static void ioam_cache_sr_rewrite_template_create(void)
static ioam_cache_entry_t * ioam_cache_entry_cleanup(u32 pool_index)
static int ip6_ioam_ts_cache_set_rewrite(void)
u8 * format_ip6_hop_by_hop_ext_hdr(u8 *s, va_list *args)
static int ioam_cache_add(vlib_buffer_t *b0, ip6_header_t *ip0, u16 src_port, u16 dst_port, ip6_hop_by_hop_header_t *hbh0, u32 seq_no)
#define pool_alloc_aligned(P, N, A)
Allocate N more free elements to pool (general version).
static int ioam_cache_ts_add(ip6_header_t *ip0, u16 src_port, u16 dst_port, u32 seq_no, u8 max_responses, u64 now, u32 thread_id, u32 *pool_index)
#define IOAM_CACHE_TABLE_DEFAULT_HASH_MEMORY_SIZE
vlib_frame_t * vlib_get_frame_to_node(vlib_main_t *vm, u32 to_node_index)
static u32 ip6_compute_flow_hash_ext(const ip6_header_t *ip, u8 protocol, u16 src_port, u16 dst_port, flow_hash_config_t flow_hash_config)
u32 ip6_reset_ts_hbh_node_index
u64 cpu_time_main_loop_start
#define HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE_ID
tw_timer_wheel_16t_2w_512sl_t * timer_wheels
per thread single-wheel
static void ioam_cache_ts_check_and_send(u32 thread_id, i32 pool_index)
static void ioam_e2e_id_rewrite_handler(ioam_e2e_id_option_t *e2e_option, ip6_address_t *address)
int ip6_ioam_set_rewrite(u8 **rwp, int has_trace_option, int has_pot_option, int has_seqno_option)
vlib_node_registration_t ioam_cache_ts_node
(constructor) VLIB_REGISTER_NODE (ioam_cache_ts_node)
ioam_cache_ts_pool_stats_t * ts_stats
vlib_worker_thread_t * vlib_worker_threads
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
#define IOAM_CACHE_TS_TIMEOUT
#define IP_FLOW_HASH_DST_ADDR
u64 lookup_table_nbuckets
ioam_cache_ts_entry_t ** ioam_ts_pool
vlib_node_registration_t ioam_cache_node
(constructor) VLIB_REGISTER_NODE (ioam_cache_node)
static int ip6_ioam_ts_cache_cleanup_rewrite(void)
ip6_hop_by_hop_ioam_main_t ip6_hop_by_hop_ioam_main
#define IOAM_E2E_ID_HBH_EXT_LEN
u32 ip6_hbh_pop_node_index
void vlib_put_frame_to_node(vlib_main_t *vm, u32 to_node_index, vlib_frame_t *f)
#define pool_put(P, E)
Free an object E in pool P.
ip6_address_t src_address
#define IP_FLOW_HASH_DST_PORT
int ip6_address_compare(ip6_address_t *a1, ip6_address_t *a2)
static void ioam_cache_entry_free(ioam_cache_entry_t *entry)
static int ioam_cache_ts_lookup(ip6_header_t *ip0, u8 protocol, u16 src_port, u16 dst_port, u32 seq_no, ip6_hop_by_hop_header_t **hbh, u32 *pool_index, u8 *thread_id, u8 response_seen)
#define IP_FLOW_HASH_DEFAULT
Default: 5-tuple without the "reverse" bit.
u32 cleanup_process_node_index
u32 expected_to_expire
entry should expire at this clock tick
#define pool_get_aligned(P, E, A)
Allocate an object E from a pool P with alignment A.
#define pool_free(p)
Free a pool.
The fine-grained event logger allows lightweight, thread-safe event logging at minimum cost...
vl_api_ip_port_and_mask_t src_port
ip6_address_t dst_address
#define hash_mix64(a0, b0, c0)
#define vec_free(V)
Free vector's memory (no header).
static void * ip6_ioam_find_hbh_option(ip6_hop_by_hop_header_t *hbh0, u8 option)
u32 timer_handle
Handle returned from tw_start_timer.
#define IP_FLOW_HASH_PROTO
8 octet key, 8 octet key value pair
void expired_cache_ts_timer_callback(u32 *expired_timers)
manual_print typedef address
clib_bihash_8_8_t ioam_rewrite_cache_table
void ioam_cache_ts_timer_node_enable(vlib_main_t *vm, u8 enable)
static u8 * format_ioam_cache_entry(u8 *s, va_list *args)
static void ioam_cache_ts_send(u32 thread_id, i32 pool_index)
#define IOAM_E2E_CACHE_OPTION_RND
struct _vlib_node_registration vlib_node_registration_t
static void ioam_cache_ts_timer_reset(ioam_cache_main_t *cm, ioam_cache_ts_entry_t *entry)
static int ioam_cache_ts_table_destroy(vlib_main_t *vm)
#define IP_FLOW_HASH_REVERSE_SRC_DST
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
static uword max_log2(uword x)
VLIB buffer representation.
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
ip6_hop_by_hop_header_t * hbh
#define IP_FLOW_HASH_SRC_ADDR
Flow hash configuration.
Segment Routing data structures definitions.
u32 flow_hash_config_t
A flow hash configuration is a mask of the flow hash options.
#define HBH_OPTION_TYPE_SKIP_UNKNOWN
static int ioam_cache_ts_entry_cleanup(u32 thread_id, u32 pool_index)
f64 end
end of the time range
u32 ip6_add_from_cache_hbh_node_index
ioam_cache_main_t ioam_cache_main
#define CLIB_CACHE_LINE_BYTES
#define HBH_OPTION_TYPE_IOAM_E2E_CACHE_ID
#define IOAM_E2E_ID_OPTION_RND
#define IOAM_CACHE_TS_TICK
static u8 * ip6_sr_compute_rewrite_string_insert(ip6_address_t *sl)
SR rewrite string computation for SRH insertion (inline)
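Several of the primitives listed above (pool_get_aligned, pool_elt_at_index, pool_put, the 8x8 bihash) combine into one pattern in this plugin: entries live in a pool and the hash stores the flow-key to pool-index mapping. The following library-free toy only illustrates that pattern; it uses none of the vppinfra APIs and all names in it are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define POOL_SIZE 8
#define MAP_SIZE 16

typedef struct { uint32_t seq_no; int in_use; } entry_t;
typedef struct { uint64_t key; uint32_t value; int used; } slot_t;

static entry_t pool[POOL_SIZE];	/* stand-in for the entry pool */
static slot_t map[MAP_SIZE];	/* stand-in for the 8x8 bihash */

static int
pool_get_toy (void)
{
  for (int i = 0; i < POOL_SIZE; i++)
    if (!pool[i].in_use)
      {
	pool[i].in_use = 1;
	return i;
      }
  return -1;
}

static void
map_add (uint64_t key, uint32_t value)
{
  slot_t *s = &map[key % MAP_SIZE];
  s->key = key;
  s->value = value;
  s->used = 1;
}

static int
map_find (uint64_t key, uint32_t * value)
{
  slot_t *s = &map[key % MAP_SIZE];
  if (s->used && s->key == key)
    {
      *value = s->value;
      return 0;
    }
  return -1;
}

int
main (void)
{
  uint64_t key = ((uint64_t) 0xabcd1234u << 32) | 7;	/* flow_hash | seq_no */
  int idx = pool_get_toy ();
  uint32_t found;

  if (idx < 0)
    return 1;
  pool[idx].seq_no = 7;
  map_add (key, (uint32_t) idx);	/* like kv.value = pool_index */

  if (map_find (key, &found) == 0)
    printf ("hit: pool index %u, seq_no %u\n", found, pool[found].seq_no);
  return 0;
}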