#ifndef __included_ioam_cache_h__
#define __included_ioam_cache_h__

#define MAX_CACHE_ENTRIES 4096
#define MAX_CACHE_TS_ENTRIES 1048576

#define IOAM_CACHE_TABLE_DEFAULT_HASH_NUM_BUCKETS (4 * 1024)
#define IOAM_CACHE_TABLE_DEFAULT_HASH_MEMORY_SIZE (2 << 20)

/* ip6_compute_flow_hash_ext (): fold the ports into the hash input,
 * swapping them when IP_FLOW_HASH_REVERSE_SRC_DST is set */
c = (flow_hash_config & IP_FLOW_HASH_REVERSE_SRC_DST) ?
  ((t1 << 16) | t2) : ((t2 << 16) | t1);
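The reverse flag is what lets the response path compute the same key that was cached on the request path. A minimal sketch (ip_req/ip_rsp, src_port/dst_port are illustrative names; the request is hashed without the flag when cached, the response with it):

/* Sketch only: ip_req is the request header, ip_rsp the response header,
 * whose addresses and ports are the request's swapped. */
u32 h_req = ip6_compute_flow_hash_ext (ip_req, ip_req->protocol,
				       src_port, dst_port,
				       IP_FLOW_HASH_DEFAULT);
u32 h_rsp = ip6_compute_flow_hash_ext (ip_rsp, ip_rsp->protocol,
				       dst_port /* response src port */,
				       src_port /* response dst port */,
				       IP_FLOW_HASH_DEFAULT |
				       IP_FLOW_HASH_REVERSE_SRC_DST);
/* h_req == h_rsp, so the response maps to the key cached for the request. */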
/* Two new iOAM E2E option types used by the caching node */
#define HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE_ID 30
#define HBH_OPTION_TYPE_IOAM_E2E_CACHE_ID 31

typedef CLIB_PACKED (struct
{
  ip6_hop_by_hop_option_t hdr;
  u8 e2e_type;
  u8 reserved[5];
  ip6_address_t id;
}) ioam_e2e_id_option_t;

typedef CLIB_PACKED (struct
{
  ip6_hop_by_hop_option_t hdr;
  u8 e2e_type;
  u8 pool_id;
  u32 pool_index;
}) ioam_e2e_cache_option_t;
#define IOAM_E2E_ID_OPTION_RND ((sizeof(ioam_e2e_id_option_t) + 7) & ~7)
#define IOAM_E2E_ID_HBH_EXT_LEN (IOAM_E2E_ID_OPTION_RND >> 3)
#define IOAM_E2E_CACHE_OPTION_RND ((sizeof(ioam_e2e_cache_option_t) + 7) & ~7)
#define IOAM_E2E_CACHE_HBH_EXT_LEN (IOAM_E2E_CACHE_OPTION_RND >> 3)

/* ioam_e2e_id_rewrite_handler (): stamp the caching node's address
 * into the E2E ID option */
e2e_option->id.as_u64[0] = address->as_u64[0];
e2e_option->id.as_u64[1] = address->as_u64[1];
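The *_RND macros round each option up to a whole number of 8-octet units and the *_HBH_EXT_LEN macros convert that to the units the hop-by-hop header length field counts in. A quick check against the layouts above, assuming the usual 2-byte ip6_hop_by_hop_option_t type/length header:

/* ioam_e2e_id_option_t: 2 (hdr) + 1 + 5 + 16 = 24 bytes
 *   IOAM_E2E_ID_OPTION_RND  = (24 + 7) & ~7 = 24
 *   IOAM_E2E_ID_HBH_EXT_LEN = 24 >> 3       = 3  8-octet units
 *
 * ioam_e2e_cache_option_t: 2 (hdr) + 1 + 1 + 4 = 8 bytes
 *   IOAM_E2E_CACHE_OPTION_RND  = (8 + 7) & ~7 = 8
 *   IOAM_E2E_CACHE_HBH_EXT_LEN = 8 >> 3       = 1  8-octet unit
 */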
/* ioam_cache_lookup (): the bihash key is the 32-bit flow hash in the
 * upper word and the sequence number in the lower word */
kv.key = (u64) flow_hash << 32 | seq_no;
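A minimal sketch of how the lookup side uses this key against the clib_bihash_8_8_t table (standard bihash calls; the address/port verification done on a hit is elided):

ioam_cache_main_t *cm = &ioam_cache_main;
clib_bihash_kv_8_8_t kv, value;

kv.key = (u64) flow_hash << 32 | seq_no;
if (clib_bihash_search_8_8 (&cm->ioam_rewrite_cache_table, &kv, &value) >= 0)
  {
    /* value.value is the pool index stored when the entry was added */
    ioam_cache_entry_t *entry =
      pool_elt_at_index (cm->ioam_rewrite_pool, value.value);
    /* ... check entry src/dst addresses and ports before using it ... */
  }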
/* ioam_cache_add (): populate a new cache entry from the request packet */
u32 rewrite_len = 0, e2e_id_offset = 0;
ioam_e2e_id_option_t *e2e = 0;

clib_memcpy_fast (entry->dst_address.as_u64, ip0->dst_address.as_u64,
                  sizeof (ip6_address_t));
clib_memcpy_fast (entry->src_address.as_u64, ip0->src_address.as_u64,
                  sizeof (ip6_address_t));
rewrite_len = ((hbh0->length + 1) << 3);

/* save the address carried in the E2E ID option as the next hop */
entry->next_hop.as_u64[0] = e2e->id.as_u64[0];
entry->next_hop.as_u64[1] = e2e->id.as_u64[1];

/* offset of the E2E ID option within the hop-by-hop header */
e2e_id_offset = (u8 *) e2e - (u8 *) hbh0;
kv.key = (u64) flow_hash << 32 | seq_no;
kv.value = pool_index;
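A sketch of the insert path around these two assignments (entry allocation via pool_get_aligned, insertion via the standard bihash add; error handling elided):

ioam_cache_main_t *cm = &ioam_cache_main;
ioam_cache_entry_t *entry = 0;
clib_bihash_kv_8_8_t kv;
u32 pool_index = 0;

pool_get_aligned (cm->ioam_rewrite_pool, entry, CLIB_CACHE_LINE_BYTES);
pool_index = entry - cm->ioam_rewrite_pool;

kv.key = (u64) flow_hash << 32 | seq_no;
kv.value = pool_index;
clib_bihash_add_del_8_8 (&cm->ioam_rewrite_cache_table, &kv, 1 /* is_add */);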
/* ioam_cache_sr_rewrite_template_create (): build an SR rewrite string
 * with one placeholder segment; the real addresses are filled in when
 * a packet is processed */
ip6_address_t *segments = 0;
ip6_address_t *this_seg = 0;

vec_add2 (segments, this_seg, 1);
clib_memset (this_seg, 0xfe, sizeof (ip6_address_t));
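Putting the fragment together, a sketch of the whole template creation; ip6_sr_compute_rewrite_string_insert and vec_free appear in the listing below, while the cm->sr_rewrite_template destination field is an assumption here:

static void
ioam_cache_sr_rewrite_template_create (void)
{
  ioam_cache_main_t *cm = &ioam_cache_main;
  ip6_address_t *segments = 0;
  ip6_address_t *this_seg = 0;

  vec_add2 (segments, this_seg, 1);
  clib_memset (this_seg, 0xfe, sizeof (ip6_address_t));  /* placeholder */
  cm->sr_rewrite_template = ip6_sr_compute_rewrite_string_insert (segments);
  vec_free (segments);
}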
/* ioam_cache_table_init (): create the bihash used by the rewrite cache */
clib_bihash_init_8_8 (&cm->ioam_rewrite_cache_table,
                      "ioam rewrite cache table",
                      cm->lookup_table_nbuckets, cm->lookup_table_size);
/* ioam_cache_table_destroy (): free each cached entry before freeing
 * the pool and the hash table */
ioam_cache_entry_free (entry);
/* format_ioam_cache_entry () */
s = format (s, "%d: %U:%d to %U:%d seq_no %lu\n",
            /* ... addresses, ports and seq_no ... */);
#define IOAM_CACHE_TS_TIMEOUT 1.0	//SYN timeout 1 sec
#define IOAM_CACHE_TS_TICK 100e-3
#define IOAM_CACHE_TS_TIMEOUT_TICKS IOAM_CACHE_TS_TICK*9
#define TIMER_HANDLE_INVALID ((u32) ~0)

/* ioam_cache_ts_table_init (): per-thread pools and timer wheels */
for (i = 0; i < no_of_threads; i++)
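A sketch of what this per-thread loop sets up, using the fields from the listing below (ioam_ts_pool, timer_wheels, expired_cache_ts_timer_callback) and the standard tw_timer template API; the max-expirations argument is an assumption:

for (i = 0; i < no_of_threads; i++)
  {
    pool_alloc_aligned (cm->ioam_ts_pool[i], MAX_CACHE_TS_ENTRIES,
                        CLIB_CACHE_LINE_BYTES);
    /* one 16-timer, two-wheel, 512-slot wheel per thread, ticking
     * every IOAM_CACHE_TS_TICK seconds */
    tw_timer_wheel_init_16t_2w_512sl (&cm->timer_wheels[i],
                                      expired_cache_ts_timer_callback,
                                      IOAM_CACHE_TS_TICK,
                                      10e4 /* max expirations, assumed */);
  }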
/* ioam_cache_ts_table_destroy (): drain and free the per-thread state */
for (i = 0; i < no_of_threads; i++)
  {
    pool_foreach (entry, cm->ioam_ts_pool[i],
                  ({
                    ioam_cache_ts_entry_free (i, entry, cm->error_node_index);
                  }));
    tw_timer_wheel_free_16t_2w_512sl (&cm->timer_wheels[i]);
  }
static int
ioam_cache_ts_add (ip6_header_t * ip0, u16 src_port, u16 dst_port, u32 seq_no,
                   u8 max_responses, u64 now, u32 thread_id, u32 * pool_index)

clib_memcpy_fast (entry->dst_address.as_u64, ip0->dst_address.as_u64,
                  sizeof (ip6_address_t));
clib_memcpy_fast (entry->src_address.as_u64, ip0->src_address.as_u64,
                  sizeof (ip6_address_t));
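ioam_cache_ts_timer_set () (declared in the listing below) is what arms the per-thread timer wheel for a freshly added entry; a minimal sketch, assuming the entry records its own pool_id and pool_index and that the pool index doubles as the timer's user handle:

static void
ioam_cache_ts_timer_set (ioam_cache_main_t * cm,
                         ioam_cache_ts_entry_t * entry, u32 interval)
{
  /* interval is in IOAM_CACHE_TS_TICK ticks; the returned handle is
   * kept so the timer can later be stopped or reset */
  entry->timer_handle =
    tw_timer_start_16t_2w_512sl (&cm->timer_wheels[entry->pool_id],
                                 entry->pool_index, 1 /* timer id */,
                                 interval);
}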
if (entry && entry->hbh)
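When a cached buffer is released back into the graph (ioam_cache_ts_send in the listing below), the usual VPP pattern is to get a frame to the target node, write the buffer index into it, and hand the frame back; a sketch, assuming cm->ip6_reset_ts_hbh_node_index is the target node, vm the vlib_main_t, and bi0 the buffer index stored in the entry:

vlib_frame_t *nf = 0;
u32 *to_next = 0;

nf = vlib_get_frame_to_node (vm, cm->ip6_reset_ts_hbh_node_index);
nf->n_vectors = 1;
to_next = vlib_frame_vector_args (nf);
*to_next = bi0;			/* cached buffer index */
vlib_put_frame_to_node (vm, cm->ip6_reset_ts_hbh_node_index, nf);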
static int
ioam_cache_ts_lookup (ip6_header_t * ip0, u8 protocol,
                      u16 src_port, u16 dst_port, u32 seq_no,
                      ip6_hop_by_hop_header_t ** hbh,
                      u32 * pool_index, u8 * thread_id, u8 response_seen)

ioam_e2e_cache_option_t *e2e = 0;

/* the E2E cache option carries the owning thread (pool_id) and the
 * pool index of the cached entry; check that it lies inside the
 * hop-by-hop header before trusting it */
if ((u8 *) e2e < ((u8 *) hbh0 + ((hbh0->length + 1) << 3))
    && e2e->hdr.type == HBH_OPTION_TYPE_IOAM_E2E_CACHE_ID)
  {
    *pool_index = e2e->pool_index;
    *thread_id = e2e->pool_id;
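A usage sketch from the node handling a returning response, built only from the signature above; the variable names and the 0-means-found return convention are assumptions:

ip6_hop_by_hop_header_t *hbh = 0;
u32 pool_index = 0;
u8 thread_id = 0;

if (ioam_cache_ts_lookup (ip0, ip0->protocol, src_port, dst_port, seq_no,
                          &hbh, &pool_index, &thread_id,
                          1 /* response_seen */) == 0)
  {
    /* hbh points at the hop-by-hop header cached for this flow;
     * pool_index / thread_id locate the entry for update or cleanup */
  }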
/* format_ioam_cache_ts_entry () */
u32 thread_id = va_arg (*args, u32);
ioam_e2e_id_option_t *e2e = 0;

/* entry with a cached hop-by-hop header ... */
"%d: %U:%d to %U:%d seq_no %u buffer %u %U \n\t\tCreated at %U Received %d\n",
/* ... and entry without one */
"%d: %U:%d to %U:%d seq_no %u Buffer %u \n\t\tCreated at %U Received %d\n",
/* ip6_ioam_ts_cache_set_rewrite (): append the E2E cache option and the
 * E2E ID option after the existing options in cm->rewrite */
ioam_e2e_cache_option_t *e2e = 0;
ioam_e2e_id_option_t *e2e_id = 0;

rewrite_len = ((hbh->length + 1) << 3);

e2e = (ioam_e2e_cache_option_t *) (cm->rewrite + rewrite_len);
e2e->hdr.length = sizeof (ioam_e2e_cache_option_t) -
  sizeof (ip6_hop_by_hop_option_t);
e2e_id =
  (ioam_e2e_id_option_t *) ((u8 *) e2e + sizeof (ioam_e2e_cache_option_t));
e2e_id->e2e_type = 1;
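A worked view of the resulting rewrite layout, using the option sizes derived earlier; the length-field bookkeeping is stated here as the natural consequence of the macros above rather than quoted from the source:

/* cm->rewrite after ip6_ioam_ts_cache_set_rewrite ():
 *
 *   [ original hop-by-hop header + options : rewrite_len bytes      ]
 *   [ ioam_e2e_cache_option_t              :  8 bytes, 1 x 8 octets ]
 *   [ ioam_e2e_id_option_t                 : 24 bytes, 3 x 8 octets ]
 *
 * so the hop-by-hop length field grows by
 * IOAM_E2E_CACHE_HBH_EXT_LEN + IOAM_E2E_ID_HBH_EXT_LEN = 1 + 3 units.
 */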
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
u8 rewrite_pool_index_offset
ip6_address_t sr_localsid_ts
#define MAX_CACHE_TS_ENTRIES
ioam_cache_entry_t * ioam_rewrite_pool
#define CLIB_CACHE_LINE_ALIGN_MARK(mark)
static int ioam_cache_ts_table_init(vlib_main_t *vm)
static ioam_cache_entry_t * ioam_cache_lookup(ip6_header_t *ip0, u16 src_port, u16 dst_port, u32 seq_no)
static void ioam_cache_ts_entry_free(u32 thread_id, ioam_cache_ts_entry_t *entry, u32 node_index)
#define MAX_CACHE_ENTRIES
#define IP_FLOW_HASH_SRC_PORT
#define IOAM_E2E_CACHE_HBH_EXT_LEN
typedef CLIB_PACKED(struct { ip6_hop_by_hop_option_t hdr; u8 e2e_type; u8 reserved[5]; ip6_address_t id; }) ioam_e2e_id_option_t
#define TIMER_HANDLE_INVALID
#define IOAM_CACHE_TABLE_DEFAULT_HASH_NUM_BUCKETS
Fixed length block allocator.
#define clib_memcpy_fast(a, b, c)
static f64 vlib_time_now(vlib_main_t *vm)
#define IP_FLOW_HASH_REVERSE_SRC_DST
static int ioam_cache_table_init(vlib_main_t *vm)
static int ioam_cache_table_destroy(vlib_main_t *vm)
ip6_address_t src_address
#define vec_add2(V, P, N)
Add N elements to end of vector V, return pointer to new elements in P.
#define IP_FLOW_HASH_DST_PORT
ip6_address_t dst_address
#define pool_is_free(P, E)
Use free bitmap to query whether given element is free.
#define vec_validate_aligned(V, I, A)
Make sure vector is long enough for given index (no header, specified alignment)
static void ioam_cache_ts_timer_set(ioam_cache_main_t *cm, ioam_cache_ts_entry_t *entry, u32 interval)
static int ioam_cache_ts_update(u32 thread_id, i32 pool_index, u32 buffer_index, ip6_hop_by_hop_header_t *hbh)
static u8 * format_ioam_cache_ts_entry(u8 *s, va_list *args)
ip6_address_t sr_localsid_cache
static void ioam_cache_sr_rewrite_template_create(void)
vl_api_ip_proto_t protocol
static ioam_cache_entry_t * ioam_cache_entry_cleanup(u32 pool_index)
static int ip6_ioam_ts_cache_set_rewrite(void)
u8 * format_ip6_hop_by_hop_ext_hdr(u8 *s, va_list *args)
#define pool_foreach(VAR, POOL, BODY)
Iterate through pool.
static int ioam_cache_add(vlib_buffer_t *b0, ip6_header_t *ip0, u16 src_port, u16 dst_port, ip6_hop_by_hop_header_t *hbh0, u32 seq_no)
#define IP_FLOW_HASH_DST_ADDR
#define pool_alloc_aligned(P, N, A)
Allocate N more free elements to pool (general version).
static int ioam_cache_ts_add(ip6_header_t *ip0, u16 src_port, u16 dst_port, u32 seq_no, u8 max_responses, u64 now, u32 thread_id, u32 *pool_index)
#define IOAM_CACHE_TABLE_DEFAULT_HASH_MEMORY_SIZE
vlib_frame_t * vlib_get_frame_to_node(vlib_main_t *vm, u32 to_node_index)
static u32 ip6_compute_flow_hash_ext(const ip6_header_t *ip, u8 protocol, u16 src_port, u16 dst_port, flow_hash_config_t flow_hash_config)
u32 ip6_reset_ts_hbh_node_index
u64 cpu_time_main_loop_start
#define HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE_ID
tw_timer_wheel_16t_2w_512sl_t * timer_wheels
per thread single-wheel
static void ioam_cache_ts_check_and_send(u32 thread_id, i32 pool_index)
static void ioam_e2e_id_rewrite_handler(ioam_e2e_id_option_t *e2e_option, ip6_address_t *address)
int ip6_ioam_set_rewrite(u8 **rwp, int has_trace_option, int has_pot_option, int has_seqno_option)
vlib_node_registration_t ioam_cache_ts_node
(constructor) VLIB_REGISTER_NODE (ioam_cache_ts_node)
ioam_cache_ts_pool_stats_t * ts_stats
vlib_worker_thread_t * vlib_worker_threads
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
#define IOAM_CACHE_TS_TIMEOUT
u64 lookup_table_nbuckets
ioam_cache_ts_entry_t ** ioam_ts_pool
vlib_node_registration_t ioam_cache_node
(constructor) VLIB_REGISTER_NODE (ioam_cache_node)
static int ip6_ioam_ts_cache_cleanup_rewrite(void)
ip6_hop_by_hop_ioam_main_t ip6_hop_by_hop_ioam_main
#define IOAM_E2E_ID_HBH_EXT_LEN
u32 ip6_hbh_pop_node_index
void vlib_put_frame_to_node(vlib_main_t *vm, u32 to_node_index, vlib_frame_t *f)
#define pool_put(P, E)
Free an object E in pool P.
ip6_address_t src_address
int ip6_address_compare(ip6_address_t *a1, ip6_address_t *a2)
static void ioam_cache_entry_free(ioam_cache_entry_t *entry)
static int ioam_cache_ts_lookup(ip6_header_t *ip0, u8 protocol, u16 src_port, u16 dst_port, u32 seq_no, ip6_hop_by_hop_header_t **hbh, u32 *pool_index, u8 *thread_id, u8 response_seen)
u32 cleanup_process_node_index
u32 expected_to_expire
entry should expire at this clock tick
#define pool_get_aligned(P, E, A)
Allocate an object E from a pool P with alignment A.
#define pool_free(p)
Free a pool.
The fine-grained event logger allows lightweight, thread-safe event logging at minimum cost...
ip6_address_t dst_address
#define hash_mix64(a0, b0, c0)
#define vec_free(V)
Free vector's memory (no header).
static void * ip6_ioam_find_hbh_option(ip6_hop_by_hop_header_t *hbh0, u8 option)
u32 timer_handle
Handle returned from tw_start_timer.
8 octet key, 8 octet value pair
#define IP_FLOW_HASH_SRC_ADDR
Flow hash configuration.
void expired_cache_ts_timer_callback(u32 *expired_timers)
clib_bihash_8_8_t ioam_rewrite_cache_table
void ioam_cache_ts_timer_node_enable(vlib_main_t *vm, u8 enable)
static u8 * format_ioam_cache_entry(u8 *s, va_list *args)
#define IP_FLOW_HASH_DEFAULT
Default: 5-tuple without the "reverse" bit.
static void ioam_cache_ts_send(u32 thread_id, i32 pool_index)
u32 flow_hash_config_t
A flow hash configuration is a mask of the flow hash options.
#define IOAM_E2E_CACHE_OPTION_RND
struct _vlib_node_registration vlib_node_registration_t
static void ioam_cache_ts_timer_reset(ioam_cache_main_t *cm, ioam_cache_ts_entry_t *entry)
static int ioam_cache_ts_table_destroy(vlib_main_t *vm)
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
static uword max_log2(uword x)
VLIB buffer representation.
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
ip6_hop_by_hop_header_t * hbh
Segment Routing data structures definitions.
#define HBH_OPTION_TYPE_SKIP_UNKNOWN
#define IP_FLOW_HASH_PROTO
static int ioam_cache_ts_entry_cleanup(u32 thread_id, u32 pool_index)
f64 end
end of the time range
u32 ip6_add_from_cache_hbh_node_index
ioam_cache_main_t ioam_cache_main
#define CLIB_CACHE_LINE_BYTES
#define HBH_OPTION_TYPE_IOAM_E2E_CACHE_ID
#define IOAM_E2E_ID_OPTION_RND
#define IOAM_CACHE_TS_TICK
static u8 * ip6_sr_compute_rewrite_string_insert(ip6_address_t *sl)
SR rewrite string computation for SRH insertion (inline)