15 #ifndef __included_ioam_export_h__ 16 #define __included_ioam_export_h__ 80 #define DEFAULT_EXPORT_SIZE (3 * CLIB_CACHE_LINE_BYTES) 85 #define DEFAULT_EXPORT_RECORDS 7 184 for (i = 0; i < no_of_threads; i++)
188 memset (eb, 0,
sizeof (*eb));
202 #define IPFIX_IOAM_EXPORT_ID 272 203 #define IPFIX_VXLAN_IOAM_EXPORT_ID 273 259 udp->
src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
260 udp->
dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
274 DEFAULT_EXPORT_SIZE)));
280 DEFAULT_EXPORT_SIZE)));
286 DEFAULT_EXPORT_SIZE));
329 (sizeof (*ip) +
sizeof (*udp) +
355 #define EXPORT_TIMEOUT (20.0) 356 #define THREAD_PERIOD (30.0) 365 uword *event_data = 0;
368 u32 *vec_buffer_indices = 0;
369 u32 *vec_buffer_to_be_sent = 0;
370 u32 *thread_index = 0;
371 u32 new_pool_index = 0;
378 clib_warning (
"bogus kickoff event received, %d", event_type);
405 if (*em->
lockp[i] == 1)
413 memset (new_eb, 0,
sizeof (*new_eb));
417 vec_add (vec_buffer_indices, &new_pool_index, 1);
430 if (vec_len (thread_index) != 0)
435 for (i = 0; i <
vec_len (thread_index); i++)
437 while (__sync_lock_test_and_set (em->
lockp[thread_index[i]], 1))
441 *em->
lockp[thread_index[
i]] = 0;
445 for (i = 0; i <
vec_len (vec_buffer_to_be_sent); i++)
456 for (i = 0; i <
vec_len (vec_buffer_indices); i++)
469 #define ioam_export_node_common(EM, VM, N, F, HTYPE, L, V, NEXT, FIXUP_FUNC) \ 471 u32 n_left_from, *from, *to_next; \ 472 export_next_t next_index; \ 473 u32 pkts_recorded = 0; \ 474 ioam_export_buffer_t *my_buf = 0; \ 475 vlib_buffer_t *eb0 = 0; \ 477 from = vlib_frame_vector_args (F); \ 478 n_left_from = (F)->n_vectors; \ 479 next_index = (N)->cached_next_index; \ 480 while (__sync_lock_test_and_set ((EM)->lockp[(VM)->cpu_index], 1)); \ 481 my_buf = ioam_export_get_my_buffer (EM, (VM)->cpu_index); \ 482 my_buf->touched_at = vlib_time_now (VM); \ 483 while (n_left_from > 0) \ 485 u32 n_left_to_next; \ 486 vlib_get_next_frame (VM, N, next_index, to_next, n_left_to_next); \ 487 while (n_left_from >= 4 && n_left_to_next >= 2) \ 493 vlib_buffer_t *p0, *p1; \ 494 u32 ip_len0, ip_len1; \ 496 vlib_buffer_t *p2, *p3; \ 497 p2 = vlib_get_buffer (VM, from[2]); \ 498 p3 = vlib_get_buffer (VM, from[3]); \ 499 vlib_prefetch_buffer_header (p2, LOAD); \ 500 vlib_prefetch_buffer_header (p3, LOAD); \ 501 CLIB_PREFETCH (p2->data, 3 * CLIB_CACHE_LINE_BYTES, LOAD); \ 502 CLIB_PREFETCH (p3->data, 3 * CLIB_CACHE_LINE_BYTES, LOAD); \ 504 to_next[0] = bi0 = from[0]; \ 505 to_next[1] = bi1 = from[1]; \ 509 n_left_to_next -= 2; \ 510 p0 = vlib_get_buffer (VM, bi0); \ 511 p1 = vlib_get_buffer (VM, bi1); \ 512 ip0 = vlib_buffer_get_current (p0); \ 513 ip1 = vlib_buffer_get_current (p1); \ 515 clib_net_to_host_u16 (ip0->L) + sizeof (HTYPE); \ 517 clib_net_to_host_u16 (ip1->L) + sizeof (HTYPE); \ 518 ebi0 = my_buf->buffer_index; \ 519 eb0 = vlib_get_buffer (VM, ebi0); \ 520 if (PREDICT_FALSE (eb0 == 0)) \ 523 ip_len0 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len0; \ 525 ip_len1 > DEFAULT_EXPORT_SIZE ? 
DEFAULT_EXPORT_SIZE : ip_len1; \ 526 copy3cachelines (eb0->data + eb0->current_length, ip0, ip_len0); \ 527 FIXUP_FUNC(eb0, p0); \ 528 eb0->current_length += DEFAULT_EXPORT_SIZE; \ 529 my_buf->records_in_this_buffer++; \ 530 if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS) \ 532 ioam_export_send_buffer (EM, VM, my_buf); \ 533 ioam_export_init_buffer (EM, VM, my_buf); \ 535 ebi0 = my_buf->buffer_index; \ 536 eb0 = vlib_get_buffer (VM, ebi0); \ 537 if (PREDICT_FALSE (eb0 == 0)) \ 539 copy3cachelines (eb0->data + eb0->current_length, ip1, ip_len1); \ 540 FIXUP_FUNC(eb0, p1); \ 541 eb0->current_length += DEFAULT_EXPORT_SIZE; \ 542 my_buf->records_in_this_buffer++; \ 543 if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS) \ 545 ioam_export_send_buffer (EM, VM, my_buf); \ 546 ioam_export_init_buffer (EM, VM, my_buf); \ 548 pkts_recorded += 2; \ 549 if (PREDICT_FALSE (((node)->flags & VLIB_NODE_FLAG_TRACE))) \ 551 if (p0->flags & VLIB_BUFFER_IS_TRACED) \ 553 export_trace_t *t = \ 554 vlib_add_trace (VM, node, p0, sizeof (*t)); \ 556 clib_net_to_host_u32 (ip0->V); \ 557 t->next_index = next0; \ 559 if (p1->flags & VLIB_BUFFER_IS_TRACED) \ 561 export_trace_t *t = \ 562 vlib_add_trace (VM, N, p1, sizeof (*t)); \ 564 clib_net_to_host_u32 (ip1->V); \ 565 t->next_index = next1; \ 569 vlib_validate_buffer_enqueue_x2 (VM, N, next_index, \ 570 to_next, n_left_to_next, \ 571 bi0, bi1, next0, next1); \ 573 while (n_left_from > 0 && n_left_to_next > 0) \ 585 n_left_to_next -= 1; \ 586 p0 = vlib_get_buffer (VM, bi0); \ 587 ip0 = vlib_buffer_get_current (p0); \ 589 clib_net_to_host_u16 (ip0->L) + sizeof (HTYPE); \ 590 ebi0 = my_buf->buffer_index; \ 591 eb0 = vlib_get_buffer (VM, ebi0); \ 592 if (PREDICT_FALSE (eb0 == 0)) \ 595 ip_len0 > DEFAULT_EXPORT_SIZE ? 
DEFAULT_EXPORT_SIZE : ip_len0; \ 596 copy3cachelines (eb0->data + eb0->current_length, ip0, ip_len0); \ 597 FIXUP_FUNC(eb0, p0); \ 598 eb0->current_length += DEFAULT_EXPORT_SIZE; \ 599 my_buf->records_in_this_buffer++; \ 600 if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS) \ 602 ioam_export_send_buffer (EM, VM, my_buf); \ 603 ioam_export_init_buffer (EM, VM, my_buf); \ 605 if (PREDICT_FALSE (((N)->flags & VLIB_NODE_FLAG_TRACE) \ 606 && (p0->flags & VLIB_BUFFER_IS_TRACED))) \ 608 export_trace_t *t = vlib_add_trace (VM, (N), p0, sizeof (*t)); \ 610 clib_net_to_host_u32 (ip0->V); \ 611 t->next_index = next0; \ 613 pkts_recorded += 1; \ 615 vlib_validate_buffer_enqueue_x1 (VM, N, next_index, \ 616 to_next, n_left_to_next, \ 619 vlib_put_next_frame (VM, N, next_index, n_left_to_next); \ 621 vlib_node_increment_counter (VM, export_node.index, \ 622 EXPORT_ERROR_RECORDED, pkts_recorded); \ 623 *(EM)->lockp[(VM)->cpu_index] = 0; \
vlib_frame_t * vlib_get_frame_to_node(vlib_main_t *vm, u32 to_node_index)
sll srl srl sll sra u16x4 i
#define DEFAULT_EXPORT_RECORDS
static f64 vlib_process_wait_for_event_or_clock(vlib_main_t *vm, f64 dt)
Suspend a cooperative multi-tasking thread. Waits for an event, or for the indicated number of seconds...
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers. Frees the entire buffer chain for each buffer.
static void ioam_export_reset_next_node(ioam_export_main_t *em)
Fixed length block allocator.
u32 export_process_node_index
static f64 vlib_time_now(vlib_main_t *vm)
static int ioam_export_send_buffer(ioam_export_main_t *em, vlib_main_t *vm, ioam_export_buffer_t *eb)
static int ioam_export_header_create(ioam_export_main_t *em, ip4_address_t *collector_address, ip4_address_t *src_address)
#define vec_validate_aligned(V, I, A)
Make sure vector is long enough for given index (no header, specified alignment)
ip4_address_t ipfix_collector
#define vec_pop(V)
Returns last element of a vector and decrements its length.
#define vec_reset_length(v)
Reset vector length to zero. NULL-pointer tolerant.
#define vec_add(V, E, N)
Add N elements to end of vector V (no header, unspecified alignment)
static uword ioam_export_process_common(ioam_export_main_t *em, vlib_main_t *vm, vlib_node_runtime_t *rt, vlib_frame_t *f, u32 index)
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
static int ioam_export_buffer_add_header(ioam_export_main_t *em, vlib_buffer_t *b0)
#define pool_alloc_aligned(P, N, A)
Allocate N more free elements to pool (general version).
static uword vlib_process_get_events(vlib_main_t *vm, uword **data_vector)
Return the first event type which has occurred and a vector of per-event data of that type...
ioam_export_buffer_t * buffer_pool
ipfix_data_packet_t ipfix
#define VLIB_BUFFER_TOTAL_LENGTH_VALID
struct ioam_export_buffer ioam_export_buffer_t
vlib_worker_thread_t * vlib_worker_threads
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
u16 current_length
Number of bytes between current data and the end of this buffer.
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
#define pool_put(P, E)
Free an object E in pool P.
static void ioam_export_header_cleanup(ioam_export_main_t *em, ip4_address_t *collector_address, ip4_address_t *src_address)
static u32 version_length(u16 length)
static void ioam_export_thread_buffer_free(ioam_export_main_t *em)
#define pool_get_aligned(P, E, A)
Allocate an object E from a pool P (general version).
u8 records_in_this_buffer
#define pool_free(p)
Free a pool.
static void ioam_export_set_next_node(ioam_export_main_t *em, u8 *next_node_name)
#define vec_free(V)
Free vector's memory (no header).
ip4_address_t src_address
#define clib_warning(format, args...)
#define clib_memcpy(a, b, c)
vlib_node_t * vlib_get_node_by_name(vlib_main_t *vm, u8 *name)
static ioam_export_buffer_t * ioam_export_get_my_buffer(ioam_export_main_t *em, u32 thread_id)
ethernet_main_t * ethernet_main
static void clib_mem_free(void *p)
static u32 ipfix_set_id_length(u16 set_id, u16 length)
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
static int ioam_export_init_buffer(ioam_export_main_t *em, vlib_main_t *vm, ioam_export_buffer_t *eb)
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
#define ip_csum_update(sum, old, new, type, field)
#define DEFAULT_EXPORT_SIZE
static void * clib_mem_alloc_aligned(uword size, uword align)
static int ioam_export_thread_buffer_init(ioam_export_main_t *em, vlib_main_t *vm)
void vlib_put_frame_to_node(vlib_main_t *vm, u32 to_node_index, vlib_frame_t *f)
#define CLIB_CACHE_LINE_BYTES
u32 flags
buffer flags: VLIB_BUFFER_IS_TRACED: trace this buffer.
static u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
static u16 ip4_header_checksum(ip4_header_t *i)
static u16 ip_csum_fold(ip_csum_t c)