15 #ifndef __included_ioam_export_h__ 16 #define __included_ioam_export_h__ 82 #define DEFAULT_EXPORT_SIZE (3 * CLIB_CACHE_LINE_BYTES) 87 #define DEFAULT_EXPORT_RECORDS 7 123 b0->
flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
186 for (i = 0; i < no_of_threads; i++)
190 memset (eb, 0,
sizeof (*eb));
204 #define IPFIX_IOAM_EXPORT_ID 272 205 #define IPFIX_VXLAN_IOAM_EXPORT_ID 273 261 udp->
src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
262 udp->
dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
276 DEFAULT_EXPORT_SIZE)));
282 DEFAULT_EXPORT_SIZE)));
288 DEFAULT_EXPORT_SIZE));
331 (sizeof (*ip) +
sizeof (*udp) +
357 #define EXPORT_TIMEOUT (20.0) 358 #define THREAD_PERIOD (30.0) 367 uword *event_data = 0;
370 u32 *vec_buffer_indices = 0;
371 u32 *vec_buffer_to_be_sent = 0;
372 u32 *thread_index = 0;
373 u32 new_pool_index = 0;
380 clib_warning (
"bogus kickoff event received, %d", event_type);
407 if (*em->
lockp[i] == 1)
415 memset (new_eb, 0,
sizeof (*new_eb));
419 vec_add (vec_buffer_indices, &new_pool_index, 1);
432 if (vec_len (thread_index) != 0)
437 for (i = 0; i <
vec_len (thread_index); i++)
439 while (__sync_lock_test_and_set (em->
lockp[thread_index[i]], 1))
443 *em->
lockp[thread_index[
i]] = 0;
447 for (i = 0; i <
vec_len (vec_buffer_to_be_sent); i++)
458 for (i = 0; i <
vec_len (vec_buffer_indices); i++)
471 #define ioam_export_node_common(EM, VM, N, F, HTYPE, L, V, NEXT, FIXUP_FUNC) \ 473 u32 n_left_from, *from, *to_next; \ 474 export_next_t next_index; \ 475 u32 pkts_recorded = 0; \ 476 ioam_export_buffer_t *my_buf = 0; \ 477 vlib_buffer_t *eb0 = 0; \ 479 from = vlib_frame_vector_args (F); \ 480 n_left_from = (F)->n_vectors; \ 481 next_index = (N)->cached_next_index; \ 482 while (__sync_lock_test_and_set ((EM)->lockp[(VM)->thread_index], 1)); \ 483 my_buf = ioam_export_get_my_buffer (EM, (VM)->thread_index); \ 484 my_buf->touched_at = vlib_time_now (VM); \ 485 while (n_left_from > 0) \ 487 u32 n_left_to_next; \ 488 vlib_get_next_frame (VM, N, next_index, to_next, n_left_to_next); \ 489 while (n_left_from >= 4 && n_left_to_next >= 2) \ 495 vlib_buffer_t *p0, *p1; \ 496 u32 ip_len0, ip_len1; \ 498 vlib_buffer_t *p2, *p3; \ 499 p2 = vlib_get_buffer (VM, from[2]); \ 500 p3 = vlib_get_buffer (VM, from[3]); \ 501 vlib_prefetch_buffer_header (p2, LOAD); \ 502 vlib_prefetch_buffer_header (p3, LOAD); \ 503 CLIB_PREFETCH (p2->data, 3 * CLIB_CACHE_LINE_BYTES, LOAD); \ 504 CLIB_PREFETCH (p3->data, 3 * CLIB_CACHE_LINE_BYTES, LOAD); \ 506 to_next[0] = bi0 = from[0]; \ 507 to_next[1] = bi1 = from[1]; \ 511 n_left_to_next -= 2; \ 512 p0 = vlib_get_buffer (VM, bi0); \ 513 p1 = vlib_get_buffer (VM, bi1); \ 514 ip0 = vlib_buffer_get_current (p0); \ 515 ip1 = vlib_buffer_get_current (p1); \ 517 clib_net_to_host_u16 (ip0->L) + sizeof (HTYPE); \ 519 clib_net_to_host_u16 (ip1->L) + sizeof (HTYPE); \ 520 ebi0 = my_buf->buffer_index; \ 521 eb0 = vlib_get_buffer (VM, ebi0); \ 522 if (PREDICT_FALSE (eb0 == 0)) \ 525 ip_len0 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len0; \ 527 ip_len1 > DEFAULT_EXPORT_SIZE ? 
DEFAULT_EXPORT_SIZE : ip_len1; \ 528 copy3cachelines (eb0->data + eb0->current_length, ip0, ip_len0); \ 529 FIXUP_FUNC(eb0, p0); \ 530 eb0->current_length += DEFAULT_EXPORT_SIZE; \ 531 my_buf->records_in_this_buffer++; \ 532 if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS) \ 534 ioam_export_send_buffer (EM, VM, my_buf); \ 535 ioam_export_init_buffer (EM, VM, my_buf); \ 537 ebi0 = my_buf->buffer_index; \ 538 eb0 = vlib_get_buffer (VM, ebi0); \ 539 if (PREDICT_FALSE (eb0 == 0)) \ 541 copy3cachelines (eb0->data + eb0->current_length, ip1, ip_len1); \ 542 FIXUP_FUNC(eb0, p1); \ 543 eb0->current_length += DEFAULT_EXPORT_SIZE; \ 544 my_buf->records_in_this_buffer++; \ 545 if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS) \ 547 ioam_export_send_buffer (EM, VM, my_buf); \ 548 ioam_export_init_buffer (EM, VM, my_buf); \ 550 pkts_recorded += 2; \ 551 if (PREDICT_FALSE (((node)->flags & VLIB_NODE_FLAG_TRACE))) \ 553 if (p0->flags & VLIB_BUFFER_IS_TRACED) \ 555 export_trace_t *t = \ 556 vlib_add_trace (VM, node, p0, sizeof (*t)); \ 558 clib_net_to_host_u32 (ip0->V); \ 559 t->next_index = next0; \ 561 if (p1->flags & VLIB_BUFFER_IS_TRACED) \ 563 export_trace_t *t = \ 564 vlib_add_trace (VM, N, p1, sizeof (*t)); \ 566 clib_net_to_host_u32 (ip1->V); \ 567 t->next_index = next1; \ 571 vlib_validate_buffer_enqueue_x2 (VM, N, next_index, \ 572 to_next, n_left_to_next, \ 573 bi0, bi1, next0, next1); \ 575 while (n_left_from > 0 && n_left_to_next > 0) \ 587 n_left_to_next -= 1; \ 588 p0 = vlib_get_buffer (VM, bi0); \ 589 ip0 = vlib_buffer_get_current (p0); \ 591 clib_net_to_host_u16 (ip0->L) + sizeof (HTYPE); \ 592 ebi0 = my_buf->buffer_index; \ 593 eb0 = vlib_get_buffer (VM, ebi0); \ 594 if (PREDICT_FALSE (eb0 == 0)) \ 597 ip_len0 > DEFAULT_EXPORT_SIZE ? 
DEFAULT_EXPORT_SIZE : ip_len0; \ 598 copy3cachelines (eb0->data + eb0->current_length, ip0, ip_len0); \ 599 FIXUP_FUNC(eb0, p0); \ 600 eb0->current_length += DEFAULT_EXPORT_SIZE; \ 601 my_buf->records_in_this_buffer++; \ 602 if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS) \ 604 ioam_export_send_buffer (EM, VM, my_buf); \ 605 ioam_export_init_buffer (EM, VM, my_buf); \ 607 if (PREDICT_FALSE (((N)->flags & VLIB_NODE_FLAG_TRACE) \ 608 && (p0->flags & VLIB_BUFFER_IS_TRACED))) \ 610 export_trace_t *t = vlib_add_trace (VM, (N), p0, sizeof (*t)); \ 612 clib_net_to_host_u32 (ip0->V); \ 613 t->next_index = next0; \ 615 pkts_recorded += 1; \ 617 vlib_validate_buffer_enqueue_x1 (VM, N, next_index, \ 618 to_next, n_left_to_next, \ 621 vlib_put_next_frame (VM, N, next_index, n_left_to_next); \ 623 vlib_node_increment_counter (VM, export_node.index, \ 624 EXPORT_ERROR_RECORDED, pkts_recorded); \ 625 *(EM)->lockp[(VM)->thread_index] = 0; \
#define DEFAULT_EXPORT_RECORDS
static f64 vlib_process_wait_for_event_or_clock(vlib_main_t *vm, f64 dt)
Suspend a cooperative multi-tasking thread. Waits for an event, or for the indicated number of seconds...
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers. Frees the entire buffer chain for each buffer.
CLIB_CACHE_LINE_ALIGN_MARK(cacheline0)
Required for pool_get_aligned.
static void ioam_export_reset_next_node(ioam_export_main_t *em)
Fixed length block allocator.
u32 export_process_node_index
static f64 vlib_time_now(vlib_main_t *vm)
static int ioam_export_send_buffer(ioam_export_main_t *em, vlib_main_t *vm, ioam_export_buffer_t *eb)
static int ioam_export_header_create(ioam_export_main_t *em, ip4_address_t *collector_address, ip4_address_t *src_address)
#define vec_validate_aligned(V, I, A)
Make sure vector is long enough for given index (no header, specified alignment)
ip4_address_t ipfix_collector
#define vec_pop(V)
Returns last element of a vector and decrements its length.
#define vec_reset_length(v)
Reset vector length to zero. NULL-pointer tolerant.
#define vec_add(V, E, N)
Add N elements to end of vector V (no header, unspecified alignment)
static uword ioam_export_process_common(ioam_export_main_t *em, vlib_main_t *vm, vlib_node_runtime_t *rt, vlib_frame_t *f, u32 index)
i16 current_data
Signed offset in data[], pre_data[] that we are currently processing.
static int ioam_export_buffer_add_header(ioam_export_main_t *em, vlib_buffer_t *b0)
#define pool_alloc_aligned(P, N, A)
Allocate N more free elements to pool (general version).
static uword vlib_process_get_events(vlib_main_t *vm, uword **data_vector)
Return the first event type which has occurred and a vector of per-event data of that type...
ioam_export_buffer_t * buffer_pool
ipfix_data_packet_t ipfix
vlib_frame_t * vlib_get_frame_to_node(vlib_main_t *vm, u32 to_node_index)
struct ioam_export_buffer ioam_export_buffer_t
vlib_worker_thread_t * vlib_worker_threads
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
u16 current_length
Number of bytes between current data and the end of this buffer.
void vlib_put_frame_to_node(vlib_main_t *vm, u32 to_node_index, vlib_frame_t *f)
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
#define pool_put(P, E)
Free an object E in pool P.
static void ioam_export_header_cleanup(ioam_export_main_t *em, ip4_address_t *collector_address, ip4_address_t *src_address)
static u32 version_length(u16 length)
static void ioam_export_thread_buffer_free(ioam_export_main_t *em)
#define pool_get_aligned(P, E, A)
Allocate an object E from a pool P (general version).
u8 records_in_this_buffer
#define pool_free(p)
Free a pool.
The fine-grained event logger allows lightweight, thread-safe event logging at minimum cost...
static void ioam_export_set_next_node(ioam_export_main_t *em, u8 *next_node_name)
#define vec_free(V)
Free vector's memory (no header).
ip4_address_t src_address
#define clib_warning(format, args...)
#define clib_memcpy(a, b, c)
vlib_node_t * vlib_get_node_by_name(vlib_main_t *vm, u8 *name)
static ioam_export_buffer_t * ioam_export_get_my_buffer(ioam_export_main_t *em, u32 thread_id)
ethernet_main_t * ethernet_main
static void clib_mem_free(void *p)
static u32 ipfix_set_id_length(u16 set_id, u16 length)
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
static int ioam_export_init_buffer(ioam_export_main_t *em, vlib_main_t *vm, ioam_export_buffer_t *eb)
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
#define ip_csum_update(sum, old, new, type, field)
#define DEFAULT_EXPORT_SIZE
static void * clib_mem_alloc_aligned(uword size, uword align)
static int ioam_export_thread_buffer_init(ioam_export_main_t *em, vlib_main_t *vm)
#define CLIB_CACHE_LINE_BYTES
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
static u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
static u16 ip4_header_checksum(ip4_header_t *i)
static u16 ip_csum_fold(ip_csum_t c)