24   #define foreach_vxlan_gpe_pop_ioam_v4_error \
25   _(POPPED, "good packets popped")
28   #define _(sym,string) string,
35   #define _(sym,str) VXLAN_GPE_POP_IOAM_V4_ERROR_##sym,
63   s = format (s, "VXLAN_GPE_IOAM_POP: next_index %d len %d traced %d",
81   s = (*hm->trace[type0]) (s, opt0);
86   format (s, "\n unrecognized option %d length %d", type0,
152  if (gpe_ioam0->length > clib_net_to_host_u16 (ip0->length))
154  *next0 = VXLAN_GPE_INPUT_NEXT_DROP;
159  while (opt0 < limit0)
175  *next0 = VXLAN_GPE_INPUT_NEXT_DROP;
190  decap_next_node_list[gpe_ioam0->protocol] : VXLAN_GPE_INPUT_NEXT_DROP;
233  u32 n_left_from, next_index, *from, *to_next;
241  while (n_left_from > 0)
247  while (n_left_from >= 4 && n_left_to_next >= 2)
284  n_left_to_next, bi0, bi1, next0,
288  while (n_left_from > 0 && n_left_to_next > 0)
307  n_left_to_next, bi0, next0);
327  .name = "vxlan-gpe-pop-ioam-v4",
328  .vector_size = sizeof (u32),
338  #define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,

u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
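A minimal sketch (standard VPP idiom, not lifted from this file) of how a node uses the VLIB_BUFFER_IS_TRACED flag to guard vlib_add_trace; the per-packet trace struct name is hypothetical here.

    /* Emit a trace record only when this buffer was marked for tracing. */
    if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
      {
        /* vxlan_gpe_pop_ioam_v4_trace_t is assumed; use the node's real
         * trace struct in practice. */
        vxlan_gpe_pop_ioam_v4_trace_t *tr =
          vlib_add_trace (vm, node, b0, sizeof (*tr));
        tr->next_index = next0;
      }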
#define clib_memcpy_fast(a, b, c)
u8 * format_vxlan_gpe_pop_ioam_v4_trace(u8 *s, va_list *args)
int(* pop_options[256])(ip4_header_t *ip, vxlan_gpe_ioam_option_t *opt)
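pop_options is a 256-entry table of pop handlers indexed by iOAM option type. The fragment below is a sketch of the option walk implied by the line 159-175 excerpts above; opt0/limit0 bound the options area, ip0 is the enclosing IPv4 header, hm is the vxlan_gpe_ioam_main, and the exact stride and error handling in the real node may differ.

    while (opt0 < limit0)
      {
        u8 type0 = opt0->type;

        if (hm->pop_options[type0])
          {
            /* A handler returning < 0 means the option could not be popped. */
            if ((*hm->pop_options[type0]) (ip0, opt0) < 0)
              {
                *next0 = VXLAN_GPE_INPUT_NEXT_DROP;
                break;
              }
          }
        /* Assumes opt0->length counts only the option body, so the option
         * header size is added when stepping to the next option. */
        opt0 = (vxlan_gpe_ioam_option_t *)
          ((u8 *) opt0 + opt0->length + sizeof (vxlan_gpe_ioam_option_t));
      }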
static void vxlan_gpe_ioam_pop_v4(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_buffer_t *b0)
u8 protocol
see vxlan_gpe_protocol_t
static void vxlan_gpe_pop_ioam_v4_one_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vxlan_gpe_main_t *ngm, vlib_buffer_t *b0, u32 *next0)
Process the iOAM options of one IPv4 VXLAN-GPE packet and choose the next node.
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
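Typical use of the prefetch helpers inside the two-packets-at-a-time loop (standard VPP pattern, shown here as an illustration rather than a copy of this node): prefetch the metadata and first data cache lines of the buffers that will be handled on the next iteration.

    {
      vlib_buffer_t *p2, *p3;

      /* Assumes at least four buffer indices remain in 'from'. */
      p2 = vlib_get_buffer (vm, from[2]);
      p3 = vlib_get_buffer (vm, from[3]);

      vlib_prefetch_buffer_header (p2, LOAD);
      vlib_prefetch_buffer_header (p3, LOAD);

      CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
    }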
vxlan_gpe_ioam_main_t vxlan_gpe_ioam_main
#define foreach_vxlan_gpe_pop_ioam_v4_error
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
vxlan_gpe_main_t vxlan_gpe_main
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
#define VLIB_REGISTER_NODE(x,...)
#define CLIB_PREFETCH(addr, size, type)
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
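These frame helpers combine into the canonical VPP node dispatch skeleton. The sketch below shows the single-buffer loop only (the per-packet iOAM work is elided); the quad/dual loop seen in the line 247-284 excerpts follows the same shape with vlib_validate_buffer_enqueue_x2.

    u32 n_left_from, next_index, *from, *to_next;

    from = vlib_frame_vector_args (from_frame);
    n_left_from = from_frame->n_vectors;
    next_index = node->cached_next_index;

    while (n_left_from > 0)
      {
        u32 n_left_to_next;

        vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

        while (n_left_from > 0 && n_left_to_next > 0)
          {
            u32 bi0, next0 = VXLAN_GPE_INPUT_NEXT_DROP;
            vlib_buffer_t *b0;

            bi0 = from[0];
            to_next[0] = bi0;
            from += 1;
            to_next += 1;
            n_left_from -= 1;
            n_left_to_next -= 1;

            b0 = vlib_get_buffer (vm, bi0);

            /* ... per-packet work: pop iOAM options on b0, set next0 ... */

            vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                             n_left_to_next, bi0, next0);
          }

        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      }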
vlib_main_t
vlib_node_runtime_t * node
#define foreach_vxlan_gpe_input_next
next nodes for VXLAN GPE input
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
vxlan_gpe_main_t
Struct for VXLAN GPE node state.
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
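A generic illustration of vlib_buffer_get_current plus vlib_buffer_advance, not the exact pop logic of this file: once the options have been processed, the buffer's current-data pointer is moved past the iOAM header so the next node sees the encapsulated payload.

    /* Assumes gpe_ioam0->length is the byte length of the iOAM header
     * including its options, consistent with the length check at line 152. */
    vxlan_gpe_ioam_hdr_t *gpe_ioam0 = vlib_buffer_get_current (b0);
    vlib_buffer_advance (b0, gpe_ioam0->length);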
static uword vxlan_gpe_pop_ioam_v4(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
vlib_node_registration_t vxlan_gpe_pop_ioam_v4_node
(constructor) VLIB_REGISTER_NODE (vxlan_gpe_pop_ioam_v4_node)
static uword vxlan_gpe_pop_ioam(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame, u8 is_ipv6)
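A plausible shape of the IPv4 entry point and its registration, assuming the v4 function simply calls the shared handler with is_ipv6 = 0. The name, vector_size, and next-node macro are quoted in the excerpts above; the remaining fields follow the usual VPP conventions and are not verified against this file.

    static uword
    vxlan_gpe_pop_ioam_v4 (vlib_main_t * vm,
                           vlib_node_runtime_t * node,
                           vlib_frame_t * from_frame)
    {
      return vxlan_gpe_pop_ioam (vm, node, from_frame, 0 /* is_ipv6 */ );
    }

    VLIB_REGISTER_NODE (vxlan_gpe_pop_ioam_v4_node) = {
      .function = vxlan_gpe_pop_ioam_v4,
      .name = "vxlan-gpe-pop-ioam-v4",
      .vector_size = sizeof (u32),
      .format_trace = format_vxlan_gpe_pop_ioam_v4_trace,
      .type = VLIB_NODE_TYPE_INTERNAL,
      .n_errors = ARRAY_LEN (vxlan_gpe_pop_ioam_v4_error_strings),
      .error_strings = vxlan_gpe_pop_ioam_v4_error_strings,
      .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
      .next_nodes = {
    #define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
        foreach_vxlan_gpe_input_next
    #undef _
      },
    };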
vlib_buffer_t
VLIB buffer representation.
vxlan_gpe_ioam_hdr_t
VXLAN GPE Extension (iOAM) Header definition.
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
static char * vxlan_gpe_pop_ioam_v4_error_strings[]
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
vxlan_gpe_pop_ioam_v4_error_t
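How the error macro expands into both the counter string table and the error enum, reconstructed from the line 24-35 fragments above; the trailing N_ERROR member follows the usual VPP naming convention.

    #define foreach_vxlan_gpe_pop_ioam_v4_error \
    _(POPPED, "good packets popped")

    static char *vxlan_gpe_pop_ioam_v4_error_strings[] = {
    #define _(sym,string) string,
      foreach_vxlan_gpe_pop_ioam_v4_error
    #undef _
    };

    typedef enum
    {
    #define _(sym,str) VXLAN_GPE_POP_IOAM_V4_ERROR_##sym,
      foreach_vxlan_gpe_pop_ioam_v4_error
    #undef _
      VXLAN_GPE_POP_IOAM_V4_N_ERROR,
    } vxlan_gpe_pop_ioam_v4_error_t;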
static void vxlan_gpe_pop_ioam_v4_two_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vxlan_gpe_main_t *ngm, vlib_buffer_t *b0, vlib_buffer_t *b1, u32 *next0, u32 *next1)
#define CLIB_CACHE_LINE_BYTES
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
u8 *(* trace[256])(u8 *s, vxlan_gpe_ioam_option_t *opt)
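trace is the per-option-type formatter table used by the trace formatter; the loop below sketches the dispatch implied by the line 81 and 86 excerpts, with hm the vxlan_gpe_ioam_main and opt0/limit0 walking the recorded options. The option stride mirrors the assumption used in the pop_options sketch above.

    while (opt0 < limit0)
      {
        u8 type0 = opt0->type;

        if (hm->trace[type0])
          s = (*hm->trace[type0]) (s, opt0);
        else
          s = format (s, "\n unrecognized option %d length %d",
                      type0, opt0->length);

        opt0 = (vxlan_gpe_ioam_option_t *)
          ((u8 *) opt0 + opt0->length + sizeof (vxlan_gpe_ioam_option_t));
      }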