#ifndef included_vlib_buffer_node_h
#define included_vlib_buffer_node_h

#define vlib_validate_buffer_enqueue_x2(vm,node,next_index,to_next,n_left_to_next,bi0,bi1,next0,next1) \
do {                                                                      \
  /* Zero iff both buffers really belong to the speculated next_index. */ \
  int enqueue_code = (next0 != next_index) + 2*(next1 != next_index);     \
                                                                          \
  if (PREDICT_FALSE (enqueue_code != 0))                                  \
    switch (enqueue_code)                                                 \
      {                                                                   \
      case 1: /* only bi0 was mis-speculated */                          \
        /* ... */                                                         \
        n_left_to_next += 1;                                              \
        vlib_set_next_frame_buffer (vm, node, next0, bi0);                \
        break;                                                            \
      case 2: /* only bi1 was mis-speculated */                          \
        /* ... */                                                         \
        n_left_to_next += 1;                                              \
        vlib_set_next_frame_buffer (vm, node, next1, bi1);                \
        break;                                                            \
      case 3: /* neither buffer goes to next_index */                     \
        /* ... */                                                         \
        n_left_to_next += 2;                                              \
        vlib_set_next_frame_buffer (vm, node, next0, bi0);                \
        vlib_set_next_frame_buffer (vm, node, next1, bi1);                \
        if (next0 == next1)                                               \
          {                                                               \
            /* Both went the same way: switch speculation to next1. */    \
            vlib_put_next_frame (vm, node, next_index,                    \
                                 n_left_to_next);                         \
            next_index = next1;                                           \
            vlib_get_next_frame (vm, node, next_index, to_next,           \
                                 n_left_to_next);                         \
          }                                                               \
      }                                                                   \
} while (0)

#define vlib_validate_buffer_enqueue_x4(vm,node,next_index,to_next,n_left_to_next,bi0,bi1,bi2,bi3,next0,next1,next2,next3) \
do {                                                                      \
  /* Non-zero when at least one of the four buffers was mis-speculated. */ \
  u32 fix_speculation = (next_index ^ next0) | (next_index ^ next1)       \
    | (next_index ^ next2) | (next_index ^ next3);                        \
  if (PREDICT_FALSE (fix_speculation))                                    \
    {                                                                     \
      /* Undo the speculative enqueue, then re-enqueue each buffer. */    \
      /* ... */                                                           \
      n_left_to_next += 4;                                                \
      if (next_index == next0) { /* ... keep bi0 in this frame ... */ }   \
      else vlib_set_next_frame_buffer (vm, node, next0, bi0);             \
      if (next_index == next1) { /* ... */ }                              \
      else vlib_set_next_frame_buffer (vm, node, next1, bi1);             \
      if (next_index == next2) { /* ... */ }                              \
      else vlib_set_next_frame_buffer (vm, node, next2, bi2);             \
      if (next_index == next3) { /* ... */ }                              \
      else vlib_set_next_frame_buffer (vm, node, next3, bi3);             \
      /* Last two packets went to the same node: change speculation. */   \
      if (next2 == next3)                                                 \
        {                                                                 \
          vlib_put_next_frame (vm, node, next_index, n_left_to_next);     \
          next_index = next3;                                             \
          vlib_get_next_frame (vm, node, next_index, to_next,             \
                               n_left_to_next);                           \
        }                                                                 \
    }                                                                     \
} while (0)

#define vlib_validate_buffer_enqueue_x1(vm,node,next_index,to_next,n_left_to_next,bi0,next0) \
do {                                                                      \
  if (PREDICT_FALSE (next0 != next_index))                                \
    {                                                                     \
      vlib_put_next_frame (vm, node, next_index, n_left_to_next + 1);     \
      next_index = next0;                                                 \
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); \
                                                                          \
      /* ... re-enqueue bi0 into the newly selected frame ... */          \
      n_left_to_next -= 1;                                                \
    }                                                                     \
} while (0)

always_inline uword
generic_buffer_node_inline (vlib_main_t * vm,
                            vlib_node_runtime_t * node,
                            vlib_frame_t * frame,
                            uword sizeof_trace,
                            void *opaque1, uword opaque2,
                            void (*two_buffers) (vlib_main_t * vm,
                                                 void *opaque1, uword opaque2,
                                                 vlib_buffer_t * b0,
                                                 vlib_buffer_t * b1,
                                                 u32 * next0, u32 * next1),
                            void (*one_buffer) (vlib_main_t * vm,
                                                void *opaque1, uword opaque2,
                                                vlib_buffer_t * b0,
                                                u32 * next0))
{
  u32 n_left_from, *from, *to_next;
  u32 next_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* Dual loop: two buffers per iteration. */
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t *p0, *p1;
          u32 pi0, pi1, next0, next1;

          /* Speculatively enqueue both buffers to the current next frame. */
          pi0 = to_next[0] = from[0];
          pi1 = to_next[1] = from[1];
          /* ... advance from/to_next, fetch p0/p1 ... */
          two_buffers (vm, opaque1, opaque2, p0, p1, &next0, &next1);
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           pi0, pi1, next0, next1);
        }

      /* Single loop: one buffer per iteration. */
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *p0;
          u32 pi0, next0;

          /* ... speculative enqueue of pi0, fetch p0 ... */
          one_buffer (vm, opaque1, opaque2, p0, &next0);
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           pi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* ... optional tracing via vlib_trace_frame_buffers_only ... */
  return frame->n_vectors;
}
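For orientation, here is a minimal sketch of a node dispatch function written in this style: each buffer is speculatively enqueued to the cached next index, and vlib_validate_buffer_enqueue_x1 repairs the enqueue whenever the packet actually goes elsewhere. The function name my_node_fn and the next index MY_NEXT_DROP are hypothetical placeholders, not symbols from this header.

static uword
my_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, *to_next;
  u32 next_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, next0;
          vlib_buffer_t *b0;

          /* Speculatively enqueue bi0 to the current next frame. */
          bi0 = to_next[0] = from[0];
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          /* ... examine b0 and choose the real next node ... */
          next0 = MY_NEXT_DROP;       /* hypothetical next index */

          /* Fix the enqueue if next0 differs from the speculated next_index. */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

The speculation is cheap because consecutive packets usually follow the same graph edge, so the common case is a single store into the already open frame.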
static_always_inline void
vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
                             u32 * buffers, u16 * nexts, uword count)
{
  u32 *to_next, n_left_to_next, max;
  u16 next_index;

  next_index = nexts[0];
  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
  max = clib_min (n_left_to_next, count);

  while (count)
    {
      u32 n_enqueued;

      if ((nexts[0] != next_index) || n_left_to_next == 0)
        {
          vlib_put_next_frame (vm, node, next_index, n_left_to_next);
          next_index = nexts[0];
          vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
          max = clib_min (n_left_to_next, count);
        }

      /* Count how many leading entries of nexts[] equal nexts[0]. */
#if defined(CLIB_HAVE_VEC512)
      u16x32 next32 = u16x32_load_unaligned (nexts);
      next32 = (next32 == u16x32_splat (next32[0]));
      /* ... derive n_enqueued from the compare mask (u16x32_msb_mask) ... */
#elif defined(CLIB_HAVE_VEC256)
      u16x16 next16 = u16x16_load_unaligned (nexts);
      next16 = (next16 == u16x16_splat (next16[0]));
      /* ... derive n_enqueued from the compare mask (u8x32_msb_mask) ... */
#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
      u16x8 next8 = u16x8_load_unaligned (nexts);
      next8 = (next8 == u16x8_splat (next8[0]));
      /* ... derive n_enqueued from the compare mask (u8x16_msb_mask) ... */
#else
      u16 x = 0;
      x |= next_index ^ nexts[1];
      x |= next_index ^ nexts[2];
      x |= next_index ^ nexts[3];
      n_enqueued = (x == 0) ? 4 : 1;
#endif

      if (PREDICT_FALSE (n_enqueued > max))
        n_enqueued = max;

      /* Copy matching buffer indices into the frame, widest blocks first. */
#ifdef CLIB_HAVE_VEC512
      if (n_enqueued >= 32)
        {
          vlib_buffer_copy_indices (to_next, buffers, 32);
          /* ... advance buffers/nexts/to_next ... */
          n_left_to_next -= 32;
          count -= 32;
          continue;
        }
#endif
#ifdef CLIB_HAVE_VEC256
      if (n_enqueued >= 16)
        {
          vlib_buffer_copy_indices (to_next, buffers, 16);
          /* ... */
          n_left_to_next -= 16;
          count -= 16;
          continue;
        }
#endif
#ifdef CLIB_HAVE_VEC128
      /* ... 8-wide copy ... */
#endif
      /* ... 4-wide copy, then the scalar tail ... */
      to_next[0] = buffers[0];
      /* ... advance pointers and decrement counters by 1 ... */
    }

  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
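As a contrast with the speculation macros above, a hedged sketch of the dispatch style this helper enables: the node records a per-packet next index in a nexts[] array and hands the whole vector over in one call. my_nexts_node_fn and MY_NEXT_DROP are hypothetical names used only for illustration.

static uword
my_nexts_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;

  while (n_left > 0)
    {
      vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);

      /* ... classify b0 ... */
      next[0] = MY_NEXT_DROP;         /* hypothetical next index */

      from += 1;
      next += 1;
      n_left -= 1;
    }

  vlib_buffer_enqueue_to_next (vm, node, vlib_frame_vector_args (frame),
                               nexts, frame->n_vectors);
  return frame->n_vectors;
}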
static_always_inline void
vlib_buffer_enqueue_to_single_next (vlib_main_t * vm,
                                    vlib_node_runtime_t * node,
                                    u32 * buffers, u16 next_index, u32 count)
{
  u32 *to_next, n_left_to_next, n_enq;

  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

  if (PREDICT_TRUE (n_left_to_next >= count))
    {
      /* Common case: the whole vector fits into the current frame. */
      vlib_buffer_copy_indices (to_next, buffers, count);
      n_left_to_next -= count;
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      return;
    }

  /* Otherwise fill the current frame and keep requesting new ones. */
  n_enq = n_left_to_next;
next:
  vlib_buffer_copy_indices (to_next, buffers, n_enq);
  n_left_to_next -= n_enq;

  if (PREDICT_FALSE (count > n_enq))
    {
      count -= n_enq;
      buffers += n_enq;

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      n_enq = clib_min (n_left_to_next, count);
      goto next;
    }
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
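When every packet in a frame goes to the same next node, the per-packet nexts[] array is unnecessary and the single-next variant can be used instead. A small sketch, assuming vm, node and frame come from the enclosing node function and MY_NEXT_OUTPUT is a hypothetical next index:

  u32 *buffers = vlib_frame_vector_args (frame);

  /* All buffers in this frame take the same edge. */
  vlib_buffer_enqueue_to_single_next (vm, node, buffers,
                                      MY_NEXT_OUTPUT /* hypothetical */,
                                      frame->n_vectors);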
static_always_inline u32
vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
                               u32 * buffer_indices, u16 * thread_indices,
                               u32 n_packets, int drop_on_congestion)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  vlib_frame_queue_main_t *fqm;
  vlib_frame_queue_per_thread_data_t *ptd;
  u32 n_left = n_packets;
  u32 drop_list[VLIB_FRAME_SIZE], *dbi = drop_list, n_drop = 0;
  vlib_frame_queue_elt_t *hf = 0;
  u32 n_left_to_next_thread = 0, *to_next_thread = 0;
  u32 next_thread_index, current_thread_index = ~0;

  fqm = vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
  ptd = vec_elt_at_index (fqm->per_thread_data, vm->thread_index);

  while (n_left)
    {
      next_thread_index = thread_indices[0];

      if (next_thread_index != current_thread_index)
        {
          /* Switching target thread: optionally drop on congestion, then
             grab a handoff-queue element for the new thread. */
          if (drop_on_congestion &&
              is_vlib_frame_queue_congested (frame_queue_index,
                                             next_thread_index,
                                             fqm->queue_hi_thresh,
                                             ptd->congested_handoff_queue_by_thread_index))
            {
              dbi[0] = buffer_indices[0];
              dbi++;
              n_drop++;
              goto next;
            }
          /* ... hf = vlib_get_worker_handoff_queue_elt (...); point
             to_next_thread at hf->buffer_index and reset
             n_left_to_next_thread ... */
          current_thread_index = next_thread_index;
        }

      to_next_thread[0] = buffer_indices[0];
      to_next_thread++;
      n_left_to_next_thread--;

      if (n_left_to_next_thread == 0)
        {
          /* Element is full: hand it off and force re-selection. */
          /* ... vlib_put_frame_queue_elt (hf) ... */
          current_thread_index = ~0;
        }

    next:
      thread_indices += 1;
      buffer_indices += 1;
      n_left -= 1;
    }

  /* ... flush any partially filled handoff-queue elements ... */

  if (drop_on_congestion && n_drop)
    vlib_buffer_free (vm, drop_list, n_drop);

  return n_packets - n_drop;
}
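A hedged sketch of a worker-handoff node built on this helper: compute a target thread per packet, then enqueue the vector through the frame queue identified by a hypothetical my_frame_queue_index. The fixed choice of worker 1 and the MY_ERROR_CONGESTION counter index are illustrative placeholders.

static uword
my_handoff_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                    vlib_frame_t * frame)
{
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_packets = frame->n_vectors;
  u16 thread_indices[VLIB_FRAME_SIZE];
  u32 n_enq, i;

  for (i = 0; i < n_packets; i++)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, from[i]);

      /* ... hash b (e.g. on its 5-tuple) to pick a worker thread ... */
      thread_indices[i] = 1;          /* hypothetical: everything to worker 1 */
    }

  n_enq = vlib_buffer_enqueue_to_thread (vm, my_frame_queue_index, from,
                                         thread_indices, n_packets,
                                         1 /* drop_on_congestion */);

  if (n_enq < n_packets)
    vlib_node_increment_counter (vm, node->node_index,
                                 MY_ERROR_CONGESTION, n_packets - n_enq);
  return n_packets;
}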
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers. Frees the entire buffer chain for each buffer.
u16x16
u64x4
static_always_inline u32 u8x32_msb_mask(u8x32 v)
u32 buffer_index[VLIB_FRAME_SIZE]
vlib_main_t ** vlib_mains
static vlib_frame_queue_t * is_vlib_frame_queue_congested(u32 frame_queue_index, u32 index, u32 queue_hi_thresh, vlib_frame_queue_t **handoff_queue_by_worker_index)
#define count_trailing_zeros(x)
volatile uword check_frame_queues
#define static_always_inline
vlib_frame_queue_elt_t ** handoff_queue_elt_by_thread_index
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
static_always_inline void vlib_buffer_enqueue_to_single_next(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 next_index, u32 count)
static vlib_frame_queue_elt_t * vlib_get_worker_handoff_queue_elt(u32 frame_queue_index, u32 vlib_worker_index, vlib_frame_queue_elt_t **handoff_queue_elt_by_worker_index)
static_always_inline u16 u8x16_msb_mask(u8x16 v)
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
#define CLIB_PREFETCH(addr, size, type)
static_always_inline void vlib_buffer_enqueue_to_next(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 *nexts, uword count)
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
vlib_frame_queue_per_thread_data_t * per_thread_data
vlib_frame_queue_t ** congested_handoff_queue_by_thread_index
u16 cached_next_index
Next frame index that vector arguments were enqueued to the last time this node ran.
vlib_frame_queue_main_t * frame_queue_mains
foreach_avx512_vec512i
foreach_avx512_vec512u
static_always_inline u32 u16x32_msb_mask(u16x32 v)
static uword generic_buffer_node_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, uword sizeof_trace, void *opaque1, uword opaque2, void(*two_buffers)(vlib_main_t *vm, void *opaque1, uword opaque2, vlib_buffer_t *b0, vlib_buffer_t *b1, u32 *next0, u32 *next1), void(*one_buffer)(vlib_main_t *vm, void *opaque1, uword opaque2, vlib_buffer_t *b0, u32 *next0))
void vlib_trace_frame_buffers_only(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, uword n_buffers, uword next_buffer_stride, uword n_buffer_data_bytes_in_trace)
static_always_inline void vlib_buffer_copy_indices(u32 *dst, u32 *src, u32 n_indices)
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
vlib_buffer_t
VLIB buffer representation.
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
static void vlib_put_frame_queue_elt(vlib_frame_queue_elt_t *hf)
static_always_inline u32 vlib_buffer_enqueue_to_thread(vlib_main_t *vm, u32 frame_queue_index, u32 *buffer_indices, u16 *thread_indices, u32 n_packets, int drop_on_congestion)
static vlib_thread_main_t * vlib_get_thread_main()
u16 flags
Copy of main node flags.
#define VLIB_NODE_FLAG_TRACE
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
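As a closing illustration of the buffer accessors listed above, a small sketch of the usual loop shape: vlib_get_buffer translates an index into a vlib_buffer_t pointer, and vlib_prefetch_buffer_header warms the metadata of a buffer a couple of packets ahead. from and n_left_from are assumed to be set up by the surrounding node function.

  while (n_left_from > 0)
    {
      vlib_buffer_t *b0;

      /* Prefetch the header of the buffer two packets ahead. */
      if (n_left_from >= 3)
        vlib_prefetch_buffer_header (vlib_get_buffer (vm, from[2]), LOAD);

      b0 = vlib_get_buffer (vm, from[0]);
      /* ... process b0 ... */

      from += 1;
      n_left_from -= 1;
    }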