|
FD.io VPP
v21.10.1-2-g0a485f517
Vector Packet Processing
|
Go to the documentation of this file.
24 #define F_INVALID_CPTR (fs_sptr_t) ~0ULL
33 &&
f_pos_lt (tail_idx,
c->start_byte +
c->length));
35 tail_idx -=
c->start_byte;
36 n_chunk =
c->length - tail_idx;
42 while ((to_copy -= n_chunk))
64 &&
f_pos_lt (head_idx,
c->start_byte +
c->length));
66 head_idx -=
c->start_byte;
67 n_chunk =
c->length - head_idx;
73 while ((to_copy -= n_chunk))
90 #ifndef CLIB_MARCH_VARIANT
169 f->ooos_list_head = cur->
next;
183 u32 new_index, s_end_pos, s_index;
184 u32 offset_pos, offset_end_pos;
188 offset_pos = tail +
offset;
196 f->ooos_list_head = s -
f->ooo_segments;
197 f->ooos_newest =
f->ooos_list_head;
220 s_index = s -
f->ooo_segments;
227 new_index = new_s -
f->ooo_segments;
235 prev->
next = new_index;
240 f->ooos_list_head = new_index;
243 new_s->
next = s_index;
245 f->ooos_newest = new_index;
249 else if (
f_pos_gt (offset_pos, s_end_pos))
252 new_index = new_s -
f->ooo_segments;
260 new_s->
prev = s_index;
262 f->ooos_newest = new_index;
274 s->
start = offset_pos;
276 f->ooos_newest = s -
f->ooo_segments;
282 if (
f_pos_gt (offset_end_pos, s_end_pos))
301 f->ooos_newest = s -
f->ooo_segments;
312 u32 s_index, bytes = 0;
317 diff = *tail - s->
start;
319 ASSERT (diff != n_bytes_enqueued);
321 if (diff > n_bytes_enqueued)
325 while (0 <= diff && diff < n_bytes_enqueued)
327 s_index = s -
f->ooo_segments;
333 *tail = *tail + bytes;
342 diff = *tail - s->
start;
353 ASSERT (bytes <= f->shr->size);
381 f->shr->head =
f->shr->tail =
f->
flags = 0;
382 f->shr->head_chunk =
f->shr->tail_chunk =
f->shr->start_chunk;
383 f->ooo_deq =
f->ooo_enq = 0;
385 min_alloc =
size > 32 << 10 ?
size >> 3 : 4096;
386 min_alloc =
clib_min (min_alloc, 64 << 10);
387 f->shr->min_alloc = min_alloc;
428 u32 rounded_data_size;
439 rounded_data_size = (1 << (
max_log2 (data_size_in_bytes)));
450 c->length = data_size_in_bytes;
453 f->shr->start_chunk =
f->shr->end_chunk =
f_csptr (
f,
c);
475 c->length = rounded_size;
516 u32 head, tail, end_chunk;
521 if (!
f->shr->head_chunk)
530 return f_pos_lt (end_chunk, tail) ? end_chunk - head : tail - head;
544 return tail_chunk ?
f_chunk_end (tail_chunk) - tail : 0;
556 while (pos != cur->
key)
772 if (--
f->refcnt == 0)
784 u32 head, tail, head_idx;
787 ASSERT (len <= f->shr->size);
791 if (!
f->shr->head_chunk)
795 head_idx = head -
c->start_byte;
796 n_chunk =
c->length - head_idx;
812 u32 alloc_size, free_alloced;
817 alloc_size =
clib_min (
f->shr->min_alloc,
f->shr->size - (tail - head));
818 alloc_size =
clib_max (alloc_size,
len - free_alloced);
840 if (!
f->shr->tail_chunk)
849 u32 tail, head, free_count;
908 u32 tail, head, free_count, enq_pos;
973 u32 n_segs,
u8 allow_partial)
975 u32 tail, head, free_count,
len = 0,
i;
988 for (
i = 0;
i < n_segs;
i++)
1004 for (
i = 0;
i < n_segs;
i++)
1007 segs[
i].
len, &
f->shr->tail_chunk);
1008 tail += segs[
i].
len;
1030 to_copy, &
f->shr->tail_chunk);
1042 f->shr->tail_chunk =
1061 rt = &
f->ooo_deq_lookup;
1107 u32 tail, head, cursize;
1119 if (!
f->shr->head_chunk)
1123 &
f->shr->head_chunk);
1143 u32 tail, head, cursize, head_idx;
1155 head_idx = head +
offset;
1170 u32 total_drop_bytes, tail, head, cursize;
1185 head = head + total_drop_bytes;
1192 f->shr->start_chunk :
1199 return total_drop_bytes;
1216 f->shr->head_chunk =
1247 u32 head, tail, n_avail, head_pos,
n_bytes, fs_index = 1, clen;
1264 head_pos = (tail -
c->start_byte);
1265 fs[0].
data =
c->data + head_pos;
1273 fs[fs_index].
data =
c->data;
1274 fs[fs_index].
len = clen;
1284 u32 n_segs,
u32 max_bytes)
1286 u32 cursize, to_read, head, tail, fs_index = 1;
1304 if (!
f->shr->head_chunk)
1312 head_pos = start -
c->start_byte;
1313 fs[0].
data =
c->data + head_pos;
1317 while (
n_bytes < to_read && fs_index < n_segs)
1321 fs[fs_index].
data =
c->data;
1391 f->shr->subscribers[
f->shr->n_subscribers++] = subscriber;
1399 for (
i = 0;
i <
f->shr->n_subscribers;
i++)
1401 if (
f->shr->subscribers[
i] != subscriber)
1403 f->shr->subscribers[
i] =
f->shr->subscribers[
f->shr->n_subscribers - 1];
1404 f->shr->n_subscribers--;
1414 if (
f->shr->head_chunk &&
1417 if (
f->shr->tail_chunk &&
1430 f->ooo_deq->start_byte);
1434 if (
tmp !=
f->ooo_deq)
1447 f->ooo_enq->start_byte);
1452 f->ooo_enq->start_byte);
1454 if (
tmp !=
f->ooo_enq)
1461 u32 chunks_bytes = 0;
1491 chunks_bytes +=
c->length;
1497 if (chunks_bytes < f->shr->tail -
f->shr->head)
1525 s =
format (s,
"[%u, %u], len %u, next %d, prev %d", seg->
start,
1581 for (
i = 0;
i < trace_len;
i++)
1617 u32 indent = va_arg (*args,
u32);
1618 u32 ooo_segment_index =
f->ooos_list_head;
1626 ooo_segment_index = seg->
next;
1636 int verbose = va_arg (*args,
int);
1643 s =
format (s,
"cursize %u nitems %u has_event %d min_alloc %u\n",
1647 indent,
f->shr->head,
f->shr->tail,
f->segment_manager);
1650 s =
format (s,
"%Uvpp session %d thread %d app session %d thread %d\n",
1652 f->master_thread_index,
f->shr->client_session_index,
1653 f->client_thread_index);
1657 s =
format (s,
"%Uooo pool %d active elts newest %u\n",
__clib_export u32 rb_tree_n_nodes(rb_tree_t *rt)
static svm_fifo_chunk_t * f_find_chunk_rbtree(rb_tree_t *rt, u32 pos)
svm_fifo_chunk_t * fsh_alloc_chunk(fifo_segment_header_t *fsh, u32 slice_index, u32 chunk_size)
Allocate chunks in fifo segment.
static vlib_cli_command_t trace
(constructor) VLIB_CLI_COMMAND (trace)
static rb_node_t * f_find_node_rbtree(rb_tree_t *rt, u32 pos)
int svm_fifo_segments(svm_fifo_t *f, u32 offset, svm_fifo_seg_t *fs, u32 n_segs, u32 max_bytes)
Get pointers to fifo chunks data in svm_fifo_seg_t array.
static void f_load_head_tail_prod(svm_fifo_t *f, u32 *head, u32 *tail)
Load head and tail optimized for producer.
static rb_node_t * rb_node(rb_tree_t *rt, rb_node_index_t ri)
u8 * format_ooo_segment(u8 *s, va_list *args)
#define RBTREE_TNIL_INDEX
int svm_fifo_enqueue_with_offset(svm_fifo_t *f, u32 offset, u32 len, u8 *src)
Enqueue a future segment.
static uword pointer_to_uword(const void *p)
static rb_node_t * rb_node_left(rb_tree_t *rt, rb_node_t *n)
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
int svm_fifo_peek(svm_fifo_t *f, u32 offset, u32 len, u8 *dst)
Peek data from fifo.
void svm_fifo_add_subscriber(svm_fifo_t *f, u8 subscriber)
Add io events subscriber to list.
static void clib_mem_free(void *p)
u8 * svm_fifo_dump_trace(u8 *s, svm_fifo_t *f)
#define svm_fifo_trace_add(_f, _s, _l, _t)
int svm_fifo_fill_chunk_list(svm_fifo_t *f)
Ensure the whole fifo size is writeable.
static void f_update_ooo_deq(svm_fifo_t *f, u32 start_pos, u32 end_pos)
u32 svm_fifo_max_read_chunk(svm_fifo_t *f)
Max contiguous chunk of data that can be read.
static void ooo_segment_free(svm_fifo_t *f, u32 index)
int svm_fifo_provision_chunks(svm_fifo_t *f, svm_fifo_seg_t *fs, u32 n_segs, u32 len)
Provision and return chunks for number of bytes requested.
#define CLIB_MARCH_FN_SELECT(fn)
u8 svm_fifo_is_sane(svm_fifo_t *f)
Check if fifo is sane.
u32 start_byte
chunk start byte
#define pool_put(P, E)
Free an object E in pool P.
static void * clib_mem_alloc_aligned_or_null(uword size, uword align)
CLIB_MARCH_FN(svm_fifo_copy_to_chunk, void, svm_fifo_t *f, svm_fifo_chunk_t *c, u32 tail_idx, const u8 *src, u32 len, fs_sptr_t *last)
static svm_fifo_chunk_t * f_tail_cptr(svm_fifo_t *f)
svm_fifo_t * svm_fifo_alloc(u32 data_size_in_bytes)
Creates a fifo in the current heap.
static void f_update_ooo_enq(svm_fifo_t *f, u32 start_pos, u32 end_pos)
u32 next
Next linked-list element pool index.
static int f_pos_lt(u32 a, u32 b)
static void f_load_head_tail_cons(svm_fifo_t *f, u32 *head, u32 *tail)
Load head and tail optimized for consumer.
#define FS_MIN_LOG2_CHUNK_SZ
also min fifo size
rb_node_index_t enq_rb_index
enq node index if chunk in rbtree
static_always_inline void * clib_memcpy_fast(void *restrict dst, const void *restrict src, size_t n)
static ooo_segment_t * ooo_segment_alloc(svm_fifo_t *f, u32 start, u32 length)
void fsh_collect_chunks(fifo_segment_header_t *fsh, u32 slice_index, svm_fifo_chunk_t *c)
Return chunks to fifo segment.
u32 length
Length of segment.
static u32 ooo_segment_end_pos(ooo_segment_t *s)
struct _svm_fifo svm_fifo_t
static uword max_log2(uword x)
static u8 svm_fifo_has_ooo_data(svm_fifo_t *f)
Check if fifo has out-of-order data.
u8 * svm_fifo_replay(u8 *s, svm_fifo_t *f, u8 no_read, u8 verbose)
void svm_fifo_free_ooo_data(svm_fifo_t *f)
Cleanup fifo ooo data.
#define SVM_FIFO_MAX_EVT_SUBSCRIBERS
u8 * format_ooo_list(u8 *s, va_list *args)
void svm_fifo_overwrite_head(svm_fifo_t *f, u8 *src, u32 len)
Overwrite fifo head with new data.
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
uword opaque
value stored by node
static svm_fifo_chunk_t * f_lookup_clear_deq_chunks(svm_fifo_t *f, svm_fifo_chunk_t *start, u32 end_pos)
int svm_fifo_dequeue(svm_fifo_t *f, u32 len, u8 *dst)
Dequeue data from fifo.
void svm_fifo_enqueue_nocopy(svm_fifo_t *f, u32 len)
Advance tail.
struct clib_bihash_value offset
template key/value backing page structure
u32 start
Start of segment, normalized.
static svm_fifo_chunk_t * svm_fifo_find_next_chunk(svm_fifo_t *f, svm_fifo_chunk_t *start, u32 pos)
__clib_export rb_node_t * rb_tree_predecessor(rb_tree_t *rt, rb_node_t *x)
static svm_fifo_chunk_t * svm_fifo_find_chunk(svm_fifo_t *f, u32 pos)
Find chunk for given byte position.
u8 * format_svm_fifo(u8 *s, va_list *args)
static heap_elt_t * last(heap_header_t *h)
int svm_fifo_enqueue_segments(svm_fifo_t *f, const svm_fifo_seg_t segs[], u32 n_segs, u8 allow_partial)
Enqueue array of svm_fifo_seg_t in order.
rb_node_index_t deq_rb_index
deq node index if chunk in rbtree
static int f_pos_gt(u32 a, u32 b)
__clib_export int rb_tree_is_init(rb_tree_t *rt)
int svm_fifo_dequeue_drop(svm_fifo_t *f, u32 len)
Dequeue and drop bytes from fifo.
int svm_fifo_enqueue(svm_fifo_t *f, u32 len, const u8 *src)
Enqueue data to fifo.
static svm_fifo_chunk_t * f_end_cptr(svm_fifo_t *f)
u32 svm_fifo_max_write_chunk(svm_fifo_t *f)
Max contiguous chunk of data that can be written.
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
#define OOO_SEGMENT_INVALID_INDEX
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment)
void svm_fifo_init_pointers(svm_fifo_t *f, u32 head, u32 tail)
Set fifo pointers to requested offset.
save_rewrite_length must be aligned so that reassembly doesn't overwrite it
fs_sptr_t next
pointer to next chunk in linked-lists
static void svm_fifo_copy_to_chunk(svm_fifo_t *f, svm_fifo_chunk_t *c, u32 tail_idx, const u8 *src, u32 len, fs_sptr_t *last)
static void ooo_segment_add(svm_fifo_t *f, u32 offset, u32 head, u32 tail, u32 length)
Add segment to fifo's out-of-order segment list.
#define CLIB_CACHE_LINE_BYTES
void svm_fifo_init(svm_fifo_t *f, u32 size)
Initialize fifo.
static int f_pos_geq(u32 a, u32 b)
void svm_fifo_dequeue_drop_all(svm_fifo_t *f)
Drop all data from fifo.
void svm_fifo_init_ooo_lookup(svm_fifo_t *f, u8 ooo_type)
Initialize rbtrees used for ooo lookups.
static void svm_fifo_copy_from_chunk(svm_fifo_t *f, svm_fifo_chunk_t *c, u32 head_idx, u8 *dst, u32 len, fs_sptr_t *last)
static svm_fifo_chunk_t * f_cptr(svm_fifo_t *f, fs_sptr_t cp)
u32 length
length of chunk in bytes
u32 svm_fifo_n_ooo_segments(svm_fifo_t *f)
Number of out-of-order segments for fifo.
static int f_try_chunk_alloc(svm_fifo_t *f, u32 head, u32 tail, u32 len)
static svm_fifo_chunk_t * f_unlink_chunks(svm_fifo_t *f, u32 end_pos, u8 maybe_ooo)
template key/value backing page structure
static int f_pos_leq(u32 a, u32 b)
static int ooo_segment_try_collect(svm_fifo_t *f, u32 n_bytes_enqueued, u32 *tail)
Removes segments that can now be enqueued because the fifo's tail has advanced.
static svm_fifo_chunk_t * f_start_cptr(svm_fifo_t *f)
u8 data[0]
start of chunk data
static u32 svm_fifo_max_enqueue_prod(svm_fifo_t *f)
Maximum number of bytes that can be enqueued into fifo.
u32 prev
Previous linked-list element pool index.
description fragment has unexpected format
void svm_fifo_free_chunk_lookup(svm_fifo_t *f)
Cleanup fifo chunk lookup rb tree.
static svm_fifo_chunk_t * f_head_cptr(svm_fifo_t *f)
#define SVM_FIFO_INVALID_INDEX
__clib_export void rb_tree_init(rb_tree_t *rt)
__clib_export rb_node_index_t rb_tree_add_custom(rb_tree_t *rt, u32 key, uword opaque, rb_tree_lt_fn ltfn)
void svm_fifo_free(svm_fifo_t *f)
Free fifo and associated state.
#define CLIB_MEM_UNPOISON(a, s)
void svm_fifo_clone(svm_fifo_t *df, svm_fifo_t *sf)
Clones fifo.
#define vec_foreach(var, vec)
Vector iterator.
u32 svm_fifo_n_chunks(svm_fifo_t *f)
Number of chunks linked into the fifo.
static uword pool_elts(void *v)
Number of active elements in a pool.
__clib_export void rb_tree_del_node(rb_tree_t *rt, rb_node_t *z)
void svm_fifo_del_subscriber(svm_fifo_t *f, u8 subscriber)
Remove io events subscriber from list.
static void f_load_head_tail_all_acq(svm_fifo_t *f, u32 *head, u32 *tail)
Load head and tail independent of producer/consumer role.
clib_memset (h->entries, 0, sizeof (h->entries[0]) * entries)
static u32 f_cursize(svm_fifo_t *f, u32 head, u32 tail)
Fifo current size, i.e., number of bytes enqueued.
ooo_segment_t * svm_fifo_first_ooo_segment(svm_fifo_t *f)
First out-of-order segment for fifo.
static void f_csptr_link(svm_fifo_t *f, fs_sptr_t cp, svm_fifo_chunk_t *c)
vnet_interface_output_runtime_t * rt
static u32 f_chunk_end(svm_fifo_chunk_t *c)
__clib_export void rb_tree_free_nodes(rb_tree_t *rt)
#define uword_to_pointer(u, type)
#define pool_free(p)
Free a pool.
static rb_node_t * rb_node_right(rb_tree_t *rt, rb_node_t *n)
static ooo_segment_t * ooo_segment_next(svm_fifo_t *f, ooo_segment_t *s)
static ooo_segment_t * ooo_segment_prev(svm_fifo_t *f, ooo_segment_t *s)
static __clib_unused ooo_segment_t * ooo_segment_last(svm_fifo_t *f)
static svm_fifo_chunk_t * f_lookup_clear_enq_chunks(svm_fifo_t *f, svm_fifo_chunk_t *start, u32 end_pos)
static u8 f_chunk_includes_pos(svm_fifo_chunk_t *c, u32 pos)
static u32 f_free_count(svm_fifo_t *f, u32 head, u32 tail)
Fifo free bytes, i.e., number of free bytes.
svm_fifo_chunk_t * svm_fifo_chunk_alloc(u32 size)
Creates a fifo chunk in the current heap.
#define clib_atomic_store_rel_n(a, b)
vl_api_mac_event_action_t action
static u8 rb_node_is_tnil(rb_tree_t *rt, rb_node_t *n)
static u32 svm_fifo_max_dequeue(svm_fifo_t *f)
Fifo max bytes to dequeue.
static fs_sptr_t f_csptr(svm_fifo_t *f, svm_fifo_chunk_t *c)