34 n_chunk = c->length - tail_idx;
40 while ((to_copy -= n_chunk))
65 n_chunk = c->length - head_idx;
71 while ((to_copy -= n_chunk))
88 #ifndef CLIB_MARCH_VARIANT
157 next->prev = cur->prev;
167 f->ooos_list_head = cur->next;
181 u32 new_index, s_end_pos, s_index;
182 u32 offset_pos, offset_end_pos;
186 offset_pos = tail + offset;
187 offset_end_pos = tail + offset + length;
194 f->ooos_list_head = s - f->ooo_segments;
195 f->ooos_newest = f->ooos_list_head;
218 s_index = s - f->ooo_segments;
225 new_index = new_s - f->ooo_segments;
233 prev->next = new_index;
238 f->ooos_list_head = new_index;
241 new_s->next = s_index;
243 f->ooos_newest = new_index;
247 else if (f_pos_gt (offset_pos, s_end_pos))
250 new_index = new_s - f->ooo_segments;
258 new_s->prev = s_index;
260 f->ooos_newest = new_index;
272 s->start = offset_pos;
274 f->ooos_newest = s - f->ooo_segments;
280 if (f_pos_gt (offset_end_pos, s_end_pos))
299 f->ooos_newest = s - f->ooo_segments;
310 u32 s_index, bytes = 0;
315 diff = *tail - s->start;
317 ASSERT (diff != n_bytes_enqueued);
319 if (diff > n_bytes_enqueued)
323 while (0 <= diff && diff < n_bytes_enqueued)
325 s_index = s - f->ooo_segments;
331 *tail = *tail + bytes;
340 diff = *tail - s->start;
379 f->head = f->tail = f->flags = 0;
380 f->head_chunk = f->tail_chunk = f->start_chunk;
381 f->ooo_deq = f->ooo_enq = 0;
383 min_alloc = size > 32 << 10 ? size >> 3 : 4096;
384 min_alloc = clib_min (min_alloc, 64 << 10);
385 f->min_alloc = min_alloc;
390 f->start_chunk->start_byte = 0;
391 prev = f->start_chunk;
425 u32 rounded_data_size;
436 rounded_data_size = (1 << (max_log2 (data_size_in_bytes)));
447 c->length = data_size_in_bytes;
450 f->start_chunk = f->end_chunk = c;
465 rounded_size = (1 << (max_log2 (size)));
513 u32 head, tail, end_chunk;
527 return f_pos_lt (end_chunk, tail) ? end_chunk - head : tail - head;
538 return f->tail_chunk ? f_chunk_end (f->tail_chunk) - tail : 0;
550 while (pos != cur->key)
766 if (--f->refcnt == 0)
778 u32 head, tail, head_idx;
790 n_chunk = c->length - head_idx;
795 ASSERT (len - n_chunk <= c->next->length);
805 u32 alloc_size, free_alloced;
809 alloc_size = clib_min (f->min_alloc, f->size - (tail - head));
810 alloc_size = clib_max (alloc_size, len - free_alloced);
830 f->end_chunk->next = c;
842 u32 tail, head, free_count;
868 old_tail_c = f->tail_chunk;
900 u32 tail, head, free_count, enq_pos;
909 if ((len + offset) > free_count)
960 u32 n_segs, u8 allow_partial)
962 u32 tail, head, free_count, len = 0, i;
975 for (i = 0; i < n_segs; i++)
978 old_tail_c = f->tail_chunk;
991 for (i = 0; i < n_segs; i++)
994 segs[i].len, &f->tail_chunk);
1017 to_copy, &f->tail_chunk);
1049 rt = &f->ooo_deq_lookup;
1085 start = f->start_chunk;
1094 u32 tail, head, cursize;
1129 u32 tail, head, cursize, head_idx;
1139 len = clib_min (cursize - offset, len);
1140 head_idx = head + offset;
1153 u32 total_drop_bytes, tail, head, cursize;
1163 total_drop_bytes = clib_min (cursize, len);
1168 head = head + total_drop_bytes;
1181 return total_drop_bytes;
1226 u32 n_segs, u32 max_bytes)
1228 u32 cursize, to_read, head, tail, fs_index = 1;
1229 u32 n_bytes, head_pos, len, start;
1240 if (offset >= cursize)
1243 to_read = clib_min (cursize - offset, max_bytes);
1257 n_bytes = fs[0].len;
1259 while (n_bytes < to_read && fs_index < n_segs)
1319 f->head_chunk = f->ooo_deq = c;
1322 f->tail_chunk = f->ooo_enq = c;
1330 f->subscribers[f->n_subscribers++] = subscriber;
1338 for (i = 0; i < f->n_subscribers; i++)
1340 if (f->subscribers[i] != subscriber)
1342 f->subscribers[i] = f->subscribers[f->n_subscribers - 1];
1361 if (f_pos_lt (f->ooo_deq->start_byte, f->start_chunk->start_byte)
1362     || f_pos_gt (f->ooo_deq->start_byte,
1367 f->ooo_deq->start_byte);
1371 if (tmp != f->ooo_deq)
1378 if (f_pos_lt (f->ooo_enq->start_byte, f->start_chunk->start_byte)
1379     || f_pos_gt (f->ooo_enq->start_byte,
1384 f->ooo_enq->start_byte);
1389 f->ooo_enq->start_byte);
1391 if (tmp != f->ooo_enq)
1395 if (f->start_chunk->next)
1398 u32 chunks_bytes = 0;
1428 chunks_bytes += c->length;
1434 if (chunks_bytes < f->tail - f->head)
1462 s = format (s, "[%u, %u], len %u, next %d, prev %d", seg->start,
1515 for (i = 0; i < vec_len (data); i++)
1518 for (i = 0; i < trace_len; i++)
1521 if (trace[i].action == 1)
1524 s = format (s, "adding [%u, %u]:", trace[i].offset,
1525     (trace[i].offset + trace[i].len));
1527 trace[i].len, &data[offset]);
1529 else if (trace[i].action == 2)
1532 s = format (s, "adding [%u, %u]:", 0, trace[i].len);
1538 s = format (s, "read: %u", trace[i].len);
1554 u32 indent = va_arg (*args, u32);
1555 u32 ooo_segment_index = f->ooos_list_head;
1563 ooo_segment_index = seg->next;
1573 int verbose = va_arg (*args, int);
1580 s = format (s, "cursize %u nitems %u has_event %d min_alloc %u\n",
1583 indent, f->head, f->tail, f->segment_manager);
1586 s = format (s, "%Uvpp session %d thread %d app session %d thread %d\n",
1588 f->master_thread_index, f->client_session_index,
1589 f->client_thread_index);
1593 s = format (s, "%Uooo pool %d active elts newest %u\n",
u32 length
length of chunk in bytes
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
static void svm_fifo_copy_to_chunk(svm_fifo_t *f, svm_fifo_chunk_t *c, u32 tail_idx, const u8 *src, u32 len, svm_fifo_chunk_t **last)
static void f_load_head_tail_all_acq(svm_fifo_t *f, u32 *head, u32 *tail)
Load head and tail independent of producer/consumer role.
#define CLIB_MEM_UNPOISON(a, s)
static int ooo_segment_try_collect(svm_fifo_t *f, u32 n_bytes_enqueued, u32 *tail)
Removes segments that can now be enqueued because the fifo's tail has advanced.
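A rough sketch of the idea, with hypothetical names and without the list and rb-tree bookkeeping the real routine performs (see source lines 315-340 above): when newly enqueued bytes advance tail into the first out-of-order segment, tail can jump past the segment's remaining bytes.

/* Hypothetical sketch only, not VPP's routine. diff is the wrap-safe
 * distance from the segment start to the new tail. */
typedef struct { u32 start, length; } ooo_seg_sketch_t;

static u32
ooo_collect_sketch (ooo_seg_sketch_t *s, u32 tail)
{
  u32 diff = tail - s->start;
  if ((i32) diff < 0)
    return tail;		/* segment still ahead; nothing to collect */
  if (diff < s->length)
    tail += s->length - diff;	/* tail inside segment: absorb the rest */
  return tail;			/* real code also frees consumed segments */
}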
static u32 svm_fifo_max_enqueue_prod(svm_fifo_t *f)
Maximum number of bytes that can be enqueued into fifo.
static ooo_segment_t * ooo_segment_next(svm_fifo_t *f, ooo_segment_t *s)
int svm_fifo_segments(svm_fifo_t *f, u32 offset, svm_fifo_seg_t *fs, u32 n_segs, u32 max_bytes)
Get pointers to fifo chunks data in svm_fifo_seg_t array.
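A hedged usage sketch for zero-copy reads; the return value is assumed, per the fragments at source lines 1228-1259 above, to be the number of bytes mapped into the segment array, or a negative error when nothing is readable.

#include <svm/svm_fifo.h>

/* Map up to 4 chunk-backed segments from the head of the fifo,
 * consume them scatter-gather style, then drop the consumed bytes. */
static int
zero_copy_read_sketch (svm_fifo_t *f, u32 max_bytes)
{
  svm_fifo_seg_t fs[4];
  int rv = svm_fifo_segments (f, 0 /* offset */, fs, 4, max_bytes);
  if (rv < 0)
    return rv;
  /* ... writev-style consumption of fs[] would go here ... */
  return svm_fifo_dequeue_drop (f, rv);
}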
static rb_node_t * rb_node_left(rb_tree_t *rt, rb_node_t *n)
void svm_fifo_free_chunk_lookup(svm_fifo_t *f)
Cleanup fifo chunk lookup rb tree.
static u8 svm_fifo_has_ooo_data(svm_fifo_t *f)
Check if fifo has out-of-order data.
void svm_fifo_init_pointers(svm_fifo_t *f, u32 head, u32 tail)
Set fifo pointers to requested offset.
static u32 f_free_count(svm_fifo_t *f, u32 head, u32 tail)
Fifo free bytes, i.e., number of bytes that can still be enqueued.
void svm_fifo_free(svm_fifo_t *f)
Free fifo and associated state.
#define clib_memcpy_fast(a, b, c)
u32 prev
Previous linked-list element pool index.
static void f_load_head_tail_cons(svm_fifo_t *f, u32 *head, u32 *tail)
Load head and tail optimized for consumer.
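The _cons/_prod/_all_acq loader variants encode the single-producer, single-consumer memory-ordering contract: each side reads its own index with a plain load and the peer's index with an acquire load, paired with the writer's store-release (clib_atomic_store_rel_n). A minimal sketch of the consumer-side variant, assuming vppinfra atomics:

#include <svm/svm_fifo.h>
#include <vppinfra/atomics.h>

/* The consumer owns head, so a plain load suffices; tail is written
 * by the producer and needs acquire semantics so the enqueued payload
 * bytes are visible before the new tail value is acted on. */
static inline void
load_head_tail_cons_sketch (svm_fifo_t *f, u32 *head, u32 *tail)
{
  *head = f->head;
  *tail = clib_atomic_load_acq_n (&f->tail);
}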
static void svm_fifo_copy_from_chunk(svm_fifo_t *f, svm_fifo_chunk_t *c, u32 head_idx, u8 *dst, u32 len, svm_fifo_chunk_t **last)
#define CLIB_MARCH_FN_SELECT(fn)
svm_fifo_chunk_t * fsh_alloc_chunk(fifo_segment_header_t *fsh, u32 slice_index, u32 chunk_size)
Allocate chunks in fifo segment.
int svm_fifo_peek(svm_fifo_t *f, u32 offset, u32 len, u8 *dst)
Peek data from fifo.
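A hedged sketch of the common peek-then-drop pattern, e.g. parsing a 4-byte length prefix without committing head until the whole message is present; return values are assumed to be bytes copied, clamped to what is available, or a negative error when empty.

#include <svm/svm_fifo.h>

static int
try_read_msg_sketch (svm_fifo_t *f, u8 *buf, u32 buf_len)
{
  u32 msg_len;
  int rv = svm_fifo_peek (f, 0, sizeof (msg_len), (u8 *) &msg_len);
  if (rv < (int) sizeof (msg_len))
    return -1;				/* header not complete yet */
  if (msg_len > buf_len)
    return -1;				/* caller buffer too small */
  rv = svm_fifo_peek (f, sizeof (msg_len), msg_len, buf);
  if (rv < (int) msg_len)
    return -1;				/* body not complete yet */
  return svm_fifo_dequeue_drop (f, sizeof (msg_len) + msg_len);
}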
static svm_fifo_chunk_t * f_lookup_clear_deq_chunks(svm_fifo_t *f, svm_fifo_chunk_t *start, u32 end_pos)
void svm_fifo_enqueue_nocopy(svm_fifo_t *f, u32 len)
Advance tail.
void svm_fifo_init(svm_fifo_t *f, u32 size)
Initialize fifo.
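The init path (source lines 383-385 above) also fixes the chunk-growth quantum: an eighth of the fifo size for fifos larger than 32 KB, otherwise 4 KB, capped at 64 KB. A minimal restatement of that rule, assuming vppinfra's u32 and clib_min:

/* Sketch of the min_alloc rule from svm_fifo_init (lines 383-385):
 * e.g. a 1 MB fifo gets clib_min (1 MB >> 3, 64 KB) = 64 KB,
 * while a 16 KB fifo gets 4096. */
static u32
f_min_alloc_sketch (u32 size)
{
  u32 min_alloc = size > 32 << 10 ? size >> 3 : 4096;
  return clib_min (min_alloc, 64 << 10);
}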
static rb_node_t * rb_node(rb_tree_t *rt, rb_node_index_t ri)
#define RBTREE_TNIL_INDEX
ooo_segment_t * svm_fifo_first_ooo_segment(svm_fifo_t *f)
First out-of-order segment for fifo.
void svm_fifo_dequeue_drop_all(svm_fifo_t *f)
Drop all data from fifo.
static rb_node_t * rb_node_right(rb_tree_t *rt, rb_node_t *n)
void svm_fifo_overwrite_head(svm_fifo_t *f, u8 *src, u32 len)
Overwrite fifo head with new data.
#define SVM_FIFO_INVALID_INDEX
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
__clib_export rb_node_t * rb_tree_predecessor(rb_tree_t *rt, rb_node_t *x)
__clib_export void rb_tree_free_nodes(rb_tree_t *rt)
static ooo_segment_t * ooo_segment_alloc(svm_fifo_t *f, u32 start, u32 length)
static svm_fifo_chunk_t * svm_fifo_find_chunk(svm_fifo_t *f, u32 pos)
Find chunk for given byte position.
void svm_fifo_clone(svm_fifo_t *df, svm_fifo_t *sf)
Clones fifo.
int svm_fifo_enqueue_segments(svm_fifo_t *f, const svm_fifo_seg_t segs[], u32 n_segs, u8 allow_partial)
Enqueue array of svm_fifo_seg_t in order.
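A hedged sketch: posting a header and payload as one logical write. msg_hdr_t is a hypothetical type, and allow_partial = 0 is assumed to make the call all-or-nothing.

#include <svm/svm_fifo.h>

typedef struct { u32 type; u32 len; } msg_hdr_t;	/* hypothetical */

static int
enqueue_msg_sketch (svm_fifo_t *f, msg_hdr_t *hdr, u8 *payload, u32 len)
{
  svm_fifo_seg_t segs[2] = {
    { .data = (u8 *) hdr, .len = sizeof (*hdr) },
    { .data = payload, .len = len },
  };
  return svm_fifo_enqueue_segments (f, segs, 2, 0 /* allow_partial */);
}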
static u32 svm_fifo_max_dequeue(svm_fifo_t *f)
Fifo max bytes to dequeue.
u32 svm_fifo_n_chunks(svm_fifo_t *f)
Number of chunks linked into the fifo.
svm_fifo_chunk_t * svm_fifo_chunk_alloc(u32 size)
Creates a fifo chunk in the current heap.
u8 * format_ooo_list(u8 *s, va_list *args)
void svm_fifo_free_ooo_data(svm_fifo_t *f)
Cleanup fifo ooo data.
static void ooo_segment_free(svm_fifo_t *f, u32 index)
__clib_export rb_node_index_t rb_tree_add_custom(rb_tree_t *rt, u32 key, uword opaque, rb_tree_lt_fn ltfn)
int svm_fifo_dequeue(svm_fifo_t *f, u32 len, u8 *dst)
Dequeue data from fifo.
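A hedged consumer-side sketch; the return value is assumed to be the number of bytes actually dequeued, clamped to what is available, or a negative error when the fifo is empty.

#include <svm/svm_fifo.h>

/* Drain the fifo in 512-byte slices; process() is a hypothetical
 * consumer callback supplied by the caller. */
static void
drain_fifo_sketch (svm_fifo_t *f, void (*process) (u8 *data, u32 len))
{
  u8 buf[512];
  int rv;
  while ((rv = svm_fifo_dequeue (f, sizeof (buf), buf)) > 0)
    process (buf, rv);
}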
static svm_fifo_chunk_t * f_unlink_chunks(svm_fifo_t *f, u32 end_pos, u8 maybe_ooo)
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
static int f_pos_gt(u32 a, u32 b)
uword opaque
value stored by node
static ooo_segment_t * ooo_segment_prev(svm_fifo_t *f, ooo_segment_t *s)
struct svm_fifo_chunk_ * next
pointer to next chunk in linked-lists
void fsh_collect_chunks(fifo_segment_header_t *fsh, u32 slice_index, svm_fifo_chunk_t *c)
Return chunks to fifo segment.
#define pool_put(P, E)
Free an object E in pool P.
int svm_fifo_enqueue(svm_fifo_t *f, u32 len, const u8 *src)
Enqueue data to fifo.
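A hedged producer-side sketch pairing the enqueue with the free-space check documented above (svm_fifo_max_enqueue_prod), so a short write can be handled explicitly rather than discovered after the fact:

#include <svm/svm_fifo.h>

static int
produce_sketch (svm_fifo_t *f, u8 *data, u32 len)
{
  if (svm_fifo_max_enqueue_prod (f) < len)
    return -1;			/* not enough room; caller retries later */
  return svm_fifo_enqueue (f, len, data);
}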
#define SVM_FIFO_MAX_EVT_SUBSCRIBERS
static u32 f_chunk_end(svm_fifo_chunk_t *c)
#define svm_fifo_trace_add(_f, _s, _l, _t)
u8 * svm_fifo_replay(u8 *s, svm_fifo_t *f, u8 no_read, u8 verbose)
CLIB_MARCH_FN(svm_fifo_copy_to_chunk, void, svm_fifo_t *f, svm_fifo_chunk_t *c, u32 tail_idx, const u8 *src, u32 len, svm_fifo_chunk_t **last)
void svm_fifo_add_subscriber(svm_fifo_t *f, u8 subscriber)
Add io events subscriber to list.
#define pool_free(p)
Free a pool.
__clib_export void rb_tree_del_node(rb_tree_t *rt, rb_node_t *z)
static u32 f_cursize(svm_fifo_t *f, u32 head, u32 tail)
Fifo current size, i.e., number of bytes enqueued.
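Head and tail are free-running u32 byte positions, so occupancy and free space reduce to wrap-safe unsigned subtraction; a minimal restatement of the two accessors' arithmetic:

/* tail - head stays correct even after the positions wrap past 2^32,
 * provided the fifo size is well under 4 GB. */
static u32
cursize_sketch (u32 head, u32 tail)
{
  return tail - head;		/* bytes enqueued, not yet dequeued */
}

static u32
free_count_sketch (u32 size, u32 head, u32 tail)
{
  return size - (tail - head);	/* bytes still writable */
}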
static void f_load_head_tail_prod(svm_fifo_t *f, u32 *head, u32 *tail)
Load head and tail optimized for producer.
u32 start_byte
chunk start byte
u8 svm_fifo_is_sane(svm_fifo_t *f)
Check if fifo is sane.
static svm_fifo_chunk_t * svm_fifo_find_next_chunk(svm_fifo_t *f, svm_fifo_chunk_t *start, u32 pos)
#define OOO_SEGMENT_INVALID_INDEX
u8 * format_ooo_segment(u8 *s, va_list *args)
static svm_fifo_chunk_t * f_lookup_clear_enq_chunks(svm_fifo_t *f, svm_fifo_chunk_t *start, u32 end_pos)
u32 svm_fifo_n_ooo_segments(svm_fifo_t *f)
Number of out-of-order segments for fifo.
static void * clib_mem_alloc_aligned_or_null(uword size, uword align)
#define uword_to_pointer(u, type)
u8 * format_svm_fifo(u8 *s, va_list *args)
static u8 f_chunk_includes_pos(svm_fifo_chunk_t *c, u32 pos)
static int f_pos_geq(u32 a, u32 b)
rb_node_index_t deq_rb_index
deq node index if chunk in rbtree
static void clib_mem_free(void *p)
u8 data[0]
start of chunk data
void svm_fifo_del_subscriber(svm_fifo_t *f, u8 subscriber)
Remove io events subscriber from list.
int svm_fifo_enqueue_with_offset(svm_fifo_t *f, u32 offset, u32 len, u8 *src)
Enqueue a future segment.
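A hedged sketch of the intended use: a transport (e.g. TCP) that receives bytes ahead of the expected sequence number posts them at their relative offset, and they merge into the in-order stream once the gap fills. Names are illustrative, not VPP's, and the return convention is assumed to be 0 on success, negative on error.

#include <svm/svm_fifo.h>

/* rcv_nxt is the next expected absolute sequence number, seg_seq the
 * arriving segment's sequence number; their wrap-safe u32 difference
 * is the offset past the in-order tail. */
static int
post_ooo_segment_sketch (svm_fifo_t *f, u32 rcv_nxt, u32 seg_seq,
			 u8 *data, u32 len)
{
  u32 offset = seg_seq - rcv_nxt;
  if (offset == 0)
    return svm_fifo_enqueue (f, len, data);	/* in order after all */
  return svm_fifo_enqueue_with_offset (f, offset, len, data);
}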
__clib_export int rb_tree_is_init(rb_tree_t *rt)
static u32 ooo_segment_end_pos(ooo_segment_t *s)
static void f_update_ooo_deq(svm_fifo_t *f, u32 start_pos, u32 end_pos)
static uword pointer_to_uword(const void *p)
#define clib_atomic_store_rel_n(a, b)
u32 length
Length of segment.
u8 * svm_fifo_dump_trace(u8 *s, svm_fifo_t *f)
__clib_export void rb_tree_init(rb_tree_t *rt)
u32 next
Next linked-list element pool index.
u32 svm_fifo_max_read_chunk(svm_fifo_t *f)
Max contiguous chunk of data that can be read.
static int f_pos_leq(u32 a, u32 b)
__clib_export u32 rb_tree_n_nodes(rb_tree_t *rt)
int svm_fifo_fill_chunk_list(svm_fifo_t *f)
Ensure the whole fifo size is writeable.
static u8 rb_node_is_tnil(rb_tree_t *rt, rb_node_t *n)
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
static uword max_log2(uword x)
static rb_node_t * f_find_node_rbtree(rb_tree_t *rt, u32 pos)
static svm_fifo_chunk_t * f_find_chunk_rbtree(rb_tree_t *rt, u32 pos)
rb_node_index_t enq_rb_index
enq node index if chunk in rbtree
static int f_try_chunk_alloc(svm_fifo_t *f, u32 head, u32 tail, u32 len)
static __clib_unused ooo_segment_t * ooo_segment_last(svm_fifo_t *f)
#define vec_foreach(var, vec)
Vector iterator.
u32 svm_fifo_max_write_chunk(svm_fifo_t *f)
Max contiguous chunk of data that can be written.
#define CLIB_CACHE_LINE_BYTES
int svm_fifo_dequeue_drop(svm_fifo_t *f, u32 len)
Dequeue and drop bytes from fifo.
struct _svm_fifo svm_fifo_t
rb_node_index_t root
root index
u32 start
Start of segment, normalized.
void svm_fifo_init_ooo_lookup(svm_fifo_t *f, u8 ooo_type)
Initialize rbtrees used for ooo lookups.
static int f_pos_lt(u32 a, u32 b)
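The f_pos_* helpers compare free-running u32 positions under wrap-around by inspecting the sign of the unsigned difference; a sketch consistent with their use in the listing above:

/* a precedes b iff the signed view of (a - b) is negative; valid as
 * long as the two positions are less than 2^31 bytes apart. */
static int
pos_lt_sketch (u32 a, u32 b) { return ((i32) (a - b) < 0); }

static int
pos_gt_sketch (u32 a, u32 b) { return ((i32) (a - b) > 0); }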
static void ooo_segment_add(svm_fifo_t *f, u32 offset, u32 head, u32 tail, u32 length)
Add segment to fifo's out-of-order segment list.
svm_fifo_t * svm_fifo_alloc(u32 data_size_in_bytes)
Creates a fifo in the current heap.
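A hedged end-to-end sketch, assuming the alloc-then-init sequence the listing suggests (svm_fifo_alloc sizes the first chunk, svm_fifo_init resets head/tail and the allocation policy), single-threaded so the producer/consumer ordering rules are trivially satisfied:

#include <svm/svm_fifo.h>

static int
fifo_roundtrip_sketch (void)
{
  svm_fifo_t *f = svm_fifo_alloc (4096);
  u8 in = 42, out = 0;

  if (!f)
    return -1;
  svm_fifo_init (f, 4096);
  if (svm_fifo_enqueue (f, 1, &in) != 1)	/* assumed to return bytes */
    return -1;
  if (svm_fifo_dequeue (f, 1, &out) != 1 || out != 42)
    return -1;
  svm_fifo_free (f);
  return 0;
}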
static void f_update_ooo_enq(svm_fifo_t *f, u32 start_pos, u32 end_pos)
static uword pool_elts(void *v)
Number of active elements in a pool.