34    n_chunk = c->length - tail_idx;
40    while ((to_copy -= n_chunk))
65    n_chunk = c->length - head_idx;
71    while ((to_copy -= n_chunk))
88    #ifndef CLIB_MARCH_VARIANT
157   next->prev = cur->prev;
167   f->ooos_list_head = cur->next;
181   u32 new_index, s_end_pos, s_index;
182   u32 offset_pos, offset_end_pos;
186   offset_pos = tail + offset;
187   offset_end_pos = tail + offset + length;
194   f->ooos_list_head = s - f->ooo_segments;
195   f->ooos_newest = f->ooos_list_head;
218   s_index = s - f->ooo_segments;
225   new_index = new_s - f->ooo_segments;
233   prev->next = new_index;
238   f->ooos_list_head = new_index;
241   new_s->next = s_index;
243   f->ooos_newest = new_index;
247   else if (f_pos_gt (offset_pos, s_end_pos))
250   new_index = new_s - f->ooo_segments;
258   new_s->prev = s_index;
260   f->ooos_newest = new_index;
272   s->start = offset_pos;
274   f->ooos_newest = s - f->ooo_segments;
280   if (f_pos_gt (offset_end_pos, s_end_pos))
299   f->ooos_newest = s - f->ooo_segments;
310   u32 s_index, bytes = 0;
315   diff = *tail - s->start;
317   ASSERT (diff != n_bytes_enqueued);
319   if (diff > n_bytes_enqueued)
323   while (0 <= diff && diff < n_bytes_enqueued)
325   s_index = s - f->ooo_segments;
331   *tail = *tail + bytes;
340   diff = *tail - s->start;
379   f->head = f->tail = f->flags = 0;
380   f->head_chunk = f->tail_chunk = f->start_chunk;
381   f->ooo_deq = f->ooo_enq = 0;
383   min_alloc = size > 32 << 10 ? size >> 3 : 4096;
384   min_alloc = clib_min (min_alloc, 64 << 10);
385   f->min_alloc = min_alloc;
390   f->start_chunk->start_byte = 0;
391   prev = f->start_chunk;
423   u32 rounded_data_size;
434   rounded_data_size = (1 << (max_log2 (data_size_in_bytes)));
445   c->length = data_size_in_bytes;
448   f->start_chunk = f->end_chunk = c;
463   rounded_size = (1 << (max_log2 (size)));
511   u32 head, tail, end_chunk;
525   return f_pos_lt (end_chunk, tail) ? end_chunk - head : tail - head;
536   return f->tail_chunk ? f_chunk_end (f->tail_chunk) - tail : 0;
548   while (pos != cur->key)
764   if (--f->refcnt == 0)
776   u32 head, tail, head_idx;
788   n_chunk = c->length - head_idx;
793   ASSERT (len - n_chunk <= c->next->length);
803   u32 alloc_size, free_alloced;
807   alloc_size = clib_min (f->min_alloc, f->size - (tail - head));
808   alloc_size = clib_max (alloc_size, len - free_alloced);
828   f->end_chunk->next = c;
840   u32 tail, head, free_count;
866   old_tail_c = f->tail_chunk;
898   u32 tail, head, free_count, enq_pos;
907   if ((len + offset) > free_count)
966   rt = &f->ooo_deq_lookup;
1002  start = f->start_chunk;
1011  u32 tail, head, cursize;
1046  u32 tail, head, cursize, head_idx;
1056  len = clib_min (cursize - offset, len);
1057  head_idx = head + offset;
1070  u32 total_drop_bytes, tail, head, cursize;
1080  total_drop_bytes = clib_min (cursize, len);
1085  head = head + total_drop_bytes;
1098  return total_drop_bytes;
1144  u32 cursize, head, tail, head_idx;
1158  fs[0].len = f->size - head_idx;
1159  fs[0].data = f->head_chunk->data + head_idx;
1160  fs[1].len = cursize - fs[0].len;
1161  fs[1].data = f->head_chunk->data;
1165  fs[0].len = cursize;
1166  fs[0].data = f->head_chunk->data + head_idx;
1181  ASSERT (fs[0].data == f->head_chunk->data + head);
1182  head = (head + fs[0].len + fs[1].len);
1234  f->head_chunk = f->ooo_deq = c;
1237  f->tail_chunk = f->ooo_enq = c;
1245  f->subscribers[f->n_subscribers++] = subscriber;
1253  for (i = 0; i < f->n_subscribers; i++)
1255  if (f->subscribers[i] != subscriber)
1257  f->subscribers[i] = f->subscribers[f->n_subscribers - 1];
1276  if (f_pos_lt (f->ooo_deq->start_byte, f->start_chunk->start_byte)
1277      || f_pos_gt (f->ooo_deq->start_byte,
1282  f->ooo_deq->start_byte);
1286  if (tmp != f->ooo_deq)
1293  if (f_pos_lt (f->ooo_enq->start_byte, f->start_chunk->start_byte)
1294      || f_pos_gt (f->ooo_enq->start_byte,
1299  f->ooo_enq->start_byte);
1304  f->ooo_enq->start_byte);
1306  if (tmp != f->ooo_enq)
1310  if (f->start_chunk->next)
1313  u32 chunks_bytes = 0;
1343  chunks_bytes += c->length;
1349  if (chunks_bytes < f->tail - f->head)
1377  s = format (s, "[%u, %u], len %u, next %d, prev %d", seg->start,
1430  for (i = 0; i < vec_len (data); i++)
1433  for (i = 0; i < trace_len; i++)
1436  if (trace[i].action == 1)
1439  s = format (s, "adding [%u, %u]:", trace[i].offset,
1440      (trace[i].offset + trace[i].len));
1442  trace[i].len, &data[offset]);
1444  else if (trace[i].action == 2)
1447  s = format (s, "adding [%u, %u]:", 0, trace[i].len);
1453  s = format (s, "read: %u", trace[i].len);
1469  u32 indent = va_arg (*args, u32);
1470  u32 ooo_segment_index = f->ooos_list_head;
1478  ooo_segment_index = seg->next;
1488  int verbose = va_arg (*args, int);
1495  s = format (s, "cursize %u nitems %u has_event %d min_alloc %u\n",
1498      indent, f->head, f->tail, f->segment_manager);
1501  s = format (s, "%Uvpp session %d thread %d app session %d thread %d\n",
1503      f->master_thread_index, f->client_session_index,
1504      f->client_thread_index);
1508  s = format (s, "%Uooo pool %d active elts newest %u\n",
u32 length
length of chunk in bytes
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
static void svm_fifo_copy_to_chunk(svm_fifo_t *f, svm_fifo_chunk_t *c, u32 tail_idx, const u8 *src, u32 len, svm_fifo_chunk_t **last)
rb_node_t * rb_tree_predecessor(rb_tree_t *rt, rb_node_t *x)
static void f_load_head_tail_all_acq(svm_fifo_t *f, u32 *head, u32 *tail)
Load head and tail independent of producer/consumer role.
#define CLIB_MEM_UNPOISON(a, s)
static vlib_cli_command_t trace
(constructor) VLIB_CLI_COMMAND (trace)
static int ooo_segment_try_collect(svm_fifo_t *f, u32 n_bytes_enqueued, u32 *tail)
Removes segments that can now be enqueued because the fifo's tail has advanced.
int svm_fifo_segments(svm_fifo_t *f, svm_fifo_seg_t *fs)
static u32 svm_fifo_max_enqueue_prod(svm_fifo_t *f)
Maximum number of bytes that can be enqueued into fifo.
static ooo_segment_t * ooo_segment_next(svm_fifo_t *f, ooo_segment_t *s)
static rb_node_t * rb_node_left(rb_tree_t *rt, rb_node_t *n)
void svm_fifo_free_chunk_lookup(svm_fifo_t *f)
Clean up fifo chunk lookup rb tree.
static u8 svm_fifo_has_ooo_data(svm_fifo_t *f)
Check if fifo has out-of-order data.
void svm_fifo_init_pointers(svm_fifo_t *f, u32 head, u32 tail)
Set fifo pointers to requested offset.
static u32 f_free_count(svm_fifo_t *f, u32 head, u32 tail)
Fifo free bytes, i.e., number of bytes that can still be enqueued.
void svm_fifo_segments_free(svm_fifo_t *f, svm_fifo_seg_t *fs)
void svm_fifo_free(svm_fifo_t *f)
Free fifo and associated state.
#define clib_memcpy_fast(a, b, c)
u32 prev
Previous linked-list element pool index.
clib_memset(h->entries, 0, sizeof(h->entries[0]) * entries)
static void f_load_head_tail_cons(svm_fifo_t *f, u32 *head, u32 *tail)
Load head and tail optimized for consumer.
static void svm_fifo_copy_from_chunk(svm_fifo_t *f, svm_fifo_chunk_t *c, u32 head_idx, u8 *dst, u32 len, svm_fifo_chunk_t **last)
#define CLIB_MARCH_FN_SELECT(fn)
svm_fifo_chunk_t * fsh_alloc_chunk(fifo_segment_header_t *fsh, u32 slice_index, u32 chunk_size)
Allocate chunks in fifo segment.
static heap_elt_t * last(heap_header_t *h)
int svm_fifo_peek(svm_fifo_t *f, u32 offset, u32 len, u8 *dst)
Peek data from fifo.
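A consumer-side sketch of peeking a fixed-size header without consuming it, then dropping it once parsed; the 8-byte header size is illustrative:

  #include <svm/svm_fifo.h>

  static int
  peek_then_drop (svm_fifo_t *f)
  {
    u8 hdr[8];

    /* Copy out 8 bytes starting at read offset 0; head is unchanged. */
    if (svm_fifo_peek (f, 0 /* offset */, sizeof (hdr), hdr) < (int) sizeof (hdr))
      return -1; /* not enough data buffered yet */

    /* ... parse hdr ... */

    /* Consume the peeked bytes without copying them again. */
    return svm_fifo_dequeue_drop (f, sizeof (hdr));
  }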
static svm_fifo_chunk_t * f_lookup_clear_deq_chunks(svm_fifo_t *f, svm_fifo_chunk_t *start, u32 end_pos)
void svm_fifo_enqueue_nocopy(svm_fifo_t *f, u32 len)
Advance tail, e.g., after data has been written into fifo memory directly.
void svm_fifo_init(svm_fifo_t *f, u32 size)
Initialize fifo.
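For a standalone fifo, allocation and initialization are separate steps, matching the excerpts around lines 379-391 above. A minimal sketch, assuming the fifo is private to a single thread:

  #include <svm/svm_fifo.h>

  /* svm_fifo_alloc builds the fifo and its first chunk in the current
   * heap; svm_fifo_init resets head/tail/flags and computes min_alloc
   * (lines 379-385 above). */
  static svm_fifo_t *
  make_fifo (u32 size)
  {
    svm_fifo_t *f = svm_fifo_alloc (size);
    if (!f)
      return 0;
    svm_fifo_init (f, size);
    return f;
  }

svm_fifo_free later releases the fifo and associated state once the last reference is dropped (line 764: if (--f->refcnt == 0)).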
static rb_node_t * rb_node(rb_tree_t *rt, rb_node_index_t ri)
#define RBTREE_TNIL_INDEX
ooo_segment_t * svm_fifo_first_ooo_segment(svm_fifo_t *f)
First out-of-order segment for fifo.
void svm_fifo_dequeue_drop_all(svm_fifo_t *f)
Drop all data from fifo.
static rb_node_t * rb_node_right(rb_tree_t *rt, rb_node_t *n)
u32 rb_tree_n_nodes(rb_tree_t *rt)
void svm_fifo_overwrite_head(svm_fifo_t *f, u8 *src, u32 len)
Overwrite fifo head with new data.
#define SVM_FIFO_INVALID_INDEX
void rb_tree_free_nodes(rb_tree_t *rt)
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
int rb_tree_is_init(rb_tree_t *rt)
static ooo_segment_t * ooo_segment_alloc(svm_fifo_t *f, u32 start, u32 length)
static svm_fifo_chunk_t * svm_fifo_find_chunk(svm_fifo_t *f, u32 pos)
Find chunk for given byte position.
void svm_fifo_clone(svm_fifo_t *df, svm_fifo_t *sf)
Clones fifo.
static u32 svm_fifo_max_dequeue(svm_fifo_t *f)
Fifo max bytes to dequeue.
u32 svm_fifo_n_chunks(svm_fifo_t *f)
Number of chunks linked into the fifo.
svm_fifo_chunk_t * svm_fifo_chunk_alloc(u32 size)
Creates a fifo chunk in the current heap.
u8 * format_ooo_list(u8 *s, va_list *args)
void svm_fifo_free_ooo_data(svm_fifo_t *f)
Clean up fifo ooo data.
static void ooo_segment_free(svm_fifo_t *f, u32 index)
int svm_fifo_dequeue(svm_fifo_t *f, u32 len, u8 *dst)
Dequeue data from fifo.
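A consumer-side usage sketch pairing svm_fifo_max_dequeue with svm_fifo_dequeue; error handling is intentionally minimal:

  #include <svm/svm_fifo.h>

  /* Drain whatever is currently buffered, up to buf_len bytes per call.
   * svm_fifo_dequeue returns the number of bytes copied, or a negative
   * error when the fifo is empty. */
  static int
  drain_some (svm_fifo_t *f, u8 *buf, u32 buf_len)
  {
    u32 avail = svm_fifo_max_dequeue (f);
    if (!avail)
      return 0;
    return svm_fifo_dequeue (f, clib_min (avail, buf_len), buf);
  }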
static svm_fifo_chunk_t * f_unlink_chunks(svm_fifo_t *f, u32 end_pos, u8 maybe_ooo)
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
static int f_pos_gt(u32 a, u32 b)
uword opaque
value stored by node
static ooo_segment_t * ooo_segment_prev(svm_fifo_t *f, ooo_segment_t *s)
struct svm_fifo_chunk_ * next
pointer to next chunk in linked-lists
void fsh_collect_chunks(fifo_segment_header_t *fsh, u32 slice_index, svm_fifo_chunk_t *c)
Return chunks to fifo segment.
#define pool_put(P, E)
Free an object E in pool P.
int svm_fifo_enqueue(svm_fifo_t *f, u32 len, const u8 *src)
Enqueue data to fifo.
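The producer-side counterpart, a hedged sketch: check free space with svm_fifo_max_enqueue_prod (valid only on the producer thread) and enqueue what fits:

  #include <svm/svm_fifo.h>

  /* Enqueue as much of src as currently fits. svm_fifo_enqueue grows
   * the chunk list on demand (see f_try_chunk_alloc above) and returns
   * bytes written, or a negative error when the fifo is full. */
  static int
  produce (svm_fifo_t *f, const u8 *src, u32 len)
  {
    u32 max = svm_fifo_max_enqueue_prod (f);
    if (!max)
      return 0;
    return svm_fifo_enqueue (f, clib_min (len, max), src);
  }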
#define SVM_FIFO_MAX_EVT_SUBSCRIBERS
void rb_tree_init(rb_tree_t *rt)
static u32 f_chunk_end(svm_fifo_chunk_t *c)
#define svm_fifo_trace_add(_f, _s, _l, _t)
u8 * svm_fifo_replay(u8 *s, svm_fifo_t *f, u8 no_read, u8 verbose)
CLIB_MARCH_FN(svm_fifo_copy_to_chunk, void, svm_fifo_t *f, svm_fifo_chunk_t *c, u32 tail_idx, const u8 *src, u32 len, svm_fifo_chunk_t **last)
void svm_fifo_add_subscriber(svm_fifo_t *f, u8 subscriber)
Add io events subscriber to list.
#define pool_free(p)
Free a pool.
static u32 f_cursize(svm_fifo_t *f, u32 head, u32 tail)
Fifo current size, i.e., number of bytes enqueued.
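Both f_cursize and f_free_count fall out of the free-running index scheme: bytes enqueued is always tail - head in u32 arithmetic, and free space is the fifo size minus that, even when tail has wrapped. An illustrative standalone model (helper names are made up; the real helpers take a pre-loaded head/tail pair):

  #include <assert.h>
  #include <stdint.h>

  typedef uint32_t u32;

  static u32 model_cursize (u32 head, u32 tail) { return tail - head; }
  static u32 model_free (u32 size, u32 head, u32 tail)
  {
    return size - model_cursize (head, tail);
  }

  int
  main (void)
  {
    u32 head = 0xffffff00, tail = 0x00000100; /* tail wrapped */
    assert (model_cursize (head, tail) == 512);
    assert (model_free (4096, head, tail) == 4096 - 512);
    return 0;
  }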
static void f_load_head_tail_prod(svm_fifo_t *f, u32 *head, u32 *tail)
Load head and tail optimized for producer.
u32 start_byte
chunk start byte
u8 svm_fifo_is_sane(svm_fifo_t *f)
Check if fifo is sane.
static svm_fifo_chunk_t * svm_fifo_find_next_chunk(svm_fifo_t *f, svm_fifo_chunk_t *start, u32 pos)
#define OOO_SEGMENT_INVALID_INDEX
u8 * format_ooo_segment(u8 *s, va_list *args)
static svm_fifo_chunk_t * f_lookup_clear_enq_chunks(svm_fifo_t *f, svm_fifo_chunk_t *start, u32 end_pos)
u32 svm_fifo_n_ooo_segments(svm_fifo_t *f)
Number of out-of-order segments for fifo.
static void * clib_mem_alloc_aligned_or_null(uword size, uword align)
#define uword_to_pointer(u, type)
u8 * format_svm_fifo(u8 *s, va_list *args)
static u8 f_chunk_includes_pos(svm_fifo_chunk_t *c, u32 pos)
static int f_pos_geq(u32 a, u32 b)
rb_node_index_t deq_rb_index
deq node index if chunk in rbtree
static void clib_mem_free(void *p)
u8 data[0]
start of chunk data
void svm_fifo_del_subscriber(svm_fifo_t *f, u8 subscriber)
Remove io events subscriber from list.
int svm_fifo_enqueue_with_offset(svm_fifo_t *f, u32 offset, u32 len, u8 *src)
Enqueue a future segment.
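An out-of-order enqueue sketch, e.g. for a TCP-like receiver: data landing ahead of tail is written at an offset and tracked in the ooo segment list; once the in-order bytes arrive, ooo_segment_try_collect advances tail past any segments that became contiguous. Assumes the fifo is empty beforehand; buffer names are illustrative:

  #include <svm/svm_fifo.h>

  /* Bytes [100, 200) arrive before bytes [0, 100). The first call only
   * records an ooo segment; the second enqueues in order, after which
   * the fifo collects the now-contiguous segment and tail has advanced
   * by 200 in total. */
  static void
  ooo_example (svm_fifo_t *f, u8 *seg_100_200, u8 *seg_0_100)
  {
    svm_fifo_enqueue_with_offset (f, 100 /* offset from tail */, 100,
                                  seg_100_200);
    /* tail unchanged; svm_fifo_max_dequeue (f) still returns 0 */
    svm_fifo_enqueue (f, 100, seg_0_100);
    /* both segments are now readable in order */
  }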
static u32 ooo_segment_end_pos(ooo_segment_t *s)
static void f_update_ooo_deq(svm_fifo_t *f, u32 start_pos, u32 end_pos)
static uword pointer_to_uword(const void *p)
#define clib_atomic_store_rel_n(a, b)
u32 length
Length of segment.
u8 * svm_fifo_dump_trace(u8 *s, svm_fifo_t *f)
u32 next
Next linked-list element pool index.
u32 svm_fifo_max_read_chunk(svm_fifo_t *f)
Max contiguous chunk of data that can be read.
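For zero-copy reads, svm_fifo_max_read_chunk bounds how many bytes are contiguous at the head. A sketch that processes data in place and then drops it; it assumes the svm_fifo_head () accessor from svm_fifo.h (not listed above), which returns a pointer to the current head byte:

  #include <svm/svm_fifo.h>

  static void
  consume_in_place (svm_fifo_t *f, void (*process) (const u8 *, u32))
  {
    u32 len = svm_fifo_max_read_chunk (f);
    if (!len)
      return;
    process (svm_fifo_head (f), len); /* assumed head-pointer accessor */
    svm_fifo_dequeue_drop (f, len);
  }

Alternatively, svm_fifo_segments/svm_fifo_segments_free (whose internals appear in the excerpts at lines 1144-1182) expose up to two segments covering all readable data, including across a wrap.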
static int f_pos_leq(u32 a, u32 b)
int svm_fifo_fill_chunk_list(svm_fifo_t *f)
Ensure the whole fifo size is writeable.
static u8 rb_node_is_tnil(rb_tree_t *rt, rb_node_t *n)
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
static uword max_log2(uword x)
static rb_node_t * f_find_node_rbtree(rb_tree_t *rt, u32 pos)
static svm_fifo_chunk_t * f_find_chunk_rbtree(rb_tree_t *rt, u32 pos)
rb_node_index_t enq_rb_index
enq node index if chunk in rbtree
struct clib_bihash_value offset
template key/value backing page structure
static int f_try_chunk_alloc(svm_fifo_t *f, u32 head, u32 tail, u32 len)
static __clib_unused ooo_segment_t * ooo_segment_last(svm_fifo_t *f)
#define vec_foreach(var, vec)
Vector iterator.
u32 svm_fifo_max_write_chunk(svm_fifo_t *f)
Max contiguous chunk of data that can be written.
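The write-side mirror, again a sketch: reserve contiguous space at the tail, let the caller fill it directly, and commit with svm_fifo_enqueue_nocopy. svm_fifo_tail () is assumed to be the header's tail-pointer accessor:

  #include <svm/svm_fifo.h>

  static u32
  produce_in_place (svm_fifo_t *f, u32 (*fill) (u8 *, u32))
  {
    u32 room = svm_fifo_max_write_chunk (f);
    if (!room)
      return 0;
    u32 written = fill (svm_fifo_tail (f), room); /* assumed accessor */
    svm_fifo_enqueue_nocopy (f, written); /* advance tail, no copy */
    return written;
  }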
void rb_tree_del_node(rb_tree_t *rt, rb_node_t *z)
rb_node_index_t rb_tree_add_custom(rb_tree_t *rt, u32 key, uword opaque, rb_tree_lt_fn ltfn)
#define CLIB_CACHE_LINE_BYTES
int svm_fifo_dequeue_drop(svm_fifo_t *f, u32 len)
Dequeue and drop bytes from fifo.
struct _svm_fifo svm_fifo_t
rb_node_index_t root
root index
u32 start
Start of segment, normalized.
void svm_fifo_init_ooo_lookup(svm_fifo_t *f, u8 ooo_type)
Initialize rbtrees used for ooo lookups.
static int f_pos_lt(u32 a, u32 b)
static void ooo_segment_add(svm_fifo_t *f, u32 offset, u32 head, u32 tail, u32 length)
Add segment to fifo's out-of-order segment list.
svm_fifo_t * svm_fifo_alloc(u32 data_size_in_bytes)
Creates a fifo in the current heap.
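Note that, per the excerpt at line 434 above, the requested data size is rounded up to a power of two (1 << max_log2 (data_size_in_bytes)) before the first chunk is allocated, so for example a 5000-byte request is backed by an 8192-byte chunk.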
static void f_update_ooo_enq(svm_fifo_t *f, u32 start_pos, u32 end_pos)
static uword pool_elts(void *v)
Number of active elements in a pool.