53 #ifndef CLIB_MARCH_VARIANT 60 u32 normalized_start = (seg->
start + f->nitems - f->tail) % f->nitems;
61 s =
format (s,
"[%u, %u], len %u, next %d, prev %d", normalized_start,
62 (normalized_start + seg->
length) % f->nitems, seg->
length,
112 memset (f->data, 0xFF, f->nitems);
115 for (i = 0; i <
vec_len (data); i++)
118 for (i = 0; i < trace_len; i++)
121 if (trace[i].action == 1)
124 s =
format (s,
"adding [%u, %u]:", trace[i].offset,
126 trace[i].len) % dummy_fifo->nitems);
128 trace[i].len, &data[offset]);
130 else if (trace[i].action == 2)
133 s =
format (s,
"adding [%u, %u]:", 0, trace[i].len);
139 s =
format (s,
"read: %u", trace[i].len);
155 u32 ooo_segment_index = f->ooos_list_head;
162 ooo_segment_index = seg->
next;
172 int verbose = va_arg (*args,
int);
177 s =
format (s,
"cursize %u nitems %u has_event %d\n",
178 f->cursize, f->nitems, f->has_event);
179 s =
format (s,
" head %d tail %d segment manager %u\n", f->head, f->tail,
184 (s,
" vpp session %d thread %d app session %d thread %d\n",
185 f->master_session_index, f->master_thread_index,
186 f->client_session_index, f->client_thread_index);
190 s =
format (s,
" ooo pool %d active elts newest %u\n",
191 pool_elts (f->ooo_segments), f->ooos_newest);
203 u32 rounded_data_size;
206 rounded_data_size = (1 << (
max_log2 (data_size_in_bytes)));
212 memset (f, 0,
sizeof (*f));
213 f->nitems = data_size_in_bytes;
225 if (--f->refcnt == 0)
257 next->prev = cur->
prev;
267 f->ooos_list_head = cur->
next;
281 u32 new_index, s_end_pos, s_index;
282 u32 normalized_position, normalized_end_position;
285 normalized_position = (f->tail +
offset) % f->nitems;
286 normalized_end_position = (f->tail + offset + length) % f->nitems;
293 f->ooos_list_head = s - f->ooo_segments;
294 f->ooos_newest = f->ooos_list_head;
318 s_index = s - f->ooo_segments;
325 new_index = new_s - f->ooo_segments;
333 prev->
next = new_index;
338 f->ooos_list_head = new_index;
341 new_s->
next = s_index;
343 f->ooos_newest = new_index;
347 else if (
position_gt (f, normalized_position, s_end_pos))
350 new_index = new_s - f->ooo_segments;
358 new_s->
prev = s_index;
360 f->ooos_newest = new_index;
372 s->
start = normalized_position;
374 f->ooos_newest = s - f->ooo_segments;
380 if (
position_gt (f, normalized_end_position, s_end_pos))
387 normalized_end_position))
401 f->ooos_newest = s - f->ooo_segments;
413 u32 index, bytes = 0;
419 ASSERT (diff != n_bytes_enqueued);
421 if (diff > n_bytes_enqueued)
425 while (0 <= diff && diff < n_bytes_enqueued)
427 index = s - f->ooo_segments;
434 f->tail %= f->nitems;
454 ASSERT (bytes <= f->nitems);
459 const u8 * copy_from_here)
461 u32 total_copy_bytes, first_copy_bytes, second_copy_bytes;
474 total_copy_bytes = (nitems - cursize) < max_bytes ?
475 (nitems - cursize) : max_bytes;
480 first_copy_bytes = ((nitems - f->tail) < total_copy_bytes)
481 ? (nitems - f->tail) : total_copy_bytes;
483 clib_memcpy (&f->data[f->tail], copy_from_here, first_copy_bytes);
484 f->tail += first_copy_bytes;
485 f->tail = (f->tail == nitems) ? 0 : f->tail;
488 second_copy_bytes = total_copy_bytes - first_copy_bytes;
489 if (second_copy_bytes)
491 clib_memcpy (&f->data[f->tail], copy_from_here + first_copy_bytes,
493 f->tail += second_copy_bytes;
494 f->tail = (f->tail == nitems) ? 0 : f->tail;
502 ASSERT (max_bytes <= (nitems - cursize));
503 f->tail += max_bytes;
504 f->tail = f->tail % nitems;
505 total_copy_bytes = max_bytes;
515 ASSERT (cursize + total_copy_bytes <= nitems);
516 __sync_fetch_and_add (&f->cursize, total_copy_bytes);
518 return (total_copy_bytes);
521 #ifndef CLIB_MARCH_VARIANT 524 const u8 * copy_from_here)
541 u32 total_copy_bytes, first_copy_bytes, second_copy_bytes;
542 u32 cursize, nitems, normalized_offset;
550 ASSERT (required_bytes < nitems);
552 normalized_offset = (f->tail +
offset) % nitems;
555 if ((required_bytes + offset) > (nitems - cursize))
563 total_copy_bytes = required_bytes;
566 first_copy_bytes = ((nitems - normalized_offset) < total_copy_bytes)
567 ? (nitems - normalized_offset) : total_copy_bytes;
569 clib_memcpy (&f->data[normalized_offset], copy_from_here, first_copy_bytes);
572 second_copy_bytes = total_copy_bytes - first_copy_bytes;
573 if (second_copy_bytes)
575 normalized_offset += first_copy_bytes;
576 normalized_offset %= nitems;
578 ASSERT (normalized_offset == 0);
581 copy_from_here + first_copy_bytes, second_copy_bytes);
587 #ifndef CLIB_MARCH_VARIANT 602 first_chunk = f->nitems - f->head;
603 ASSERT (len <= f->nitems);
604 if (len <= first_chunk)
608 clib_memcpy (&f->data[f->head], data, first_chunk);
609 clib_memcpy (&f->data[0], data + first_chunk, len - first_chunk);
617 u32 total_copy_bytes, first_copy_bytes, second_copy_bytes;
628 total_copy_bytes = (cursize < max_bytes) ? cursize : max_bytes;
633 first_copy_bytes = ((nitems - f->head) < total_copy_bytes)
634 ? (nitems - f->head) : total_copy_bytes;
635 clib_memcpy (copy_here, &f->data[f->head], first_copy_bytes);
636 f->head += first_copy_bytes;
637 f->head = (f->head == nitems) ? 0 : f->head;
640 second_copy_bytes = total_copy_bytes - first_copy_bytes;
641 if (second_copy_bytes)
644 &f->data[f->head], second_copy_bytes);
645 f->head += second_copy_bytes;
646 f->head = (f->head == nitems) ? 0 : f->head;
653 ASSERT (max_bytes <= cursize);
654 f->head += max_bytes;
655 f->head = f->head % nitems;
656 cursize -= max_bytes;
657 total_copy_bytes = max_bytes;
660 ASSERT (f->head <= nitems);
661 ASSERT (cursize >= total_copy_bytes);
662 __sync_fetch_and_sub (&f->cursize, total_copy_bytes);
664 return (total_copy_bytes);
667 #ifndef CLIB_MARCH_VARIANT 678 u32 max_bytes,
u8 * copy_here)
680 u32 total_copy_bytes, first_copy_bytes, second_copy_bytes;
681 u32 cursize, nitems, real_head;
689 real_head = f->head + relative_offset;
690 real_head = real_head >= nitems ? real_head - nitems : real_head;
693 total_copy_bytes = (cursize - relative_offset < max_bytes) ?
694 cursize - relative_offset : max_bytes;
700 ((nitems - real_head) < total_copy_bytes) ?
701 (nitems - real_head) : total_copy_bytes;
702 clib_memcpy (copy_here, &f->data[real_head], first_copy_bytes);
705 second_copy_bytes = total_copy_bytes - first_copy_bytes;
706 if (second_copy_bytes)
708 clib_memcpy (copy_here + first_copy_bytes, &f->data[0],
712 return total_copy_bytes;
715 #ifndef CLIB_MARCH_VARIANT 728 u32 total_drop_bytes, first_drop_bytes, second_drop_bytes;
739 total_drop_bytes = (cursize < max_bytes) ? cursize : max_bytes;
745 ((nitems - f->head) < total_drop_bytes) ?
746 (nitems - f->head) : total_drop_bytes;
747 f->head += first_drop_bytes;
748 f->head = (f->head == nitems) ? 0 : f->head;
751 second_drop_bytes = total_drop_bytes - first_drop_bytes;
752 if (second_drop_bytes)
754 f->head += second_drop_bytes;
755 f->head = (f->head == nitems) ? 0 : f->head;
758 ASSERT (f->head <= nitems);
759 ASSERT (cursize >= total_drop_bytes);
760 __sync_fetch_and_sub (&f->cursize, total_drop_bytes);
762 return total_drop_bytes;
769 __sync_fetch_and_sub (&f->cursize, f->cursize);
784 fs[0].
len = ((nitems - f->head) < cursize) ? (nitems - f->head) : cursize;
785 fs[0].
data = f->data + f->head;
787 if (fs[0].len < cursize)
789 fs[1].
len = cursize - fs[0].
len;
790 fs[1].
data = f->data;
803 u32 total_drop_bytes;
809 total_drop_bytes = fs[0].
len + fs[1].
len;
813 f->head = (f->head + fs[0].
len) % f->nitems;
814 total_drop_bytes = fs[0].
len;
816 __sync_fetch_and_sub (&f->cursize, total_drop_bytes);
837 f->head = f->tail = pointer % f->nitems;
#define vec_validate(V, I)
Make sure the vector is long enough for the given index (no header, unspecified alignment).
static vlib_cli_command_t trace
(constructor) VLIB_CLI_COMMAND (trace)
void svm_fifo_init_pointers(svm_fifo_t *f, u32 pointer)
Set the FIFO head and tail pointers to the requested offset (taken modulo the FIFO size).
static u32 position_diff(svm_fifo_t *f, u32 posa, u32 posb)
int svm_fifo_segments(svm_fifo_t *f, svm_fifo_segment_t *fs)
static u8 svm_fifo_has_ooo_data(svm_fifo_t *f)
void svm_fifo_free(svm_fifo_t *f)
u32 prev
Previous linked-list element pool index.
#define CLIB_MARCH_FN_SELECT(fn)
void svm_fifo_overwrite_head(svm_fifo_t *f, u8 *data, u32 len)
static int ooo_segment_try_collect(svm_fifo_t *f, u32 n_bytes_enqueued)
Removes segments that can now be enqueued because the fifo's tail has advanced.
ooo_segment_t * svm_fifo_first_ooo_segment(svm_fifo_t *f)
void svm_fifo_dequeue_drop_all(svm_fifo_t *f)
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
static u32 ooo_segment_distance_from_tail(svm_fifo_t *f, u32 pos)
struct _svm_fifo svm_fifo_t
memset(h->entries, 0, sizeof(h->entries[0])*entries)
static void ooo_segment_add(svm_fifo_t *f, u32 offset, u32 length)
Add segment to fifo's out-of-order segment list.
static u32 svm_fifo_max_dequeue(svm_fifo_t *f)
u8 * format_ooo_list(u8 *s, va_list *args)
int svm_fifo_enqueue_nowait(svm_fifo_t *f, u32 max_bytes, const u8 *copy_from_here)
#define pool_elt_at_index(p, i)
Returns a pointer to the pool element at the given index.
static u8 position_gt(svm_fifo_t *f, u32 a, u32 b)
u32 svm_fifo_number_ooo_segments(svm_fifo_t *f)
#define pool_put(P, E)
Free an object E in pool P.
static ooo_segment_t * ooo_segment_next(svm_fifo_t *f, ooo_segment_t *s)
#define svm_fifo_trace_add(_f, _s, _l, _t)
u8 * svm_fifo_replay(u8 *s, svm_fifo_t *f, u8 no_read, u8 verbose)
#define pool_free(p)
Free a pool.
static u8 position_leq(svm_fifo_t *f, u32 a, u32 b)
#define clib_memcpy(a, b, c)
static ooo_segment_t * ooo_segment_get_prev(svm_fifo_t *f, ooo_segment_t *s)
static void ooo_segment_del(svm_fifo_t *f, u32 index)
CLIB_MARCH_FN(svm_fifo_enqueue_nowait, int, svm_fifo_t *f, u32 max_bytes, const u8 *copy_from_here)
#define OOO_SEGMENT_INVALID_INDEX
u8 * format_ooo_segment(u8 *s, va_list *args)
static void * clib_mem_alloc_aligned_or_null(uword size, uword align)
u8 * format_svm_fifo(u8 *s, va_list *args)
static u8 position_lt(svm_fifo_t *f, u32 a, u32 b)
static void clib_mem_free(void *p)
int svm_fifo_enqueue_with_offset(svm_fifo_t *f, u32 offset, u32 required_bytes, u8 *copy_from_here)
u32 length
Length of segment.
u8 * svm_fifo_dump_trace(u8 *s, svm_fifo_t *f)
u32 next
Next linked-list element pool index.
template key/value backing page structure
void svm_fifo_segments_free(svm_fifo_t *f, svm_fifo_segment_t *fs)
int svm_fifo_dequeue_drop(svm_fifo_t *f, u32 max_bytes)
static u32 ooo_segment_end_pos(svm_fifo_t *f, ooo_segment_t *s)
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
static ooo_segment_t * ooo_segment_new(svm_fifo_t *f, u32 start, u32 length)
static uword max_log2(uword x)
struct clib_bihash_value offset
template key/value backing page structure
#define vec_foreach(var, vec)
Vector iterator.
#define SVM_FIFO_INVALID_SESSION_INDEX
#define CLIB_CACHE_LINE_BYTES
int svm_fifo_peek(svm_fifo_t *f, u32 relative_offset, u32 max_bytes, u8 *copy_here)
int svm_fifo_dequeue_nowait(svm_fifo_t *f, u32 max_bytes, u8 *copy_here)
u32 start
Start of segment, normalized.
static u32 ooo_segment_distance_to_tail(svm_fifo_t *f, u32 pos)
svm_fifo_t * svm_fifo_create(u32 data_size_in_bytes)
Create an SVM FIFO in the current heap.
static uword pool_elts(void *v)
Number of active elements in a pool.