FD.io VPP v20.09-64-g4f7b92f0a — Vector Packet Processing
svm_fifo.c
Source listing captured from the generated (Doxygen) documentation of this file.
Note: original line numbers are embedded in the text, and lines that were hyperlinks in the HTML were lost during extraction.
1 /*
2  * Copyright (c) 2016-2019 Cisco and/or its affiliates.
3  * Copyright (c) 2019 Arm Limited
4  * Copyright (c) 2010-2017 Intel Corporation and/or its affiliates.
5  * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
6  * Inspired from DPDK rte_ring.h (SPSC only) (derived from freebsd bufring.h).
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at:
10  *
11  * http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  */
19 
20 #include <svm/svm_fifo.h>
21 #include <svm/fifo_segment.h>
22 #include <vppinfra/cpu.h>
23 
/* svm_fifo_copy_to_chunk: copy `len` bytes from `src` into the chunk chain
 * starting at `c`, at fifo position `tail_idx`; walks into following chunks
 * when the write crosses a chunk boundary and reports the last chunk written
 * through `*last`.
 * NOTE(review): extraction dropped the CLIB_MARCH_FN header line carrying the
 * function name (orig. lines 24/26) — confirm signature against upstream. */
25  svm_fifo_chunk_t * c, u32 tail_idx, const u8 * src, u32 len,
27 {
28  u32 n_chunk;
29 
/* Caller must hand us the chunk that actually contains tail_idx. */
30  ASSERT (f_pos_geq (tail_idx, c->start_byte)
31  && f_pos_lt (tail_idx, c->start_byte + c->length));
32 
/* Switch to a chunk-relative offset. */
33  tail_idx -= c->start_byte;
34  n_chunk = c->length - tail_idx;
35  if (n_chunk <= len)
36  {
/* Write spills past the current chunk: fill it, then keep copying into
 * successor chunks until `to_copy` drains to zero. */
37  u32 to_copy = len;
38  clib_memcpy_fast (&c->data[tail_idx], src, n_chunk);
39  c = c->next;
40  while ((to_copy -= n_chunk))
41  {
42  n_chunk = clib_min (c->length, to_copy);
43  clib_memcpy_fast (&c->data[0], src + (len - to_copy), n_chunk);
/* Only advance when the chunk was fully consumed. */
44  c = c->length <= to_copy ? c->next : c;
45  }
46  if (*last)
47  *last = c;
48  }
49  else
50  {
/* Fits entirely within the current chunk. */
51  clib_memcpy_fast (&c->data[tail_idx], src, len);
52  }
53 }
54 
/* svm_fifo_copy_from_chunk: mirror of the above for reads — copy `len` bytes
 * at fifo position `head_idx` out of the chunk chain into `dst`. The
 * CLIB_MEM_UNPOISON calls unpoison follow-on chunks for ASan before reading.
 * NOTE(review): function-name header line (orig. 55/57) dropped by extraction. */
56  svm_fifo_chunk_t * c, u32 head_idx, u8 * dst, u32 len,
58 {
59  u32 n_chunk;
60 
61  ASSERT (f_pos_geq (head_idx, c->start_byte)
62  && f_pos_lt (head_idx, c->start_byte + c->length));
63 
64  head_idx -= c->start_byte;
65  n_chunk = c->length - head_idx;
66  if (n_chunk <= len)
67  {
68  u32 to_copy = len;
69  clib_memcpy_fast (dst, &c->data[head_idx], n_chunk);
70  c = c->next;
71  while ((to_copy -= n_chunk))
72  {
73  CLIB_MEM_UNPOISON (c, sizeof (*c));
74  CLIB_MEM_UNPOISON (c->data, c->length);
75  n_chunk = clib_min (c->length, to_copy);
76  clib_memcpy_fast (dst + (len - to_copy), &c->data[0], n_chunk);
77  c = c->length <= to_copy ? c->next : c;
78  }
79  if (*last)
80  *last = c;
81  }
82  else
83  {
84  clib_memcpy_fast (dst, &c->data[head_idx], len);
85  }
86 }
87 
88 #ifndef CLIB_MARCH_VARIANT
89 
/* Dispatch wrappers selecting the march-specialized copy variants, plus the
 * small inline helpers used by the out-of-order (ooo) segment machinery.
 * NOTE(review): extraction dropped the name/body lines of several of these
 * (orig. 91, 94, 99-100, 102, 107, 113, 119, 121-122, 127, 129-130) —
 * restore from upstream svm_fifo.c before compiling. */
90 static inline void
92  const u8 * src, u32 len, svm_fifo_chunk_t ** last)
93 {
95  last);
96 }
97 
98 static inline void
101 {
103  last);
104 }
105 
/* ooo_segment_end_pos: position one past the segment's last byte. */
106 static inline u32
108 {
109  return (s->start + s->length);
110 }
111 
/* Frees the fifo's entire ooo segment pool. */
112 void
114 {
115  pool_free (f->ooo_segments);
116 }
117 
/* ooo_segment_prev / ooo_segment_next: doubly-linked-list navigation over
 * pool indices; 0 is returned at either end of the list. */
118 static inline ooo_segment_t *
120 {
122  return 0;
123  return pool_elt_at_index (f->ooo_segments, s->prev);
124 }
125 
126 static inline ooo_segment_t *
128 {
130  return 0;
131  return pool_elt_at_index (f->ooo_segments, s->next);
132 }
133 
/* ooo_segment_alloc: pool-allocate a segment covering [start, start+length).
 * NOTE(review): orig. line 143 (presumably initializing s->prev/s->next to
 * OOO_SEGMENT_INVALID_INDEX) was dropped by extraction. */
134 static inline ooo_segment_t *
135 ooo_segment_alloc (svm_fifo_t * f, u32 start, u32 length)
136 {
137  ooo_segment_t *s;
138 
139  pool_get (f->ooo_segments, s);
140 
141  s->start = start;
142  s->length = length;
144 
145  return s;
146 }
147 
/* ooo_segment_free: unlink segment `index` from the list (fixing up the list
 * head if it was first) and return it to the pool.
 * NOTE(review): signature line (orig. 149) dropped; takes (f, index). */
148 static inline void
150 {
151  ooo_segment_t *cur, *prev = 0, *next = 0;
152  cur = pool_elt_at_index (f->ooo_segments, index);
153 
154  if (cur->next != OOO_SEGMENT_INVALID_INDEX)
155  {
156  next = pool_elt_at_index (f->ooo_segments, cur->next);
157  next->prev = cur->prev;
158  }
159 
160  if (cur->prev != OOO_SEGMENT_INVALID_INDEX)
161  {
162  prev = pool_elt_at_index (f->ooo_segments, cur->prev);
163  prev->next = cur->next;
164  }
165  else
166  {
167  f->ooos_list_head = cur->next;
168  }
169 
170  pool_put (f->ooo_segments, cur);
171 }
172 
173 /**
174  * Add segment to fifo's out-of-order segment list. Takes care of merging
175  * adjacent segments and removing overlapping ones.
176  */
177 static void
178 ooo_segment_add (svm_fifo_t * f, u32 offset, u32 head, u32 tail, u32 length)
179 {
180  ooo_segment_t *s, *new_s, *prev, *next, *it;
181  u32 new_index, s_end_pos, s_index;
182  u32 offset_pos, offset_end_pos;
183 
/* The segment must fit inside the currently free fifo space. */
184  ASSERT (offset + length <= f_free_count (f, head, tail));
185 
/* Convert the caller's tail-relative offset into absolute fifo positions. */
186  offset_pos = tail + offset;
187  offset_end_pos = tail + offset + length;
188 
189  f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
190 
/* Empty list: new segment becomes the head. */
191  if (f->ooos_list_head == OOO_SEGMENT_INVALID_INDEX)
192  {
193  s = ooo_segment_alloc (f, offset_pos, length);
194  f->ooos_list_head = s - f->ooo_segments;
195  f->ooos_newest = f->ooos_list_head;
196  return;
197  }
198 
199  /* Find first segment that starts after new segment */
200  s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head);
201  while (s->next != OOO_SEGMENT_INVALID_INDEX
202  && f_pos_lt (s->start, offset_pos))
203  s = pool_elt_at_index (f->ooo_segments, s->next);
204 
205  /* If we have a previous and we overlap it, use it as starting point */
206  prev = ooo_segment_prev (f, s);
207  if (prev && f_pos_leq (offset_pos, ooo_segment_end_pos (prev)))
208  {
209  s = prev;
210  s_end_pos = ooo_segment_end_pos (s);
211 
212  /* Since we have previous, offset start position cannot be smaller
213  * than prev->start. Check tail */
214  ASSERT (f_pos_lt (s->start, offset_pos));
215  goto check_tail;
216  }
217 
218  s_index = s - f->ooo_segments;
219  s_end_pos = ooo_segment_end_pos (s);
220 
221  /* No overlap, add before current segment */
222  if (f_pos_lt (offset_end_pos, s->start))
223  {
224  new_s = ooo_segment_alloc (f, offset_pos, length);
225  new_index = new_s - f->ooo_segments;
226 
227  /* Pool might've moved, get segment again */
228  s = pool_elt_at_index (f->ooo_segments, s_index);
/* NOTE(review): extraction dropped orig. line 229 here — expected to be the
 * `if (s->prev != OOO_SEGMENT_INVALID_INDEX)` guard; restore from upstream. */
230  {
231  new_s->prev = s->prev;
232  prev = pool_elt_at_index (f->ooo_segments, new_s->prev);
233  prev->next = new_index;
234  }
235  else
236  {
237  /* New head */
238  f->ooos_list_head = new_index;
239  }
240 
241  new_s->next = s_index;
242  s->prev = new_index;
243  f->ooos_newest = new_index;
244  return;
245  }
246  /* No overlap, add after current segment */
247  else if (f_pos_gt (offset_pos, s_end_pos))
248  {
249  new_s = ooo_segment_alloc (f, offset_pos, length);
250  new_index = new_s - f->ooo_segments;
251 
252  /* Pool might've moved, get segment again */
253  s = pool_elt_at_index (f->ooo_segments, s_index);
254 
255  /* Needs to be last */
/* NOTE(review): extraction dropped orig. line 256 here — expected to be
 * `ASSERT (s->next == OOO_SEGMENT_INVALID_INDEX);`; restore from upstream. */
257 
258  new_s->prev = s_index;
259  s->next = new_index;
260  f->ooos_newest = new_index;
261 
262  return;
263  }
264 
265  /*
266  * Merge needed
267  */
268 
269  /* Merge at head */
270  if (f_pos_lt (offset_pos, s->start))
271  {
272  s->start = offset_pos;
273  s->length = s_end_pos - s->start;
274  f->ooos_newest = s - f->ooo_segments;
275  }
276 
277 check_tail:
278 
279  /* Overlapping tail */
280  if (f_pos_gt (offset_end_pos, s_end_pos))
281  {
282  s->length = offset_end_pos - s->start;
283 
284  /* Remove the completely overlapped segments in the tail */
285  it = ooo_segment_next (f, s);
286  while (it && f_pos_leq (ooo_segment_end_pos (it), offset_end_pos))
287  {
288  next = ooo_segment_next (f, it);
289  ooo_segment_free (f, it - f->ooo_segments);
290  it = next;
291  }
292 
293  /* If partial overlap with last, merge */
294  if (it && f_pos_leq (it->start, offset_end_pos))
295  {
296  s->length = ooo_segment_end_pos (it) - s->start;
297  ooo_segment_free (f, it - f->ooo_segments);
298  }
299  f->ooos_newest = s - f->ooo_segments;
300  }
301 }
302 
303 /**
304  * Removes segments that can now be enqueued because the fifo's tail has
305  * advanced. Returns the number of bytes added to tail.
306  */
307 static int
308 ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued, u32 * tail)
309 {
310  u32 s_index, bytes = 0;
311  ooo_segment_t *s;
312  i32 diff;
313 
314  s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head);
/* Signed distance from the first ooo segment's start to the new tail;
 * positive means tail has reached into (or past) the segment. */
315  diff = *tail - s->start;
316 
317  ASSERT (diff != n_bytes_enqueued);
318 
/* First segment still strictly ahead of what was enqueued: nothing to do. */
319  if (diff > n_bytes_enqueued)
320  return 0;
321 
322  /* If last tail update overlaps one/multiple ooo segments, remove them */
323  while (0 <= diff && diff < n_bytes_enqueued)
324  {
325  s_index = s - f->ooo_segments;
326 
327  /* Segment end is beyond the tail. Advance tail and remove segment */
328  if (s->length > diff)
329  {
330  bytes = s->length - diff;
331  *tail = *tail + bytes;
332  ooo_segment_free (f, s_index);
333  break;
334  }
335 
336  /* If we have next go on */
/* NOTE(review): extraction dropped orig. line 337 here — expected to be the
 * `if (s->next != OOO_SEGMENT_INVALID_INDEX)` guard; restore from upstream. */
338  {
339  s = pool_elt_at_index (f->ooo_segments, s->next);
340  diff = *tail - s->start;
341  ooo_segment_free (f, s_index);
342  }
343  /* End of search */
344  else
345  {
346  ooo_segment_free (f, s_index);
347  break;
348  }
349  }
350 
351  ASSERT (bytes <= f->size);
352  return bytes;
353 }
354 
/* ooo_segment_last: walk to the tail of the ooo list; 0 when list is empty.
 * NOTE(review): extraction dropped orig. lines 356 (name line) and 363
 * (initial `s = pool_elt_at_index (...)` load); restore from upstream. */
355 __clib_unused static ooo_segment_t *
357 {
358  ooo_segment_t *s;
359 
360  if (f->ooos_list_head == OOO_SEGMENT_INVALID_INDEX)
361  return 0;
362 
364  while (s->next != OOO_SEGMENT_INVALID_INDEX)
365  s = pool_elt_at_index (f->ooo_segments, s->next);
366  return s;
367 }
368 
/* svm_fifo_init: reset fifo state for a fifo of `size` bytes, compute the
 * minimum chunk-allocation granularity and assign start_byte offsets to the
 * pre-existing chunk chain.
 * NOTE(review): signature line (orig. 370) dropped; takes (f, size). */
369 void
371 {
372  svm_fifo_chunk_t *c, *prev;
373  u32 min_alloc;
374 
375  f->size = size;
376  f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX;
377  f->segment_index = SVM_FIFO_INVALID_INDEX;
378  f->refcnt = 1;
379  f->head = f->tail = f->flags = 0;
380  f->head_chunk = f->tail_chunk = f->start_chunk;
381  f->ooo_deq = f->ooo_enq = 0;
382 
/* min_alloc: size/8 for fifos over 32KB, else 4KB, capped at 64KB. */
383  min_alloc = size > 32 << 10 ? size >> 3 : 4096;
384  min_alloc = clib_min (min_alloc, 64 << 10);
385  f->min_alloc = min_alloc;
386 
387  /*
388  * Initialize chunks
389  */
/* Chunks hold consecutive byte ranges: each start_byte continues where the
 * previous chunk ended. */
390  f->start_chunk->start_byte = 0;
391  prev = f->start_chunk;
392  c = prev->next;
393 
394  while (c)
395  {
396  c->start_byte = prev->start_byte + prev->length;
397  prev = c;
398  c = c->next;
399  }
400 }
401 
/* svm_fifo_init_ooo_lookup: lazily initialize the rbtree used for ooo chunk
 * lookups — enqueue-side tree for ooo_type 0, dequeue-side otherwise.
 * NOTE(review): signature line (orig. 403) dropped; takes (f, ooo_type). */
402 void
404 {
405  if (ooo_type == 0)
406  {
407  ASSERT (!rb_tree_is_init (&f->ooo_enq_lookup));
408  rb_tree_init (&f->ooo_enq_lookup);
409  }
410  else
411  {
412  ASSERT (!rb_tree_is_init (&f->ooo_deq_lookup));
413  rb_tree_init (&f->ooo_deq_lookup);
414  }
415 }
416 
417 /**
418  * Creates a fifo in the current heap. Fails vs blow up the process
419  */
420 svm_fifo_t *
421 svm_fifo_alloc (u32 data_size_in_bytes)
422 {
423  u32 rounded_data_size;
/* NOTE(review): extraction dropped orig. lines 424 (`svm_fifo_chunk_t *c;`)
 * and 427 (the fifo header allocation assigning `f`); restore from upstream. */
425  svm_fifo_t *f;
426 
428  if (f == 0)
429  return 0;
430 
431  clib_memset (f, 0, sizeof (*f));
432 
433  /* always round fifo data size to the next highest power-of-two */
434  rounded_data_size = (1 << (max_log2 (data_size_in_bytes)));
435  c = clib_mem_alloc_aligned_or_null (sizeof (*c) + rounded_data_size,
/* NOTE(review): orig. line 436 (the alignment argument, likely
 * CLIB_CACHE_LINE_BYTES) dropped by extraction. */
437  if (!c)
438  {
439  clib_mem_free (f);
440  return 0;
441  }
442 
443  clib_memset (c, 0, sizeof (*c));
444  c->start_byte = 0;
445  c->length = data_size_in_bytes;
/* NOTE(review): orig. lines 446-447 (initializing c->enq_rb_index and
 * c->deq_rb_index to RBTREE_TNIL_INDEX) dropped by extraction. */
448  f->start_chunk = f->end_chunk = c;
449 
450  return f;
451 }
452 
453 /**
454  * Creates a fifo chunk in the current heap
455  */
/* NOTE(review): function name/signature (orig. 456-457,
 * `svm_fifo_chunk_alloc (u32 size)`) and the local chunk declaration
 * (orig. 459) dropped by extraction. */
458 {
460  u32 rounded_size;
461 
462  /* round chunk size to the next highest power-of-two */
463  rounded_size = (1 << (max_log2 (size)));
464  c = clib_mem_alloc_aligned_or_null (sizeof (*c) + rounded_size,
466  if (c == 0)
467  return 0;
468 
469  clib_memset (c, 0, sizeof (*c));
470  c->length = rounded_size;
471  return c;
472 }
473 
474 /**
475  * Find chunk for given byte position
476  *
477  * @param f fifo
478  * @param pos normalized position in fifo
479  *
480  * @return chunk that includes given position or 0
481  */
/* Linear scan from the fifo's first chunk.
 * NOTE(review): name line (orig. 483) and local decl (485) dropped. */
482 static svm_fifo_chunk_t *
484 {
486 
487  c = f->start_chunk;
488  while (c && !f_chunk_includes_pos (c, pos))
489  c = c->next;
490 
491  return c;
492 }
493 
/* Same scan but starting from a caller-supplied chunk `start`.
 * NOTE(review): name line (orig. 495) and local decl (497) dropped. */
494 static svm_fifo_chunk_t *
496 {
498 
499  ASSERT (start != 0);
500 
501  c = start;
502  while (c && !f_chunk_includes_pos (c, pos))
503  c = c->next;
504 
505  return c;
506 }
507 
/* svm_fifo_max_read_chunk: bytes that can be read without leaving the
 * current head chunk (consumer side).
 * NOTE(review): name line (orig. 509) dropped by extraction. */
508 u32
510 {
511  u32 head, tail, end_chunk;
512 
513  f_load_head_tail_cons (f, &head, &tail);
514  ASSERT (!f->head_chunk || f_chunk_includes_pos (f->head_chunk, head));
515 
516  if (!f->head_chunk)
517  {
518  f->head_chunk = svm_fifo_find_chunk (f, head);
519  if (PREDICT_FALSE (!f->head_chunk))
520  return 0;
521  }
522 
523  end_chunk = f_chunk_end (f->head_chunk);
524 
/* Clamp to whichever comes first: end of chunk or current tail. */
525  return f_pos_lt (end_chunk, tail) ? end_chunk - head : tail - head;
526 }
527 
/* svm_fifo_max_write_chunk: bytes writable without leaving the tail chunk
 * (producer side). NOTE(review): name line (orig. 529) dropped. */
528 u32
530 {
531  u32 head, tail;
532 
533  f_load_head_tail_prod (f, &head, &tail);
534  ASSERT (!f->tail_chunk || f_chunk_includes_pos (f->tail_chunk, tail));
535 
536  return f->tail_chunk ? f_chunk_end (f->tail_chunk) - tail : 0;
537 }
538 
/* f_find_node_rbtree: descend the lookup tree for the node whose key is the
 * closest one not greater than `pos` (falling back to the predecessor when
 * the search goes left off the tree).
 * NOTE(review): signature line (orig. 540, `(rb_tree_t * rt, u32 pos)`)
 * dropped by extraction. */
539 static rb_node_t *
541 {
542  rb_node_t *cur, *prev;
543 
544  cur = rb_node (rt, rt->root);
545  if (PREDICT_FALSE (rb_node_is_tnil (rt, cur)))
546  return 0;
547 
548  while (pos != cur->key)
549  {
550  prev = cur;
551  if (f_pos_lt (pos, cur->key))
552  {
553  cur = rb_node_left (rt, cur);
554  if (rb_node_is_tnil (rt, cur))
555  {
556  cur = rb_tree_predecessor (rt, prev);
557  break;
558  }
559  }
560  else
561  {
562  cur = rb_node_right (rt, cur);
563  if (rb_node_is_tnil (rt, cur))
564  {
565  cur = prev;
566  break;
567  }
568  }
569  }
570 
571  if (rb_node_is_tnil (rt, cur))
572  return 0;
573 
574  return cur;
575 }
576 
/* f_find_chunk_rbtree: resolve `pos` to a chunk via the rbtree; returns 0
 * when the tree is uninitialized or the found chunk doesn't cover pos.
 * NOTE(review): name line (orig. 578), chunk decl (580) and the line reading
 * the chunk out of the node's opaque data (orig. 589) dropped. */
577 static svm_fifo_chunk_t *
579 {
581  rb_node_t *n;
582 
583  if (!rb_tree_is_init (rt))
584  return 0;
585 
586  n = f_find_node_rbtree (rt, pos);
587  if (!n)
588  return 0;
590  if (f_chunk_includes_pos (c, pos))
591  return c;
592 
593  return 0;
594 }
595 
/* f_update_ooo_enq: position f->ooo_enq on the chunk covering start_pos and
 * make sure all chunks up to end_pos are tracked in the enq lookup rbtree.
 * NOTE(review): this listing dropped several hyperlinked lines — orig. 600
 * (chunk decl), 613-615 and 636-637 (the rb_tree_add2 insertions and node
 * bookkeeping), and 620 (reading the chunk from the found node). Restore
 * from upstream before treating this as compilable. */
596 static void
597 f_update_ooo_enq (svm_fifo_t * f, u32 start_pos, u32 end_pos)
598 {
599  rb_tree_t *rt = &f->ooo_enq_lookup;
601  rb_node_t *cur;
602 
603  /* Use linear search if rbtree is not initialized */
604  if (PREDICT_FALSE (!rb_tree_is_init (rt)))
605  {
606  f->ooo_enq = svm_fifo_find_next_chunk (f, f->tail_chunk, start_pos);
607  return;
608  }
609 
/* Empty tree: seed the search from the tail chunk. */
610  if (rt->root == RBTREE_TNIL_INDEX)
611  {
612  c = f->tail_chunk;
616  }
617  else
618  {
619  cur = f_find_node_rbtree (rt, start_pos);
621  ASSERT (f_pos_leq (c->start_byte, start_pos));
622  }
623 
624  if (f_chunk_includes_pos (c, start_pos))
625  f->ooo_enq = c;
626 
627  if (f_chunk_includes_pos (c, end_pos))
628  return;
629 
/* Walk (and index) forward until the chunk covering end_pos is reached, or
 * an already-indexed chunk / end of chain stops us. */
630  do
631  {
632  c = c->next;
633  if (!c || c->enq_rb_index != RBTREE_TNIL_INDEX)
634  break;
635 
638 
639  if (f_chunk_includes_pos (c, start_pos))
640  f->ooo_enq = c;
641  }
642  while (!f_chunk_includes_pos (c, end_pos));
643 }
644 
/* f_update_ooo_deq: dequeue-side mirror of f_update_ooo_enq, operating on
 * f->ooo_deq and the deq lookup rbtree, seeded from the start chunk.
 * NOTE(review): same extraction gaps as above (orig. 650, 662-664, 669,
 * 685-686). */
645 static void
646 f_update_ooo_deq (svm_fifo_t * f, u32 start_pos, u32 end_pos)
647 {
648  rb_tree_t *rt = &f->ooo_deq_lookup;
649  rb_node_t *cur;
651 
652  /* Use linear search if rbtree is not initialized */
653  if (PREDICT_FALSE (!rb_tree_is_init (rt)))
654  {
655  f->ooo_deq = svm_fifo_find_chunk (f, start_pos);
656  return;
657  }
658 
659  if (rt->root == RBTREE_TNIL_INDEX)
660  {
661  c = f->start_chunk;
665  }
666  else
667  {
668  cur = f_find_node_rbtree (rt, start_pos);
670  ASSERT (f_pos_leq (c->start_byte, start_pos));
671  }
672 
673  if (f_chunk_includes_pos (c, start_pos))
674  f->ooo_deq = c;
675 
676  if (f_chunk_includes_pos (c, end_pos))
677  return;
678 
679  do
680  {
681  c = c->next;
682  if (!c || c->deq_rb_index != RBTREE_TNIL_INDEX)
683  break;
684 
687 
688  if (f_chunk_includes_pos (c, start_pos))
689  f->ooo_deq = c;
690  }
691  while (!f_chunk_includes_pos (c, end_pos));
692 }
693 
/* f_lookup_clear_enq_chunks: drop enq rbtree entries for all chunks from
 * `start` up to the chunk covering end_pos; returns that chunk.
 * NOTE(review): orig. lines 695 (signature start), 699 (chunk decl), 705
 * (the `if (c->enq_rb_index != RBTREE_TNIL_INDEX)` guard) and 709/722
 * (resetting enq_rb_index to RBTREE_TNIL_INDEX) dropped by extraction. */
694 static svm_fifo_chunk_t *
696  u32 end_pos)
697 {
698  rb_tree_t *rt = &f->ooo_enq_lookup;
700  rb_node_t *n;
701 
702  c = start;
703  while (c && !f_chunk_includes_pos (c, end_pos))
704  {
706  {
707  n = rb_node (rt, c->enq_rb_index);
708  rb_tree_del_node (rt, n);
710  }
711 
712  c = c->next;
713  }
714 
715  /* No ooo segments left, so make sure the current chunk
716  * is not tracked in the enq rbtree */
717  if (f->ooos_list_head == OOO_SEGMENT_INVALID_INDEX
718  && c && c->enq_rb_index != RBTREE_TNIL_INDEX)
719  {
720  n = rb_node (rt, c->enq_rb_index);
721  rb_tree_del_node (rt, n);
723  }
724 
725  return c;
726 }
727 
/* f_lookup_clear_deq_chunks: dequeue-side counterpart of the above.
 * NOTE(review): same extraction gaps (orig. 729, 733, 739, 743). */
728 static svm_fifo_chunk_t *
730  u32 end_pos)
731 {
732  rb_tree_t *rt = &f->ooo_deq_lookup;
734  rb_node_t *n;
735 
736  c = start;
737  while (c && !f_chunk_includes_pos (c, end_pos))
738  {
740  {
741  n = rb_node (rt, c->deq_rb_index);
742  rb_tree_del_node (rt, n);
744  }
745 
746  c = c->next;
747  }
748 
749  return c;
750 }
751 
/* svm_fifo_free_chunk_lookup: release all nodes of both lookup rbtrees.
 * NOTE(review): name line (orig. 753) dropped by extraction. */
752 void
754 {
755  rb_tree_free_nodes (&f->ooo_enq_lookup);
756  rb_tree_free_nodes (&f->ooo_deq_lookup);
757 }
758 
/* svm_fifo_free: drop a reference; frees the fifo header on last release.
 * NOTE(review): name line (orig. 760) and the cleanup call on orig. 767
 * (presumably free_ooo_segment_list / svm_fifo_free_chunk_lookup) were
 * dropped by extraction — restore from upstream. */
759 void
761 {
762  ASSERT (f->refcnt > 0);
763 
764  if (--f->refcnt == 0)
765  {
766  /* ooo data is not allocated on segment heap */
768  clib_mem_free (f);
769  }
770 }
771 
/* svm_fifo_overwrite_head: rewrite the first `len` already-dequeued-visible
 * bytes at the head without moving the head pointer. Asserts the write spans
 * at most two chunks.
 * NOTE(review): signature line (orig. 773, `(f, src, len)`) and the chunk
 * declaration (orig. 777) dropped by extraction. */
772 void
774 {
775  u32 n_chunk;
776  u32 head, tail, head_idx;
778 
779  ASSERT (len <= f->size);
780 
781  f_load_head_tail_cons (f, &head, &tail);
782 
783  if (!f->head_chunk)
784  f->head_chunk = svm_fifo_find_chunk (f, head);
785 
786  c = f->head_chunk;
787  head_idx = head - c->start_byte;
788  n_chunk = c->length - head_idx;
789  if (len <= n_chunk)
790  clib_memcpy_fast (&c->data[head_idx], src, len);
791  else
792  {
793  ASSERT (len - n_chunk <= c->next->length);
794  clib_memcpy_fast (&c->data[head_idx], src, n_chunk);
795  clib_memcpy_fast (&c->next->data[0], src + n_chunk, len - n_chunk);
796  }
797 }
798 
/* f_try_chunk_alloc: grow the fifo's chunk chain so at least `len` bytes can
 * be written past `tail`. Allocates at least min_alloc (bounded by remaining
 * fifo space), links the new chunks after end_chunk and assigns start_byte
 * offsets. Returns 0 on success, -1 when the segment allocator fails.
 * NOTE(review): signature line (orig. 800) and orig. lines 820-821
 * (initializing the new chunks' enq/deq rb indices) dropped by extraction. */
799 static int
801 {
802  svm_fifo_chunk_t *c, *cur, *prev;
803  u32 alloc_size, free_alloced;
804 
805  free_alloced = f_chunk_end (f->end_chunk) - tail;
806 
807  alloc_size = clib_min (f->min_alloc, f->size - (tail - head));
808  alloc_size = clib_max (alloc_size, len - free_alloced);
809 
810  c = fsh_alloc_chunk (f->fs_hdr, f->slice_index, alloc_size);
811  if (PREDICT_FALSE (!c))
812  return -1;
813 
814  cur = c;
815  prev = f->end_chunk;
816 
817  while (cur)
818  {
819  cur->start_byte = prev->start_byte + prev->length;
822 
823  prev = cur;
824  cur = cur->next;
825  }
826 
827  prev->next = 0;
828  f->end_chunk->next = c;
829  f->end_chunk = prev;
830 
831  if (!f->tail_chunk)
832  f->tail_chunk = c;
833 
834  return 0;
835 }
836 
/* svm_fifo_enqueue: producer-side in-order enqueue of up to `len` bytes from
 * `src`. Grows the chunk chain on demand, collects any ooo segments the new
 * tail now covers, and publishes the tail with release semantics. Returns
 * bytes enqueued or SVM_FIFO_EFULL / SVM_FIFO_EGROW.
 * NOTE(review): signature line (orig. 838, `(f, len, src)`) dropped. */
837 int
839 {
840  u32 tail, head, free_count;
841  svm_fifo_chunk_t *old_tail_c;
842 
843  f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
844 
845  f_load_head_tail_prod (f, &head, &tail);
846 
847  /* free space in fifo can only increase during enqueue: SPSC */
848  free_count = f_free_count (f, head, tail);
849 
850  if (PREDICT_FALSE (free_count == 0))
851  return SVM_FIFO_EFULL;
852 
853  /* number of bytes we're going to copy */
854  len = clib_min (free_count, len);
855 
/* Not enough allocated chunk space for the write: try to grow, and fall
 * back to a partial write of whatever space exists. */
856  if (f_pos_gt (tail + len, f_chunk_end (f->end_chunk)))
857  {
858  if (PREDICT_FALSE (f_try_chunk_alloc (f, head, tail, len)))
859  {
860  len = f_chunk_end (f->end_chunk) - tail;
861  if (!len)
862  return SVM_FIFO_EGROW;
863  }
864  }
865 
866  old_tail_c = f->tail_chunk;
867 
868  svm_fifo_copy_to_chunk (f, f->tail_chunk, tail, src, len, &f->tail_chunk);
869  tail = tail + len;
870 
871  svm_fifo_trace_add (f, head, len, 2);
872 
873  /* collect out-of-order segments */
874  if (PREDICT_FALSE (f->ooos_list_head != OOO_SEGMENT_INVALID_INDEX))
875  {
876  len += ooo_segment_try_collect (f, len, &tail);
877  /* Tail chunk might've changed even if nothing was collected */
878  f->tail_chunk = f_lookup_clear_enq_chunks (f, old_tail_c, tail);
879  f->ooo_enq = 0;
880  }
881 
882  /* store-rel: producer owned index (paired with load-acq in consumer) */
883  clib_atomic_store_rel_n (&f->tail, tail);
884 
885  return len;
886 }
887 
888 /**
889  * Enqueue a future segment.
890  *
891  * Two choices: either copies the entire segment, or copies nothing
892  * Returns 0 of the entire segment was copied
893  * Returns -1 if none of the segment was copied due to lack of space
894  */
/* NOTE(review): signature line (orig. 896, `(f, offset, len, src)`)
 * dropped by extraction. */
895 int
897 {
898  u32 tail, head, free_count, enq_pos;
899 
900  f_load_head_tail_prod (f, &head, &tail);
901 
902  /* free space in fifo can only increase during enqueue: SPSC */
903  free_count = f_free_count (f, head, tail);
904  f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
905 
906  /* will this request fit? */
907  if ((len + offset) > free_count)
908  return SVM_FIFO_EFULL;
909 
910  enq_pos = tail + offset;
911 
912  if (f_pos_gt (enq_pos + len, f_chunk_end (f->end_chunk)))
913  {
914  if (PREDICT_FALSE (f_try_chunk_alloc (f, head, tail, offset + len)))
915  return SVM_FIFO_EGROW;
916  }
917 
918  svm_fifo_trace_add (f, offset, len, 1);
919  ooo_segment_add (f, offset, head, tail, len);
920 
/* Reposition the cached ooo enqueue chunk if it no longer covers enq_pos. */
921  if (!f->ooo_enq || !f_chunk_includes_pos (f->ooo_enq, enq_pos))
922  f_update_ooo_enq (f, enq_pos, enq_pos + len);
923 
924  svm_fifo_copy_to_chunk (f, f->ooo_enq, enq_pos, src, len, &f->ooo_enq);
925 
926  return 0;
927 }
928 
929 /**
930  * Advance tail
931  */
/* svm_fifo_enqueue_nocopy: publish `len` bytes that the producer already
 * wrote in place (e.g. via svm_fifo_segments).
 * NOTE(review): name line (orig. 933) dropped by extraction. */
932 void
934 {
935  u32 tail;
936 
937  ASSERT (len <= svm_fifo_max_enqueue_prod (f));
938  /* load-relaxed: producer owned index */
939  tail = f->tail;
940  tail = tail + len;
941 
942  if (rb_tree_is_init (&f->ooo_enq_lookup))
943  {
944  f->tail_chunk = f_lookup_clear_enq_chunks (f, f->tail_chunk, tail);
945  f->ooo_enq = 0;
946  }
947  else
948  {
949  f->tail_chunk = svm_fifo_find_next_chunk (f, f->tail_chunk, tail);
950  }
951 
952  /* store-rel: producer owned index (paired with load-acq in consumer) */
953  clib_atomic_store_rel_n (&f->tail, tail);
954 }
955 
/* f_unlink_chunks: detach fully-consumed chunks from the front of the fifo
 * up to (but excluding) the chunk covering end_pos, cleaning deq rbtree
 * entries when `maybe_ooo` is set. Returns the unlinked sub-chain (or 0 if
 * nothing could be unlinked) so the caller can hand it to fsh_collect_chunks.
 * NOTE(review): the `static svm_fifo_chunk_t *` return-type line (orig. 956)
 * was dropped by extraction. */
957 f_unlink_chunks (svm_fifo_t * f, u32 end_pos, u8 maybe_ooo)
958 {
959  svm_fifo_chunk_t *start, *prev = 0, *c;
960  rb_tree_t *rt;
961  rb_node_t *n;
962 
963  ASSERT (!f_chunk_includes_pos (f->start_chunk, end_pos));
964 
965  if (maybe_ooo)
966  rt = &f->ooo_deq_lookup;
967 
968  c = f->start_chunk;
969 
970  do
971  {
/* Remove stale deq rbtree tracking for every chunk we pass. */
972  if (maybe_ooo && c->deq_rb_index != RBTREE_TNIL_INDEX)
973  {
974  n = rb_node (rt, c->deq_rb_index);
975  ASSERT (n == f_find_node_rbtree (rt, c->start_byte));
976  rb_tree_del_node (rt, n);
977  c->deq_rb_index = RBTREE_TNIL_INDEX;
978  }
979  if (!c->next)
980  break;
981  prev = c;
982  c = c->next;
983  }
984  while (!f_chunk_includes_pos (c, end_pos));
985 
/* Invalidate the cached ooo dequeue chunk if it points into the region
 * being unlinked. */
986  if (maybe_ooo)
987  {
988  if (f->ooo_deq && f_pos_lt (f->ooo_deq->start_byte, f_chunk_end (c)))
989  f->ooo_deq = 0;
990  }
991  else
992  {
993  if (PREDICT_FALSE (f->ooo_deq != 0))
994  f->ooo_deq = 0;
995  }
996 
997  /* Avoid unlinking the last chunk */
998  if (!prev)
999  return 0;
1000 
1001  prev->next = 0;
1002  start = f->start_chunk;
1003  f->start_chunk = c;
1004 
1005  return start;
1006 }
1007 
/* svm_fifo_dequeue: consumer-side in-order dequeue of up to `len` bytes into
 * `dst`; recycles fully-consumed chunks and publishes the head with release
 * semantics. Returns bytes dequeued or SVM_FIFO_EEMPTY.
 * NOTE(review): signature line (orig. 1009, `(f, len, dst)`) dropped. */
1008 int
1010 {
1011  u32 tail, head, cursize;
1012 
1013  f_load_head_tail_cons (f, &head, &tail);
1014 
1015  /* current size of fifo can only increase during dequeue: SPSC */
1016  cursize = f_cursize (f, head, tail);
1017 
1018  if (PREDICT_FALSE (cursize == 0))
1019  return SVM_FIFO_EEMPTY;
1020 
1021  len = clib_min (cursize, len);
1022 
1023  if (!f->head_chunk)
1024  f->head_chunk = svm_fifo_find_chunk (f, head);
1025 
1026  svm_fifo_copy_from_chunk (f, f->head_chunk, head, dst, len, &f->head_chunk);
1027  head = head + len;
1028 
1029  /* In order dequeues are not supported in combination with ooo peeking.
1030  * Use svm_fifo_dequeue_drop instead. */
1031  ASSERT (rb_tree_n_nodes (&f->ooo_deq_lookup) <= 1);
1032 
/* Return chunks the head has fully passed to the segment allocator. */
1033  if (f_pos_geq (head, f_chunk_end (f->start_chunk)))
1034  fsh_collect_chunks (f->fs_hdr, f->slice_index,
1035  f_unlink_chunks (f, head, 0));
1036 
1037  /* store-rel: consumer owned index (paired with load-acq in producer) */
1038  clib_atomic_store_rel_n (&f->head, head);
1039 
1040  return len;
1041 }
1042 
/* svm_fifo_peek: non-destructive read of up to `len` bytes at `offset` past
 * the head; does not move the head.
 * NOTE(review): signature line (orig. 1044, `(f, offset, len, dst)`)
 * dropped by extraction. */
1043 int
1045 {
1046  u32 tail, head, cursize, head_idx;
1047 
1048  f_load_head_tail_cons (f, &head, &tail);
1049 
1050  /* current size of fifo can only increase during peek: SPSC */
1051  cursize = f_cursize (f, head, tail);
1052 
1053  if (PREDICT_FALSE (cursize < offset))
1054  return SVM_FIFO_EEMPTY;
1055 
1056  len = clib_min (cursize - offset, len);
1057  head_idx = head + offset;
1058 
1059  CLIB_MEM_UNPOISON (f->ooo_deq, sizeof (*f->ooo_deq));
1060  if (!f->ooo_deq || !f_chunk_includes_pos (f->ooo_deq, head_idx))
1061  f_update_ooo_deq (f, head_idx, head_idx + len);
1062 
1063  svm_fifo_copy_from_chunk (f, f->ooo_deq, head_idx, dst, len, &f->ooo_deq);
1064  return len;
1065 }
1066 
/* svm_fifo_dequeue_drop: advance the head by up to `len` bytes without
 * copying data out; recycles passed chunks. Returns bytes dropped or
 * SVM_FIFO_EEMPTY. NOTE(review): signature line (orig. 1068) dropped. */
1067 int
1069 {
1070  u32 total_drop_bytes, tail, head, cursize;
1071 
1072  f_load_head_tail_cons (f, &head, &tail);
1073 
1074  /* number of bytes available */
1075  cursize = f_cursize (f, head, tail);
1076  if (PREDICT_FALSE (cursize == 0))
1077  return SVM_FIFO_EEMPTY;
1078 
1079  /* number of bytes we're going to drop */
1080  total_drop_bytes = clib_min (cursize, len);
1081 
1082  svm_fifo_trace_add (f, tail, total_drop_bytes, 3);
1083 
1084  /* move head */
1085  head = head + total_drop_bytes;
1086 
1087  if (f_pos_geq (head, f_chunk_end (f->start_chunk)))
1088  {
/* maybe_ooo = 1: peeking may have left deq rbtree entries to clean up. */
1089  fsh_collect_chunks (f->fs_hdr, f->slice_index,
1090  f_unlink_chunks (f, head, 1));
1091  f->head_chunk =
1092  f_chunk_includes_pos (f->start_chunk, head) ? f->start_chunk : 0;
1093  }
1094 
1095  /* store-rel: consumer owned index (paired with load-acq in producer) */
1096  clib_atomic_store_rel_n (&f->head, head);
1097 
1098  return total_drop_bytes;
1099 }
1100 
1101 /**
1102  * Drop all data from fifo
1103  *
1104  */
/* NOTE(review): name line (orig. 1106, `svm_fifo_dequeue_drop_all (f)`)
 * dropped by extraction. */
1105 void
1107 {
1108  u32 head, tail;
1109 
1110  f_load_head_tail_all_acq (f, &head, &tail);
1111 
1112  if (!f->head_chunk || !f_chunk_includes_pos (f->head_chunk, head))
1113  f->head_chunk = svm_fifo_find_chunk (f, head);
1114 
1115  f->head_chunk = f_lookup_clear_deq_chunks (f, f->head_chunk, tail);
1116 
1117  if (f_pos_geq (tail, f_chunk_end (f->start_chunk)))
1118  fsh_collect_chunks (f->fs_hdr, f->slice_index,
1119  f_unlink_chunks (f, tail, 0));
1120 
1121  /* store-rel: consumer owned index (paired with load-acq in producer) */
1122  clib_atomic_store_rel_n (&f->head, tail);
1123 }
1124 
/* svm_fifo_fill_chunk_space: pre-allocate chunks so the whole nominal fifo
 * size is backed by memory; 0 on success, SVM_FIFO_EGROW on failure.
 * NOTE(review): signature line (orig. 1126) dropped by extraction. */
1125 int
1127 {
1128  u32 head, tail;
1129 
1130  f_load_head_tail_prod (f, &head, &tail);
1131 
1132  if (f_chunk_end (f->end_chunk) - head >= f->size)
1133  return 0;
1134 
1135  if (f_try_chunk_alloc (f, head, tail, f->size - (tail - head)))
1136  return SVM_FIFO_EGROW;
1137 
1138  return 0;
1139 }
1140 
/* svm_fifo_segments: expose the readable bytes as up to two zero-copy
 * segments in `fs`. Returns the readable byte count or SVM_FIFO_EEMPTY.
 * NOTE(review): signature line (orig. 1142) dropped; per the trailing doc
 * index it is `int svm_fifo_segments (svm_fifo_t * f, svm_fifo_seg_t * fs)`.
 * NOTE(review): this addresses data via f->head_chunk->data + head_idx,
 * i.e. it assumes a single-chunk fifo layout — confirm against upstream. */
1141 int
1143 {
1144  u32 cursize, head, tail, head_idx;
1145 
1146  f_load_head_tail_cons (f, &head, &tail);
1147 
1148  /* consumer function, cursize can only increase while we're working */
1149  cursize = f_cursize (f, head, tail);
1150 
1151  if (PREDICT_FALSE (cursize == 0))
1152  return SVM_FIFO_EEMPTY;
1153 
1154  head_idx = head;
1155 
/* Wrapped data: two segments, tail part first then the wrap-around. */
1156  if (tail < head)
1157  {
1158  fs[0].len = f->size - head_idx;
1159  fs[0].data = f->head_chunk->data + head_idx;
1160  fs[1].len = cursize - fs[0].len;
1161  fs[1].data = f->head_chunk->data;
1162  }
1163  else
1164  {
1165  fs[0].len = cursize;
1166  fs[0].data = f->head_chunk->data + head_idx;
1167  fs[1].len = 0;
1168  fs[1].data = 0;
1169  }
1170  return cursize;
1171 }
1172 
/* svm_fifo_segments_free: consume the segments previously handed out by
 * svm_fifo_segments by advancing the head past them.
 * NOTE(review): signature line (orig. 1174) dropped by extraction. */
1173 void
1175 {
1176  u32 head;
1177 
1178  /* consumer owned index */
1179  head = f->head;
1180 
1181  ASSERT (fs[0].data == f->head_chunk->data + head);
1182  head = (head + fs[0].len + fs[1].len);
1183  /* store-rel: consumer owned index (paired with load-acq in producer) */
1184  clib_atomic_store_rel_n (&f->head, head);
1185 }
1186 
1187 /**
1188  * Clones fifo
1189  *
1190  * Assumptions:
1191  * - no prod and cons are accessing either dest or src fifo
1192  * - fifo is not multi chunk
1193  */
/* NOTE(review): signature line (orig. 1195, `(df, sf)` — dest, src)
 * dropped by extraction. */
1194 void
1196 {
1197  u32 head, tail;
1198 
1199  /* Support only single chunk clones for now */
1200  ASSERT (svm_fifo_n_chunks (sf) == 1);
1201 
1202  clib_memcpy_fast (df->head_chunk->data, sf->head_chunk->data, sf->size);
1203 
1204  f_load_head_tail_all_acq (sf, &head, &tail);
1205  clib_atomic_store_rel_n (&df->head, head);
1206  clib_atomic_store_rel_n (&df->tail, tail);
1207 }
1208 
/* svm_fifo_n_ooo_segments: number of pending out-of-order segments.
 * NOTE(review): name line (orig. 1210) dropped by extraction. */
1209 u32
1211 {
1212  return pool_elts (f->ooo_segments);
1213 }
1214 
/* svm_fifo_first_ooo_segment: pointer to the head ooo segment; only valid
 * when the list is non-empty. NOTE(review): name line (orig. 1216) dropped. */
1215 ooo_segment_t *
1217 {
1218  return pool_elt_at_index (f->ooo_segments, f->ooos_list_head);
1219 }
1220 
1221 /**
1222  * Set fifo pointers to requested offset
1223  */
/* svm_fifo_init_pointers (f, head, tail) — per the trailing doc index; the
 * name line (orig. 1225) and chunk decl (orig. 1227) were dropped. */
1224 void
1226 {
1228 
1229  clib_atomic_store_rel_n (&f->head, head);
1230  clib_atomic_store_rel_n (&f->tail, tail);
1231 
1232  c = svm_fifo_find_chunk (f, head);
1233  ASSERT (c != 0);
1234  f->head_chunk = f->ooo_deq = c;
1235  c = svm_fifo_find_chunk (f, tail);
1236  ASSERT (c != 0);
1237  f->tail_chunk = f->ooo_enq = c;
1238 }
1239 
/* svm_fifo_add_subscriber: register an event subscriber, silently ignored
 * once SVM_FIFO_MAX_EVT_SUBSCRIBERS is reached.
 * NOTE(review): signature line (orig. 1241) dropped by extraction. */
1240 void
1242 {
1243  if (f->n_subscribers >= SVM_FIFO_MAX_EVT_SUBSCRIBERS)
1244  return;
1245  f->subscribers[f->n_subscribers++] = subscriber;
1246 }
1247 
/* svm_fifo_del_subscriber: remove a subscriber by swapping the last entry
 * into its slot. NOTE(review): signature line (orig. 1249) dropped. */
1248 void
1250 {
1251  int i;
1252 
1253  for (i = 0; i < f->n_subscribers; i++)
1254  {
1255  if (f->subscribers[i] != subscriber)
1256  continue;
1257  f->subscribers[i] = f->subscribers[f->n_subscribers - 1];
1258  f->n_subscribers--;
1259  break;
1260  }
1261 }
1262 
/* svm_fifo_is_sane: debug-only consistency check — verifies cached chunk
 * pointers cover the indices they claim, cached ooo chunks agree with the
 * lookup structures, the chunk chain is contiguous, and total chunk bytes
 * cover the occupied region. Returns 1 when consistent, 0 otherwise.
 * NOTE(review): name line (orig. 1264) dropped by extraction. */
1263 u8
1265 {
1266  svm_fifo_chunk_t *tmp;
1267 
1268  if (f->head_chunk && !f_chunk_includes_pos (f->head_chunk, f->head))
1269  return 0;
1270  if (f->tail_chunk && !f_chunk_includes_pos (f->tail_chunk, f->tail))
1271  return 0;
1272  if (f->ooo_deq)
1273  {
1274  if (rb_tree_is_init (&f->ooo_deq_lookup))
1275  {
1276  if (f_pos_lt (f->ooo_deq->start_byte, f->start_chunk->start_byte)
1277  || f_pos_gt (f->ooo_deq->start_byte,
1278  f_chunk_end (f->end_chunk)))
1279  return 0;
1280 
1281  tmp = f_find_chunk_rbtree (&f->ooo_deq_lookup,
1282  f->ooo_deq->start_byte);
1283  }
1284  else
1285  tmp = svm_fifo_find_chunk (f, f->ooo_deq->start_byte);
1286  if (tmp != f->ooo_deq)
1287  return 0;
1288  }
1289  if (f->ooo_enq)
1290  {
1291  if (rb_tree_is_init (&f->ooo_enq_lookup))
1292  {
1293  if (f_pos_lt (f->ooo_enq->start_byte, f->start_chunk->start_byte)
1294  || f_pos_gt (f->ooo_enq->start_byte,
1295  f_chunk_end (f->end_chunk)))
1296  return 0;
1297 
1298  tmp = f_find_chunk_rbtree (&f->ooo_enq_lookup,
1299  f->ooo_enq->start_byte);
1300  }
1301  else
1302  {
1303  tmp = svm_fifo_find_next_chunk (f, f->tail_chunk,
1304  f->ooo_enq->start_byte);
1305  }
1306  if (tmp != f->ooo_enq)
1307  return 0;
1308  }
1309 
/* Multi-chunk fifo: validate chain contiguity and lookup consistency. */
1310  if (f->start_chunk->next)
1311  {
1312  svm_fifo_chunk_t *c, *prev = 0, *tmp;
1313  u32 chunks_bytes = 0;
1314 
1315  c = f->start_chunk;
1316  do
1317  {
1318  tmp = svm_fifo_find_chunk (f, c->start_byte);
1319  if (tmp != c)
1320  return 0;
1321  if (prev && (prev->start_byte + prev->length != c->start_byte))
1322  return 0;
1323 
1324  if (c->enq_rb_index != RBTREE_TNIL_INDEX)
1325  {
1326  tmp = f_find_chunk_rbtree (&f->ooo_enq_lookup, c->start_byte);
1327  if (tmp)
1328  {
1329  if (tmp != c)
1330  return 0;
1331  }
1332  }
1333  if (c->deq_rb_index != RBTREE_TNIL_INDEX)
1334  {
1335  tmp = f_find_chunk_rbtree (&f->ooo_deq_lookup, c->start_byte);
1336  if (tmp)
1337  {
1338  if (tmp != c)
1339  return 0;
1340  }
1341  }
1342 
1343  chunks_bytes += c->length;
1344  prev = c;
1345  c = c->next;
1346  }
1347  while (c);
1348 
1349  if (chunks_bytes < f->tail - f->head)
1350  return 0;
1351  }
1352 
1353  return 1;
1354 }
1355 
/* svm_fifo_n_chunks: count chunks in the fifo's chain.
 * NOTE(review): name line (orig. 1357) and chunk decl (orig. 1359)
 * dropped by extraction. */
1356 u32
1358 {
1360  int n_chunks = 0;
1361 
1362  c = f->start_chunk;
1363  while (c)
1364  {
1365  n_chunks++;
1366  c = c->next;
1367  }
1368 
1369  return n_chunks;
1370 }
1371 
1372 u8 *
1373 format_ooo_segment (u8 * s, va_list * args)
1374 {
1375  svm_fifo_t __clib_unused *f = va_arg (*args, svm_fifo_t *);
1376  ooo_segment_t *seg = va_arg (*args, ooo_segment_t *);
1377  s = format (s, "[%u, %u], len %u, next %d, prev %d", seg->start,
1378  seg->start + seg->length, seg->length, seg->next, seg->prev);
1379  return s;
1380 }
1381 
/* format_svm_fifo_trace: print the fifo's recorded trace elements, five per
 * line, when compiled with SVM_FIFO_TRACE; otherwise returns 0.
 * NOTE(review): signature line (orig. 1383, `(u8 * s, va_list * args)` style
 * or `(u8 * s, svm_fifo_t * f)`) dropped by extraction — confirm upstream. */
1382 u8 *
1384 {
1385 #if SVM_FIFO_TRACE
1386  svm_fifo_trace_elem_t *seg = 0;
1387  int i = 0;
1388 
1389  if (f->trace)
1390  {
1391  vec_foreach (seg, f->trace)
1392  {
1393  s = format (s, "{%u, %u, %u}, ", seg->offset, seg->len, seg->action);
1394  i++;
1395  if (i % 5 == 0)
1396  s = format (s, "\n");
1397  }
1398  s = format (s, "\n");
1399  }
1400  return s;
1401 #else
1402  return 0;
1403 #endif
1404 }
1405 
/* svm_fifo_replay: re-execute a fifo's recorded trace against a scratch
 * ("placeholder") fifo and format the resulting state — a debugging aid for
 * reproducing ooo-segment handling. `no_read` skips dequeue-drop actions;
 * `verbose` prints intermediate state after every step.
 * NOTE(review): orig. line 1411 (the `svm_fifo_trace_elem_t *trace;`
 * declaration) was dropped by extraction; restore from upstream. */
1406 u8 *
1407 svm_fifo_replay (u8 * s, svm_fifo_t * f, u8 no_read, u8 verbose)
1408 {
1409  int i, trace_len;
1410  u8 *data = 0;
1412  u32 offset;
1413  svm_fifo_t *placeholder_fifo;
1414 
1415  if (!f)
1416  return s;
1417 
1418 #if SVM_FIFO_TRACE
1419  trace = f->trace;
1420  trace_len = vec_len (trace);
1421 #else
1422  trace = 0;
1423  trace_len = 0;
1424 #endif
1425 
1426  placeholder_fifo = svm_fifo_alloc (f->size);
1427  svm_fifo_init (f, f->size);
/* Poison the data area and build a recognizable 0..n pattern to replay. */
1428  clib_memset (f->head_chunk->data, 0xFF, f->size);
1429  vec_validate (data, f->size);
1430  for (i = 0; i < vec_len (data); i++)
1431  data[i] = i;
1432 
1433  for (i = 0; i < trace_len; i++)
1434  {
1435  offset = trace[i].offset;
/* action 1 = ooo enqueue, 2 = in-order enqueue, else dequeue-drop. */
1436  if (trace[i].action == 1)
1437  {
1438  if (verbose)
1439  s = format (s, "adding [%u, %u]:", trace[i].offset,
1440  (trace[i].offset + trace[i].len));
1441  svm_fifo_enqueue_with_offset (placeholder_fifo, trace[i].offset,
1442  trace[i].len, &data[offset]);
1443  }
1444  else if (trace[i].action == 2)
1445  {
1446  if (verbose)
1447  s = format (s, "adding [%u, %u]:", 0, trace[i].len);
1448  svm_fifo_enqueue (placeholder_fifo, trace[i].len, &data[offset]);
1449  }
1450  else if (!no_read)
1451  {
1452  if (verbose)
1453  s = format (s, "read: %u", trace[i].len);
1454  svm_fifo_dequeue_drop (placeholder_fifo, trace[i].len);
1455  }
1456  if (verbose)
1457  s = format (s, "%U", format_svm_fifo, placeholder_fifo, 1);
1458  }
1459 
1460  s = format (s, "result: %U", format_svm_fifo, placeholder_fifo, 1);
1461 
1462  return s;
1463 }
1464 
1465 u8 *
1466 format_ooo_list (u8 * s, va_list * args)
1467 {
1468  svm_fifo_t *f = va_arg (*args, svm_fifo_t *);
1469  u32 indent = va_arg (*args, u32);
1470  u32 ooo_segment_index = f->ooos_list_head;
1471  ooo_segment_t *seg;
1472 
1473  while (ooo_segment_index != OOO_SEGMENT_INVALID_INDEX)
1474  {
1475  seg = pool_elt_at_index (f->ooo_segments, ooo_segment_index);
1476  s = format (s, "%U%U\n", format_white_space, indent, format_ooo_segment,
1477  f, seg);
1478  ooo_segment_index = seg->next;
1479  }
1480 
1481  return s;
1482 }
1483 
/* format_svm_fifo: vppinfra-format callback summarizing fifo state — size,
 * occupancy, head/tail indices and, at higher verbosity, session bindings
 * and the pending out-of-order segment list.
 * Varargs: (svm_fifo_t *f, int verbose). */
1484 u8 *
1485 format_svm_fifo (u8 * s, va_list * args)
1486 {
1487  svm_fifo_t *f = va_arg (*args, svm_fifo_t *);
1488  int verbose = va_arg (*args, int);
1489  u32 indent;
1490 
1491  if (!s)
1492  return s;
1493 
1494  indent = format_get_indent (s);
1495  s = format (s, "cursize %u nitems %u has_event %d min_alloc %u\n",
1496  svm_fifo_max_dequeue (f), f->size, f->has_event, f->min_alloc);
1497  s = format (s, "%Uhead %u tail %u segment manager %u\n", format_white_space,
1498  indent, f->head, f->tail, f->segment_manager);
1499 
1500  if (verbose > 1)
1501  s = format (s, "%Uvpp session %d thread %d app session %d thread %d\n",
1502  format_white_space, indent, f->master_session_index,
1503  f->master_thread_index, f->client_session_index,
1504  f->client_thread_index);
1505 
1506  if (verbose)
1507  {
1508  s = format (s, "%Uooo pool %d active elts newest %u\n",
1509  format_white_space, indent, pool_elts (f->ooo_segments),
1510  f->ooos_newest);
1511  if (svm_fifo_has_ooo_data (f))
1512  s = format (s, " %U", format_ooo_list, f, indent, verbose);
1513  }
1514  return s;
1515 }
1516 
1517 #endif
1518 /*
1519  * fd.io coding-style-patch-verification: ON
1520  *
1521  * Local Variables:
1522  * eval: (c-set-style "gnu")
1523  * End:
1524  */
u32 length
length of chunk in bytes
Definition: fifo_types.h:32
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:509
static void svm_fifo_copy_to_chunk(svm_fifo_t *f, svm_fifo_chunk_t *c, u32 tail_idx, const u8 *src, u32 len, svm_fifo_chunk_t **last)
Definition: svm_fifo.c:91
rb_node_t * rb_tree_predecessor(rb_tree_t *rt, rb_node_t *x)
Definition: rbtree.c:287
static void f_load_head_tail_all_acq(svm_fifo_t *f, u32 *head, u32 *tail)
Load head and tail independent of producer/consumer role.
Definition: svm_fifo.h:107
#define CLIB_MEM_UNPOISON(a, s)
Definition: sanitizer.h:47
static vlib_cli_command_t trace
(constructor) VLIB_CLI_COMMAND (trace)
Definition: vlib_api_cli.c:899
#define clib_min(x, y)
Definition: clib.h:327
static int ooo_segment_try_collect(svm_fifo_t *f, u32 n_bytes_enqueued, u32 *tail)
Removes segments that can now be enqueued because the fifo's tail has advanced.
Definition: svm_fifo.c:308
int svm_fifo_segments(svm_fifo_t *f, svm_fifo_seg_t *fs)
Definition: svm_fifo.c:1142
static u32 svm_fifo_max_enqueue_prod(svm_fifo_t *f)
Maximum number of bytes that can be enqueued into fifo.
Definition: svm_fifo.h:524
static ooo_segment_t * ooo_segment_next(svm_fifo_t *f, ooo_segment_t *s)
Definition: svm_fifo.c:127
static rb_node_t * rb_node_left(rb_tree_t *rt, rb_node_t *n)
Definition: rbtree.h:94
void svm_fifo_free_chunk_lookup(svm_fifo_t *f)
Cleanup fifo chunk lookup rb tree.
Definition: svm_fifo.c:753
static u8 svm_fifo_has_ooo_data(svm_fifo_t *f)
Check if fifo has out-of-order data.
Definition: svm_fifo.h:630
void svm_fifo_init_pointers(svm_fifo_t *f, u32 head, u32 tail)
Set fifo pointers to requested offset.
Definition: svm_fifo.c:1225
static u32 f_free_count(svm_fifo_t *f, u32 head, u32 tail)
Fifo free bytes, i.e., number of free bytes.
Definition: svm_fifo.h:132
void svm_fifo_segments_free(svm_fifo_t *f, svm_fifo_seg_t *fs)
Definition: svm_fifo.c:1174
void svm_fifo_free(svm_fifo_t *f)
Free fifo and associated state.
Definition: svm_fifo.c:760
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
u32 prev
Previous linked-list element pool index.
Definition: fifo_types.h:42
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
static void f_load_head_tail_cons(svm_fifo_t *f, u32 *head, u32 *tail)
Load head and tail optimized for consumer.
Definition: svm_fifo.h:80
static void svm_fifo_copy_from_chunk(svm_fifo_t *f, svm_fifo_chunk_t *c, u32 head_idx, u8 *dst, u32 len, svm_fifo_chunk_t **last)
Definition: svm_fifo.c:99
#define CLIB_MARCH_FN_SELECT(fn)
Definition: cpu.h:421
svm_fifo_chunk_t * fsh_alloc_chunk(fifo_segment_header_t *fsh, u32 slice_index, u32 chunk_size)
Allocate chunks in fifo segment.
Definition: fifo_segment.c:695
vl_api_address_t src
Definition: gre.api:54
static heap_elt_t * last(heap_header_t *h)
Definition: heap.c:53
int svm_fifo_peek(svm_fifo_t *f, u32 offset, u32 len, u8 *dst)
Peek data from fifo.
Definition: svm_fifo.c:1044
static svm_fifo_chunk_t * f_lookup_clear_deq_chunks(svm_fifo_t *f, svm_fifo_chunk_t *start, u32 end_pos)
Definition: svm_fifo.c:729
void svm_fifo_enqueue_nocopy(svm_fifo_t *f, u32 len)
Advance tail.
Definition: svm_fifo.c:933
void svm_fifo_init(svm_fifo_t *f, u32 size)
Initialize fifo.
Definition: svm_fifo.c:370
static rb_node_t * rb_node(rb_tree_t *rt, rb_node_index_t ri)
Definition: rbtree.h:82
static u32 format_get_indent(u8 *s)
Definition: format.h:72
#define RBTREE_TNIL_INDEX
Definition: rbtree.h:22
ooo_segment_t * svm_fifo_first_ooo_segment(svm_fifo_t *f)
First out-of-order segment for fifo.
Definition: svm_fifo.c:1216
void svm_fifo_dequeue_drop_all(svm_fifo_t *f)
Drop all data from fifo.
Definition: svm_fifo.c:1106
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:424
static rb_node_t * rb_node_right(rb_tree_t *rt, rb_node_t *n)
Definition: rbtree.h:88
u32 rb_tree_n_nodes(rb_tree_t *rt)
Definition: rbtree.c:470
void svm_fifo_overwrite_head(svm_fifo_t *f, u8 *src, u32 len)
Overwrite fifo head with new data.
Definition: svm_fifo.c:773
#define SVM_FIFO_INVALID_INDEX
Definition: svm_fifo.h:30
void rb_tree_free_nodes(rb_tree_t *rt)
Definition: rbtree.c:476
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
Definition: pool.h:252
unsigned char u8
Definition: types.h:56
u8 data[128]
Definition: ipsec_types.api:89
int rb_tree_is_init(rb_tree_t *rt)
Definition: rbtree.c:496
static ooo_segment_t * ooo_segment_alloc(svm_fifo_t *f, u32 start, u32 length)
Definition: svm_fifo.c:135
static svm_fifo_chunk_t * svm_fifo_find_chunk(svm_fifo_t *f, u32 pos)
Find chunk for given byte position.
Definition: svm_fifo.c:483
void svm_fifo_clone(svm_fifo_t *df, svm_fifo_t *sf)
Clones fifo.
Definition: svm_fifo.c:1195
static u32 svm_fifo_max_dequeue(svm_fifo_t *f)
Fifo max bytes to dequeue.
Definition: svm_fifo.h:433
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
u32 svm_fifo_n_chunks(svm_fifo_t *f)
Number of chunks linked into the fifo.
Definition: svm_fifo.c:1357
svm_fifo_chunk_t * svm_fifo_chunk_alloc(u32 size)
Creates a fifo chunk in the current heap.
Definition: svm_fifo.c:457
u8 * format_ooo_list(u8 *s, va_list *args)
Definition: svm_fifo.c:1466
void svm_fifo_free_ooo_data(svm_fifo_t *f)
Cleanup fifo ooo data.
Definition: svm_fifo.c:113
static void ooo_segment_free(svm_fifo_t *f, u32 index)
Definition: svm_fifo.c:149
unsigned int u32
Definition: types.h:88
int svm_fifo_dequeue(svm_fifo_t *f, u32 len, u8 *dst)
Dequeue data from fifo.
Definition: svm_fifo.c:1009
u32 key
node key
Definition: rbtree.h:38
static svm_fifo_chunk_t * f_unlink_chunks(svm_fifo_t *f, u32 end_pos, u8 maybe_ooo)
Definition: svm_fifo.c:957
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:534
static int f_pos_gt(u32 a, u32 b)
Definition: svm_fifo.h:156
uword opaque
value stored by node
Definition: rbtree.h:39
static ooo_segment_t * ooo_segment_prev(svm_fifo_t *f, ooo_segment_t *s)
Definition: svm_fifo.c:119
struct svm_fifo_chunk_ * next
pointer to next chunk in linked-lists
Definition: fifo_types.h:33
void fsh_collect_chunks(fifo_segment_header_t *fsh, u32 slice_index, svm_fifo_chunk_t *c)
Return chunks to fifo segment.
Definition: fifo_segment.c:805
u32 size
Definition: vhost_user.h:106
#define pool_put(P, E)
Free an object E in pool P.
Definition: pool.h:302
int svm_fifo_enqueue(svm_fifo_t *f, u32 len, const u8 *src)
Enqueue data to fifo.
Definition: svm_fifo.c:838
#define SVM_FIFO_MAX_EVT_SUBSCRIBERS
Definition: fifo_types.h:25
#define PREDICT_FALSE(x)
Definition: clib.h:120
void rb_tree_init(rb_tree_t *rt)
Definition: rbtree.c:483
#define always_inline
Definition: ipsec.h:28
static u32 f_chunk_end(svm_fifo_chunk_t *c)
Definition: svm_fifo.h:138
#define svm_fifo_trace_add(_f, _s, _l, _t)
Definition: svm_fifo.h:68
vl_api_address_t dst
Definition: gre.api:55
u8 * svm_fifo_replay(u8 *s, svm_fifo_t *f, u8 no_read, u8 verbose)
Definition: svm_fifo.c:1407
CLIB_MARCH_FN(svm_fifo_copy_to_chunk, void, svm_fifo_t *f, svm_fifo_chunk_t *c, u32 tail_idx, const u8 *src, u32 len, svm_fifo_chunk_t **last)
Definition: svm_fifo.c:24
u8 len
Definition: ip_types.api:92
void svm_fifo_add_subscriber(svm_fifo_t *f, u8 subscriber)
Add io events subscriber to list.
Definition: svm_fifo.c:1241
#define pool_free(p)
Free a pool.
Definition: pool.h:427
svmdb_client_t * c
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:317
static u32 f_cursize(svm_fifo_t *f, u32 head, u32 tail)
Fifo current size, i.e., number of bytes enqueued.
Definition: svm_fifo.h:121
static void f_load_head_tail_prod(svm_fifo_t *f, u32 *head, u32 *tail)
Load head and tail optimized for producer.
Definition: svm_fifo.h:93
u32 start_byte
chunk start byte
Definition: fifo_types.h:31
u8 svm_fifo_is_sane(svm_fifo_t *f)
Check if fifo is sane.
Definition: svm_fifo.c:1264
static svm_fifo_chunk_t * svm_fifo_find_next_chunk(svm_fifo_t *f, svm_fifo_chunk_t *start, u32 pos)
Definition: svm_fifo.c:495
#define OOO_SEGMENT_INVALID_INDEX
Definition: svm_fifo.h:28
u8 * format_ooo_segment(u8 *s, va_list *args)
Definition: svm_fifo.c:1373
static svm_fifo_chunk_t * f_lookup_clear_enq_chunks(svm_fifo_t *f, svm_fifo_chunk_t *start, u32 end_pos)
Definition: svm_fifo.c:695
u32 svm_fifo_n_ooo_segments(svm_fifo_t *f)
Number of out-of-order segments for fifo.
Definition: svm_fifo.c:1210
static void * clib_mem_alloc_aligned_or_null(uword size, uword align)
Definition: mem.h:181
signed int i32
Definition: types.h:77
#define uword_to_pointer(u, type)
Definition: types.h:136
u8 * format_svm_fifo(u8 *s, va_list *args)
Definition: svm_fifo.c:1485
#define ASSERT(truth)
static u8 f_chunk_includes_pos(svm_fifo_chunk_t *c, u32 pos)
Definition: svm_fifo.h:168
static int f_pos_geq(u32 a, u32 b)
Definition: svm_fifo.h:162
rb_node_index_t deq_rb_index
deq node index if chunk in rbtree
Definition: fifo_types.h:35
static void clib_mem_free(void *p)
Definition: mem.h:215
u8 data[0]
start of chunk data
Definition: fifo_types.h:36
void svm_fifo_del_subscriber(svm_fifo_t *f, u8 subscriber)
Remove io events subscriber form list.
Definition: svm_fifo.c:1249
int svm_fifo_enqueue_with_offset(svm_fifo_t *f, u32 offset, u32 len, u8 *src)
Enqueue a future segment.
Definition: svm_fifo.c:896
static u32 ooo_segment_end_pos(ooo_segment_t *s)
Definition: svm_fifo.c:107
static void f_update_ooo_deq(svm_fifo_t *f, u32 start_pos, u32 end_pos)
Definition: svm_fifo.c:646
static uword pointer_to_uword(const void *p)
Definition: types.h:131
#define clib_atomic_store_rel_n(a, b)
Definition: atomics.h:49
#define clib_max(x, y)
Definition: clib.h:320
u32 length
Length of segment.
Definition: fifo_types.h:44
u8 * svm_fifo_dump_trace(u8 *s, svm_fifo_t *f)
Definition: svm_fifo.c:1383
u32 next
Next linked-list element pool index.
Definition: fifo_types.h:41
u32 svm_fifo_max_read_chunk(svm_fifo_t *f)
Max contiguous chunk of data that can be read.
Definition: svm_fifo.c:509
static int f_pos_leq(u32 a, u32 b)
Definition: svm_fifo.h:150
template key/value backing page structure
Definition: bihash_doc.h:44
int svm_fifo_fill_chunk_list(svm_fifo_t *f)
Ensure the whole fifo size is writeable.
Definition: svm_fifo.c:1126
static u8 rb_node_is_tnil(rb_tree_t *rt, rb_node_t *n)
Definition: rbtree.h:76
vl_api_mac_event_action_t action
Definition: l2.api:181
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
static uword max_log2(uword x)
Definition: clib.h:208
static rb_node_t * f_find_node_rbtree(rb_tree_t *rt, u32 pos)
Definition: svm_fifo.c:540
u32 index
Definition: flow_types.api:221
static svm_fifo_chunk_t * f_find_chunk_rbtree(rb_tree_t *rt, u32 pos)
Definition: svm_fifo.c:578
rb_node_index_t enq_rb_index
enq node index if chunk in rbtree
Definition: fifo_types.h:34
struct clib_bihash_value offset
template key/value backing page structure
static int f_try_chunk_alloc(svm_fifo_t *f, u32 head, u32 tail, u32 len)
Definition: svm_fifo.c:800
static __clib_unused ooo_segment_t * ooo_segment_last(svm_fifo_t *f)
Definition: svm_fifo.c:356
#define vec_foreach(var, vec)
Vector iterator.
save_rewrite_length must be aligned so that reass doesn t overwrite it
Definition: buffer.h:401
u32 svm_fifo_max_write_chunk(svm_fifo_t *f)
Max contiguous chunk of data that can be written.
Definition: svm_fifo.c:529
void rb_tree_del_node(rb_tree_t *rt, rb_node_t *z)
Definition: rbtree.c:445
rb_node_index_t rb_tree_add_custom(rb_tree_t *rt, u32 key, uword opaque, rb_tree_lt_fn ltfn)
Definition: rbtree.c:195
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
int svm_fifo_dequeue_drop(svm_fifo_t *f, u32 len)
Dequeue and drop bytes from fifo.
Definition: svm_fifo.c:1068
struct _svm_fifo svm_fifo_t
rb_node_index_t root
root index
Definition: rbtree.h:45
u32 start
Start of segment, normalized.
Definition: fifo_types.h:43
void svm_fifo_init_ooo_lookup(svm_fifo_t *f, u8 ooo_type)
Initialize rbtrees used for ooo lookups.
Definition: svm_fifo.c:403
static int f_pos_lt(u32 a, u32 b)
Definition: svm_fifo.h:144
static void ooo_segment_add(svm_fifo_t *f, u32 offset, u32 head, u32 tail, u32 length)
Add segment to fifo's out-of-order segment list.
Definition: svm_fifo.c:178
svm_fifo_t * svm_fifo_alloc(u32 data_size_in_bytes)
Creates a fifo in the current heap.
Definition: svm_fifo.c:421
static void f_update_ooo_enq(svm_fifo_t *f, u32 start_pos, u32 end_pos)
Definition: svm_fifo.c:597
static uword pool_elts(void *v)
Number of active elements in a pool.
Definition: pool.h:128