FD.io VPP v21.01.1
Vector Packet Processing
tcp_bt.c
1 /*
2  * Copyright (c) 2019 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  *
15  * TCP byte tracker that can generate delivery rate estimates. Based on
16  * draft-cheng-iccrg-delivery-rate-estimation-00
17  */
18 
19 #include <vnet/tcp/tcp_bt.h>
20 #include <vnet/tcp/tcp.h>
21 #include <vnet/tcp/tcp_inlines.h>
22 
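/*
 * Rough usage sketch, inferred from the functions below rather than quoted
 * from other VPP code: tcp_bt_init() attaches a byte tracker to a connection,
 * tcp_bt_track_tx() / tcp_bt_track_rxt() record (re)transmitted sequence
 * ranges as samples, and tcp_bt_sample_delivery_rate() later folds acked and
 * sacked bytes into a tcp_rate_sample_t that a congestion control algorithm
 * can turn into a delivery rate estimate.
 */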
23 static tcp_bt_sample_t *
24 bt_get_sample (tcp_byte_tracker_t * bt, u32 bts_index)
25 {
26  if (pool_is_free_index (bt->samples, bts_index))
27  return 0;
28  return pool_elt_at_index (bt->samples, bts_index);
29 }
30 
31 static tcp_bt_sample_t *
32 bt_next_sample (tcp_byte_tracker_t * bt, tcp_bt_sample_t * bts)
33 {
34  return bt_get_sample (bt, bts->next);
35 }
36 
37 static tcp_bt_sample_t *
38 bt_prev_sample (tcp_byte_tracker_t * bt, tcp_bt_sample_t * bts)
39 {
40  return bt_get_sample (bt, bts->prev);
41 }
42 
43 static u32
44 bt_sample_index (tcp_byte_tracker_t * bt, tcp_bt_sample_t * bts)
45 {
46  if (!bts)
47  return TCP_BTS_INVALID_INDEX;
48  return bts - bt->samples;
49 }
50 
51 static inline int
52 bt_seq_lt (u32 a, u32 b)
53 {
54  return seq_lt (a, b);
55 }
56 
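/* Allocate a zeroed sample covering [min_seq, max_seq) and index it in the
 * lookup rbtree keyed by min_seq */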
57 static tcp_bt_sample_t *
58 bt_alloc_sample (tcp_byte_tracker_t * bt, u32 min_seq, u32 max_seq)
59 {
60  tcp_bt_sample_t *bts;
61 
62  pool_get_zero (bt->samples, bts);
63  bts->next = bts->prev = TCP_BTS_INVALID_INDEX;
64  bts->min_seq = min_seq;
65  bts->max_seq = max_seq;
66  rb_tree_add_custom (&bt->sample_lookup, bts->min_seq, bts - bt->samples,
67  bt_seq_lt);
68  return bts;
69 }
70 
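/* Unlink a sample from the doubly linked list, remove its rbtree entry and
 * return it to the pool */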
71 static void
72 bt_free_sample (tcp_byte_tracker_t * bt, tcp_bt_sample_t * bts)
73 {
74  if (bts->prev != TCP_BTS_INVALID_INDEX)
75  {
76  tcp_bt_sample_t *prev = bt_prev_sample (bt, bts);
77  prev->next = bts->next;
78  }
79  else
80  bt->head = bts->next;
81 
82  if (bts->next != TCP_BTS_INVALID_INDEX)
83  {
84  tcp_bt_sample_t *next = bt_next_sample (bt, bts);
85  next->prev = bts->prev;
86  }
87  else
88  bt->tail = bts->prev;
89 
90  rb_tree_del_custom (&bt->sample_lookup, bts->min_seq, bt_seq_lt);
91  if (CLIB_DEBUG)
92  memset (bts, 0xfc, sizeof (*bts));
93  pool_put (bt->samples, bts);
94 }
95 
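/* Split a sample at seq: the original keeps [min_seq, seq), the new sample
 * inherits the remaining fields and covers [seq, max_seq) */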
96 static tcp_bt_sample_t *
97 bt_split_sample (tcp_byte_tracker_t * bt, tcp_bt_sample_t * bts, u32 seq)
98 {
99  tcp_bt_sample_t *ns, *next;
100  u32 bts_index;
101 
102  bts_index = bt_sample_index (bt, bts);
103 
104  ASSERT (seq_leq (bts->min_seq, seq) && seq_lt (seq, bts->max_seq));
105 
106  ns = bt_alloc_sample (bt, seq, bts->max_seq);
107  bts = bt_get_sample (bt, bts_index);
108 
109  *ns = *bts;
110  ns->min_seq = seq;
111  bts->max_seq = seq;
112 
113  next = bt_next_sample (bt, bts);
114  if (next)
115  next->prev = bt_sample_index (bt, ns);
116  else
117  bt->tail = bt_sample_index (bt, ns);
118 
119  bts->next = bt_sample_index (bt, ns);
120  ns->prev = bt_sample_index (bt, bts);
121 
122  return ns;
123 }
124 
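/* Merge two adjacent samples: prev absorbs cur's range and cur is freed */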
125 static tcp_bt_sample_t *
126 bt_merge_sample (tcp_byte_tracker_t * bt, tcp_bt_sample_t * prev,
127  tcp_bt_sample_t * cur)
128 {
129  ASSERT (prev->max_seq == cur->min_seq);
130  prev->max_seq = cur->max_seq;
131  if (bt_sample_index (bt, cur) == bt->tail)
132  bt->tail = bt_sample_index (bt, prev);
133  bt_free_sample (bt, cur);
134  return prev;
135 }
136 
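/* Find the sample whose [min_seq, max_seq) range covers seq by descending
 * the rbtree keyed on min_seq; returns 0 if no sample covers seq */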
137 static tcp_bt_sample_t *
138 bt_lookup_seq (tcp_byte_tracker_t * bt, u32 seq)
139 {
140  rb_tree_t *rt = &bt->sample_lookup;
141  rb_node_t *cur, *prev;
142  tcp_bt_sample_t *bts;
143 
144  cur = rb_node (rt, rt->root);
145  if (rb_node_is_tnil (rt, cur))
146  return 0;
147 
148  while (seq != cur->key)
149  {
150  prev = cur;
151  if (seq_lt (seq, cur->key))
152  cur = rb_node_left (rt, cur);
153  else
154  cur = rb_node_right (rt, cur);
155 
156  if (rb_node_is_tnil (rt, cur))
157  {
158  /* Hit tnil as a left child. Find predecessor */
159  if (seq_lt (seq, prev->key))
160  {
161  cur = rb_tree_predecessor (rt, prev);
162  if (rb_node_is_tnil (rt, cur))
163  return 0;
164  bts = bt_get_sample (bt, cur->opaque);
165  }
166  /* Hit tnil as a right child */
167  else
168  {
169  bts = bt_get_sample (bt, prev->opaque);
170  }
171 
172  if (seq_geq (seq, bts->min_seq))
173  return bts;
174 
175  return 0;
176  }
177  }
178 
179  if (!rb_node_is_tnil (rt, cur))
180  return bt_get_sample (bt, cur->opaque);
181 
182  return 0;
183 }
184 
185 static void
186 bt_update_sample (tcp_byte_tracker_t * bt, tcp_bt_sample_t * bts, u32 seq)
187 {
188  rb_tree_del_custom (&bt->sample_lookup, bts->min_seq, bt_seq_lt);
189  bts->min_seq = seq;
190  rb_tree_add_custom (&bt->sample_lookup, bts->min_seq,
191  bt_sample_index (bt, bts), bt_seq_lt);
192 }
193 
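/* Starting at start, free samples that end at or before seq and trim the
 * first sample that extends past it */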
194 static tcp_bt_sample_t *
195 bt_fix_overlapped (tcp_byte_tracker_t * bt, tcp_bt_sample_t * start,
196  u32 seq, u8 is_end)
197 {
198  tcp_bt_sample_t *cur, *next;
199 
200  cur = start;
201  while (cur && seq_leq (cur->max_seq, seq))
202  {
203  next = bt_next_sample (bt, cur);
204  bt_free_sample (bt, cur);
205  cur = next;
206  }
207 
208  if (cur && seq_lt (cur->min_seq, seq))
209  bt_update_sample (bt, cur, seq);
210 
211  return cur;
212 }
213 
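/* Consistency check: every sample must be reachable from head, present in
 * the lookup rbtree and correctly linked to its neighbors */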
214 int
215 tcp_bt_is_sane (tcp_byte_tracker_t * bt)
216 {
217  tcp_bt_sample_t *bts, *tmp;
218 
219  if (pool_elts (bt->samples) != pool_elts (bt->sample_lookup.nodes) - 1)
220  return 0;
221 
222  if (bt->head == TCP_BTS_INVALID_INDEX)
223  {
224  if (bt->tail != TCP_BTS_INVALID_INDEX)
225  return 0;
226  if (pool_elts (bt->samples) != 0)
227  return 0;
228  return 1;
229  }
230 
231  bts = bt_get_sample (bt, bt->tail);
232  if (!bts)
233  return 0;
234 
235  bts = bt_get_sample (bt, bt->head);
236  if (!bts || bts->prev != TCP_BTS_INVALID_INDEX)
237  return 0;
238 
239  while (bts)
240  {
241  tmp = bt_lookup_seq (bt, bts->min_seq);
242  if (!tmp)
243  return 0;
244  if (tmp != bts)
245  return 0;
246  tmp = bt_next_sample (bt, bts);
247  if (tmp)
248  {
249  if (tmp->prev != bt_sample_index (bt, bts))
250  {
251  clib_warning ("next %u thinks prev is %u should be %u",
252  bts->next, tmp->prev, bt_sample_index (bt, bts));
253  return 0;
254  }
255  if (!seq_lt (bts->min_seq, tmp->min_seq))
256  return 0;
257  }
258  else
259  {
260  if (bt->tail != bt_sample_index (bt, bts))
261  return 0;
262  if (bts->next != TCP_BTS_INVALID_INDEX)
263  return 0;
264  }
265  bts = tmp;
266  }
267  return 1;
268 }
269 
270 static tcp_bt_sample_t *
271 tcp_bt_alloc_tx_sample (tcp_connection_t * tc, u32 min_seq, u32 max_seq)
272 {
273  tcp_bt_sample_t *bts;
274  bts = bt_alloc_sample (tc->bt, min_seq, max_seq);
275  bts->delivered = tc->delivered;
276  bts->delivered_time = tc->delivered_time;
277  bts->tx_time = tcp_time_now_us (tc->c_thread_index);
278  bts->first_tx_time = tc->first_tx_time;
279  bts->flags |= tc->app_limited ? TCP_BTS_IS_APP_LIMITED : 0;
280  bts->tx_in_flight = tcp_flight_size (tc);
281  bts->tx_lost = tc->lost;
282  return bts;
283 }
284 
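/* Mark the connection application limited when there is not enough data to
 * fill the cwnd; samples taken while app limited carry TCP_BTS_IS_APP_LIMITED
 * so consumers can discount them when estimating the delivery rate */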
285 void
286 tcp_bt_check_app_limited (tcp_connection_t * tc)
287 {
288  u32 available_bytes, flight_size;
289 
290  available_bytes = transport_max_tx_dequeue (&tc->connection);
291  flight_size = tcp_flight_size (tc);
292 
293  /* Not enough bytes to fill the cwnd */
294  if (available_bytes + flight_size + tc->snd_mss < tc->cwnd
295  /* Bytes considered lost have been retransmitted */
296  && tc->sack_sb.lost_bytes <= tc->snd_rxt_bytes)
297  tc->app_limited = tc->delivered + flight_size ? : 1;
298 }
299 
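/* Track a newly transmitted burst of len bytes starting at snd_nxt. A burst
 * contiguous with the tail sample and sent at the same timestamp extends it;
 * otherwise a new sample is appended */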
300 void
301 tcp_bt_track_tx (tcp_connection_t * tc, u32 len)
302 {
303  tcp_byte_tracker_t *bt = tc->bt;
304  tcp_bt_sample_t *bts, *tail;
305  u32 bts_index;
306 
307  tail = bt_get_sample (bt, bt->tail);
308  if (tail && tail->max_seq == tc->snd_nxt
309  && !(tail->flags & TCP_BTS_IS_SACKED)
310  && tail->tx_time == tcp_time_now_us (tc->c_thread_index))
311  {
312  tail->max_seq += len;
313  return;
314  }
315 
316  if (tc->snd_una == tc->snd_nxt)
317  {
318  tc->delivered_time = tcp_time_now_us (tc->c_thread_index);
319  tc->first_tx_time = tc->delivered_time;
320  }
321 
322  bts = tcp_bt_alloc_tx_sample (tc, tc->snd_nxt, tc->snd_nxt + len);
323  bts_index = bt_sample_index (bt, bts);
324  tail = bt_get_sample (bt, bt->tail);
325  if (tail)
326  {
327  tail->next = bts_index;
328  bts->prev = bt->tail;
329  bt->tail = bts_index;
330  }
331  else
332  {
333  bt->tail = bt->head = bts_index;
334  }
335 }
336 
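/* Track a retransmitted range [start, end). Overlapped portions of existing
 * samples are trimmed or split and a new sample flagged TCP_BTS_IS_RXT is
 * linked in their place */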
337 void
338 tcp_bt_track_rxt (tcp_connection_t * tc, u32 start, u32 end)
339 {
340  tcp_byte_tracker_t *bt = tc->bt;
341  tcp_bt_sample_t *bts, *next, *cur, *prev, *nbts;
342  u32 bts_index, cur_index, next_index, prev_index, max_seq;
343  u8 is_end = end == tc->snd_nxt;
344  tcp_bts_flags_t bts_flags;
345 
346  /* Contiguous blocks retransmitted at the same time */
347  bts = bt_get_sample (bt, bt->last_ooo);
348  if (bts && bts->max_seq == start
349  && bts->tx_time == tcp_time_now_us (tc->c_thread_index))
350  {
351  bts->max_seq = end;
352  next = bt_next_sample (bt, bts);
353  if (next)
354  bt_fix_overlapped (bt, next, end, is_end);
355 
356  return;
357  }
358 
359  /* Find original tx sample and cache flags in case the sample
360  * is freed or the pool moves */
361  bts = bt_lookup_seq (bt, start);
362  bts_flags = bts->flags;
363 
364  ASSERT (bts != 0 && seq_geq (start, bts->min_seq));
365 
366  /* Head in the past */
367  if (seq_lt (bts->min_seq, tc->snd_una))
368  bt_update_sample (bt, bts, tc->snd_una);
369 
370  /* Head overlap */
371  if (bts->min_seq == start)
372  {
373  prev_index = bts->prev;
374  next = bt_fix_overlapped (bt, bts, end, is_end);
375  /* bts might no longer be valid from here */
376  next_index = bt_sample_index (bt, next);
377 
378  cur = tcp_bt_alloc_tx_sample (tc, start, end);
379  cur->flags |= TCP_BTS_IS_RXT;
380  if (bts_flags & TCP_BTS_IS_RXT)
381  cur->flags |= TCP_BTS_IS_RXT_LOST;
382  cur->next = next_index;
383  cur->prev = prev_index;
384 
385  cur_index = bt_sample_index (bt, cur);
386 
387  if (next_index != TCP_BTS_INVALID_INDEX)
388  {
389  next = bt_get_sample (bt, next_index);
390  next->prev = cur_index;
391  }
392  else
393  {
394  bt->tail = cur_index;
395  }
396 
397  if (prev_index != TCP_BTS_INVALID_INDEX)
398  {
399  prev = bt_get_sample (bt, prev_index);
400  prev->next = cur_index;
401  }
402  else
403  {
404  bt->head = cur_index;
405  }
406 
407  bt->last_ooo = cur_index;
408  return;
409  }
410 
411  bts_index = bt_sample_index (bt, bts);
412  next = bt_next_sample (bt, bts);
413  if (next)
414  bt_fix_overlapped (bt, next, end, is_end);
415 
416  max_seq = bts->max_seq;
417  ASSERT (seq_lt (start, max_seq));
418 
419  /* Have to split or tail overlap */
420  cur = tcp_bt_alloc_tx_sample (tc, start, end);
421  cur->flags |= TCP_BTS_IS_RXT;
422  if (bts_flags & TCP_BTS_IS_RXT)
423  cur->flags |= TCP_BTS_IS_RXT_LOST;
424  cur->prev = bts_index;
425  cur_index = bt_sample_index (bt, cur);
426 
427  /* Split. Allocate another sample */
428  if (seq_lt (end, max_seq))
429  {
430  nbts = tcp_bt_alloc_tx_sample (tc, end, bts->max_seq);
431  cur = bt_get_sample (bt, cur_index);
432  bts = bt_get_sample (bt, bts_index);
433 
434  *nbts = *bts;
435  nbts->min_seq = end;
436 
437  if (nbts->next != TCP_BTS_INVALID_INDEX)
438  {
439  next = bt_get_sample (bt, nbts->next);
440  next->prev = bt_sample_index (bt, nbts);
441  }
442  else
443  bt->tail = bt_sample_index (bt, nbts);
444 
445  bts->next = nbts->prev = cur_index;
446  cur->next = bt_sample_index (bt, nbts);
447 
448  bts->max_seq = start;
449  bt->last_ooo = cur_index;
450  }
451  /* Tail completely overlapped */
452  else
453  {
454  bts = bt_get_sample (bt, bts_index);
455  bts->max_seq = start;
456 
457  if (bts->next != TCP_BTS_INVALID_INDEX)
458  {
459  next = bt_get_sample (bt, bts->next);
460  next->prev = cur_index;
461  }
462  else
463  bt->tail = cur_index;
464 
465  cur->next = bts->next;
466  bts->next = cur_index;
467 
468  bt->last_ooo = cur_index;
469  }
470 }
471 
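/* Fold a tracked sample into the rate sample. Among the samples acked or
 * sacked by the current ACK, the one with the largest delivered count is
 * kept, as suggested by the delivery rate estimation draft */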
472 static void
473 tcp_bt_sample_to_rate_sample (tcp_connection_t * tc, tcp_bt_sample_t * bts,
474  tcp_rate_sample_t * rs)
475 {
476  if (bts->flags & TCP_BTS_IS_SACKED)
477  return;
478 
479  if (rs->prior_delivered && rs->prior_delivered >= bts->delivered)
480  return;
481 
482  rs->prior_delivered = bts->delivered;
483  rs->prior_time = bts->delivered_time;
484  rs->interval_time = bts->tx_time - bts->first_tx_time;
485  rs->rtt_time = tc->delivered_time - bts->tx_time;
486  rs->flags = bts->flags;
487  rs->tx_in_flight = bts->tx_in_flight;
488  rs->tx_lost = bts->tx_lost;
489  tc->first_tx_time = bts->tx_time;
490 }
491 
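/* Walk samples fully covered by the cumulative ack (snd_una), converting
 * them to rate samples and freeing them; a partially acked sample is trimmed */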
492 static void
493 tcp_bt_walk_samples (tcp_connection_t * tc, tcp_rate_sample_t * rs)
494 {
495  tcp_byte_tracker_t *bt = tc->bt;
496  tcp_bt_sample_t *next, *cur;
497 
498  cur = bt_get_sample (bt, bt->head);
499  while (cur && seq_leq (cur->max_seq, tc->snd_una))
500  {
501  next = bt_next_sample (bt, cur);
502  tcp_bt_sample_to_rate_sample (tc, cur, rs);
503  bt_free_sample (bt, cur);
504  cur = next;
505  }
506 
507  if (cur && seq_lt (cur->min_seq, tc->snd_una))
508  {
509  bt_update_sample (bt, cur, tc->snd_una);
510  tcp_bt_sample_to_rate_sample (tc, cur, rs);
511  }
512 }
513 
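/* Walk samples covered by newly reported SACK blocks. Covered samples are
 * converted to rate samples, flagged TCP_BTS_IS_SACKED and merged with
 * already sacked neighbors */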
514 static void
515 tcp_bt_walk_samples_ooo (tcp_connection_t * tc, tcp_rate_sample_t * rs)
516 {
517  sack_block_t *blks = tc->rcv_opts.sacks, *blk;
518  tcp_byte_tracker_t *bt = tc->bt;
519  tcp_bt_sample_t *cur, *prev, *next;
520  int i;
521 
522  for (i = 0; i < vec_len (blks); i++)
523  {
524  blk = &blks[i];
525 
526  /* Ignore blocks that are already covered by snd_una */
527  if (seq_lt (blk->end, tc->snd_una))
528  continue;
529 
530  cur = bt_lookup_seq (bt, blk->start);
531  if (!cur)
532  continue;
533 
534  ASSERT (seq_geq (blk->start, cur->min_seq)
535  && seq_lt (blk->start, cur->max_seq));
536 
537  /* Current should be split. Second part will be consumed */
538  if (PREDICT_FALSE (cur->min_seq != blk->start))
539  {
540  cur = bt_split_sample (bt, cur, blk->start);
541  prev = bt_prev_sample (bt, cur);
542  }
543  else
544  prev = bt_prev_sample (bt, cur);
545 
546  while (cur && seq_leq (cur->max_seq, blk->end))
547  {
548  if (!(cur->flags & TCP_BTS_IS_SACKED))
549  {
550  tcp_bt_sample_to_rate_sample (tc, cur, rs);
551  cur->flags |= TCP_BTS_IS_SACKED;
552  if (prev && (prev->flags & TCP_BTS_IS_SACKED))
553  {
554  cur = bt_merge_sample (bt, prev, cur);
555  next = bt_next_sample (bt, cur);
556  }
557  else
558  {
559  next = bt_next_sample (bt, cur);
560  if (next && (next->flags & TCP_BTS_IS_SACKED))
561  {
562  cur = bt_merge_sample (bt, cur, next);
563  next = bt_next_sample (bt, cur);
564  }
565  }
566  }
567  else
568  next = bt_next_sample (bt, cur);
569 
570  prev = cur;
571  cur = next;
572  }
573 
574  if (cur && seq_lt (cur->min_seq, blk->end))
575  {
576  tcp_bt_sample_to_rate_sample (tc, cur, rs);
577  prev = bt_prev_sample (bt, cur);
578  /* Extend previous to include the newly sacked bytes */
579  if (prev && (prev->flags & TCP_BTS_IS_SACKED))
580  {
581  prev->max_seq = blk->end;
582  bt_update_sample (bt, cur, blk->end);
583  }
584  /* Split sample into two. First part is consumed */
585  else
586  {
587  next = bt_split_sample (bt, cur, blk->end);
588  cur = bt_prev_sample (bt, next);
589  cur->flags |= TCP_BTS_IS_SACKED;
590  }
591  }
592  }
593 }
594 
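/* Generate a delivery rate sample from the bytes acked and sacked by the
 * current ACK. A consumer can estimate the delivery rate roughly as
 * rs->delivered / rs->interval_time, per the draft referenced above */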
595 void
596 tcp_bt_sample_delivery_rate (tcp_connection_t * tc, tcp_rate_sample_t * rs)
597 {
598  u32 delivered;
599 
600  if (PREDICT_FALSE (tc->flags & TCP_CONN_FINSNT))
601  return;
602 
603  tc->lost += tc->sack_sb.last_lost_bytes;
604 
605  delivered = tc->bytes_acked + tc->sack_sb.last_sacked_bytes;
606  /* Do not count bytes that were previously sacked again */
607  delivered -= tc->sack_sb.last_bytes_delivered;
608  if (!delivered || tc->bt->head == TCP_BTS_INVALID_INDEX)
609  return;
610 
611  tc->delivered += delivered;
612  tc->delivered_time = tcp_time_now_us (tc->c_thread_index);
613 
614  if (tc->app_limited && tc->delivered > tc->app_limited)
615  tc->app_limited = 0;
616 
617  if (tc->bytes_acked)
618  tcp_bt_walk_samples (tc, rs);
619 
620  if (tc->sack_sb.last_sacked_bytes)
621  tcp_bt_walk_samples_ooo (tc, rs);
622 
623  rs->interval_time = clib_max ((tc->delivered_time - rs->prior_time),
624  rs->interval_time);
625  rs->delivered = tc->delivered - rs->prior_delivered;
626  rs->acked_and_sacked = delivered;
627  rs->last_lost = tc->sack_sb.last_lost_bytes;
628  rs->lost = tc->lost - rs->tx_lost;
629 }
630 
631 void
632 tcp_bt_flush_samples (tcp_connection_t * tc)
633 {
634  tcp_byte_tracker_t *bt = tc->bt;
635  tcp_bt_sample_t *bts;
636  u32 *samples = 0, *si;
637 
638  vec_validate (samples, pool_elts (bt->samples) - 1);
639  vec_reset_length (samples);
640 
641  /* *INDENT-OFF* */
642  pool_foreach (bts, bt->samples) {
643  vec_add1 (samples, bts - bt->samples);
644  }
645  /* *INDENT-ON* */
646 
647  vec_foreach (si, samples)
648  {
649  bts = bt_get_sample (bt, *si);
650  bt_free_sample (bt, bts);
651  }
652 
653  vec_free (samples);
654 }
655 
656 void
657 tcp_bt_cleanup (tcp_connection_t * tc)
658 {
659  tcp_byte_tracker_t *bt = tc->bt;
660 
661  rb_tree_free_nodes (&bt->sample_lookup);
662  pool_free (bt->samples);
663  clib_mem_free (bt);
664  tc->bt = 0;
665 }
666 
667 void
668 tcp_bt_init (tcp_connection_t * tc)
669 {
670  tcp_byte_tracker_t *bt;
671 
672  bt = clib_mem_alloc (sizeof (tcp_byte_tracker_t));
673  clib_memset (bt, 0, sizeof (tcp_byte_tracker_t));
674 
675  rb_tree_init (&bt->sample_lookup);
676  bt->head = bt->tail = TCP_BTS_INVALID_INDEX;
677  tc->bt = bt;
678 }
679 
680 u8 *
681 format_tcp_bt_sample (u8 * s, va_list * args)
682 {
683  tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
684  tcp_bt_sample_t *bts = va_arg (*args, tcp_bt_sample_t *);
685  f64 now = tcp_time_now_us (tc->c_thread_index);
686  s = format (s, "[%u, %u] d %u dt %.3f txt %.3f ftxt %.3f flags 0x%x",
687  bts->min_seq - tc->iss, bts->max_seq - tc->iss, bts->delivered,
688  now - bts->delivered_time, now - bts->tx_time,
689  now - bts->first_tx_time, bts->flags);
690  return s;
691 }
692 
693 u8 *
694 format_tcp_bt (u8 * s, va_list * args)
695 {
696  tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
697  tcp_byte_tracker_t *bt = tc->bt;
698  tcp_bt_sample_t *bts;
699 
700  bts = bt_get_sample (bt, bt->head);
701  while (bts)
702  {
703  s = format (s, "%U\n", format_tcp_bt_sample, tc, bts);
704  bts = bt_next_sample (bt, bts);
705  }
706 
707  return s;
708 }
709 
710 /*
711  * fd.io coding-style-patch-verification: ON
712  *
713  * Local Variables:
714  * eval: (c-set-style "gnu")
715  * End:
716  */