FD.io VPP  v21.01.1
Vector Packet Processing
tcp_input.c
1 /*
2  * Copyright (c) 2016-2019 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include <vppinfra/sparse_vec.h>
17 #include <vnet/fib/ip4_fib.h>
18 #include <vnet/fib/ip6_fib.h>
19 #include <vnet/tcp/tcp.h>
20 #include <vnet/tcp/tcp_inlines.h>
21 #include <vnet/session/session.h>
22 #include <math.h>
23 
24 static char *tcp_error_strings[] = {
25 #define tcp_error(n,s) s,
26 #include <vnet/tcp/tcp_error.def>
27 #undef tcp_error
28 };
29 
30 /* All TCP nodes have the same outgoing arcs */
31 #define foreach_tcp_state_next \
32  _ (DROP4, "ip4-drop") \
33  _ (DROP6, "ip6-drop") \
34  _ (TCP4_OUTPUT, "tcp4-output") \
35  _ (TCP6_OUTPUT, "tcp6-output")
36 
37 typedef enum _tcp_established_next
38 {
39 #define _(s,n) TCP_ESTABLISHED_NEXT_##s,
40  foreach_tcp_state_next
41 #undef _
42  TCP_ESTABLISHED_N_NEXT,
43 } tcp_established_next_t;
44 
45 typedef enum _tcp_rcv_process_next
46 {
47 #define _(s,n) TCP_RCV_PROCESS_NEXT_##s,
48  foreach_tcp_state_next
49 #undef _
50  TCP_RCV_PROCESS_N_NEXT,
51 } tcp_rcv_process_next_t;
52 
53 typedef enum _tcp_syn_sent_next
54 {
55 #define _(s,n) TCP_SYN_SENT_NEXT_##s,
56  foreach_tcp_state_next
57 #undef _
58  TCP_SYN_SENT_N_NEXT,
59 } tcp_syn_sent_next_t;
60 
61 typedef enum _tcp_listen_next
62 {
63 #define _(s,n) TCP_LISTEN_NEXT_##s,
64  foreach_tcp_state_next
65 #undef _
66  TCP_LISTEN_N_NEXT,
67 } tcp_listen_next_t;
68 
69 /* Generic, state independent indices */
70 typedef enum _tcp_state_next
71 {
72 #define _(s,n) TCP_NEXT_##s,
73  foreach_tcp_state_next
74 #undef _
75  TCP_STATE_N_NEXT,
76 } tcp_state_next_t;
77 
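/* Note added in editing (not part of the original source): with the
 * foreach_tcp_state_next list above, the _(s,n) X-macro expands the generic
 * enum roughly to
 *
 *   typedef enum _tcp_state_next
 *   {
 *     TCP_NEXT_DROP4,
 *     TCP_NEXT_DROP6,
 *     TCP_NEXT_TCP4_OUTPUT,
 *     TCP_NEXT_TCP6_OUTPUT,
 *     TCP_STATE_N_NEXT,
 *   } tcp_state_next_t;
 *
 * and analogously for the per-state enums above, so every TCP input node
 * shares the same set of outgoing arcs. */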
78 #define tcp_next_output(is_ip4) (is_ip4 ? TCP_NEXT_TCP4_OUTPUT \
79  : TCP_NEXT_TCP6_OUTPUT)
80 
81 #define tcp_next_drop(is_ip4) (is_ip4 ? TCP_NEXT_DROP4 \
82  : TCP_NEXT_DROP6)
83 
84 /**
85  * Validate segment sequence number. As per RFC793:
86  *
87  * Segment  Receive  Test
88  * Length   Window
89  * -------  -------  -------------------------------------------
90  *    0        0     SEG.SEQ = RCV.NXT
91  *    0       >0     RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
92  *   >0        0     not acceptable
93  *   >0       >0     RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
94  *                   or RCV.NXT =< SEG.SEQ+SEG.LEN-1 < RCV.NXT+RCV.WND
95  *
96  * This ultimately amounts to checking whether the segment falls within the window.
97  * The one important difference compared to RFC793 is that we use rcv_las,
98  * or the rcv_nxt at last ack sent instead of rcv_nxt since that's the
99  * peer's reference when computing our receive window.
100  *
101  * This:
102  * seq_leq (end_seq, tc->rcv_las + tc->rcv_wnd) && seq_geq (seq, tc->rcv_las)
103  * however, is too strict when we have retransmits. Instead we just check that
104  * the seq is not beyond the right edge and that the end of the segment is not
105  * less than the left edge.
106  *
107  * N.B. rcv_nxt and rcv_wnd are both updated in this node if acks are sent, so
108  * use rcv_nxt in the right edge window test instead of rcv_las.
109  *
110  */
111 always_inline u8
112 tcp_segment_in_rcv_wnd (tcp_connection_t * tc, u32 seq, u32 end_seq)
113 {
114  return (seq_geq (end_seq, tc->rcv_las)
115  && seq_leq (seq, tc->rcv_nxt + tc->rcv_wnd));
116 }
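/* Illustration added in editing (not part of the original source): with
 * rcv_las = 1000, rcv_nxt = 1000 and rcv_wnd = 500, a segment covering
 * [900, 1100) is accepted (end_seq >= rcv_las and seq <= rcv_nxt + rcv_wnd),
 * while seq = 1600 lies beyond the right edge and end_seq = 900 lies
 * entirely to the left of the last acked point, so both are rejected. */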
117 
118 /**
119  * RFC1323: Check against wrapped sequence numbers (PAWS). If we have
120  * timestamp to echo and it's less than tsval_recent, drop segment
121  * but still send an ACK in order to retain TCP's mechanism for detecting
122  * and recovering from half-open connections
123  *
124  * Or at least that's what the theory says. It seems that this might not work
125  * very well with packet reordering and fast retransmit. XXX
126  */
127 always_inline int
128 tcp_segment_check_paws (tcp_connection_t * tc)
129 {
130  return tcp_opts_tstamp (&tc->rcv_opts)
131  && timestamp_lt (tc->rcv_opts.tsval, tc->tsval_recent);
132 }
133 
134 /**
135  * Update tsval recent
136  */
137 always_inline void
138 tcp_update_timestamp (tcp_connection_t * tc, u32 seq, u32 seq_end)
139 {
140  /*
141  * RFC1323: If Last.ACK.sent falls within the range of sequence numbers
142  * of an incoming segment:
143  * SEG.SEQ <= Last.ACK.sent < SEG.SEQ + SEG.LEN
144  * then the TSval from the segment is copied to TS.Recent;
145  * otherwise, the TSval is ignored.
146  */
147  if (tcp_opts_tstamp (&tc->rcv_opts) && seq_leq (seq, tc->rcv_las)
148  && seq_leq (tc->rcv_las, seq_end))
149  {
150  ASSERT (timestamp_leq (tc->tsval_recent, tc->rcv_opts.tsval));
151  tc->tsval_recent = tc->rcv_opts.tsval;
152  tc->tsval_recent_age = tcp_time_now_w_thread (tc->c_thread_index);
153  }
154 }
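/* Illustration added in editing (not part of the original source): with
 * rcv_las = 5000, a segment covering [4800, 5200) satisfies
 * seq <= rcv_las <= seq_end, so its TSval becomes the new tsval_recent;
 * a segment covering [5200, 5400) does not include rcv_las and leaves
 * tsval_recent unchanged. */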
155 
156 static void
157 tcp_handle_rst (tcp_connection_t * tc)
158 {
159  switch (tc->rst_state)
160  {
161  case TCP_STATE_SYN_RCVD:
162  /* Cleanup everything. App wasn't notified yet */
163  session_transport_delete_notify (&tc->connection);
164  tcp_connection_cleanup (tc);
165  break;
166  case TCP_STATE_SYN_SENT:
167  session_stream_connect_notify (&tc->connection, SESSION_E_REFUSED);
168  tcp_connection_cleanup (tc);
169  break;
170  case TCP_STATE_ESTABLISHED:
171  session_transport_reset_notify (&tc->connection);
172  session_transport_closed_notify (&tc->connection);
173  break;
174  case TCP_STATE_CLOSE_WAIT:
175  case TCP_STATE_FIN_WAIT_1:
176  case TCP_STATE_FIN_WAIT_2:
177  case TCP_STATE_CLOSING:
178  case TCP_STATE_LAST_ACK:
179  session_transport_closed_notify (&tc->connection);
180  break;
181  case TCP_STATE_CLOSED:
182  case TCP_STATE_TIME_WAIT:
183  break;
184  default:
185  TCP_DBG ("reset state: %u", tc->state);
186  }
187 }
188 
189 static void
190 tcp_program_reset_ntf (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
191 {
192  if (!tcp_disconnect_pending (tc))
193  {
194  tc->rst_state = tc->state;
195  vec_add1 (wrk->pending_resets, tc->c_c_index);
196  tcp_disconnect_pending_on (tc);
197  }
198 }
199 
200 /**
201  * Handle reset packet
202  *
203  * Programs disconnect/reset notification that should be sent
204  * later by calling @ref tcp_handle_disconnects
205  */
206 static void
207 tcp_rcv_rst (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
208 {
209  TCP_EVT (TCP_EVT_RST_RCVD, tc);
210  switch (tc->state)
211  {
212  case TCP_STATE_SYN_RCVD:
213  tcp_program_reset_ntf (wrk, tc);
214  tcp_connection_set_state (tc, TCP_STATE_CLOSED);
215  break;
216  case TCP_STATE_SYN_SENT:
217  /* Do not program ntf because the connection is half-open */
218  tc->rst_state = tc->state;
219  tcp_handle_rst (tc);
220  break;
221  case TCP_STATE_ESTABLISHED:
222  tcp_connection_timers_reset (tc);
223  tcp_cong_recovery_off (tc);
224  tcp_program_reset_ntf (wrk, tc);
225  tcp_connection_set_state (tc, TCP_STATE_CLOSED);
226  tcp_program_cleanup (wrk, tc);
227  break;
228  case TCP_STATE_CLOSE_WAIT:
229  case TCP_STATE_FIN_WAIT_1:
230  case TCP_STATE_FIN_WAIT_2:
231  case TCP_STATE_CLOSING:
232  case TCP_STATE_LAST_ACK:
233  tcp_connection_timers_reset (tc);
234  tcp_cong_recovery_off (tc);
235  tcp_program_reset_ntf (wrk, tc);
236  /* Make sure we mark the session as closed. In some states we may
237  * be still trying to send data */
238  tcp_connection_set_state (tc, TCP_STATE_CLOSED);
239  tcp_program_cleanup (wrk, tc);
240  break;
241  case TCP_STATE_CLOSED:
242  case TCP_STATE_TIME_WAIT:
243  break;
244  default:
245  TCP_DBG ("reset state: %u", tc->state);
246  }
247 }
248 
249 /**
250  * Validate incoming segment as per RFC793 p. 69 and RFC1323 p. 19
251  *
252  * It first verifies if the segment has a wrapped sequence number (PAWS) and
253  * then does the processing associated with the first four steps (ignoring
254  * security and precedence): sequence number, rst bit and syn bit checks.
255  *
256  * @return 0 if segments passes validation.
257  */
258 static int
259 tcp_segment_validate (tcp_worker_ctx_t * wrk, tcp_connection_t * tc0,
260  vlib_buffer_t * b0, tcp_header_t * th0, u32 * error0)
261 {
262  /* We could get a burst of RSTs interleaved with acks */
263  if (PREDICT_FALSE (tc0->state == TCP_STATE_CLOSED))
264  {
265  tcp_send_reset (tc0);
266  *error0 = TCP_ERROR_CONNECTION_CLOSED;
267  goto error;
268  }
269 
270  if (PREDICT_FALSE (!tcp_ack (th0) && !tcp_rst (th0) && !tcp_syn (th0)))
271  {
272  *error0 = TCP_ERROR_SEGMENT_INVALID;
273  goto error;
274  }
275 
276  if (PREDICT_FALSE (tcp_options_parse (th0, &tc0->rcv_opts, 0)))
277  {
278  *error0 = TCP_ERROR_OPTIONS;
279  goto error;
280  }
281 
282  if (tcp_segment_check_paws (tc0))
283  {
284  *error0 = TCP_ERROR_PAWS;
285  TCP_EVT (TCP_EVT_PAWS_FAIL, tc0, vnet_buffer (b0)->tcp.seq_number,
286  vnet_buffer (b0)->tcp.seq_end);
287 
288  /* If it just so happens that a segment updates tsval_recent for a
289  * segment over 24 days old, invalidate tsval_recent. */
290  if (timestamp_lt (tc0->tsval_recent_age + TCP_PAWS_IDLE,
291  tcp_time_now_w_thread (tc0->c_thread_index)))
292  {
293  tc0->tsval_recent = tc0->rcv_opts.tsval;
294  clib_warning ("paws failed: 24-day old segment");
295  }
296  /* Drop after ack if not rst. Resets can fail paws check as per
297  * RFC 7323 sec. 5.2: When an <RST> segment is received, it MUST NOT
298  * be subjected to the PAWS check by verifying an acceptable value in
299  * SEG.TSval */
300  else if (!tcp_rst (th0))
301  {
302  tcp_program_ack (tc0);
303  TCP_EVT (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
304  goto error;
305  }
306  }
307 
308  /* 1st: check sequence number */
309  if (!tcp_segment_in_rcv_wnd (tc0, vnet_buffer (b0)->tcp.seq_number,
310  vnet_buffer (b0)->tcp.seq_end))
311  {
312  /* SYN/SYN-ACK retransmit */
313  if (tcp_syn (th0)
314  && vnet_buffer (b0)->tcp.seq_number == tc0->rcv_nxt - 1)
315  {
316  tcp_options_parse (th0, &tc0->rcv_opts, 1);
317  if (tc0->state == TCP_STATE_SYN_RCVD)
318  {
319  tcp_send_synack (tc0);
320  TCP_EVT (TCP_EVT_SYN_RCVD, tc0, 0);
321  *error0 = TCP_ERROR_SYNS_RCVD;
322  }
323  else
324  {
325  tcp_program_ack (tc0);
326  TCP_EVT (TCP_EVT_SYNACK_RCVD, tc0);
327  *error0 = TCP_ERROR_SYN_ACKS_RCVD;
328  }
329  goto error;
330  }
331 
332  /* If our window is 0 and the packet is in sequence, let it pass
333  * through for ack processing. It should be dropped later. */
334  if (tc0->rcv_wnd < tc0->snd_mss
335  && tc0->rcv_nxt == vnet_buffer (b0)->tcp.seq_number)
336  goto check_reset;
337 
338  /* If we entered recovery and peer did so as well, there's a chance that
339  * dup acks won't be acceptable on either end because seq_end may be less
340  * than rcv_las. This can happen if acks are lost in both directions. */
341  if (tcp_in_recovery (tc0)
342  && seq_geq (vnet_buffer (b0)->tcp.seq_number,
343  tc0->rcv_las - tc0->rcv_wnd)
344  && seq_leq (vnet_buffer (b0)->tcp.seq_end,
345  tc0->rcv_nxt + tc0->rcv_wnd))
346  goto check_reset;
347 
348  *error0 = TCP_ERROR_RCV_WND;
349 
350  /* If we advertised a zero rcv_wnd and the segment is in the past or the
351  * next one that we expect, it is probably a window probe */
352  if ((tc0->flags & TCP_CONN_ZERO_RWND_SENT)
353  && seq_lt (vnet_buffer (b0)->tcp.seq_end,
354  tc0->rcv_las + tc0->rcv_opts.mss))
355  *error0 = TCP_ERROR_ZERO_RWND;
356 
357  tc0->errors.below_data_wnd += seq_lt (vnet_buffer (b0)->tcp.seq_end,
358  tc0->rcv_las);
359 
360  /* If not RST, send dup ack */
361  if (!tcp_rst (th0))
362  {
363  tcp_program_dupack (tc0);
364  TCP_EVT (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
365  }
366  goto error;
367 
368  check_reset:
369  ;
370  }
371 
372  /* 2nd: check the RST bit */
373  if (PREDICT_FALSE (tcp_rst (th0)))
374  {
375  tcp_rcv_rst (wrk, tc0);
376  *error0 = TCP_ERROR_RST_RCVD;
377  goto error;
378  }
379 
380  /* 3rd: check security and precedence (skip) */
381 
382  /* 4th: check the SYN bit (in window) */
383  if (PREDICT_FALSE (tcp_syn (th0)))
384  {
385  /* As per RFC5961 send challenge ack instead of reset */
386  tcp_program_ack (tc0);
387  *error0 = TCP_ERROR_SPURIOUS_SYN;
388  goto error;
389  }
390 
391  /* If segment in window, save timestamp */
392  tcp_update_timestamp (tc0, vnet_buffer (b0)->tcp.seq_number,
393  vnet_buffer (b0)->tcp.seq_end);
394  return 0;
395 
396 error:
397  return -1;
398 }
399 
400 always_inline int
401 tcp_rcv_ack_no_cc (tcp_connection_t * tc, vlib_buffer_t * b, u32 * error)
402 {
403  /* SND.UNA =< SEG.ACK =< SND.NXT */
404  if (!(seq_leq (tc->snd_una, vnet_buffer (b)->tcp.ack_number)
405  && seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
406  {
407  if (seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)
408  && seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una))
409  {
410  tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
411  goto acceptable;
412  }
413  *error = TCP_ERROR_ACK_INVALID;
414  return -1;
415  }
416 
417 acceptable:
418  tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
419  tc->snd_una = vnet_buffer (b)->tcp.ack_number;
420  *error = TCP_ERROR_ACK_OK;
421  return 0;
422 }
423 
424 /**
425  * Compute smoothed RTT as per VJ's '88 SIGCOMM and RFC6298
426  *
427  * Note that although in the original article srtt and rttvar are scaled
428  * to minimize round-off errors, here we don't. Instead, we rely on
429  * better precision time measurements.
430  *
431  * A known limitation of the algorithm is that a drop in rtt results in a
432  * rttvar increase and bigger RTO.
433  *
434  * mrtt must be provided in @ref TCP_TICK multiples, i.e., in us. Note that
435  * timestamps are measured as ms ticks so they must be converted before
436  * calling this function.
437  */
438 static void
439 tcp_estimate_rtt (tcp_connection_t * tc, u32 mrtt)
440 {
441  int err, diff;
442 
443  err = mrtt - tc->srtt;
444  tc->srtt = clib_max ((int) tc->srtt + (err >> 3), 1);
445  diff = (clib_abs (err) - (int) tc->rttvar) >> 2;
446  tc->rttvar = clib_max ((int) tc->rttvar + diff, 1);
447 }
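/* Worked example added in editing (not part of the original source),
 * assuming srtt and rttvar are kept in TCP_TICK (us) units as the comment
 * above suggests: starting from srtt = 8000 and rttvar = 4000, a sample
 * mrtt = 16000 gives err = 8000, so
 *   srtt   <- 8000 + (8000 >> 3) = 9000
 *   rttvar <- 4000 + ((8000 - 4000) >> 2) = 5000
 * i.e. the RFC6298 smoothing srtt += err/8 and rttvar += (|err| - rttvar)/4,
 * without the fixed-point scaling used in the original paper. */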
448 
449 static inline void
450 tcp_estimate_rtt_us (tcp_connection_t * tc, f64 mrtt)
451 {
452  tc->mrtt_us = tc->mrtt_us + (mrtt - tc->mrtt_us) * 0.125;
453 }
454 
455 /**
456  * Update rtt estimate
457  *
458  * We have potentially three sources of rtt measurements:
459  *
460  * TSOPT        difference between current and echoed timestamp. It has ms
461  *              precision and can be computed per ack
462  * ACK timing   one sequence number is tracked per rtt with us (micro second)
463  *              precision.
464  * rate sample  if enabled, all outstanding bytes are tracked with us
465  *              precision. Every ack and sack are a rtt sample
466  *
467  * Middle boxes are known to fiddle with TCP options so we give higher
468  * priority to ACK timing.
469  *
470  * For now, rate sample rtts are only used under congestion.
471  */
472 static int
473 tcp_update_rtt (tcp_connection_t * tc, tcp_rate_sample_t * rs, u32 ack)
474 {
475  u32 mrtt = 0;
476 
477  /* Karn's rule, part 1. Don't use retransmitted segments to estimate
478  * RTT because they're ambiguous. */
479  if (tcp_in_cong_recovery (tc))
480  {
481  /* Accept rtt estimates for samples that have not been retransmitted */
482  if (!(tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
483  || (rs->flags & TCP_BTS_IS_RXT))
484  goto done;
485  if (rs->rtt_time)
486  tcp_estimate_rtt_us (tc, rs->rtt_time);
487  mrtt = rs->rtt_time * THZ;
488  goto estimate_rtt;
489  }
490 
491  if (tc->rtt_ts && seq_geq (ack, tc->rtt_seq))
492  {
493  f64 sample = tcp_time_now_us (tc->c_thread_index) - tc->rtt_ts;
494  tcp_estimate_rtt_us (tc, sample);
495  mrtt = clib_max ((u32) (sample * THZ), 1);
496  /* Allow measuring of a new RTT */
497  tc->rtt_ts = 0;
498  }
499  /* As per RFC7323 TSecr can be used for RTTM only if the segment advances
500  * snd_una, i.e., the left side of the send window:
501  * seq_lt (tc->snd_una, ack). This is a condition for calling update_rtt */
502  else if (tcp_opts_tstamp (&tc->rcv_opts) && tc->rcv_opts.tsecr)
503  {
504  mrtt = clib_max (tcp_tstamp (tc) - tc->rcv_opts.tsecr, 1);
505  mrtt *= TCP_TSTP_TO_HZ;
506  }
507 
508 estimate_rtt:
509 
510  /* Ignore dubious measurements */
511  if (mrtt == 0 || mrtt > TCP_RTT_MAX)
512  goto done;
513 
514  tcp_estimate_rtt (tc, mrtt);
515 
516 done:
517 
518  /* If we got here something must've been ACKed so make sure boff is 0,
519  * even if mrtt is not valid since we update the rto lower */
520  tc->rto_boff = 0;
521  tcp_update_rto (tc);
522 
523  return 0;
524 }
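/* Note added in editing (not part of the original source): for the
 * timestamp-based path above, a tsecr that is 3 timestamp ticks old yields
 * mrtt = 3 * TCP_TSTP_TO_HZ, i.e. the millisecond-granularity timestamp
 * delta converted to the us units tcp_estimate_rtt expects, as described in
 * the comment preceding tcp_estimate_rtt. */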
525 
526 static void
527 tcp_estimate_initial_rtt (tcp_connection_t * tc)
528 {
529  u8 thread_index = vlib_num_workers ()? 1 : 0;
530  int mrtt;
531 
532  if (tc->rtt_ts)
533  {
534  tc->mrtt_us = tcp_time_now_us (thread_index) - tc->rtt_ts;
535  tc->mrtt_us = clib_max (tc->mrtt_us, 0.0001);
536  mrtt = clib_max ((u32) (tc->mrtt_us * THZ), 1);
537  tc->rtt_ts = 0;
538  }
539  else
540  {
541  mrtt = tcp_tstamp (tc) - tc->rcv_opts.tsecr;
542  mrtt = clib_max (mrtt, 1) * TCP_TSTP_TO_HZ;
543  /* Due to retransmits we don't know the initial mrtt */
544  if (tc->rto_boff && mrtt > 1 * THZ)
545  mrtt = 1 * THZ;
546  tc->mrtt_us = (f64) mrtt *TCP_TICK;
547  }
548 
549  if (mrtt > 0 && mrtt < TCP_RTT_MAX)
550  {
551  /* First measurement as per RFC 6298 */
552  tc->srtt = mrtt;
553  tc->rttvar = mrtt >> 1;
554  }
555  tcp_update_rto (tc);
556 }
557 
558 /**
559  * Dequeue bytes for connections that have received acks in last burst
560  */
561 static void
562 tcp_handle_postponed_dequeues (tcp_worker_ctx_t * wrk)
563 {
564  u32 thread_index = wrk->vm->thread_index;
565  u32 *pending_deq_acked;
566  tcp_connection_t *tc;
567  int i;
568 
569  if (!vec_len (wrk->pending_deq_acked))
570  return;
571 
572  pending_deq_acked = wrk->pending_deq_acked;
573  for (i = 0; i < vec_len (pending_deq_acked); i++)
574  {
575  tc = tcp_connection_get (pending_deq_acked[i], thread_index);
576  tc->flags &= ~TCP_CONN_DEQ_PENDING;
577 
578  if (PREDICT_FALSE (!tc->burst_acked))
579  continue;
580 
581  /* Dequeue the newly ACKed bytes */
582  session_tx_fifo_dequeue_drop (&tc->connection, tc->burst_acked);
583  tcp_validate_txf_size (tc, tc->snd_nxt - tc->snd_una);
584 
585  if (tcp_is_descheduled (tc))
586  tcp_reschedule (tc);
587 
588  /* If everything has been acked, stop retransmit timer
589  * otherwise update. */
590  tcp_retransmit_timer_update (&wrk->timer_wheel, tc);
591 
592  /* Update pacer based on our new cwnd estimate */
593  tcp_connection_tx_pacer_update (tc);
594 
595  tc->burst_acked = 0;
596  }
597  _vec_len (wrk->pending_deq_acked) = 0;
598 }
599 
600 static void
601 tcp_program_dequeue (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
602 {
603  if (!(tc->flags & TCP_CONN_DEQ_PENDING))
604  {
605  vec_add1 (wrk->pending_deq_acked, tc->c_c_index);
606  tc->flags |= TCP_CONN_DEQ_PENDING;
607  }
608  tc->burst_acked += tc->bytes_acked;
609 }
610 
611 /**
612  * Try to update snd_wnd based on feedback received from peer.
613  *
614  * If successful, and new window is 'effectively' 0, activate persist
615  * timer.
616  */
617 static void
618 tcp_update_snd_wnd (tcp_connection_t * tc, u32 seq, u32 ack, u32 snd_wnd)
619 {
620  /* If (SND.WL1 < SEG.SEQ or (SND.WL1 = SEG.SEQ and SND.WL2 =< SEG.ACK)), set
621  * SND.WND <- SEG.WND, set SND.WL1 <- SEG.SEQ, and set SND.WL2 <- SEG.ACK */
622  if (seq_lt (tc->snd_wl1, seq)
623  || (tc->snd_wl1 == seq && seq_leq (tc->snd_wl2, ack)))
624  {
625  tc->snd_wnd = snd_wnd;
626  tc->snd_wl1 = seq;
627  tc->snd_wl2 = ack;
628  TCP_EVT (TCP_EVT_SND_WND, tc);
629 
630  if (PREDICT_FALSE (tc->snd_wnd < tc->snd_mss))
631  {
632  /* Set persist timer if not set and we just got 0 wnd */
633  if (!tcp_timer_is_active (tc, TCP_TIMER_PERSIST)
634  && !tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT))
635  {
636  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
637  tcp_persist_timer_set (&wrk->timer_wheel, tc);
638  }
639  }
640  else
641  {
642  if (PREDICT_FALSE (tcp_timer_is_active (tc, TCP_TIMER_PERSIST)))
643  {
644  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
645  tcp_persist_timer_reset (&wrk->timer_wheel, tc);
646  }
647 
648  if (PREDICT_FALSE (tcp_is_descheduled (tc)))
649  tcp_reschedule (tc);
650 
651  if (PREDICT_FALSE (!tcp_in_recovery (tc) && tc->rto_boff > 0))
652  {
653  tc->rto_boff = 0;
654  tcp_update_rto (tc);
655  }
656  }
657  }
658 }
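/* Illustration added in editing (not part of the original source) of the
 * RFC793 rule above: with snd_wl1 = 100 and snd_wl2 = 50, a segment with
 * seq = 200 always refreshes snd_wnd; one with seq = 100 refreshes it only
 * if ack >= 50; one with seq = 90 carries older information and is ignored,
 * so stale window advertisements are never applied. */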
659 
660 /**
661  * Init loss recovery/fast recovery.
662  *
663  * Triggered by dup acks as opposed to timer timeout. Note that cwnd is
664  * updated in @ref tcp_cc_handle_event after fast retransmit
665  */
666 static void
667 tcp_cc_init_congestion (tcp_connection_t * tc)
668 {
669  tcp_fastrecovery_on (tc);
670  tc->snd_congestion = tc->snd_nxt;
671  tc->cwnd_acc_bytes = 0;
672  tc->snd_rxt_bytes = 0;
673  tc->rxt_delivered = 0;
674  tc->prr_delivered = 0;
675  tc->prr_start = tc->snd_una;
676  tc->prev_ssthresh = tc->ssthresh;
677  tc->prev_cwnd = tc->cwnd;
678 
679  tc->snd_rxt_ts = tcp_tstamp (tc);
680  tcp_cc_congestion (tc);
681 
682  /* Post retransmit update cwnd to ssthresh and account for the
683  * three segments that have left the network and should've been
684  * buffered at the receiver XXX */
685  if (!tcp_opts_sack_permitted (&tc->rcv_opts))
686  tc->cwnd += TCP_DUPACK_THRESHOLD * tc->snd_mss;
687 
688  tc->fr_occurences += 1;
689  TCP_EVT (TCP_EVT_CC_EVT, tc, 4);
690 }
691 
692 static void
693 tcp_cc_congestion_undo (tcp_connection_t * tc)
694 {
695  tc->cwnd = tc->prev_cwnd;
696  tc->ssthresh = tc->prev_ssthresh;
697  tcp_cc_undo_recovery (tc);
698  ASSERT (tc->rto_boff == 0);
699  TCP_EVT (TCP_EVT_CC_EVT, tc, 5);
700 }
701 
702 static inline u8
703 tcp_cc_is_spurious_timeout_rxt (tcp_connection_t * tc)
704 {
705  return (tcp_in_recovery (tc) && tc->rto_boff == 1
706  && tc->snd_rxt_ts
707  && tcp_opts_tstamp (&tc->rcv_opts)
708  && timestamp_lt (tc->rcv_opts.tsecr, tc->snd_rxt_ts));
709 }
710 
711 static inline u8
712 tcp_cc_is_spurious_retransmit (tcp_connection_t * tc)
713 {
714  return (tcp_cc_is_spurious_timeout_rxt (tc));
715 }
716 
717 static inline u8
718 tcp_should_fastrecover (tcp_connection_t * tc, u8 has_sack)
719 {
720  if (!has_sack)
721  {
722  /* If one of the two conditions below holds, reset dupacks because
723  * we're probably after timeout (RFC6582 heuristics).
724  * If Cumulative ack does not cover more than congestion threshold,
725  * and:
726  * 1) The following doesn't hold: The congestion window is greater
727  * than SMSS bytes and the difference between highest_ack
728  * and prev_highest_ack is at most 4*SMSS bytes
729  * 2) Echoed timestamp in the last non-dup ack does not equal the
730  * stored timestamp
731  */
732  if (seq_leq (tc->snd_una, tc->snd_congestion)
733  && ((!(tc->cwnd > tc->snd_mss
734  && tc->bytes_acked <= 4 * tc->snd_mss))
735  || (tc->rcv_opts.tsecr != tc->tsecr_last_ack)))
736  {
737  tc->rcv_dupacks = 0;
738  return 0;
739  }
740  }
741  return tc->sack_sb.lost_bytes || tc->rcv_dupacks >= tc->sack_sb.reorder;
742 }
743 
744 static int
745 tcp_cc_recover (tcp_connection_t * tc)
746 {
747  sack_scoreboard_hole_t *hole;
748  u8 is_spurious = 0;
749 
750  ASSERT (tcp_in_cong_recovery (tc));
751 
752  if (tcp_cc_is_spurious_retransmit (tc))
753  {
754  tcp_cc_congestion_undo (tc);
755  is_spurious = 1;
756  }
757 
758  tcp_connection_tx_pacer_reset (tc, tc->cwnd, 0 /* start bucket */ );
759  tc->rcv_dupacks = 0;
760 
761  /* Previous recovery left us congested. Continue sending as part
762  * of the current recovery event with an updated snd_congestion */
763  if (tc->sack_sb.sacked_bytes)
764  {
765  tc->snd_congestion = tc->snd_nxt;
766  tcp_program_retransmit (tc);
767  return is_spurious;
768  }
769 
770  tc->rxt_delivered = 0;
771  tc->snd_rxt_bytes = 0;
772  tc->snd_rxt_ts = 0;
773  tc->prr_delivered = 0;
774  tc->rtt_ts = 0;
775  tc->flags &= ~TCP_CONN_RXT_PENDING;
776 
777  hole = scoreboard_first_hole (&tc->sack_sb);
778  if (hole && hole->start == tc->snd_una && hole->end == tc->snd_nxt)
779  scoreboard_clear (&tc->sack_sb);
780 
781  if (!tcp_in_recovery (tc) && !is_spurious)
782  tcp_cc_recovered (tc);
783 
784  tcp_fastrecovery_off (tc);
785  tcp_fastrecovery_first_off (tc);
786  tcp_recovery_off (tc);
787  TCP_EVT (TCP_EVT_CC_EVT, tc, 3);
788 
789  ASSERT (tc->rto_boff == 0);
790  ASSERT (!tcp_in_cong_recovery (tc));
791  ASSERT (tcp_scoreboard_is_sane_post_recovery (tc));
792 
793  return is_spurious;
794 }
795 
796 static void
797 tcp_cc_update (tcp_connection_t * tc, tcp_rate_sample_t * rs)
798 {
799  ASSERT (!tcp_in_cong_recovery (tc) || tcp_is_lost_fin (tc));
800 
801  /* Congestion avoidance */
802  tcp_cc_rcv_ack (tc, rs);
803 
804  /* If a cumulative ack, make sure dupacks is 0 */
805  tc->rcv_dupacks = 0;
806 
807  /* When dupacks hits the threshold we only enter fast retransmit if
808  * cumulative ack covers more than snd_congestion. Should snd_una
809  * wrap this test may fail under otherwise valid circumstances.
810  * Therefore, proactively update snd_congestion when wrap detected. */
811  if (PREDICT_FALSE
812  (seq_leq (tc->snd_congestion, tc->snd_una - tc->bytes_acked)
813  && seq_gt (tc->snd_congestion, tc->snd_una)))
814  tc->snd_congestion = tc->snd_una - 1;
815 }
816 
817 /**
818  * One function to rule them all ... and in the darkness bind them
819  */
820 static void
821 tcp_cc_handle_event (tcp_connection_t * tc, tcp_rate_sample_t * rs,
822  u32 is_dack)
823 {
824  u8 has_sack = tcp_opts_sack_permitted (&tc->rcv_opts);
825 
826  /* If reneging, wait for timer based retransmits */
827  if (PREDICT_FALSE (tcp_is_lost_fin (tc) || tc->sack_sb.is_reneging))
828  return;
829 
830  /*
831  * If not in recovery, figure out if we should enter
832  */
833  if (!tcp_in_cong_recovery (tc))
834  {
835  ASSERT (is_dack);
836 
837  tc->rcv_dupacks++;
838  TCP_EVT (TCP_EVT_DUPACK_RCVD, tc, 1);
839  tcp_cc_rcv_cong_ack (tc, TCP_CC_DUPACK, rs);
840 
841  if (tcp_should_fastrecover (tc, has_sack))
842  {
843  tcp_cc_init_congestion (tc);
844 
845  if (has_sack)
846  scoreboard_init_rxt (&tc->sack_sb, tc->snd_una);
847 
848  tcp_connection_tx_pacer_reset (tc, tc->cwnd, 0 /* start bucket */ );
849  tcp_program_retransmit (tc);
850  }
851 
852  return;
853  }
854 
855  /*
856  * Already in recovery
857  */
858 
859  /*
860  * Process (re)transmit feedback. Output path uses this to decide how much
861  * more data to release into the network
862  */
863  if (has_sack)
864  {
865  if (!tc->bytes_acked && tc->sack_sb.rxt_sacked)
866  tcp_fastrecovery_first_on (tc);
867 
868  tc->rxt_delivered += tc->sack_sb.rxt_sacked;
869  tc->prr_delivered += tc->bytes_acked + tc->sack_sb.last_sacked_bytes
870  - tc->sack_sb.last_bytes_delivered;
871  }
872  else
873  {
874  if (is_dack)
875  {
876  tc->rcv_dupacks += 1;
877  TCP_EVT (TCP_EVT_DUPACK_RCVD, tc, 1);
878  }
879  tc->rxt_delivered = clib_min (tc->rxt_delivered + tc->bytes_acked,
880  tc->snd_rxt_bytes);
881  if (is_dack)
882  tc->prr_delivered += clib_min (tc->snd_mss,
883  tc->snd_nxt - tc->snd_una);
884  else
885  tc->prr_delivered += tc->bytes_acked - clib_min (tc->bytes_acked,
886  tc->snd_mss *
887  tc->rcv_dupacks);
888 
889  /* If partial ack, assume that the first un-acked segment was lost */
890  if (tc->bytes_acked || tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
891  tcp_fastrecovery_first_on (tc);
892  }
893 
894  /*
895  * See if we can exit and stop retransmitting
896  */
897  if (seq_geq (tc->snd_una, tc->snd_congestion))
898  {
899  /* If spurious return, we've already updated everything */
900  if (tcp_cc_recover (tc))
901  {
902  tc->tsecr_last_ack = tc->rcv_opts.tsecr;
903  return;
904  }
905 
906  /* Treat as congestion avoidance ack */
907  tcp_cc_rcv_ack (tc, rs);
908  return;
909  }
910 
911  tcp_program_retransmit (tc);
912 
913  /*
914  * Notify cc of the event
915  */
916 
917  if (!tc->bytes_acked)
918  {
919  tcp_cc_rcv_cong_ack (tc, TCP_CC_DUPACK, rs);
920  return;
921  }
922 
923  /* RFC6675: If the incoming ACK is a cumulative acknowledgment,
924  * reset dupacks to 0. Also needed if in congestion recovery */
925  tc->rcv_dupacks = 0;
926 
927  if (tcp_in_recovery (tc))
928  tcp_cc_rcv_ack (tc, rs);
929  else
930  tcp_cc_rcv_cong_ack (tc, TCP_CC_PARTIALACK, rs);
931 }
932 
933 static void
934 tcp_handle_old_ack (tcp_connection_t * tc, tcp_rate_sample_t * rs)
935 {
936  if (!tcp_in_cong_recovery (tc))
937  return;
938 
939  if (tcp_opts_sack_permitted (&tc->rcv_opts))
940  tcp_rcv_sacks (tc, tc->snd_una);
941 
942  tc->bytes_acked = 0;
943 
944  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
945  tcp_bt_sample_delivery_rate (tc, rs);
946 
947  tcp_cc_handle_event (tc, rs, 1);
948 }
949 
950 /**
951  * Check if duplicate ack as per RFC5681 Sec. 2
952  */
953 static u8
954 tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 prev_snd_wnd,
955  u32 prev_snd_una)
956 {
957  return ((vnet_buffer (b)->tcp.ack_number == prev_snd_una)
958  && seq_gt (tc->snd_nxt, tc->snd_una)
959  && (vnet_buffer (b)->tcp.seq_end == vnet_buffer (b)->tcp.seq_number)
960  && (prev_snd_wnd == tc->snd_wnd));
961 }
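/* Note added in editing (not part of the original source): per the checks
 * above, a segment counts as a duplicate ack only if it is a pure ack
 * (seq_end == seq_number, i.e. no payload), re-acknowledges snd_una while
 * data is still outstanding (snd_nxt > snd_una) and advertises an unchanged
 * window, which mirrors the RFC5681 Sec. 2 definition. */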
962 
963 /**
964  * Checks if ack is a congestion control event.
965  */
966 static u8
967 tcp_ack_is_cc_event (tcp_connection_t * tc, vlib_buffer_t * b,
968  u32 prev_snd_wnd, u32 prev_snd_una, u8 * is_dack)
969 {
970  /* Check if ack is duplicate. Per RFC 6675, ACKs that SACK new data are
971  * defined to be 'duplicate' as well */
972  *is_dack = tc->sack_sb.last_sacked_bytes
973  || tcp_ack_is_dupack (tc, b, prev_snd_wnd, prev_snd_una);
974 
975  return (*is_dack || tcp_in_cong_recovery (tc));
976 }
977 
978 /**
979  * Process incoming ACK
980  */
981 static int
982 tcp_rcv_ack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, vlib_buffer_t * b,
983  tcp_header_t * th, u32 * error)
984 {
985  u32 prev_snd_wnd, prev_snd_una;
986  tcp_rate_sample_t rs = { 0 };
987  u8 is_dack;
988 
989  TCP_EVT (TCP_EVT_CC_STAT, tc);
990 
991  /* If the ACK acks something not yet sent (SEG.ACK > SND.NXT) */
992  if (PREDICT_FALSE (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
993  {
994  /* We've probably entered recovery and the peer still has some
995  * of the data we've sent. Update snd_nxt and accept the ack */
996  if (seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)
997  && seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una))
998  {
999  tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
1000  goto process_ack;
1001  }
1002 
1003  tc->errors.above_ack_wnd += 1;
1004  *error = TCP_ERROR_ACK_FUTURE;
1005  TCP_EVT (TCP_EVT_ACK_RCV_ERR, tc, 0, vnet_buffer (b)->tcp.ack_number);
1006  return -1;
1007  }
1008 
1009  /* If old ACK, probably it's an old dupack */
1010  if (PREDICT_FALSE (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una)))
1011  {
1012  tc->errors.below_ack_wnd += 1;
1013  *error = TCP_ERROR_ACK_OLD;
1014  TCP_EVT (TCP_EVT_ACK_RCV_ERR, tc, 1, vnet_buffer (b)->tcp.ack_number);
1015 
1016  if (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una - tc->rcv_wnd))
1017  return -1;
1018 
1019  tcp_handle_old_ack (tc, &rs);
1020 
1021  /* Don't drop yet */
1022  return 0;
1023  }
1024 
1025 process_ack:
1026 
1027  /*
1028  * Looks okay, process feedback
1029  */
1030 
1031  if (tcp_opts_sack_permitted (&tc->rcv_opts))
1032  tcp_rcv_sacks (tc, vnet_buffer (b)->tcp.ack_number);
1033 
1034  prev_snd_wnd = tc->snd_wnd;
1035  prev_snd_una = tc->snd_una;
1036  tcp_update_snd_wnd (tc, vnet_buffer (b)->tcp.seq_number,
1037  vnet_buffer (b)->tcp.ack_number,
1038  clib_net_to_host_u16 (th->window) << tc->snd_wscale);
1039  tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
1040  tc->snd_una = vnet_buffer (b)->tcp.ack_number;
1041  tcp_validate_txf_size (tc, tc->bytes_acked);
1042 
1043  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1044  tcp_bt_sample_delivery_rate (tc, &rs);
1045 
1046  if (tc->bytes_acked + tc->sack_sb.last_sacked_bytes)
1047  {
1048  tcp_update_rtt (tc, &rs, vnet_buffer (b)->tcp.ack_number);
1049  if (tc->bytes_acked)
1050  tcp_program_dequeue (wrk, tc);
1051  }
1052 
1053  TCP_EVT (TCP_EVT_ACK_RCVD, tc);
1054 
1055  /*
1056  * Check if we have congestion event
1057  */
1058 
1059  if (tcp_ack_is_cc_event (tc, b, prev_snd_wnd, prev_snd_una, &is_dack))
1060  {
1061  tcp_cc_handle_event (tc, &rs, is_dack);
1062  tc->dupacks_in += is_dack;
1063  if (!tcp_in_cong_recovery (tc))
1064  {
1065  *error = TCP_ERROR_ACK_OK;
1066  return 0;
1067  }
1068  *error = TCP_ERROR_ACK_DUP;
1069  if (vnet_buffer (b)->tcp.data_len || tcp_is_fin (th))
1070  return 0;
1071  return -1;
1072  }
1073 
1074  /*
1075  * Update congestion control (slow start/congestion avoidance)
1076  */
1077  tcp_cc_update (tc, &rs);
1078  *error = TCP_ERROR_ACK_OK;
1079  return 0;
1080 }
1081 
1082 static void
1083 tcp_program_disconnect (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
1084 {
1085  if (!tcp_disconnect_pending (tc))
1086  {
1087  vec_add1 (wrk->pending_disconnects, tc->c_c_index);
1088  tcp_disconnect_pending_on (tc);
1089  }
1090 }
1091 
1092 static void
1093 tcp_handle_disconnects (tcp_worker_ctx_t * wrk)
1094 {
1095  u32 thread_index, *pending_disconnects, *pending_resets;
1096  tcp_connection_t *tc;
1097  int i;
1098 
1099  if (vec_len (wrk->pending_disconnects))
1100  {
1101  thread_index = wrk->vm->thread_index;
1102  pending_disconnects = wrk->pending_disconnects;
1103  for (i = 0; i < vec_len (pending_disconnects); i++)
1104  {
1105  tc = tcp_connection_get (pending_disconnects[i], thread_index);
1106  tcp_disconnect_pending_off (tc);
1107  session_transport_closing_notify (&tc->connection);
1108  }
1109  _vec_len (wrk->pending_disconnects) = 0;
1110  }
1111 
1112  if (vec_len (wrk->pending_resets))
1113  {
1114  thread_index = wrk->vm->thread_index;
1115  pending_resets = wrk->pending_resets;
1116  for (i = 0; i < vec_len (pending_resets); i++)
1117  {
1118  tc = tcp_connection_get (pending_resets[i], thread_index);
1119  tcp_disconnect_pending_off (tc);
1120  tcp_handle_rst (tc);
1121  }
1122  _vec_len (wrk->pending_resets) = 0;
1123  }
1124 }
1125 
1126 static void
1127 tcp_rcv_fin (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, vlib_buffer_t * b,
1128  u32 * error)
1129 {
1130  /* Reject out-of-order fins */
1131  if (vnet_buffer (b)->tcp.seq_end != tc->rcv_nxt)
1132  return;
1133 
1134  /* Account for the FIN and send ack */
1135  tc->rcv_nxt += 1;
1136  tc->flags |= TCP_CONN_FINRCVD;
1137  tcp_program_ack (tc);
1138  /* Enter CLOSE-WAIT and notify session. To avoid lingering
1139  * in CLOSE-WAIT, set timer (reuse WAITCLOSE). */
1140  tcp_connection_set_state (tc, TCP_STATE_CLOSE_WAIT);
1141  tcp_program_disconnect (wrk, tc);
1142  tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_WAITCLOSE,
1143  tcp_cfg.closewait_time);
1144  TCP_EVT (TCP_EVT_FIN_RCVD, tc);
1145  *error = TCP_ERROR_FIN_RCVD;
1146 }
1147 
1148 /** Enqueue data for delivery to application */
1149 static int
1150 tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b,
1151  u16 data_len)
1152 {
1153  int written, error = TCP_ERROR_ENQUEUED;
1154 
1155  ASSERT (seq_geq (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));
1156  ASSERT (data_len);
1157  written = session_enqueue_stream_connection (&tc->connection, b, 0,
1158  1 /* queue event */ , 1);
1159  tc->bytes_in += written;
1160 
1161  TCP_EVT (TCP_EVT_INPUT, tc, 0, data_len, written);
1162 
1163  /* Update rcv_nxt */
1164  if (PREDICT_TRUE (written == data_len))
1165  {
1166  tc->rcv_nxt += written;
1167  }
1168  /* If more data written than expected, account for out-of-order bytes. */
1169  else if (written > data_len)
1170  {
1171  tc->rcv_nxt += written;
1172  TCP_EVT (TCP_EVT_CC_INPUT, tc, data_len, written);
1173  }
1174  else if (written > 0)
1175  {
1176  /* We've written something but FIFO is probably full now */
1177  tc->rcv_nxt += written;
1178  error = TCP_ERROR_PARTIALLY_ENQUEUED;
1179  }
1180  else
1181  {
1182  /* Packet made it through for ack processing */
1183  if (tc->rcv_wnd < tc->snd_mss)
1184  return TCP_ERROR_ZERO_RWND;
1185 
1186  return TCP_ERROR_FIFO_FULL;
1187  }
1188 
1189  /* Update SACK list if need be */
1190  if (tcp_opts_sack_permitted (&tc->rcv_opts))
1191  {
1192  /* Remove SACK blocks that have been delivered */
1193  tcp_update_sack_list (tc, tc->rcv_nxt, tc->rcv_nxt);
1194  }
1195 
1196  return error;
1197 }
1198 
1199 /** Enqueue out-of-order data */
1200 static int
1201 tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b,
1202  u16 data_len)
1203 {
1204  session_t *s0;
1205  int rv, offset;
1206 
1207  ASSERT (seq_gt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));
1208  ASSERT (data_len);
1209 
1210  /* Enqueue out-of-order data with relative offset */
1211  rv = session_enqueue_stream_connection (&tc->connection, b,
1212  vnet_buffer (b)->tcp.seq_number -
1213  tc->rcv_nxt, 0 /* queue event */ ,
1214  0);
1215 
1216  /* Nothing written */
1217  if (rv)
1218  {
1219  TCP_EVT (TCP_EVT_INPUT, tc, 1, data_len, 0);
1220  return TCP_ERROR_FIFO_FULL;
1221  }
1222 
1223  TCP_EVT (TCP_EVT_INPUT, tc, 1, data_len, data_len);
1224  tc->bytes_in += data_len;
1225 
1226  /* Update SACK list if in use */
1227  if (tcp_opts_sack_permitted (&tc->rcv_opts))
1228  {
1229  ooo_segment_t *newest;
1230  u32 start, end;
1231 
1232  s0 = session_get (tc->c_s_index, tc->c_thread_index);
1233 
1234  /* Get the newest segment from the fifo */
1235  newest = svm_fifo_newest_ooo_segment (s0->rx_fifo);
1236  if (newest)
1237  {
1238  offset = ooo_segment_offset_prod (s0->rx_fifo, newest);
1239  ASSERT (offset <= vnet_buffer (b)->tcp.seq_number - tc->rcv_nxt);
1240  start = tc->rcv_nxt + offset;
1241  end = start + ooo_segment_length (s0->rx_fifo, newest);
1242  tcp_update_sack_list (tc, start, end);
1243  svm_fifo_newest_ooo_segment_reset (s0->rx_fifo);
1244  TCP_EVT (TCP_EVT_CC_SACKS, tc);
1245  }
1246  }
1247 
1248  return TCP_ERROR_ENQUEUED_OOO;
1249 }
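/* Illustration added in editing (not part of the original source): with
 * rcv_nxt = 1000, a 200-byte segment starting at seq = 1300 is enqueued at
 * relative offset 300 in the rx fifo and the SACK list gains (or extends) a
 * block covering [1300, 1500), which subsequent dup acks advertise back to
 * the peer. */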
1250 
1251 static int
1251 tcp_buffer_discard_bytes (vlib_buffer_t * b, u32 n_bytes_to_drop)
1253 {
1254  u32 discard, first = b->current_length;
1255  vlib_main_t *vm = vlib_get_main ();
1256 
1257  /* Handle multi-buffer segments */
1258  if (n_bytes_to_drop > b->current_length)
1259  {
1260  if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
1261  return -1;
1262  do
1263  {
1264  discard = clib_min (n_bytes_to_drop, b->current_length);
1265  vlib_buffer_advance (b, discard);
1266  b = vlib_get_buffer (vm, b->next_buffer);
1267  n_bytes_to_drop -= discard;
1268  }
1269  while (n_bytes_to_drop);
1270  if (n_bytes_to_drop > first)
1271  b->total_length_not_including_first_buffer -= n_bytes_to_drop - first;
1272  }
1273  else
1274  vlib_buffer_advance (b, n_bytes_to_drop);
1275  vnet_buffer (b)->tcp.data_len -= n_bytes_to_drop;
1276  return 0;
1277 }
1278 
1279 /**
1280  * Receive buffer for connection and handle acks
1281  *
1282  * It handles both in order or out-of-order data.
1283  */
1284 static int
1285 tcp_segment_rcv (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1286  vlib_buffer_t * b)
1287 {
1288  u32 error, n_bytes_to_drop, n_data_bytes;
1289 
1290  vlib_buffer_advance (b, vnet_buffer (b)->tcp.data_offset);
1291  n_data_bytes = vnet_buffer (b)->tcp.data_len;
1292  ASSERT (n_data_bytes);
1293  tc->data_segs_in += 1;
1294 
1295  /* Make sure we don't consume trailing bytes */
1296  if (PREDICT_FALSE (b->current_length > n_data_bytes))
1297  b->current_length = n_data_bytes;
1298 
1299  /* Handle out-of-order data */
1300  if (PREDICT_FALSE (vnet_buffer (b)->tcp.seq_number != tc->rcv_nxt))
1301  {
1302  /* Old sequence numbers allowed through because they overlapped
1303  * the rx window */
1304  if (seq_lt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt))
1305  {
1306  /* Completely in the past (possible retransmit). Ack
1307  * retransmissions since we may not have any data to send */
1308  if (seq_leq (vnet_buffer (b)->tcp.seq_end, tc->rcv_nxt))
1309  {
1310  tcp_program_dupack (tc);
1311  tc->errors.below_data_wnd++;
1312  error = TCP_ERROR_SEGMENT_OLD;
1313  goto done;
1314  }
1315 
1316  /* Chop off the bytes in the past and see if what is left
1317  * can be enqueued in order */
1318  n_bytes_to_drop = tc->rcv_nxt - vnet_buffer (b)->tcp.seq_number;
1319  n_data_bytes -= n_bytes_to_drop;
1320  vnet_buffer (b)->tcp.seq_number = tc->rcv_nxt;
1321  if (tcp_buffer_discard_bytes (b, n_bytes_to_drop))
1322  {
1323  error = TCP_ERROR_SEGMENT_OLD;
1324  goto done;
1325  }
1326  goto in_order;
1327  }
1328 
1329  /* RFC2581: Enqueue and send DUPACK for fast retransmit */
1330  error = tcp_session_enqueue_ooo (tc, b, n_data_bytes);
1331  tcp_program_dupack (tc);
1332  TCP_EVT (TCP_EVT_DUPACK_SENT, tc, vnet_buffer (b)->tcp);
1333  tc->errors.above_data_wnd += seq_gt (vnet_buffer (b)->tcp.seq_end,
1334  tc->rcv_las + tc->rcv_wnd);
1335  goto done;
1336  }
1337 
1338 in_order:
1339 
1340  /* In order data, enqueue. Fifo figures out by itself if any out-of-order
1341  * segments can be enqueued after fifo tail offset changes. */
1342  error = tcp_session_enqueue_data (tc, b, n_data_bytes);
1343  tcp_program_ack (tc);
1344 
1345 done:
1346  return error;
1347 }
1348 
1349 typedef struct
1350 {
1351  tcp_header_t tcp_header;
1352  tcp_connection_t tcp_connection;
1353 } tcp_rx_trace_t;
1354 
1355 static u8 *
1356 format_tcp_rx_trace (u8 * s, va_list * args)
1357 {
1358  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1359  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1360  tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
1361  tcp_connection_t *tc = &t->tcp_connection;
1362  u32 indent = format_get_indent (s);
1363 
1364  s = format (s, "%U state %U\n%U%U", format_tcp_connection_id, tc,
1365  format_tcp_state, tc->state, format_white_space, indent,
1366  format_tcp_header, &t->tcp_header, 128);
1367 
1368  return s;
1369 }
1370 
1371 static u8 *
1372 format_tcp_rx_trace_short (u8 * s, va_list * args)
1373 {
1374  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1375  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1376  tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
1377 
1378  s = format (s, "%d -> %d (%U)",
1379  clib_net_to_host_u16 (t->tcp_header.dst_port),
1380  clib_net_to_host_u16 (t->tcp_header.src_port), format_tcp_state,
1381  t->tcp_connection.state);
1382 
1383  return s;
1384 }
1385 
1386 static void
1387 tcp_set_rx_trace_data (tcp_rx_trace_t * t0, tcp_connection_t * tc0,
1388  tcp_header_t * th0, vlib_buffer_t * b0, u8 is_ip4)
1389 {
1390  if (tc0)
1391  {
1392  clib_memcpy_fast (&t0->tcp_connection, tc0,
1393  sizeof (t0->tcp_connection));
1394  }
1395  else
1396  {
1397  th0 = tcp_buffer_hdr (b0);
1398  }
1399  clib_memcpy_fast (&t0->tcp_header, th0, sizeof (t0->tcp_header));
1400 }
1401 
1402 static void
1403 tcp_established_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
1404  vlib_frame_t * frame, u8 is_ip4)
1405 {
1406  u32 *from, n_left;
1407 
1408  n_left = frame->n_vectors;
1409  from = vlib_frame_vector_args (frame);
1410 
1411  while (n_left >= 1)
1412  {
1413  tcp_connection_t *tc0;
1414  tcp_rx_trace_t *t0;
1415  tcp_header_t *th0;
1416  vlib_buffer_t *b0;
1417  u32 bi0;
1418 
1419  bi0 = from[0];
1420  b0 = vlib_get_buffer (vm, bi0);
1421 
1422  if (b0->flags & VLIB_BUFFER_IS_TRACED)
1423  {
1424  t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
1425  tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
1426  vm->thread_index);
1427  th0 = tcp_buffer_hdr (b0);
1428  tcp_set_rx_trace_data (t0, tc0, th0, b0, is_ip4);
1429  }
1430 
1431  from += 1;
1432  n_left -= 1;
1433  }
1434 }
1435 
1436 always_inline void
1437 tcp_node_inc_counter_i (vlib_main_t * vm, u32 tcp4_node, u32 tcp6_node,
1438  u8 is_ip4, u32 evt, u32 val)
1439 {
1440  if (is_ip4)
1441  vlib_node_increment_counter (vm, tcp4_node, evt, val);
1442  else
1443  vlib_node_increment_counter (vm, tcp6_node, evt, val);
1444 }
1445 
1446 #define tcp_maybe_inc_counter(node_id, err, count) \
1447 { \
1448  if (next0 != tcp_next_drop (is_ip4)) \
1449  tcp_node_inc_counter_i (vm, tcp4_##node_id##_node.index, \
1450  tcp6_##node_id##_node.index, is_ip4, err, \
1451  1); \
1452 }
1453 #define tcp_inc_counter(node_id, err, count) \
1454  tcp_node_inc_counter_i (vm, tcp4_##node_id##_node.index, \
1455  tcp6_##node_id##_node.index, is_ip4, \
1456  err, count)
1457 #define tcp_maybe_inc_err_counter(cnts, err) \
1458 { \
1459  cnts[err] += (next0 != tcp_next_drop (is_ip4)); \
1460 }
1461 #define tcp_inc_err_counter(cnts, err, val) \
1462 { \
1463  cnts[err] += val; \
1464 }
1465 #define tcp_store_err_counters(node_id, cnts) \
1466 { \
1467  int i; \
1468  for (i = 0; i < TCP_N_ERROR; i++) \
1469  if (cnts[i]) \
1470  tcp_inc_counter(node_id, i, cnts[i]); \
1471 }
1472 
1473 
1474 always_inline uword
1475 tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
1476  vlib_frame_t * frame, int is_ip4)
1477 {
1478  u32 thread_index = vm->thread_index, errors = 0;
1479  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
1480  u32 n_left_from, *from, *first_buffer;
1481  u16 err_counters[TCP_N_ERROR] = { 0 };
1482 
1483  if (node->flags & VLIB_NODE_FLAG_TRACE)
1484  tcp_established_trace_frame (vm, node, frame, is_ip4);
1485 
1486  first_buffer = from = vlib_frame_vector_args (frame);
1487  n_left_from = frame->n_vectors;
1488 
1489  while (n_left_from > 0)
1490  {
1491  u32 bi0, error0 = TCP_ERROR_ACK_OK;
1492  vlib_buffer_t *b0;
1493  tcp_header_t *th0;
1494  tcp_connection_t *tc0;
1495 
1496  if (n_left_from > 1)
1497  {
1498  vlib_buffer_t *pb;
1499  pb = vlib_get_buffer (vm, from[1]);
1500  vlib_prefetch_buffer_header (pb, LOAD);
1501  CLIB_PREFETCH (pb->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
1502  }
1503 
1504  bi0 = from[0];
1505  from += 1;
1506  n_left_from -= 1;
1507 
1508  b0 = vlib_get_buffer (vm, bi0);
1509  tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
1510  thread_index);
1511 
1512  if (PREDICT_FALSE (tc0 == 0))
1513  {
1514  error0 = TCP_ERROR_INVALID_CONNECTION;
1515  goto done;
1516  }
1517 
1518  th0 = tcp_buffer_hdr (b0);
1519 
1520  /* TODO header prediction fast path */
1521 
1522  /* 1-4: check SEQ, RST, SYN */
1523  if (PREDICT_FALSE (tcp_segment_validate (wrk, tc0, b0, th0, &error0)))
1524  {
1525  TCP_EVT (TCP_EVT_SEG_INVALID, tc0, vnet_buffer (b0)->tcp);
1526  goto done;
1527  }
1528 
1529  /* 5: check the ACK field */
1530  if (PREDICT_FALSE (tcp_rcv_ack (wrk, tc0, b0, th0, &error0)))
1531  goto done;
1532 
1533  /* 6: check the URG bit TODO */
1534 
1535  /* 7: process the segment text */
1536  if (vnet_buffer (b0)->tcp.data_len)
1537  error0 = tcp_segment_rcv (wrk, tc0, b0);
1538 
1539  /* 8: check the FIN bit */
1540  if (PREDICT_FALSE (tcp_is_fin (th0)))
1541  tcp_rcv_fin (wrk, tc0, b0, &error0);
1542 
1543  done:
1544  tcp_inc_err_counter (err_counters, error0, 1);
1545  }
1546 
1547  errors = session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP,
1548  thread_index);
1549  err_counters[TCP_ERROR_MSG_QUEUE_FULL] = errors;
1550  tcp_store_err_counters (established, err_counters);
1551  tcp_handle_postponed_dequeues (wrk);
1552  tcp_handle_disconnects (wrk);
1553  vlib_buffer_free (vm, first_buffer, frame->n_vectors);
1554 
1555  return frame->n_vectors;
1556 }
1557 
1558 VLIB_NODE_FN (tcp4_established_node) (vlib_main_t * vm,
1559  vlib_node_runtime_t * node,
1560  vlib_frame_t * from_frame)
1561 {
1562  return tcp46_established_inline (vm, node, from_frame, 1 /* is_ip4 */ );
1563 }
1564 
1565 VLIB_NODE_FN (tcp6_established_node) (vlib_main_t * vm,
1566  vlib_node_runtime_t * node,
1567  vlib_frame_t * from_frame)
1568 {
1569  return tcp46_established_inline (vm, node, from_frame, 0 /* is_ip4 */ );
1570 }
1571 
1572 /* *INDENT-OFF* */
1573 VLIB_REGISTER_NODE (tcp4_established_node) =
1574 {
1575  .name = "tcp4-established",
1576  /* Takes a vector of packets. */
1577  .vector_size = sizeof (u32),
1578  .n_errors = TCP_N_ERROR,
1579  .error_strings = tcp_error_strings,
1580  .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
1581  .next_nodes =
1582  {
1583 #define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
1584  foreach_tcp_state_next
1585 #undef _
1586  },
1587  .format_trace = format_tcp_rx_trace_short,
1588 };
1589 /* *INDENT-ON* */
1590 
1591 /* *INDENT-OFF* */
1592 VLIB_REGISTER_NODE (tcp6_established_node) =
1593 {
1594  .name = "tcp6-established",
1595  /* Takes a vector of packets. */
1596  .vector_size = sizeof (u32),
1597  .n_errors = TCP_N_ERROR,
1598  .error_strings = tcp_error_strings,
1599  .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
1600  .next_nodes =
1601  {
1602 #define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
1603  foreach_tcp_state_next
1604 #undef _
1605  },
1606  .format_trace = format_tcp_rx_trace_short,
1607 };
1608 /* *INDENT-ON* */
1609 
1610 
1611 static u8
1612 tcp_lookup_is_valid (tcp_connection_t * tc, vlib_buffer_t * b,
1613  tcp_header_t * hdr)
1614 {
1615  transport_connection_t *tmp = 0;
1616  u64 handle;
1617 
1618  if (!tc)
1619  return 1;
1620 
1621  /* Proxy case */
1622  if (tc->c_lcl_port == 0 && tc->state == TCP_STATE_LISTEN)
1623  return 1;
1624 
1625  u8 is_ip_valid = 0, val_l, val_r;
1626 
1627  if (tc->connection.is_ip4)
1628  {
1630 
1631  val_l = !ip4_address_compare (&ip4_hdr->dst_address,
1632  &tc->connection.lcl_ip.ip4);
1633  val_l = val_l || ip_is_zero (&tc->connection.lcl_ip, 1);
1634  val_r = !ip4_address_compare (&ip4_hdr->src_address,
1635  &tc->connection.rmt_ip.ip4);
1636  val_r = val_r || tc->state == TCP_STATE_LISTEN;
1637  is_ip_valid = val_l && val_r;
1638  }
1639  else
1640  {
1641  ip6_header_t *ip6_hdr = vlib_buffer_get_current (b);
1642 
1643  val_l = !ip6_address_compare (&ip6_hdr->dst_address,
1644  &tc->connection.lcl_ip.ip6);
1645  val_l = val_l || ip_is_zero (&tc->connection.lcl_ip, 0);
1646  val_r = !ip6_address_compare (&ip6_hdr->src_address,
1647  &tc->connection.rmt_ip.ip6);
1648  val_r = val_r || tc->state == TCP_STATE_LISTEN;
1649  is_ip_valid = val_l && val_r;
1650  }
1651 
1652  u8 is_valid = (tc->c_lcl_port == hdr->dst_port
1653  && (tc->state == TCP_STATE_LISTEN
1654  || tc->c_rmt_port == hdr->src_port) && is_ip_valid);
1655 
1656  if (!is_valid)
1657  {
1658  handle = session_lookup_half_open_handle (&tc->connection);
1659  tmp = session_lookup_half_open_connection (handle & 0xFFFFFFFF,
1660  tc->c_proto, tc->c_is_ip4);
1661 
1662  if (tmp)
1663  {
1664  if (tmp->lcl_port == hdr->dst_port
1665  && tmp->rmt_port == hdr->src_port)
1666  {
1667  TCP_DBG ("half-open is valid!");
1668  is_valid = 1;
1669  }
1670  }
1671  }
1672  return is_valid;
1673 }
1674 
1675 /**
1676  * Lookup transport connection
1677  */
1678 static tcp_connection_t *
1679 tcp_lookup_connection (u32 fib_index, vlib_buffer_t * b, u8 thread_index,
1680  u8 is_ip4)
1681 {
1682  tcp_header_t *tcp;
1683  transport_connection_t *tconn;
1684  tcp_connection_t *tc;
1685  u8 is_filtered = 0;
1686  if (is_ip4)
1687  {
1688  ip4_header_t *ip4;
1689  ip4 = vlib_buffer_get_current (b);
1690  tcp = ip4_next_header (ip4);
1691  tconn = session_lookup_connection_wt4 (fib_index,
1692  &ip4->dst_address,
1693  &ip4->src_address,
1694  tcp->dst_port,
1695  tcp->src_port,
1696  TRANSPORT_PROTO_TCP,
1697  thread_index, &is_filtered);
1698  tc = tcp_get_connection_from_transport (tconn);
1699  ASSERT (tcp_lookup_is_valid (tc, b, tcp));
1700  }
1701  else
1702  {
1703  ip6_header_t *ip6;
1704  ip6 = vlib_buffer_get_current (b);
1705  tcp = ip6_next_header (ip6);
1706  tconn = session_lookup_connection_wt6 (fib_index,
1707  &ip6->dst_address,
1708  &ip6->src_address,
1709  tcp->dst_port,
1710  tcp->src_port,
1711  TRANSPORT_PROTO_TCP,
1712  thread_index, &is_filtered);
1713  tc = tcp_get_connection_from_transport (tconn);
1714  ASSERT (tcp_lookup_is_valid (tc, b, tcp));
1715  }
1716  return tc;
1717 }
1718 
1719 static tcp_connection_t *
1720 tcp_lookup_listener (vlib_buffer_t * b, u32 fib_index, int is_ip4)
1721 {
1722  session_t *s;
1723 
1724  if (is_ip4)
1725  {
1726  ip4_header_t *ip4 = vlib_buffer_get_current (b);
1727  tcp_header_t *tcp = tcp_buffer_hdr (b);
1728  s = session_lookup_listener4 (fib_index,
1729  &ip4->dst_address,
1730  tcp->dst_port, TRANSPORT_PROTO_TCP, 1);
1731  }
1732  else
1733  {
1734  ip6_header_t *ip6 = vlib_buffer_get_current (b);
1735  tcp_header_t *tcp = tcp_buffer_hdr (b);
1736  s = session_lookup_listener6 (fib_index,
1737  &ip6->dst_address,
1738  tcp->dst_port, TRANSPORT_PROTO_TCP, 1);
1739 
1740  }
1741  if (PREDICT_TRUE (s != 0))
1742  return tcp_get_connection_from_transport (transport_get_listener
1743  (TRANSPORT_PROTO_TCP,
1744  s->connection_index));
1745  else
1746  return 0;
1747 }
1748 
1749 always_inline void
1750 tcp_check_tx_offload (tcp_connection_t * tc, int is_ipv4)
1751 {
1752  vnet_main_t *vnm = vnet_get_main ();
1753  const dpo_id_t *dpo;
1754  const load_balance_t *lb;
1755  vnet_hw_interface_t *hw_if;
1756  u32 sw_if_idx, lb_idx;
1757 
1758  if (is_ipv4)
1759  {
1760  ip4_address_t *dst_addr = &(tc->c_rmt_ip.ip4);
1761  lb_idx = ip4_fib_forwarding_lookup (tc->c_fib_index, dst_addr);
1762  }
1763  else
1764  {
1765  ip6_address_t *dst_addr = &(tc->c_rmt_ip.ip6);
1766  lb_idx = ip6_fib_table_fwding_lookup (tc->c_fib_index, dst_addr);
1767  }
1768 
1769  lb = load_balance_get (lb_idx);
1770  if (PREDICT_FALSE (lb->lb_n_buckets > 1))
1771  return;
1772  dpo = load_balance_get_bucket_i (lb, 0);
1773 
1774  sw_if_idx = dpo_get_urpf (dpo);
1775  if (PREDICT_FALSE (sw_if_idx == ~0))
1776  return;
1777 
1778  hw_if = vnet_get_sup_hw_interface (vnm, sw_if_idx);
1780  tc->cfg_flags |= TCP_CFG_F_TSO;
1781 }
1782 
1783 always_inline uword
1784 tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
1785  vlib_frame_t * from_frame, int is_ip4)
1786 {
1787  u32 n_left_from, *from, *first_buffer, errors = 0;
1788  u32 my_thread_index = vm->thread_index;
1789  tcp_worker_ctx_t *wrk = tcp_get_worker (my_thread_index);
1790 
1791  from = first_buffer = vlib_frame_vector_args (from_frame);
1792  n_left_from = from_frame->n_vectors;
1793 
1794  while (n_left_from > 0)
1795  {
1796  u32 bi0, ack0, seq0, error0 = TCP_ERROR_NONE;
1797  tcp_connection_t *tc0, *new_tc0;
1798  tcp_header_t *tcp0 = 0;
1799  tcp_rx_trace_t *t0;
1800  vlib_buffer_t *b0;
1801 
1802  bi0 = from[0];
1803  from += 1;
1804  n_left_from -= 1;
1805 
1806  b0 = vlib_get_buffer (vm, bi0);
1807  tc0 =
1808  tcp_half_open_connection_get (vnet_buffer (b0)->tcp.connection_index);
1809  if (PREDICT_FALSE (tc0 == 0))
1810  {
1811  error0 = TCP_ERROR_INVALID_CONNECTION;
1812  goto drop;
1813  }
1814 
1815  /* Half-open completed recently but the connection wasn't removed
1816  * yet by the owning thread */
1817  if (PREDICT_FALSE (tc0->flags & TCP_CONN_HALF_OPEN_DONE))
1818  {
1819  /* Make sure the connection actually exists */
1820  ASSERT (tcp_lookup_connection (tc0->c_fib_index, b0,
1821  my_thread_index, is_ip4));
1822  error0 = TCP_ERROR_SPURIOUS_SYN_ACK;
1823  goto drop;
1824  }
1825 
1826  ack0 = vnet_buffer (b0)->tcp.ack_number;
1827  seq0 = vnet_buffer (b0)->tcp.seq_number;
1828  tcp0 = tcp_buffer_hdr (b0);
1829 
1830  /* Crude check to see if the connection handle does not match
1831  * the packet. Probably connection just switched to established */
1832  if (PREDICT_FALSE (tcp0->dst_port != tc0->c_lcl_port
1833  || tcp0->src_port != tc0->c_rmt_port))
1834  {
1835  error0 = TCP_ERROR_INVALID_CONNECTION;
1836  goto drop;
1837  }
1838 
1839  if (PREDICT_FALSE (!tcp_ack (tcp0) && !tcp_rst (tcp0)
1840  && !tcp_syn (tcp0)))
1841  {
1842  error0 = TCP_ERROR_SEGMENT_INVALID;
1843  goto drop;
1844  }
1845 
1846  /* SYNs consume sequence numbers */
1847  vnet_buffer (b0)->tcp.seq_end += tcp_is_syn (tcp0);
1848 
1849  /*
1850  * 1. check the ACK bit
1851  */
1852 
1853  /*
1854  * If the ACK bit is set
1855  * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send a reset (unless
1856  * the RST bit is set, if so drop the segment and return)
1857  * <SEQ=SEG.ACK><CTL=RST>
1858  * and discard the segment. Return.
1859  * If SND.UNA =< SEG.ACK =< SND.NXT then the ACK is acceptable.
1860  */
1861  if (tcp_ack (tcp0))
1862  {
1863  if (seq_leq (ack0, tc0->iss) || seq_gt (ack0, tc0->snd_nxt))
1864  {
1865  if (!tcp_rst (tcp0))
1866  tcp_send_reset_w_pkt (tc0, b0, my_thread_index, is_ip4);
1867  error0 = TCP_ERROR_RCV_WND;
1868  goto drop;
1869  }
1870 
1871  /* Make sure ACK is valid */
1872  if (seq_gt (tc0->snd_una, ack0))
1873  {
1874  error0 = TCP_ERROR_ACK_INVALID;
1875  goto drop;
1876  }
1877  }
1878 
1879  /*
1880  * 2. check the RST bit
1881  */
1882 
1883  if (tcp_rst (tcp0))
1884  {
1885  /* If ACK is acceptable, signal client that peer is not
1886  * willing to accept connection and drop connection*/
1887  if (tcp_ack (tcp0))
1888  tcp_rcv_rst (wrk, tc0);
1889  error0 = TCP_ERROR_RST_RCVD;
1890  goto drop;
1891  }
1892 
1893  /*
1894  * 3. check the security and precedence (skipped)
1895  */
1896 
1897  /*
1898  * 4. check the SYN bit
1899  */
1900 
1901  /* No SYN flag. Drop. */
1902  if (!tcp_syn (tcp0))
1903  {
1904  error0 = TCP_ERROR_SEGMENT_INVALID;
1905  goto drop;
1906  }
1907 
1908  /* Parse options */
1909  if (tcp_options_parse (tcp0, &tc0->rcv_opts, 1))
1910  {
1911  error0 = TCP_ERROR_OPTIONS;
1912  goto drop;
1913  }
1914 
1915  /* Valid SYN or SYN-ACK. Move connection from half-open pool to
1916  * current thread pool. */
1917  new_tc0 = tcp_connection_alloc_w_base (my_thread_index, tc0);
1918  new_tc0->rcv_nxt = vnet_buffer (b0)->tcp.seq_end;
1919  new_tc0->irs = seq0;
1920  new_tc0->timers[TCP_TIMER_RETRANSMIT_SYN] = TCP_TIMER_HANDLE_INVALID;
1921  new_tc0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1922 
1923  if (tcp_opts_tstamp (&new_tc0->rcv_opts))
1924  {
1925  new_tc0->tsval_recent = new_tc0->rcv_opts.tsval;
1926  new_tc0->tsval_recent_age = tcp_time_now ();
1927  }
1928 
1929  if (tcp_opts_wscale (&new_tc0->rcv_opts))
1930  new_tc0->snd_wscale = new_tc0->rcv_opts.wscale;
1931  else
1932  new_tc0->rcv_wscale = 0;
1933 
1934  new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
1935  << new_tc0->snd_wscale;
1936  new_tc0->snd_wl1 = seq0;
1937  new_tc0->snd_wl2 = ack0;
1938 
1939  tcp_connection_init_vars (new_tc0);
1940 
1941  /* SYN-ACK: See if we can switch to ESTABLISHED state */
1942  if (PREDICT_TRUE (tcp_ack (tcp0)))
1943  {
1944  /* Our SYN is ACKed: we have iss < ack = snd_una */
1945 
1946  /* TODO Dequeue acknowledged segments if we support Fast Open */
1947  new_tc0->snd_una = ack0;
1948  new_tc0->state = TCP_STATE_ESTABLISHED;
1949 
1950  /* Make sure las is initialized for the wnd computation */
1951  new_tc0->rcv_las = new_tc0->rcv_nxt;
1952 
1953  /* Notify app that we have connection. If session layer can't
1954  * allocate session send reset */
1955  if (session_stream_connect_notify (&new_tc0->connection,
1956  SESSION_E_NONE))
1957  {
1958  tcp_send_reset_w_pkt (new_tc0, b0, my_thread_index, is_ip4);
1959  tcp_connection_cleanup (new_tc0);
1960  error0 = TCP_ERROR_CREATE_SESSION_FAIL;
1961  goto cleanup_ho;
1962  }
1963 
1964  transport_fifos_init_ooo (&new_tc0->connection);
1965  new_tc0->tx_fifo_size =
1966  transport_tx_fifo_size (&new_tc0->connection);
1967  /* Update rtt with the syn-ack sample */
1968  tcp_estimate_initial_rtt (new_tc0);
1969  TCP_EVT (TCP_EVT_SYNACK_RCVD, new_tc0);
1970  error0 = TCP_ERROR_SYN_ACKS_RCVD;
1971  }
1972  /* SYN: Simultaneous open. Change state to SYN-RCVD and send SYN-ACK */
1973  else
1974  {
1975  new_tc0->state = TCP_STATE_SYN_RCVD;
1976 
1977  /* Notify app that we have connection */
1978  if (session_stream_connect_notify (&new_tc0->connection,
1979  SESSION_E_NONE))
1980  {
1981  tcp_connection_cleanup (new_tc0);
1982  tcp_send_reset_w_pkt (tc0, b0, my_thread_index, is_ip4);
1983  TCP_EVT (TCP_EVT_RST_SENT, tc0);
1984  error0 = TCP_ERROR_CREATE_SESSION_FAIL;
1985  goto cleanup_ho;
1986  }
1987 
1988  transport_fifos_init_ooo (&new_tc0->connection);
1989  new_tc0->tx_fifo_size =
1990  transport_tx_fifo_size (&new_tc0->connection);
1991  new_tc0->rtt_ts = 0;
1992  tcp_init_snd_vars (new_tc0);
1993  tcp_send_synack (new_tc0);
1994  error0 = TCP_ERROR_SYNS_RCVD;
1995  goto cleanup_ho;
1996  }
1997 
1998  if (!(new_tc0->cfg_flags & TCP_CFG_F_NO_TSO))
1999  tcp_check_tx_offload (new_tc0, is_ip4);
2000 
2001  /* Read data, if any */
2002  if (PREDICT_FALSE (vnet_buffer (b0)->tcp.data_len))
2003  {
2004  clib_warning ("rcvd data in syn-sent");
2005  error0 = tcp_segment_rcv (wrk, new_tc0, b0);
2006  if (error0 == TCP_ERROR_ACK_OK)
2007  error0 = TCP_ERROR_SYN_ACKS_RCVD;
2008  }
2009  else
2010  {
2011  /* Send ack now instead of programming it because connection was
2012  * just established and it's not optional. */
2013  tcp_send_ack (new_tc0);
2014  }
2015 
2016  cleanup_ho:
2017 
2018  /* If this is not the owning thread, wait for syn retransmit to
2019  * expire and cleanup then */
2020  if (tcp_half_open_connection_cleanup (tc0))
2021  tc0->flags |= TCP_CONN_HALF_OPEN_DONE;
2022 
2023  drop:
2024 
2025  tcp_inc_counter (syn_sent, error0, 1);
2026  if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED) && tcp0 != 0))
2027  {
2028  t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
2029  clib_memcpy_fast (&t0->tcp_header, tcp0, sizeof (t0->tcp_header));
2030  clib_memcpy_fast (&t0->tcp_connection, tc0,
2031  sizeof (t0->tcp_connection));
2032  }
2033  }
2034 
2035  errors = session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP,
2036  my_thread_index);
2037  tcp_inc_counter (syn_sent, TCP_ERROR_MSG_QUEUE_FULL, errors);
2038  vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);
2039  tcp_handle_disconnects (wrk);
2040 
2041  return from_frame->n_vectors;
2042 }
2043 
2044 VLIB_NODE_FN (tcp4_syn_sent_node) (vlib_main_t * vm,
2045  vlib_node_runtime_t * node,
2046  vlib_frame_t * from_frame)
2047 {
2048  return tcp46_syn_sent_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2049 }
2050 
2051 VLIB_NODE_FN (tcp6_syn_sent_node) (vlib_main_t * vm,
2052  vlib_node_runtime_t * node,
2053  vlib_frame_t * from_frame)
2054 {
2055  return tcp46_syn_sent_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2056 }
2057 
2058 /* *INDENT-OFF* */
2059 VLIB_REGISTER_NODE (tcp4_syn_sent_node) =
2060 {
2061  .name = "tcp4-syn-sent",
2062  /* Takes a vector of packets. */
2063  .vector_size = sizeof (u32),
2064  .n_errors = TCP_N_ERROR,
2065  .error_strings = tcp_error_strings,
2066  .n_next_nodes = TCP_SYN_SENT_N_NEXT,
2067  .next_nodes =
2068  {
2069 #define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
2070     foreach_tcp_state_next
2071 #undef _
2072  },
2073  .format_trace = format_tcp_rx_trace_short,
2074 };
2075 /* *INDENT-ON* */
2076 
2077 /* *INDENT-OFF* */
2078 VLIB_REGISTER_NODE (tcp6_syn_sent_node) =
2079 {
2080  .name = "tcp6-syn-sent",
2081  /* Takes a vector of packets. */
2082  .vector_size = sizeof (u32),
2083  .n_errors = TCP_N_ERROR,
2084  .error_strings = tcp_error_strings,
2085  .n_next_nodes = TCP_SYN_SENT_N_NEXT,
2086  .next_nodes =
2087  {
2088 #define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
2089     foreach_tcp_state_next
2090 #undef _
2091  },
2092  .format_trace = format_tcp_rx_trace_short,
2093 };
2094 /* *INDENT-ON* */
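/*
 * Editor's note -- not part of the original file. Summary of the SYN-SENT
 * handling implemented by tcp46_syn_sent_inline and dispatched by the two
 * node registrations above:
 *
 *   SYN-SENT --rcv SYN+ACK--> ESTABLISHED  (RTT seeded from the SYN-ACK
 *                                           sample, ACK sent or data consumed)
 *   SYN-SENT --rcv SYN------> SYN-RCVD     (simultaneous open, SYN-ACK sent)
 *
 * In both branches session_stream_connect_notify() is invoked; if the session
 * layer cannot allocate a session the connection is reset and cleaned up.
 */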
2095 
2096 /**
2097  * Handles reception for all states except LISTEN, SYN-SENT and ESTABLISHED
2098  * as per RFC793 p. 64
2099  */
2100 always_inline uword
2101 tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2102 			  vlib_frame_t * from_frame, int is_ip4)
2103 {
2104  u32 thread_index = vm->thread_index, errors = 0, *first_buffer;
2105  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
2106  u32 n_left_from, *from, max_dequeue;
2107 
2108  from = first_buffer = vlib_frame_vector_args (from_frame);
2109  n_left_from = from_frame->n_vectors;
2110 
2111  while (n_left_from > 0)
2112  {
2113  u32 bi0, error0 = TCP_ERROR_NONE;
2114  tcp_header_t *tcp0 = 0;
2115  tcp_connection_t *tc0;
2116  vlib_buffer_t *b0;
2117  u8 is_fin0;
2118 
2119  bi0 = from[0];
2120  from += 1;
2121  n_left_from -= 1;
2122 
2123  b0 = vlib_get_buffer (vm, bi0);
2124  tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
2125  thread_index);
2126  if (PREDICT_FALSE (tc0 == 0))
2127  {
2128  error0 = TCP_ERROR_INVALID_CONNECTION;
2129  goto drop;
2130  }
2131 
2132  tcp0 = tcp_buffer_hdr (b0);
2133  is_fin0 = tcp_is_fin (tcp0);
2134 
2135  if (CLIB_DEBUG)
2136  {
2137  if (!(tc0->connection.flags & TRANSPORT_CONNECTION_F_NO_LOOKUP))
2138  {
2139  tcp_connection_t *tmp;
2140  tmp = tcp_lookup_connection (tc0->c_fib_index, b0, thread_index,
2141  is_ip4);
2142  if (tmp->state != tc0->state)
2143  {
2144  if (tc0->state != TCP_STATE_CLOSED)
2145  clib_warning ("state changed");
2146  goto drop;
2147  }
2148  }
2149  }
2150 
2151  /*
2152  * Special treatment for CLOSED
2153  */
2154  if (PREDICT_FALSE (tc0->state == TCP_STATE_CLOSED))
2155  {
2156  error0 = TCP_ERROR_CONNECTION_CLOSED;
2157  goto drop;
2158  }
2159 
2160  /*
2161  * For all other states (except LISTEN)
2162  */
2163 
2164  /* 1-4: check SEQ, RST, SYN */
2165  if (PREDICT_FALSE (tcp_segment_validate (wrk, tc0, b0, tcp0, &error0)))
2166  goto drop;
2167 
2168  /* 5: check the ACK field */
2169  switch (tc0->state)
2170  {
2171  case TCP_STATE_SYN_RCVD:
2172 
2173  /* Make sure the segment is exactly right */
2174  if (tc0->rcv_nxt != vnet_buffer (b0)->tcp.seq_number || is_fin0)
2175  {
2176  tcp_send_reset_w_pkt (tc0, b0, thread_index, is_ip4);
2177  error0 = TCP_ERROR_SEGMENT_INVALID;
2178  goto drop;
2179  }
2180 
2181  /*
2182  * If the segment acknowledgment is not acceptable, form a
2183  * reset segment,
2184  * <SEQ=SEG.ACK><CTL=RST>
2185  * and send it.
2186  */
2187  if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
2188  {
2189  tcp_send_reset_w_pkt (tc0, b0, thread_index, is_ip4);
2190  error0 = TCP_ERROR_SEGMENT_INVALID;
2191  goto drop;
2192  }
2193 
2194  /* Update rtt and rto */
2195 	  tcp_estimate_initial_rtt (tc0);
2196 	  tcp_connection_tx_pacer_update (tc0);
2197 
2198  /* Switch state to ESTABLISHED */
2199  tc0->state = TCP_STATE_ESTABLISHED;
2200  TCP_EVT (TCP_EVT_STATE_CHANGE, tc0);
2201 
2202  if (!(tc0->cfg_flags & TCP_CFG_F_NO_TSO))
2203  tcp_check_tx_offload (tc0, is_ip4);
2204 
2205  /* Initialize session variables */
2206  tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
2207  tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
2208  << tc0->rcv_opts.wscale;
2209  tc0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
2210  tc0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;
2211 
2212  /* Reset SYN-ACK retransmit and SYN_RCV establish timers */
2213 	  tcp_retransmit_timer_reset (&wrk->timer_wheel, tc0);
2214 	  if (session_stream_accept_notify (&tc0->connection))
2215  {
2216  error0 = TCP_ERROR_MSG_QUEUE_FULL;
2217  tcp_send_reset (tc0);
2218  session_transport_delete_notify (&tc0->connection);
2219  tcp_connection_cleanup (tc0);
2220  goto drop;
2221  }
2222  error0 = TCP_ERROR_ACK_OK;
2223  break;
2224  case TCP_STATE_ESTABLISHED:
2225  /* We can get packets in established state here because they
2226  * were enqueued before state change */
2227  if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
2228  goto drop;
2229 
2230  break;
2231  case TCP_STATE_FIN_WAIT_1:
2232  /* In addition to the processing for the ESTABLISHED state, if
2233  * our FIN is now acknowledged then enter FIN-WAIT-2 and
2234  * continue processing in that state. */
2235  if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
2236  goto drop;
2237 
2238  /* Still have to send the FIN */
2239  if (tc0->flags & TCP_CONN_FINPNDG)
2240  {
2241  /* TX fifo finally drained */
2242  max_dequeue = transport_max_tx_dequeue (&tc0->connection);
2243  if (max_dequeue <= tc0->burst_acked)
2244  tcp_send_fin (tc0);
2245 	      /* If a FIN was received and data was acked, extend the wait */
2246  else if ((tc0->flags & TCP_CONN_FINRCVD) && tc0->bytes_acked)
2247  tcp_timer_update (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2248  tcp_cfg.closewait_time);
2249  }
2250  /* If FIN is ACKed */
2251  else if (tc0->snd_una == tc0->snd_nxt)
2252  {
2253  /* Stop all retransmit timers because we have nothing more
2254  * to send. */
2255 	      tcp_connection_timers_reset (tc0);
2256 
2257  /* We already have a FIN but didn't transition to CLOSING
2258  * because of outstanding tx data. Close the connection. */
2259  if (tc0->flags & TCP_CONN_FINRCVD)
2260  {
2261  tcp_connection_set_state (tc0, TCP_STATE_CLOSED);
2262  session_transport_closed_notify (&tc0->connection);
2263  tcp_program_cleanup (wrk, tc0);
2264  goto drop;
2265  }
2266 
2267  tcp_connection_set_state (tc0, TCP_STATE_FIN_WAIT_2);
2268  /* Enable waitclose because we're willing to wait for peer's
2269  * FIN but not indefinitely. */
2270  tcp_timer_set (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2271  tcp_cfg.finwait2_time);
2272 
2273 	      /* Don't try to dequeue the acked FIN */
2274  if (tc0->burst_acked > 1)
2275  session_tx_fifo_dequeue_drop (&tc0->connection,
2276  tc0->burst_acked - 1);
2277  tc0->burst_acked = 0;
2278  }
2279  break;
2280  case TCP_STATE_FIN_WAIT_2:
2281  /* In addition to the processing for the ESTABLISHED state, if
2282  * the retransmission queue is empty, the user's CLOSE can be
2283  * acknowledged ("ok") but do not delete the TCB. */
2284  if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
2285  goto drop;
2286  tc0->burst_acked = 0;
2287  break;
2288  case TCP_STATE_CLOSE_WAIT:
2289  /* Do the same processing as for the ESTABLISHED state. */
2290  if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
2291  goto drop;
2292 
2293  if (!(tc0->flags & TCP_CONN_FINPNDG))
2294  break;
2295 
2296  /* Still have outstanding tx data */
2297  max_dequeue = transport_max_tx_dequeue (&tc0->connection);
2298  if (max_dequeue > tc0->burst_acked)
2299  break;
2300 
2301  tcp_send_fin (tc0);
2302 	  tcp_connection_timers_reset (tc0);
2303 	  tcp_connection_set_state (tc0, TCP_STATE_LAST_ACK);
2304  tcp_timer_set (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2305  tcp_cfg.lastack_time);
2306  break;
2307  case TCP_STATE_CLOSING:
2308  /* In addition to the processing for the ESTABLISHED state, if
2309  * the ACK acknowledges our FIN then enter the TIME-WAIT state,
2310  * otherwise ignore the segment. */
2311  if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
2312  goto drop;
2313 
2314  if (tc0->snd_una != tc0->snd_nxt)
2315  goto drop;
2316 
2317 	  tcp_connection_timers_reset (tc0);
2318 	  tcp_connection_set_state (tc0, TCP_STATE_TIME_WAIT);
2319  tcp_timer_set (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2320  tcp_cfg.timewait_time);
2321  session_transport_closed_notify (&tc0->connection);
2322  goto drop;
2323 
2324  break;
2325  case TCP_STATE_LAST_ACK:
2326  /* The only thing that [should] arrive in this state is an
2327  * acknowledgment of our FIN. If our FIN is now acknowledged,
2328  * delete the TCB, enter the CLOSED state, and return. */
2329 
2330  if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
2331  goto drop;
2332 
2333  /* Apparently our ACK for the peer's FIN was lost */
2334  if (is_fin0 && tc0->snd_una != tc0->snd_nxt)
2335  {
2336  tcp_send_fin (tc0);
2337  goto drop;
2338  }
2339 
2340  tcp_connection_set_state (tc0, TCP_STATE_CLOSED);
2341  session_transport_closed_notify (&tc0->connection);
2342 
2343  /* Don't free the connection from the data path since
2344  * we can't ensure that we have no packets already enqueued
2345  * to output. Rely instead on the waitclose timer */
2346 	  tcp_connection_timers_reset (tc0);
2347 	  tcp_program_cleanup (tcp_get_worker (tc0->c_thread_index), tc0);
2348 
2349  goto drop;
2350 
2351  break;
2352  case TCP_STATE_TIME_WAIT:
2353  /* The only thing that can arrive in this state is a
2354  * retransmission of the remote FIN. Acknowledge it, and restart
2355  * the 2 MSL timeout. */
2356 
2357  if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
2358  goto drop;
2359 
2360  if (!is_fin0)
2361  goto drop;
2362 
2363  tcp_program_ack (tc0);
2364  tcp_timer_update (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2365  tcp_cfg.timewait_time);
2366  goto drop;
2367 
2368  break;
2369  default:
2370  ASSERT (0);
2371  }
2372 
2373  /* 6: check the URG bit TODO */
2374 
2375  /* 7: process the segment text */
2376  switch (tc0->state)
2377  {
2378  case TCP_STATE_ESTABLISHED:
2379  case TCP_STATE_FIN_WAIT_1:
2380  case TCP_STATE_FIN_WAIT_2:
2381  if (vnet_buffer (b0)->tcp.data_len)
2382  error0 = tcp_segment_rcv (wrk, tc0, b0);
2383 	  /* Don't accept out-of-order FINs */
2384  if (vnet_buffer (b0)->tcp.seq_end != tc0->rcv_nxt)
2385  goto drop;
2386  break;
2387  case TCP_STATE_CLOSE_WAIT:
2388  case TCP_STATE_CLOSING:
2389  case TCP_STATE_LAST_ACK:
2390  case TCP_STATE_TIME_WAIT:
2391  /* This should not occur, since a FIN has been received from the
2392  * remote side. Ignore the segment text. */
2393  break;
2394  }
2395 
2396  /* 8: check the FIN bit */
2397  if (!is_fin0)
2398  goto drop;
2399 
2400  TCP_EVT (TCP_EVT_FIN_RCVD, tc0);
2401 
2402  switch (tc0->state)
2403  {
2404  case TCP_STATE_ESTABLISHED:
2405  /* Account for the FIN and send ack */
2406  tc0->rcv_nxt += 1;
2407  tcp_program_ack (tc0);
2408  tcp_connection_set_state (tc0, TCP_STATE_CLOSE_WAIT);
2409  tcp_program_disconnect (wrk, tc0);
2410  tcp_timer_update (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2411  tcp_cfg.closewait_time);
2412  break;
2413  case TCP_STATE_SYN_RCVD:
2414  /* Send FIN-ACK, enter LAST-ACK and because the app was not
2415  * notified yet, set a cleanup timer instead of relying on
2416  * disconnect notify and the implicit close call. */
2417 	  tcp_connection_timers_reset (tc0);
2418 	  tc0->rcv_nxt += 1;
2419  tcp_send_fin (tc0);
2420  tcp_connection_set_state (tc0, TCP_STATE_LAST_ACK);
2421  tcp_timer_set (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2422  tcp_cfg.lastack_time);
2423  break;
2424  case TCP_STATE_CLOSE_WAIT:
2425  case TCP_STATE_CLOSING:
2426  case TCP_STATE_LAST_ACK:
2427  /* move along .. */
2428  break;
2429  case TCP_STATE_FIN_WAIT_1:
2430  tc0->rcv_nxt += 1;
2431 
2432  if (tc0->flags & TCP_CONN_FINPNDG)
2433  {
2434  /* If data is outstanding, stay in FIN_WAIT_1 and try to finish
2435  * sending it. Since we already received a fin, do not wait
2436  * for too long. */
2437  tc0->flags |= TCP_CONN_FINRCVD;
2438  tcp_timer_update (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2439  tcp_cfg.closewait_time);
2440  }
2441  else
2442  {
2443  tcp_connection_set_state (tc0, TCP_STATE_CLOSING);
2444  tcp_program_ack (tc0);
2445  /* Wait for ACK for our FIN but not forever */
2446  tcp_timer_update (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2447  tcp_cfg.closing_time);
2448  }
2449  break;
2450  case TCP_STATE_FIN_WAIT_2:
2451  /* Got FIN, send ACK! Be more aggressive with resource cleanup */
2452  tc0->rcv_nxt += 1;
2453  tcp_connection_set_state (tc0, TCP_STATE_TIME_WAIT);
2454 	  tcp_connection_timers_reset (tc0);
2455 	  tcp_timer_set (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2456  tcp_cfg.timewait_time);
2457  tcp_program_ack (tc0);
2458  session_transport_closed_notify (&tc0->connection);
2459  break;
2460  case TCP_STATE_TIME_WAIT:
2461  /* Remain in the TIME-WAIT state. Restart the time-wait
2462  * timeout.
2463  */
2464  tcp_timer_update (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2465  tcp_cfg.timewait_time);
2466  break;
2467  }
2468  error0 = TCP_ERROR_FIN_RCVD;
2469 
2470  drop:
2471 
2472  tcp_inc_counter (rcv_process, error0, 1);
2473  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2474  {
2475  tcp_rx_trace_t *t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
2476  tcp_set_rx_trace_data (t0, tc0, tcp0, b0, is_ip4);
2477  }
2478  }
2479 
2480  errors = session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP,
2481  thread_index);
2482  tcp_inc_counter (rcv_process, TCP_ERROR_MSG_QUEUE_FULL, errors);
2483   tcp_handle_postponed_dequeues (wrk);
2484   tcp_handle_disconnects (wrk);
2485  vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);
2486 
2487  return from_frame->n_vectors;
2488 }
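/*
 * Editor's note -- not part of the original file. State transitions driven by
 * a received FIN in the switch above:
 *
 *   ESTABLISHED -> CLOSE-WAIT  (ACK programmed, app disconnect scheduled)
 *   SYN-RCVD    -> LAST-ACK    (FIN-ACK sent, cleanup via waitclose timer)
 *   FIN-WAIT-1  -> CLOSING     (or stay in FIN-WAIT-1 while tx data drains)
 *   FIN-WAIT-2  -> TIME-WAIT   (ACK programmed, timewait timer armed)
 *   TIME-WAIT   -> TIME-WAIT   (2MSL waitclose timer restarted)
 */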
2489 
2490 VLIB_NODE_FN (tcp4_rcv_process_node) (vlib_main_t * vm,
2491 				      vlib_node_runtime_t * node,
2492 				      vlib_frame_t * from_frame)
2493 {
2494  return tcp46_rcv_process_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2495 }
2496 
2497 VLIB_NODE_FN (tcp6_rcv_process_node) (vlib_main_t * vm,
2498 				      vlib_node_runtime_t * node,
2499 				      vlib_frame_t * from_frame)
2500 {
2501  return tcp46_rcv_process_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2502 }
2503 
2504 /* *INDENT-OFF* */
2505 VLIB_REGISTER_NODE (tcp4_rcv_process_node) =
2506 {
2507  .name = "tcp4-rcv-process",
2508  /* Takes a vector of packets. */
2509  .vector_size = sizeof (u32),
2510  .n_errors = TCP_N_ERROR,
2511  .error_strings = tcp_error_strings,
2512  .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
2513  .next_nodes =
2514  {
2515 #define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
2516     foreach_tcp_state_next
2517 #undef _
2518  },
2519  .format_trace = format_tcp_rx_trace_short,
2520 };
2521 /* *INDENT-ON* */
2522 
2523 /* *INDENT-OFF* */
2524 VLIB_REGISTER_NODE (tcp6_rcv_process_node) =
2525 {
2526  .name = "tcp6-rcv-process",
2527  /* Takes a vector of packets. */
2528  .vector_size = sizeof (u32),
2529  .n_errors = TCP_N_ERROR,
2530  .error_strings = tcp_error_strings,
2531  .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
2532  .next_nodes =
2533  {
2534 #define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
2535     foreach_tcp_state_next
2536 #undef _
2537  },
2538  .format_trace = format_tcp_rx_trace_short,
2539 };
2540 /* *INDENT-ON* */
2541 
2542 /**
2543  * LISTEN state processing as per RFC 793 p. 65
2544  */
2545 always_inline uword
2546 tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2547 		     vlib_frame_t * from_frame, int is_ip4)
2548 {
2549  u32 n_left_from, *from, n_syns = 0, *first_buffer;
2550  u32 thread_index = vm->thread_index;
2551 
2552  from = first_buffer = vlib_frame_vector_args (from_frame);
2553  n_left_from = from_frame->n_vectors;
2554 
2555  while (n_left_from > 0)
2556  {
2557  u32 bi, error = TCP_ERROR_NONE;
2558  tcp_connection_t *lc, *child;
2559  vlib_buffer_t *b;
2560 
2561  bi = from[0];
2562  from += 1;
2563  n_left_from -= 1;
2564 
2565  b = vlib_get_buffer (vm, bi);
2566 
2567  lc = tcp_listener_get (vnet_buffer (b)->tcp.connection_index);
2568  if (PREDICT_FALSE (lc == 0))
2569  {
2570  tcp_connection_t *tc;
2571  tc = tcp_connection_get (vnet_buffer (b)->tcp.connection_index,
2572  thread_index);
2573  if (tc->state != TCP_STATE_TIME_WAIT)
2574  {
2575  error = TCP_ERROR_CREATE_EXISTS;
2576  goto done;
2577  }
2578  lc = tcp_lookup_listener (b, tc->c_fib_index, is_ip4);
2579  /* clean up the old session */
2580  tcp_connection_del (tc);
2581  /* listener was cleaned up */
2582  if (!lc)
2583  {
2584  error = TCP_ERROR_NO_LISTENER;
2585  goto done;
2586  }
2587  }
2588 
2589  /* Make sure connection wasn't just created */
2590  child = tcp_lookup_connection (lc->c_fib_index, b, thread_index,
2591  is_ip4);
2592  if (PREDICT_FALSE (child->state != TCP_STATE_LISTEN))
2593  {
2594  error = TCP_ERROR_CREATE_EXISTS;
2595  goto done;
2596  }
2597 
2598  /* Create child session. For syn-flood protection use filter */
2599 
2600  /* 1. first check for an RST: handled in dispatch */
2601  /* if (tcp_rst (th0))
2602  goto drop;
2603  */
2604 
2605  /* 2. second check for an ACK: handled in dispatch */
2606  /* if (tcp_ack (th0))
2607  {
2608  tcp_send_reset (b0, is_ip4);
2609  goto drop;
2610  }
2611  */
2612 
2613  /* 3. check for a SYN (did that already) */
2614 
2615  /* Create child session and send SYN-ACK */
2616  child = tcp_connection_alloc (thread_index);
2617 
2618  if (tcp_options_parse (tcp_buffer_hdr (b), &child->rcv_opts, 1))
2619  {
2620  error = TCP_ERROR_OPTIONS;
2621  tcp_connection_free (child);
2622  goto done;
2623  }
2624 
2625  tcp_init_w_buffer (child, b, is_ip4);
2626 
2627  child->state = TCP_STATE_SYN_RCVD;
2628  child->c_fib_index = lc->c_fib_index;
2629  child->cc_algo = lc->cc_algo;
2630  tcp_connection_init_vars (child);
2631  child->rto = TCP_RTO_MIN;
2632 
2633  /*
2634  * This initializes elog track, must be done before synack.
2635  * We also do it before possible tcp_connection_cleanup() as it
2636  * generates TCP_EVT_DELETE event.
2637  */
2638  TCP_EVT (TCP_EVT_SYN_RCVD, child, 1);
2639 
2640  if (session_stream_accept (&child->connection, lc->c_s_index,
2641  lc->c_thread_index, 0 /* notify */ ))
2642  {
2643  tcp_connection_cleanup (child);
2644  error = TCP_ERROR_CREATE_SESSION_FAIL;
2645  goto done;
2646  }
2647 
2648  transport_fifos_init_ooo (&child->connection);
2649  child->tx_fifo_size = transport_tx_fifo_size (&child->connection);
2650 
2651  tcp_send_synack (child);
2652 
2653  done:
2654 
2655  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
2656  {
2657  tcp_rx_trace_t *t = vlib_add_trace (vm, node, b, sizeof (*t));
2658  tcp_set_rx_trace_data (t, lc, tcp_buffer_hdr (b), b, is_ip4);
2659  }
2660 
2661  n_syns += (error == TCP_ERROR_NONE);
2662  }
2663 
2664  tcp_inc_counter (listen, TCP_ERROR_SYNS_RCVD, n_syns);
2665  vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);
2666 
2667  return from_frame->n_vectors;
2668 }
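/*
 * Editor's note -- not part of the original file. The LISTEN path above
 * performs, in order: listener lookup (with reuse of a TIME-WAIT connection
 * if one still occupies the tuple), duplicate-child detection, child
 * connection allocation, SYN option parsing, initialization from the buffer,
 * session_stream_accept() towards the application and finally
 * tcp_send_synack(). An error at any step frees the child and is accounted
 * in the per-node counters.
 */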
2669 
2670 VLIB_NODE_FN (tcp4_listen_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2671 				 vlib_frame_t * from_frame)
2672 {
2673  return tcp46_listen_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2674 }
2675 
2676 VLIB_NODE_FN (tcp6_listen_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2677 				 vlib_frame_t * from_frame)
2678 {
2679  return tcp46_listen_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2680 }
2681 
2682 /* *INDENT-OFF* */
2683 VLIB_REGISTER_NODE (tcp4_listen_node) =
2684 {
2685  .name = "tcp4-listen",
2686  /* Takes a vector of packets. */
2687  .vector_size = sizeof (u32),
2688  .n_errors = TCP_N_ERROR,
2689  .error_strings = tcp_error_strings,
2690  .n_next_nodes = TCP_LISTEN_N_NEXT,
2691  .next_nodes =
2692  {
2693 #define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
2694     foreach_tcp_state_next
2695 #undef _
2696  },
2697  .format_trace = format_tcp_rx_trace_short,
2698 };
2699 /* *INDENT-ON* */
2700 
2701 /* *INDENT-OFF* */
2702 VLIB_REGISTER_NODE (tcp6_listen_node) =
2703 {
2704  .name = "tcp6-listen",
2705  /* Takes a vector of packets. */
2706  .vector_size = sizeof (u32),
2707  .n_errors = TCP_N_ERROR,
2708  .error_strings = tcp_error_strings,
2709  .n_next_nodes = TCP_LISTEN_N_NEXT,
2710  .next_nodes =
2711  {
2712 #define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
2713     foreach_tcp_state_next
2714 #undef _
2715  },
2716  .format_trace = format_tcp_rx_trace_short,
2717 };
2718 /* *INDENT-ON* */
2719 
2720 typedef enum _tcp_input_next
2721 {
2722   TCP_INPUT_NEXT_DROP,
2723   TCP_INPUT_NEXT_LISTEN,
2724   TCP_INPUT_NEXT_RCV_PROCESS,
2725   TCP_INPUT_NEXT_SYN_SENT,
2726   TCP_INPUT_NEXT_ESTABLISHED,
2727   TCP_INPUT_NEXT_RESET,
2728   TCP_INPUT_NEXT_PUNT,
2729   TCP_INPUT_N_NEXT
2730 } tcp_input_next_t;
2731 
2732 #define foreach_tcp4_input_next \
2733  _ (DROP, "ip4-drop") \
2734  _ (LISTEN, "tcp4-listen") \
2735  _ (RCV_PROCESS, "tcp4-rcv-process") \
2736  _ (SYN_SENT, "tcp4-syn-sent") \
2737  _ (ESTABLISHED, "tcp4-established") \
2738  _ (RESET, "tcp4-reset") \
2739  _ (PUNT, "ip4-punt")
2740 
2741 #define foreach_tcp6_input_next \
2742  _ (DROP, "ip6-drop") \
2743  _ (LISTEN, "tcp6-listen") \
2744  _ (RCV_PROCESS, "tcp6-rcv-process") \
2745  _ (SYN_SENT, "tcp6-syn-sent") \
2746  _ (ESTABLISHED, "tcp6-established") \
2747  _ (RESET, "tcp6-reset") \
2748  _ (PUNT, "ip6-punt")
2749 
2750 #define filter_flags (TCP_FLAG_SYN|TCP_FLAG_ACK|TCP_FLAG_RST|TCP_FLAG_FIN)
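/*
 * Editor's note -- not part of the original file. Only the SYN, ACK, RST and
 * FIN bits take part in the dispatch table lookup; e.g. a segment carrying
 * PSH|ACK is masked down to ACK before indexing
 * tm->dispatch_table[tc->state][flags] in tcp_input_dispatch_buffer() below.
 */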
2751 
2752 static void
2753 tcp_input_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
2754 		       vlib_buffer_t ** bs, u32 n_bufs, u8 is_ip4)
2755 {
2756  tcp_connection_t *tc;
2757  tcp_header_t *tcp;
2758  tcp_rx_trace_t *t;
2759  int i;
2760 
2761  for (i = 0; i < n_bufs; i++)
2762  {
2763  if (bs[i]->flags & VLIB_BUFFER_IS_TRACED)
2764  {
2765  t = vlib_add_trace (vm, node, bs[i], sizeof (*t));
2766  tc = tcp_connection_get (vnet_buffer (bs[i])->tcp.connection_index,
2767  vm->thread_index);
2768  tcp = vlib_buffer_get_current (bs[i]);
2769  tcp_set_rx_trace_data (t, tc, tcp, bs[i], is_ip4);
2770  }
2771  }
2772 }
2773 
2774 static void
2775 tcp_input_set_error_next (tcp_main_t * tm, u16 * next, u32 * error, u8 is_ip4)
2776 {
2777  if (*error == TCP_ERROR_FILTERED || *error == TCP_ERROR_WRONG_THREAD)
2778  {
2779  *next = TCP_INPUT_NEXT_DROP;
2780  }
2781  else if ((is_ip4 && tm->punt_unknown4) || (!is_ip4 && tm->punt_unknown6))
2782  {
2783  *next = TCP_INPUT_NEXT_PUNT;
2784  *error = TCP_ERROR_PUNT;
2785  }
2786  else
2787  {
2788  *next = TCP_INPUT_NEXT_RESET;
2789  *error = TCP_ERROR_NO_LISTENER;
2790  }
2791 }
2792 
2793 static inline void
2794 tcp_input_dispatch_buffer (tcp_main_t * tm, tcp_connection_t * tc,
2795 			   vlib_buffer_t * b, u16 * next,
2796  vlib_node_runtime_t * error_node)
2797 {
2798  tcp_header_t *tcp;
2799  u32 error;
2800  u8 flags;
2801 
2802  tcp = tcp_buffer_hdr (b);
2803  flags = tcp->flags & filter_flags;
2804  *next = tm->dispatch_table[tc->state][flags].next;
2805  error = tm->dispatch_table[tc->state][flags].error;
2806  tc->segs_in += 1;
2807 
2808  if (PREDICT_FALSE (error != TCP_ERROR_NONE))
2809  {
2810  b->error = error_node->errors[error];
2811  if (error == TCP_ERROR_DISPATCH)
2812  clib_warning ("tcp conn %u disp error state %U flags %U",
2813  tc->c_c_index, format_tcp_state, tc->state,
2814  format_tcp_flags, (int) flags);
2815  }
2816 }
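/*
 * Editor's note -- illustrative sketch, not part of the original file. For an
 * ESTABLISHED connection receiving a pure ACK, the lookup above resolves to
 *
 *   next  = tm->dispatch_table[TCP_STATE_ESTABLISHED][TCP_FLAG_ACK].next;
 *            -> TCP_INPUT_NEXT_ESTABLISHED ("tcp4-established"/"tcp6-established")
 *   error = tm->dispatch_table[TCP_STATE_ESTABLISHED][TCP_FLAG_ACK].error;
 *            -> TCP_ERROR_NONE
 *
 * as programmed by tcp_dispatch_table_init() at the end of this file.
 */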
2817 
2818 always_inline uword
2819 tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2820 		    vlib_frame_t * frame, int is_ip4, u8 is_nolookup)
2821 {
2822  u32 n_left_from, *from, thread_index = vm->thread_index;
2823  tcp_main_t *tm = vnet_get_tcp_main ();
2824  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
2825  u16 nexts[VLIB_FRAME_SIZE], *next;
2826 
2827  tcp_set_time_now (tcp_get_worker (thread_index));
2828 
2829  from = vlib_frame_vector_args (frame);
2830  n_left_from = frame->n_vectors;
2831  vlib_get_buffers (vm, from, bufs, n_left_from);
2832 
2833  b = bufs;
2834  next = nexts;
2835 
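  /* Editor's note -- not part of the original file: the quad loop below
   * handles buffers two at a time while prefetching the headers and first
   * cache lines of b[2]/b[3]; the single-buffer loop that follows drains
   * whatever is left of the frame. */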
2836  while (n_left_from >= 4)
2837  {
2838  u32 error0 = TCP_ERROR_NO_LISTENER, error1 = TCP_ERROR_NO_LISTENER;
2839  tcp_connection_t *tc0, *tc1;
2840 
2841  {
2842  vlib_prefetch_buffer_header (b[2], STORE);
2843  CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
2844 
2845  vlib_prefetch_buffer_header (b[3], STORE);
2846  CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
2847  }
2848 
2849  next[0] = next[1] = TCP_INPUT_NEXT_DROP;
2850 
2851  tc0 = tcp_input_lookup_buffer (b[0], thread_index, &error0, is_ip4,
2852  is_nolookup);
2853  tc1 = tcp_input_lookup_buffer (b[1], thread_index, &error1, is_ip4,
2854  is_nolookup);
2855 
2856  if (PREDICT_TRUE (!tc0 + !tc1 == 0))
2857  {
2858  ASSERT (tcp_lookup_is_valid (tc0, b[0], tcp_buffer_hdr (b[0])));
2859  ASSERT (tcp_lookup_is_valid (tc1, b[1], tcp_buffer_hdr (b[1])));
2860 
2861  vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
2862  vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;
2863 
2864  tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], node);
2865  tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], node);
2866  }
2867  else
2868  {
2869  if (PREDICT_TRUE (tc0 != 0))
2870  {
2871  ASSERT (tcp_lookup_is_valid (tc0, b[0], tcp_buffer_hdr (b[0])));
2872  vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
2873  tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], node);
2874  }
2875  else
2876  {
2877  tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);
2878  b[0]->error = node->errors[error0];
2879  }
2880 
2881  if (PREDICT_TRUE (tc1 != 0))
2882  {
2883  ASSERT (tcp_lookup_is_valid (tc1, b[1], tcp_buffer_hdr (b[1])));
2884  vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;
2885  tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], node);
2886  }
2887  else
2888  {
2889  tcp_input_set_error_next (tm, &next[1], &error1, is_ip4);
2890  b[1]->error = node->errors[error1];
2891  }
2892  }
2893 
2894  b += 2;
2895  next += 2;
2896  n_left_from -= 2;
2897  }
2898  while (n_left_from > 0)
2899  {
2900  tcp_connection_t *tc0;
2901  u32 error0 = TCP_ERROR_NO_LISTENER;
2902 
2903  if (n_left_from > 1)
2904  {
2905  vlib_prefetch_buffer_header (b[1], STORE);
2906  CLIB_PREFETCH (b[1]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
2907  }
2908 
2909  next[0] = TCP_INPUT_NEXT_DROP;
2910  tc0 = tcp_input_lookup_buffer (b[0], thread_index, &error0, is_ip4,
2911  is_nolookup);
2912  if (PREDICT_TRUE (tc0 != 0))
2913  {
2914  ASSERT (tcp_lookup_is_valid (tc0, b[0], tcp_buffer_hdr (b[0])));
2915  vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
2916  tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], node);
2917  }
2918  else
2919  {
2920  tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);
2921  b[0]->error = node->errors[error0];
2922  }
2923 
2924  b += 1;
2925  next += 1;
2926  n_left_from -= 1;
2927  }
2928 
2929   if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
2930     tcp_input_trace_frame (vm, node, bufs, frame->n_vectors, is_ip4);
2931 
2932  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
2933  return frame->n_vectors;
2934 }
2935 
2936 VLIB_NODE_FN (tcp4_input_nolookup_node) (vlib_main_t * vm,
2937 					 vlib_node_runtime_t * node,
2938 					 vlib_frame_t * from_frame)
2939 {
2940  return tcp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ ,
2941  1 /* is_nolookup */ );
2942 }
2943 
2944 VLIB_NODE_FN (tcp6_input_nolookup_node) (vlib_main_t * vm,
2945 					 vlib_node_runtime_t * node,
2946 					 vlib_frame_t * from_frame)
2947 {
2948  return tcp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ ,
2949  1 /* is_nolookup */ );
2950 }
2951 
2952 /* *INDENT-OFF* */
2953 VLIB_REGISTER_NODE (tcp4_input_nolookup_node) =
2954 {
2955  .name = "tcp4-input-nolookup",
2956  /* Takes a vector of packets. */
2957  .vector_size = sizeof (u32),
2958  .n_errors = TCP_N_ERROR,
2959  .error_strings = tcp_error_strings,
2960  .n_next_nodes = TCP_INPUT_N_NEXT,
2961  .next_nodes =
2962  {
2963 #define _(s,n) [TCP_INPUT_NEXT_##s] = n,
2964     foreach_tcp4_input_next
2965 #undef _
2966  },
2967  .format_buffer = format_tcp_header,
2968  .format_trace = format_tcp_rx_trace,
2969 };
2970 /* *INDENT-ON* */
2971 
2972 /* *INDENT-OFF* */
2973 VLIB_REGISTER_NODE (tcp6_input_nolookup_node) =
2974 {
2975  .name = "tcp6-input-nolookup",
2976  /* Takes a vector of packets. */
2977  .vector_size = sizeof (u32),
2978  .n_errors = TCP_N_ERROR,
2979  .error_strings = tcp_error_strings,
2980  .n_next_nodes = TCP_INPUT_N_NEXT,
2981  .next_nodes =
2982  {
2983 #define _(s,n) [TCP_INPUT_NEXT_##s] = n,
2984     foreach_tcp6_input_next
2985 #undef _
2986  },
2987  .format_buffer = format_tcp_header,
2988  .format_trace = format_tcp_rx_trace,
2989 };
2990 /* *INDENT-ON* */
2991 
2992 VLIB_NODE_FN (tcp4_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2993 				vlib_frame_t * from_frame)
2994 {
2995  return tcp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ ,
2996  0 /* is_nolookup */ );
2997 }
2998 
2999 VLIB_NODE_FN (tcp6_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
3000 				vlib_frame_t * from_frame)
3001 {
3002  return tcp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ ,
3003  0 /* is_nolookup */ );
3004 }
3005 
3006 /* *INDENT-OFF* */
3007 VLIB_REGISTER_NODE (tcp4_input_node) =
3008 {
3009  .name = "tcp4-input",
3010  /* Takes a vector of packets. */
3011  .vector_size = sizeof (u32),
3012  .n_errors = TCP_N_ERROR,
3013  .error_strings = tcp_error_strings,
3014  .n_next_nodes = TCP_INPUT_N_NEXT,
3015  .next_nodes =
3016  {
3017 #define _(s,n) [TCP_INPUT_NEXT_##s] = n,
3018     foreach_tcp4_input_next
3019 #undef _
3020  },
3021  .format_buffer = format_tcp_header,
3022  .format_trace = format_tcp_rx_trace,
3023 };
3024 /* *INDENT-ON* */
3025 
3026 /* *INDENT-OFF* */
3027 VLIB_REGISTER_NODE (tcp6_input_node) =
3028 {
3029  .name = "tcp6-input",
3030  /* Takes a vector of packets. */
3031  .vector_size = sizeof (u32),
3032  .n_errors = TCP_N_ERROR,
3033  .error_strings = tcp_error_strings,
3034  .n_next_nodes = TCP_INPUT_N_NEXT,
3035  .next_nodes =
3036  {
3037 #define _(s,n) [TCP_INPUT_NEXT_##s] = n,
3038     foreach_tcp6_input_next
3039 #undef _
3040  },
3041  .format_buffer = format_tcp_header,
3042  .format_trace = format_tcp_rx_trace,
3043 };
3044 /* *INDENT-ON* */
3045 
3046 #ifndef CLIB_MARCH_VARIANT
3047 static void
3048 tcp_dispatch_table_init (tcp_main_t * tm)
3049 {
3050  int i, j;
3051  for (i = 0; i < ARRAY_LEN (tm->dispatch_table); i++)
3052  for (j = 0; j < ARRAY_LEN (tm->dispatch_table[i]); j++)
3053  {
3054  tm->dispatch_table[i][j].next = TCP_INPUT_NEXT_DROP;
3055  tm->dispatch_table[i][j].error = TCP_ERROR_DISPATCH;
3056  }
3057 
3058 #define _(t,f,n,e) \
3059 do { \
3060  tm->dispatch_table[TCP_STATE_##t][f].next = (n); \
3061  tm->dispatch_table[TCP_STATE_##t][f].error = (e); \
3062 } while (0)
3063 
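/*
 * Editor's note -- illustrative expansion, not part of the original file.
 * Each _(state, flags, next, error) entry below expands to two assignments,
 * e.g. _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE) becomes:
 *
 *   tm->dispatch_table[TCP_STATE_LISTEN][TCP_FLAG_SYN].next = TCP_INPUT_NEXT_LISTEN;
 *   tm->dispatch_table[TCP_STATE_LISTEN][TCP_FLAG_SYN].error = TCP_ERROR_NONE;
 */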
3064  /* RFC 793: In LISTEN if RST drop and if ACK return RST */
3065  _(LISTEN, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3066  _(LISTEN, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_ACK_INVALID);
3067  _(LISTEN, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_INVALID_CONNECTION);
3068  _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE);
3070  TCP_ERROR_ACK_INVALID);
3072  TCP_ERROR_SEGMENT_INVALID);
3074  TCP_ERROR_SEGMENT_INVALID);
3076  TCP_ERROR_INVALID_CONNECTION);
3077  _(LISTEN, TCP_FLAG_FIN, TCP_INPUT_NEXT_RESET, TCP_ERROR_SEGMENT_INVALID);
3079  TCP_ERROR_SEGMENT_INVALID);
3081  TCP_ERROR_SEGMENT_INVALID);
3083  TCP_ERROR_SEGMENT_INVALID);
3085  TCP_ERROR_SEGMENT_INVALID);
3087  TCP_ERROR_SEGMENT_INVALID);
3089  TCP_ERROR_SEGMENT_INVALID);
3091  TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3092   /* ACK for a SYN-ACK -> tcp-rcv-process. */
3093  _(SYN_RCVD, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3094  _(SYN_RCVD, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3096  TCP_ERROR_NONE);
3097  _(SYN_RCVD, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3099  TCP_ERROR_NONE);
3101  TCP_ERROR_NONE);
3102  _(SYN_RCVD, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3103  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3104  _(SYN_RCVD, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3106  TCP_ERROR_NONE);
3108  TCP_ERROR_NONE);
3109  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
3110  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3112  TCP_ERROR_NONE);
3113  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
3114  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3115  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
3116  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3118  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3119  _(SYN_RCVD, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3120  /* SYN-ACK for a SYN */
3122  TCP_ERROR_NONE);
3123  _(SYN_SENT, TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
3124  _(SYN_SENT, TCP_FLAG_RST, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
3126  TCP_ERROR_NONE);
3127  _(SYN_SENT, TCP_FLAG_FIN, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
3129  TCP_ERROR_NONE);
3130   /* ACK for an established connection -> tcp-established. */
3131  _(ESTABLISHED, TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3132   /* FIN for an established connection -> tcp-established. */
3133  _(ESTABLISHED, TCP_FLAG_FIN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3135  TCP_ERROR_NONE);
3137  TCP_ERROR_NONE);
3138  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
3139  TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3141  TCP_ERROR_NONE);
3142  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
3143  TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3144  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
3145  TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3146  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3147  TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3148  _(ESTABLISHED, TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3150  TCP_ERROR_NONE);
3151  _(ESTABLISHED, TCP_FLAG_SYN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3153  TCP_ERROR_NONE);
3155  TCP_ERROR_NONE);
3156  _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3157  TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3158  _(ESTABLISHED, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3159  /* ACK or FIN-ACK to our FIN */
3160  _(FIN_WAIT_1, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3162  TCP_ERROR_NONE);
3163  /* FIN in reply to our FIN from the other side */
3164  _(FIN_WAIT_1, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3165  _(FIN_WAIT_1, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3167  TCP_ERROR_NONE);
3168  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
3169  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3170  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
3171  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3172  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3173  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3175  TCP_ERROR_NONE);
3176  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
3177  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3178  _(FIN_WAIT_1, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3180  TCP_ERROR_NONE);
3182  TCP_ERROR_NONE);
3183  _(FIN_WAIT_1, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3184  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3185  _(FIN_WAIT_1, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3187  TCP_ERROR_NONE);
3188  _(CLOSING, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3189  _(CLOSING, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3190  _(CLOSING, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3192  TCP_ERROR_NONE);
3194  TCP_ERROR_NONE);
3195  _(CLOSING, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3196  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3197  _(CLOSING, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3199  TCP_ERROR_NONE);
3200  _(CLOSING, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3202  TCP_ERROR_NONE);
3204  TCP_ERROR_NONE);
3205  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
3206  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3208  TCP_ERROR_NONE);
3209  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
3210  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3211  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
3212  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3214  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3215  /* FIN confirming that the peer (app) has closed */
3216  _(FIN_WAIT_2, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3217  _(FIN_WAIT_2, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3219  TCP_ERROR_NONE);
3220  _(FIN_WAIT_2, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3222  TCP_ERROR_NONE);
3223  _(FIN_WAIT_2, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3224  _(CLOSE_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3226  TCP_ERROR_NONE);
3227  _(CLOSE_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3229  TCP_ERROR_NONE);
3230  _(CLOSE_WAIT, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3231  _(LAST_ACK, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3232  _(LAST_ACK, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3233  _(LAST_ACK, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3235  TCP_ERROR_NONE);
3237  TCP_ERROR_NONE);
3238  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
3239  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3241  TCP_ERROR_NONE);
3242  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
3243  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3244  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
3245  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3247  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3248  _(LAST_ACK, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3250  TCP_ERROR_NONE);
3251  _(LAST_ACK, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3253  TCP_ERROR_NONE);
3255  TCP_ERROR_NONE);
3256  _(LAST_ACK, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3257  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3258  _(TIME_WAIT, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE);
3259  _(TIME_WAIT, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3261  TCP_ERROR_NONE);
3262  _(TIME_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3264  TCP_ERROR_NONE);
3265  _(TIME_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3266  /* RFC793 CLOSED: An incoming segment containing a RST is discarded. An
3267  * incoming segment not containing a RST causes a RST to be sent in
3268  * response.*/
3269  _(CLOSED, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED);
3271  TCP_ERROR_CONNECTION_CLOSED);
3272  _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_CONNECTION_CLOSED);
3273  _(CLOSED, TCP_FLAG_SYN, TCP_INPUT_NEXT_RESET, TCP_ERROR_CONNECTION_CLOSED);
3275  TCP_ERROR_CONNECTION_CLOSED);
3276 #undef _
3277 }
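/*
 * Editor's note -- not part of the original file. Flag combinations that are
 * not listed explicitly keep the defaults set by the loop at the top of
 * tcp_dispatch_table_init(): next = TCP_INPUT_NEXT_DROP and
 * error = TCP_ERROR_DISPATCH, which tcp_input_dispatch_buffer() reports with
 * a "disp error" warning.
 */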
3278 
3279 static clib_error_t *
3280 tcp_input_init (vlib_main_t * vm)
3281 {
3282  clib_error_t *error = 0;
3283  tcp_main_t *tm = vnet_get_tcp_main ();
3284 
3285  if ((error = vlib_call_init_function (vm, tcp_init)))
3286  return error;
3287 
3288  /* Initialize dispatch table. */
3289   tcp_dispatch_table_init (tm);
3290 
3291  return error;
3292 }
3293 
3294 VLIB_INIT_FUNCTION (tcp_input_init);
3295 
3296 #endif /* CLIB_MARCH_VARIANT */
3297 
3298 /*
3299  * fd.io coding-style-patch-verification: ON
3300  *
3301  * Local Variables:
3302  * eval: (c-set-style "gnu")
3303  * End:
3304  */
static void tcp_program_disconnect(tcp_worker_ctx_t *wrk, tcp_connection_t *tc)
Definition: tcp_input.c:1083
static int tcp_session_enqueue_ooo(tcp_connection_t *tc, vlib_buffer_t *b, u16 data_len)
Enqueue out-of-order data.
Definition: tcp_input.c:1201
static void tcp_update_timestamp(tcp_connection_t *tc, u32 seq, u32 seq_end)
Update tsval recent.
Definition: tcp_input.c:138
u16 lb_n_buckets
number of buckets in the load-balance.
Definition: load_balance.h:116
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:124
u32 connection_index
Index of the transport connection associated to the session.
void tcp_program_retransmit(tcp_connection_t *tc)
Definition: tcp_output.c:1065
#define TCP_TIMER_HANDLE_INVALID
Definition: tcp_types.h:79
#define clib_min(x, y)
Definition: clib.h:328
#define CLIB_UNUSED(x)
Definition: clib.h:87
u32 * pending_disconnects
vector of pending disconnect notifications
Definition: tcp.h:86
vlib_node_registration_t tcp6_rcv_process_node
(constructor) VLIB_REGISTER_NODE (tcp6_rcv_process_node)
Definition: tcp_input.c:2524
static u32 ip6_fib_table_fwding_lookup(u32 fib_index, const ip6_address_t *dst)
Definition: ip6_fib.h:115
static void tcp_persist_timer_set(tcp_timer_wheel_t *tw, tcp_connection_t *tc)
Definition: tcp_timer.h:69
static void tcp_rcv_fin(tcp_worker_ctx_t *wrk, tcp_connection_t *tc, vlib_buffer_t *b, u32 *error)
Definition: tcp_input.c:1127
static u32 tcp_time_now(void)
Definition: tcp_inlines.h:191
vl_api_wireguard_peer_flags_t flags
Definition: wireguard.api:105
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers Frees the entire buffer chain for each buffer.
Definition: buffer_funcs.h:937
static tcp_connection_t * tcp_connection_get(u32 conn_index, u32 thread_index)
Definition: tcp_inlines.h:30
ip4_address_t src_address
Definition: ip4_packet.h:125
static u8 tcp_cc_is_spurious_retransmit(tcp_connection_t *tc)
Definition: tcp_input.c:712
transport_connection_t * session_lookup_connection_wt6(u32 fib_index, ip6_address_t *lcl, ip6_address_t *rmt, u16 lcl_port, u16 rmt_port, u8 proto, u32 thread_index, u8 *result)
Lookup connection with ip6 and transport layer information.
vnet_main_t * vnet_get_main(void)
Definition: misc.c:46
enum _tcp_state_next tcp_state_next_t
static vnet_hw_interface_t * vnet_get_sup_hw_interface(vnet_main_t *vnm, u32 sw_if_index)
#define tcp_rst(_th)
Definition: tcp_packet.h:81
#define TCP_FLAG_SYN
Definition: fa_node.h:13
#define THZ
TCP tick frequency.
Definition: tcp_types.h:25
#define tcp_opts_tstamp(_to)
Definition: tcp_packet.h:156
#define PREDICT_TRUE(x)
Definition: clib.h:122
#define tcp_inc_err_counter(cnts, err, val)
Definition: tcp_input.c:1461
unsigned long u64
Definition: types.h:89
#define tcp_store_err_counters(node_id, cnts)
Definition: tcp_input.c:1465
static void tcp_dispatch_table_init(tcp_main_t *tm)
Definition: tcp_input.c:3048
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
static u8 * format_tcp_rx_trace_short(u8 *s, va_list *args)
Definition: tcp_input.c:1372
static int tcp_segment_rcv(tcp_worker_ctx_t *wrk, tcp_connection_t *tc, vlib_buffer_t *b)
Receive buffer for connection and handle acks.
Definition: tcp_input.c:1285
void session_transport_delete_notify(transport_connection_t *tc)
Notification from transport that connection is being deleted.
Definition: session.c:997
static uword tcp46_established_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, int is_ip4)
Definition: tcp_input.c:1475
svm_fifo_t * rx_fifo
Pointers to rx/tx buffers.
#define tcp_fastrecovery_first_off(tc)
Definition: tcp_types.h:424
static void tcp_input_dispatch_buffer(tcp_main_t *tm, tcp_connection_t *tc, vlib_buffer_t *b, u16 *next, vlib_node_runtime_t *error_node)
Definition: tcp_input.c:2794
struct _tcp_main tcp_main_t
u32 thread_index
Definition: main.h:250
void tcp_connection_timers_reset(tcp_connection_t *tc)
Stop all connection timers.
Definition: tcp.c:493
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:113
int session_main_flush_enqueue_events(u8 transport_proto, u32 thread_index)
Flushes queue of sessions that are to be notified of new data enqueued events.
Definition: session.c:742
struct _tcp_connection tcp_connection_t
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
Definition: vec.h:592
static u32 tcp_set_time_now(tcp_worker_ctx_t *wrk)
Definition: tcp_inlines.h:219
#define clib_abs(x)
Definition: clib.h:343
void session_transport_reset_notify(transport_connection_t *tc)
Notify application that connection has been reset.
Definition: session.c:1098
u32 dpo_get_urpf(const dpo_id_t *dpo)
Get a uRPF interface for the DPO.
Definition: dpo.c:387
u32 * pending_resets
vector of pending reset notifications
Definition: tcp.h:89
#define tcp_disconnect_pending_on(tc)
Definition: tcp_types.h:420
static u32 format_get_indent(u8 *s)
Definition: format.h:72
vlib_node_registration_t tcp4_rcv_process_node
(constructor) VLIB_REGISTER_NODE (tcp4_rcv_process_node)
Definition: tcp_input.c:2505
static void tcp_cc_congestion(tcp_connection_t *tc)
Definition: tcp_cc.h:36
static u32 tcp_time_now_w_thread(u32 thread_index)
Definition: tcp_inlines.h:197
vlib_main_t * vm
Definition: in2out_ed.c:1580
#define timestamp_lt(_t1, _t2)
Definition: tcp_packet.h:185
static session_t * session_get(u32 si, u32 thread_index)
Definition: session.h:307
#define TCP_TICK
TCP tick period (s)
Definition: tcp_types.h:24
#define tcp_disconnect_pending_off(tc)
Definition: tcp_types.h:421
tcp_connection_t tcp_connection
Definition: tcp_input.c:1352
#define VLIB_NODE_FN(node)
Definition: node.h:203
static void tcp_cc_congestion_undo(tcp_connection_t *tc)
Definition: tcp_input.c:693
int session_enqueue_stream_connection(transport_connection_t *tc, vlib_buffer_t *b, u32 offset, u8 queue_event, u8 is_in_order)
Definition: session.c:460
u64 session_lookup_half_open_handle(transport_connection_t *tc)
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:470
format_function_t format_tcp_flags
Definition: tcp.h:348
static u8 tcp_is_descheduled(tcp_connection_t *tc)
Definition: tcp_inlines.h:380
struct _tcp_header tcp_header_t
int tcp_half_open_connection_cleanup(tcp_connection_t *tc)
Try to cleanup half-open connection.
Definition: tcp.c:209
ip6_address_t src_address
Definition: ip6_packet.h:310
#define tcp_in_cong_recovery(tc)
Definition: tcp_types.h:426
u32 * pending_deq_acked
vector of pending ack dequeues
Definition: tcp.h:83
unsigned char u8
Definition: types.h:56
#define tcp_inc_counter(node_id, err, count)
Definition: tcp_input.c:1453
vlib_node_registration_t tcp6_syn_sent_node
(constructor) VLIB_REGISTER_NODE (tcp6_syn_sent_node)
Definition: tcp_input.c:2078
u8 data[128]
Definition: ipsec_types.api:90
static tcp_connection_t * tcp_lookup_connection(u32 fib_index, vlib_buffer_t *b, u8 thread_index, u8 is_ip4)
Lookup transport connection.
Definition: tcp_input.c:1679
double f64
Definition: types.h:142
void session_transport_closing_notify(transport_connection_t *tc)
Notification from transport that connection is being closed.
Definition: session.c:975
#define tcp_is_fin(_th)
Definition: tcp_packet.h:90
static u8 * format_tcp_rx_trace(u8 *s, va_list *args)
Definition: tcp_input.c:1356
#define timestamp_leq(_t1, _t2)
Definition: tcp_packet.h:186
void tcp_init_snd_vars(tcp_connection_t *tc)
Initialize connection send variables.
Definition: tcp.c:669
#define tcp_cfg
Definition: tcp.h:271
static void tcp_persist_timer_reset(tcp_timer_wheel_t *tw, tcp_connection_t *tc)
Definition: tcp_timer.h:90
#define VLIB_INIT_FUNCTION(x)
Definition: init.h:173
vlib_node_registration_t tcp4_established_node
(constructor) VLIB_REGISTER_NODE (tcp4_established_node)
Definition: tcp_input.c:1573
static int tcp_options_parse(tcp_header_t *th, tcp_options_t *to, u8 is_syn)
Parse TCP header options.
Definition: tcp_packet.h:197
void tcp_bt_sample_delivery_rate(tcp_connection_t *tc, tcp_rate_sample_t *rs)
Generate a delivery rate sample from recently acked bytes.
Definition: tcp_bt.c:596
vl_api_ip6_address_t ip6
Definition: one.api:424
ip4_address_t dst_address
Definition: ip4_packet.h:125
#define seq_leq(_s1, _s2)
Definition: tcp_packet.h:179
#define TCP_FLAG_ACK
Definition: fa_node.h:16
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
transport_connection_t * session_lookup_connection_wt4(u32 fib_index, ip4_address_t *lcl, ip4_address_t *rmt, u16 lcl_port, u16 rmt_port, u8 proto, u32 thread_index, u8 *result)
Lookup connection with ip4 and transport layer information.
static void tcp_handle_rst(tcp_connection_t *tc)
Definition: tcp_input.c:157
description fragment has unexpected format
Definition: map.api:433
vnet_hw_interface_flags_t flags
Definition: interface.h:538
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:207
void transport_fifos_init_ooo(transport_connection_t *tc)
Definition: transport.c:783
static int tcp_segment_validate(tcp_worker_ctx_t *wrk, tcp_connection_t *tc0, vlib_buffer_t *b0, tcp_header_t *th0, u32 *error0)
Validate incoming segment as per RFC793 p.
Definition: tcp_input.c:259
#define tcp_fastrecovery_off(tc)
Definition: tcp_types.h:413
const cJSON *const b
Definition: cJSON.h:255
vlib_node_registration_t tcp6_input_node
(constructor) VLIB_REGISTER_NODE (tcp6_input_node)
Definition: tcp_input.c:3027
static u8 tcp_ack_is_dupack(tcp_connection_t *tc, vlib_buffer_t *b, u32 prev_snd_wnd, u32 prev_snd_una)
Check if duplicate ack as per RFC5681 Sec.
Definition: tcp_input.c:954
static u32 ooo_segment_length(svm_fifo_t *f, ooo_segment_t *s)
Definition: svm_fifo.h:686
static void * ip4_next_header(ip4_header_t *i)
Definition: ip4_packet.h:196
unsigned int u32
Definition: types.h:88
static sack_scoreboard_hole_t * scoreboard_first_hole(sack_scoreboard_t *sb)
Definition: tcp_sack.h:59
static tcp_header_t * tcp_buffer_hdr(vlib_buffer_t *b)
Definition: tcp_inlines.h:22
#define vlib_call_init_function(vm, x)
Definition: init.h:270
static void tcp_node_inc_counter_i(vlib_main_t *vm, u32 tcp4_node, u32 tcp6_node, u8 is_ip4, u32 evt, u32 val)
Definition: tcp_input.c:1437
#define TCP_TSTP_TO_HZ
Definition: tcp_types.h:30
#define VLIB_FRAME_SIZE
Definition: node.h:378
static void tcp_cc_init_congestion(tcp_connection_t *tc)
Init loss recovery/fast recovery.
Definition: tcp_input.c:667
#define tcp_validate_txf_size(_tc, _a)
Definition: tcp.h:354
#define tcp_fastrecovery_on(tc)
Definition: tcp_types.h:412
static void tcp_retransmit_timer_update(tcp_timer_wheel_t *tw, tcp_connection_t *tc)
Definition: tcp_timer.h:96
static void tcp_timer_set(tcp_timer_wheel_t *tw, tcp_connection_t *tc, u8 timer_id, u32 interval)
Definition: tcp_timer.h:21
static void tcp_cc_recovered(tcp_connection_t *tc)
Definition: tcp_cc.h:48
static void svm_fifo_newest_ooo_segment_reset(svm_fifo_t *f)
Definition: svm_fifo.h:670
static heap_elt_t * first(heap_header_t *h)
Definition: heap.c:59
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:136
static void tcp_retransmit_timer_reset(tcp_timer_wheel_t *tw, tcp_connection_t *tc)
Definition: tcp_timer.h:63
The identity of a DPO is a combination of its type and its instance number/index of objects of that t...
Definition: dpo.h:170
static u8 tcp_should_fastrecover(tcp_connection_t *tc, u8 has_sack)
Definition: tcp_input.c:718
void tcp_update_sack_list(tcp_connection_t *tc, u32 start, u32 end)
Build SACK list as per RFC2018.
Definition: tcp_sack.c:568
Definition: cJSON.c:84
vlib_main_t * vm
convenience pointer to this thread&#39;s vlib main
Definition: tcp.h:92
static tcp_connection_t * tcp_half_open_connection_get(u32 conn_index)
Definition: tcp_inlines.h:67
static void tcp_program_dequeue(tcp_worker_ctx_t *wrk, tcp_connection_t *tc)
Definition: tcp_input.c:601
void tcp_send_ack(tcp_connection_t *tc)
Definition: tcp_output.c:1023
static void tcp_handle_disconnects(tcp_worker_ctx_t *wrk)
Definition: tcp_input.c:1093
static uword tcp46_listen_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame, int is_ip4)
LISTEN state processing as per RFC 793 p.
Definition: tcp_input.c:2546
void tcp_connection_tx_pacer_reset(tcp_connection_t *tc, u32 window, u32 start_bucket)
Definition: tcp.c:1209
static void tcp_input_set_error_next(tcp_main_t *tm, u16 *next, u32 *error, u8 is_ip4)
Definition: tcp_input.c:2775
tcp_connection_t * tcp_connection_alloc_w_base(u8 thread_index, tcp_connection_t *base)
Definition: tcp.c:309
static const dpo_id_t * load_balance_get_bucket_i(const load_balance_t *lb, u32 bucket)
Definition: load_balance.h:229
format_function_t format_tcp_connection_id
Definition: tcp.h:352
vlib_node_registration_t tcp4_input_nolookup_node
(constructor) VLIB_REGISTER_NODE (tcp4_input_nolookup_node)
Definition: tcp_input.c:2953
unsigned short u16
Definition: types.h:57
#define TCP_DUPACK_THRESHOLD
Definition: tcp_types.h:37
#define foreach_tcp4_input_next
Definition: tcp_input.c:2732
u8 data_len
Definition: ikev2_types.api:24
tcp_connection_t * tcp_connection_alloc(u8 thread_index)
Definition: tcp.c:296
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:233
#define filter_flags
Definition: tcp_input.c:2750
void tcp_connection_tx_pacer_update(tcp_connection_t *tc)
Definition: tcp.c:1196
static int tcp_buffer_discard_bytes(vlib_buffer_t *b, u32 n_bytes_to_drop)
Definition: tcp_input.c:1252
#define TCP_PAWS_IDLE
24 days
Definition: tcp_types.h:29
static void tcp_check_tx_offload(tcp_connection_t *tc, int is_ipv4)
Definition: tcp_input.c:1750
#define foreach_tcp6_input_next
Definition: tcp_input.c:2741
The FIB DPO provieds;.
Definition: load_balance.h:106
tcp_timer_wheel_t timer_wheel
worker timer wheel
Definition: tcp.h:118
static void tcp_input_trace_frame(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_buffer_t **bs, u32 n_bufs, u8 is_ip4)
Definition: tcp_input.c:2753
int ip6_address_compare(ip6_address_t *a1, ip6_address_t *a2)
Definition: ip46_cli.c:60
#define PREDICT_FALSE(x)
Definition: clib.h:121
#define always_inline
Definition: ipsec.h:28
static int tcp_rcv_ack_no_cc(tcp_connection_t *tc, vlib_buffer_t *b, u32 *error)
Definition: tcp_input.c:401
vl_api_ip4_address_t ip4
Definition: one.api:376
#define TCP_FLAG_FIN
Definition: fa_node.h:12
static u8 tcp_is_lost_fin(tcp_connection_t *tc)
Definition: tcp_inlines.h:183
static void tcp_cc_handle_event(tcp_connection_t *tc, tcp_rate_sample_t *rs, u32 is_dack)
One function to rule them all ...
Definition: tcp_input.c:821
vlib_node_registration_t tcp4_listen_node
(constructor) VLIB_REGISTER_NODE (tcp4_listen_node)
Definition: tcp_input.c:2683
static ooo_segment_t * svm_fifo_newest_ooo_segment(svm_fifo_t *f)
Definition: svm_fifo.h:662
static void tcp_cc_rcv_ack(tcp_connection_t *tc, tcp_rate_sample_t *rs)
Definition: tcp_cc.h:22
vlib_node_registration_t tcp6_established_node
(constructor) VLIB_REGISTER_NODE (tcp6_established_node)
Definition: tcp_input.c:1592
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
Definition: node_funcs.h:1231
static int tcp_cc_recover(tcp_connection_t *tc)
Definition: tcp_input.c:745
#define TCP_FLAG_RST
Definition: fa_node.h:14
#define TCP_DBG(_fmt, _args...)
Definition: tcp_debug.h:146
#define tcp_recovery_off(tc)
Definition: tcp_types.h:415
static int tcp_rcv_ack(tcp_worker_ctx_t *wrk, tcp_connection_t *tc, vlib_buffer_t *b, tcp_header_t *th, u32 *error)
Process incoming ACK.
Definition: tcp_input.c:982
void tcp_program_cleanup(tcp_worker_ctx_t *wrk, tcp_connection_t *tc)
Definition: tcp.c:335
void tcp_connection_free(tcp_connection_t *tc)
Definition: tcp.c:322
static void tcp_program_reset_ntf(tcp_worker_ctx_t *wrk, tcp_connection_t *tc)
Definition: tcp_input.c:190
vl_api_mac_address_t dst_addr
Definition: flow_types.api:65
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:170
vlib_node_registration_t tcp4_syn_sent_node
(constructor) VLIB_REGISTER_NODE (tcp4_syn_sent_node)
Definition: tcp_input.c:2059
u16 n_vectors
Definition: node.h:397
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:80
int ip4_address_compare(ip4_address_t *a1, ip4_address_t *a2)
Definition: ip46_cli.c:53
static_always_inline void vlib_buffer_enqueue_to_next(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 *nexts, uword count)
Definition: buffer_node.h:339
#define tcp_disconnect_pending(tc)
Definition: tcp_types.h:419
static void tcp_set_rx_trace_data(tcp_rx_trace_t *t0, tcp_connection_t *tc0, tcp_header_t *th0, vlib_buffer_t *b0, u8 is_ip4)
Definition: tcp_input.c:1387
void tcp_program_dupack(tcp_connection_t *tc)
Definition: tcp_output.c:1053
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:317
void tcp_send_reset(tcp_connection_t *tc)
Build and set reset packet for connection.
Definition: tcp_output.c:749
format_function_t format_tcp_state
Definition: tcp.h:347
static void tcp_update_rto(tcp_connection_t *tc)
Definition: tcp_inlines.h:373
#define clib_warning(format, args...)
Definition: error.h:59
u8 data[]
Packet data.
Definition: buffer.h:181
#define TCP_RTO_MIN
Definition: tcp_types.h:87
#define tcp_in_recovery(tc)
Definition: tcp_types.h:417
Don&#39;t register connection in lookup.
tcp_header_t tcp_header
Definition: tcp_input.c:1351
format_function_t format_tcp_header
Definition: format.h:100
struct _transport_connection transport_connection_t
f64 rtt_time
RTT for sample.
Definition: tcp_types.h:228
static void tcp_cc_undo_recovery(tcp_connection_t *tc)
Definition: tcp_cc.h:54
#define TCP_RTT_MAX
Definition: tcp_types.h:88
#define ARRAY_LEN(x)
Definition: clib.h:67
static void * ip6_next_header(ip6_header_t *i)
Definition: ip6_packet.h:376
vlib_main_t vlib_node_runtime_t * node
Definition: in2out_ed.c:1580
static u32 transport_max_tx_dequeue(transport_connection_t *tc)
Definition: session.h:509
void tcp_send_synack(tcp_connection_t *tc)
Definition: tcp_output.c:844
#define seq_geq(_s1, _s2)
Definition: tcp_packet.h:181
#define ASSERT(truth)
#define tcp_syn(_th)
Definition: tcp_packet.h:80
static clib_error_t * tcp_input_init(vlib_main_t *vm)
Definition: tcp_input.c:3280
static void tcp_estimate_rtt(tcp_connection_t *tc, u32 mrtt)
Compute smoothed RTT as per VJ&#39;s &#39;88 SIGCOMM and RFC6298.
Definition: tcp_input.c:439
static int tcp_update_rtt(tcp_connection_t *tc, tcp_rate_sample_t *rs, u32 ack)
Update rtt estimate.
Definition: tcp_input.c:473
enum _tcp_rcv_process_next tcp_rcv_process_next_t
static load_balance_t * load_balance_get(index_t lbi)
Definition: load_balance.h:220
static void tcp_cc_update(tcp_connection_t *tc, tcp_rate_sample_t *rs)
Definition: tcp_input.c:797
static void tcp_handle_postponed_dequeues(tcp_worker_ctx_t *wrk)
Dequeue bytes for connections that have received acks in last burst.
Definition: tcp_input.c:562
static void tcp_cong_recovery_off(tcp_connection_t *tc)
Definition: tcp_types.h:430
static index_t ip4_fib_forwarding_lookup(u32 fib_index, const ip4_address_t *addr)
Definition: ip4_fib.h:160
static void tcp_estimate_initial_rtt(tcp_connection_t *tc)
Definition: tcp_input.c:527
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:252
#define seq_gt(_s1, _s2)
Definition: tcp_packet.h:180
static int tcp_segment_check_paws(tcp_connection_t *tc)
RFC1323: Check against wrapped sequence numbers (PAWS).
Definition: tcp_input.c:128
static u8 tcp_cc_is_spurious_timeout_rxt(tcp_connection_t *tc)
Definition: tcp_input.c:703
static void tcp_established_trace_frame(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, u8 is_ip4)
Definition: tcp_input.c:1403
#define tcp_fastrecovery_first_on(tc)
Definition: tcp_types.h:423
enum _tcp_input_next tcp_input_next_t
int session_stream_accept_notify(transport_connection_t *tc)
Definition: session.c:1113
struct _sack_scoreboard_hole sack_scoreboard_hole_t
static u8 tcp_segment_in_rcv_wnd(tcp_connection_t *tc, u32 seq, u32 end_seq)
Validate segment sequence number.
Definition: tcp_input.c:112
#define clib_max(x, y)
Definition: clib.h:321
static vlib_main_t * vlib_get_main(void)
Definition: global_funcs.h:23
static clib_error_t * tcp_init(vlib_main_t *vm)
Definition: tcp.c:1439
u8 ip_is_zero(ip46_address_t *ip46_address, u8 is_ip4)
Definition: ip.c:20
u8 tcp_scoreboard_is_sane_post_recovery(tcp_connection_t *tc)
Test that scoreboard is sane after recovery.
Definition: tcp_sack.c:317
#define tcp_is_syn(_th)
Definition: tcp_packet.h:89
#define tcp_opts_wscale(_to)
Definition: tcp_packet.h:157
enum _tcp_syn_sent_next tcp_syn_sent_next_t
void tcp_send_reset_w_pkt(tcp_connection_t *tc, vlib_buffer_t *pkt, u32 thread_index, u8 is_ip4)
Send reset without reusing existing buffer.
Definition: tcp_output.c:661
static void tcp_update_snd_wnd(tcp_connection_t *tc, u32 seq, u32 ack, u32 snd_wnd)
Try to update snd_wnd based on feedback received from peer.
Definition: tcp_input.c:618
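The acceptance rule is the one from RFC 793: take the advertised window only if the segment is newer than the one that last updated it, tracked by snd_wl1/snd_wl2. A hedged sketch using the parameters from the signature above and the seq_lt/seq_leq modular comparisons; not necessarily the exact in-tree code.
if (seq_lt (tc->snd_wl1, seq)
    || (tc->snd_wl1 == seq && seq_leq (tc->snd_wl2, ack)))
  {
    tc->snd_wnd = snd_wnd;        /* accept the newer advertisement */
    tc->snd_wl1 = seq;
    tc->snd_wl2 = ack;
  }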
enum _tcp_established_next tcp_established_next_t
static void tcp_timer_update(tcp_timer_wheel_t *tw, tcp_connection_t *tc, u8 timer_id, u32 interval)
Definition: tcp_timer.h:43
vlib_node_registration_t tcp4_input_node
(constructor) VLIB_REGISTER_NODE (tcp4_input_node)
Definition: tcp_input.c:3007
void scoreboard_clear(sack_scoreboard_t *sb)
Definition: tcp_sack.c:277
void tcp_send_fin(tcp_connection_t *tc)
Send FIN.
Definition: tcp_output.c:873
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
enum _tcp_listen_next tcp_listen_next_t
#define foreach_tcp_state_next
Definition: tcp_input.c:31
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:140
static tcp_connection_t * tcp_listener_get(u32 tli)
Definition: tcp_inlines.h:58
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
Definition: in2out_ed.c:1581
static tcp_worker_ctx_t * tcp_get_worker(u32 thread_index)
Definition: tcp.h:282
void session_transport_closed_notify(transport_connection_t *tc)
Notification from transport that it is closed.
Definition: session.c:1063
VLIB buffer representation.
Definition: buffer.h:102
static int tcp_session_enqueue_data(tcp_connection_t *tc, vlib_buffer_t *b, u16 data_len)
Enqueue data for delivery to application.
Definition: tcp_input.c:1150
u64 uword
Definition: types.h:112
int session_stream_connect_notify(transport_connection_t *tc, session_error_t err)
Definition: session.c:784
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:297
void tcp_connection_init_vars(tcp_connection_t *tc)
Initialize tcp connection variables.
Definition: tcp.c:703
static void tcp_init_w_buffer(tcp_connection_t *tc, vlib_buffer_t *b, u8 is_ip4)
Initialize connection by gleaning network and rcv params from buffer.
Definition: tcp_inlines.h:328
session_t * session_lookup_listener6(u32 fib_index, ip6_address_t *lcl, u16 lcl_port, u8 proto, u8 use_wildcard)
static f64 tcp_time_now_us(u32 thread_index)
Definition: tcp_inlines.h:213
void scoreboard_init_rxt(sack_scoreboard_t *sb, u32 snd_una)
Definition: tcp_sack.c:254
static void tcp_connection_set_state(tcp_connection_t *tc, tcp_state_t state)
Definition: tcp_inlines.h:51
static tcp_connection_t * tcp_lookup_listener(vlib_buffer_t *b, u32 fib_index, int is_ip4)
Definition: tcp_input.c:1720
static u32 ooo_segment_offset_prod(svm_fifo_t *f, ooo_segment_t *s)
Definition: svm_fifo.h:676
struct clib_bihash_value offset
template key/value backing page structure
#define vnet_buffer(b)
Definition: buffer.h:417
static u8 tcp_lookup_is_valid(tcp_connection_t *tc, vlib_buffer_t *b, tcp_header_t *hdr)
Definition: tcp_input.c:1612
static u32 vlib_num_workers()
Definition: threads.h:377
void tcp_connection_cleanup(tcp_connection_t *tc)
Cleans up connection state.
Definition: tcp.c:242
void tcp_connection_del(tcp_connection_t *tc)
Connection removal.
Definition: tcp.c:289
f64 end
end of the time range
Definition: mactime.api:44
void tcp_reschedule(tcp_connection_t *tc)
Definition: tcp.c:1220
u16 flags
Copy of main node flags.
Definition: node.h:501
u32 session_tx_fifo_dequeue_drop(transport_connection_t *tc, u32 max_bytes)
Definition: session.c:588
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace.c:634
static u8 tcp_timer_is_active(tcp_connection_t *tc, tcp_timers_e timer)
Definition: tcp_timer.h:110
void tcp_program_ack(tcp_connection_t *tc)
Definition: tcp_output.c:1043
vlib_node_registration_t tcp6_listen_node
(constructor) VLIB_REGISTER_NODE (tcp6_listen_node)
Definition: tcp_input.c:2702
#define tcp_opts_sack_permitted(_to)
Definition: tcp_packet.h:159
static u32 tcp_tstamp(tcp_connection_t *tc)
Generate timestamp for tcp connection.
Definition: tcp_inlines.h:206
static void tcp_cc_rcv_cong_ack(tcp_connection_t *tc, tcp_cc_ack_t ack_type, tcp_rate_sample_t *rs)
Definition: tcp_cc.h:29
int session_stream_accept(transport_connection_t *tc, u32 listener_index, u32 thread_index, u8 notify)
Accept a stream session.
Definition: session.c:1137
static_always_inline void vlib_get_buffers(vlib_main_t *vm, u32 *bi, vlib_buffer_t **b, int count)
Translate array of buffer indices into buffer pointers.
Definition: buffer_funcs.h:280
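A hedged usage sketch of the usual pattern at the top of a node function: pull the buffer indices out of the frame (via vlib_frame_vector_args, also listed above) and translate them to pointers in one call. Sizing the stack array with VLIB_FRAME_SIZE is the common convention.
u32 *from = vlib_frame_vector_args (frame);
u32 n_left_from = frame->n_vectors;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE];

vlib_get_buffers (vm, from, bufs, n_left_from);   /* indices -> pointers */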
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:302
tcp_bts_flags_t flags
Rate sample flags from bt sample.
Definition: tcp_types.h:235
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
static transport_connection_t * transport_get_listener(transport_proto_t tp, u32 conn_index)
Definition: transport.h:157
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
Definition: buffer.h:167
static uword tcp46_input_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, int is_ip4, u8 is_nolookup)
Definition: tcp_input.c:2819
static tcp_connection_t * tcp_get_connection_from_transport(transport_connection_t *tconn)
Definition: tcp_types.h:443
static tcp_main_t * vnet_get_tcp_main()
Definition: tcp.h:276
static uword tcp46_syn_sent_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame, int is_ip4)
Definition: tcp_input.c:1784
static uword tcp46_rcv_process_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame, int is_ip4)
Handles reception for all states except LISTEN, SYN-SENT and ESTABLISHED as per RFC793 p...
Definition: tcp_input.c:2101
session_t * session_lookup_listener4(u32 fib_index, ip4_address_t *lcl, u16 lcl_port, u8 proto, u8 use_wildcard)
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:85
vlib_node_registration_t tcp6_input_nolookup_node
(constructor) VLIB_REGISTER_NODE (tcp6_input_nolookup_node)
Definition: tcp_input.c:2973
static tcp_connection_t * tcp_input_lookup_buffer(vlib_buffer_t *b, u8 thread_index, u32 *error, u8 is_ip4, u8 is_nolookup)
Definition: tcp_inlines.h:225
static void tcp_handle_old_ack(tcp_connection_t *tc, tcp_rate_sample_t *rs)
Definition: tcp_input.c:934
#define tcp_ack(_th)
Definition: tcp_packet.h:83
#define seq_lt(_s1, _s2)
Definition: tcp_packet.h:178
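These comparisons are modular (serial-number) arithmetic, so they stay correct across 32-bit sequence wrap. A hedged sketch of the usual definitions; see tcp_packet.h for the authoritative macros.
#define seq_lt_sketch(_s1, _s2)   ((i32) ((_s1) - (_s2)) < 0)
#define seq_geq_sketch(_s1, _s2)  ((i32) ((_s1) - (_s2)) >= 0)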
static u32 transport_tx_fifo_size(transport_connection_t *tc)
Definition: session.h:530
transport_connection_t * session_lookup_half_open_connection(u64 handle, u8 proto, u8 is_ip4)
Definition: defs.h:46
ip6_address_t dst_address
Definition: ip6_packet.h:310
static u8 tcp_ack_is_cc_event(tcp_connection_t *tc, vlib_buffer_t *b, u32 prev_snd_wnd, u32 prev_snd_una, u8 *is_dack)
Checks if ack is a congestion control event.
Definition: tcp_input.c:967
static void tcp_estimate_rtt_us(tcp_connection_t *tc, f64 mrtt)
Definition: tcp_input.c:450
void tcp_rcv_sacks(tcp_connection_t *tc, u32 ack)
Definition: tcp_sack.c:326
static char * tcp_error_strings[]
Definition: tcp_input.c:24
#define TCP_EVT(_evt, _args...)
Definition: tcp_debug.h:145
static void tcp_rcv_rst(tcp_worker_ctx_t *wrk, tcp_connection_t *tc)
Handle reset packet.
Definition: tcp_input.c:207