FD.io VPP  v20.09-64-g4f7b92f0a
Vector Packet Processing
tcp_input.c
1 /*
2  * Copyright (c) 2016-2019 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include <vppinfra/sparse_vec.h>
17 #include <vnet/fib/ip4_fib.h>
18 #include <vnet/fib/ip6_fib.h>
19 #include <vnet/tcp/tcp.h>
20 #include <vnet/tcp/tcp_inlines.h>
21 #include <vnet/session/session.h>
22 #include <math.h>
23 
24 static char *tcp_error_strings[] = {
25 #define tcp_error(n,s) s,
26 #include <vnet/tcp/tcp_error.def>
27 #undef tcp_error
28 };
29 
30 /* All TCP nodes have the same outgoing arcs */
31 #define foreach_tcp_state_next \
32  _ (DROP4, "ip4-drop") \
33  _ (DROP6, "ip6-drop") \
34  _ (TCP4_OUTPUT, "tcp4-output") \
35  _ (TCP6_OUTPUT, "tcp6-output")
36 
37 typedef enum _tcp_established_next
38 {
39 #define _(s,n) TCP_ESTABLISHED_NEXT_##s,
40  foreach_tcp_state_next
41 #undef _
42  TCP_ESTABLISHED_N_NEXT,
43 } tcp_established_next_t;
44 
45 typedef enum _tcp_rcv_process_next
46 {
47 #define _(s,n) TCP_RCV_PROCESS_NEXT_##s,
48  foreach_tcp_state_next
49 #undef _
50  TCP_RCV_PROCESS_N_NEXT,
51 } tcp_rcv_process_next_t;
52 
53 typedef enum _tcp_syn_sent_next
54 {
55 #define _(s,n) TCP_SYN_SENT_NEXT_##s,
56  foreach_tcp_state_next
57 #undef _
58  TCP_SYN_SENT_N_NEXT,
59 } tcp_syn_sent_next_t;
60 
61 typedef enum _tcp_listen_next
62 {
63 #define _(s,n) TCP_LISTEN_NEXT_##s,
64  foreach_tcp_state_next
65 #undef _
66  TCP_LISTEN_N_NEXT,
67 } tcp_listen_next_t;
68 
69 /* Generic, state independent indices */
70 typedef enum _tcp_state_next
71 {
72 #define _(s,n) TCP_NEXT_##s,
73  foreach_tcp_state_next
74 #undef _
75  TCP_STATE_N_NEXT,
76 } tcp_state_next_t;
77 
78 #define tcp_next_output(is_ip4) (is_ip4 ? TCP_NEXT_TCP4_OUTPUT \
79  : TCP_NEXT_TCP6_OUTPUT)
80 
81 #define tcp_next_drop(is_ip4) (is_ip4 ? TCP_NEXT_DROP4 \
82  : TCP_NEXT_DROP6)
83 
84 /**
85  * Validate segment sequence number. As per RFC793:
86  *
87  * Segment Receive Test
88  * Length Window
89  * ------- ------- -------------------------------------------
90  * 0 0 SEG.SEQ = RCV.NXT
91  * 0 >0 RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
92  * >0 0 not acceptable
93  * >0 >0 RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
94  * or RCV.NXT =< SEG.SEQ+SEG.LEN-1 < RCV.NXT+RCV.WND
95  *
96  * This ultimately consists in checking if the segment falls within the window.
97  * The one important difference compared to RFC793 is that we use rcv_las,
98  * or the rcv_nxt at the last ack sent, instead of rcv_nxt, since that's the
99  * peer's reference when computing our receive window.
100  *
101  * This:
102  * seq_leq (end_seq, tc->rcv_las + tc->rcv_wnd) && seq_geq (seq, tc->rcv_las)
103  * however, is too strict when we have retransmits. Instead we just check that
104  * the seq is not beyond the right edge and that the end of the segment is not
105  * less than the left edge.
106  *
107  * N.B. rcv_nxt and rcv_wnd are both updated in this node if acks are sent, so
108  * use rcv_nxt in the right edge window test instead of rcv_las.
109  *
110  */
111 static u8
112 tcp_segment_in_rcv_wnd (tcp_connection_t * tc, u32 seq, u32 end_seq)
113 {
114  return (seq_geq (end_seq, tc->rcv_las)
115  && seq_leq (seq, tc->rcv_nxt + tc->rcv_wnd));
116 }
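/* Editor's note (illustrative example, not part of the original source): a
 * short worked example of the acceptance test above. Assume rcv_las = 1000,
 * rcv_nxt = 1200 and rcv_wnd = 500. A segment with seq = 1100 and
 * end_seq = 1300 is accepted because end_seq >= rcv_las (1300 >= 1000) and
 * seq <= rcv_nxt + rcv_wnd (1100 <= 1700). A segment entirely below the
 * window, e.g. seq = 400, end_seq = 900, fails the first check; one entirely
 * above it, e.g. seq = 1800, fails the second. seq_geq/seq_leq are wrap-safe
 * comparisons, so the same logic holds across 32-bit sequence wraparound. */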
117 
118 /**
119  * RFC1323: Check against wrapped sequence numbers (PAWS). If we have
120  * timestamp to echo and it's less than tsval_recent, drop segment
121  * but still send an ACK in order to retain TCP's mechanism for detecting
122  * and recovering from half-open connections
123  *
124  * Or at least that's what the theory says. It seems that this might not work
125  * very well with packet reordering and fast retransmit. XXX
126  */
127 always_inline int
128 tcp_segment_check_paws (tcp_connection_t * tc)
129 {
130  return tcp_opts_tstamp (&tc->rcv_opts)
131  && timestamp_lt (tc->rcv_opts.tsval, tc->tsval_recent);
132 }
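/* Editor's note (illustrative example, not part of the original source):
 * with tsval_recent = 5000, an arriving segment carrying tsval = 4990 makes
 * timestamp_lt() true and the segment is treated as a PAWS failure, whereas
 * tsval = 5000 or 5010 passes. The comparison is modular, like the
 * sequence-number macros, so a recently wrapped timestamp clock is still
 * handled correctly. */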
133 
134 /**
135  * Update tsval recent
136  */
137 always_inline void
138 tcp_update_timestamp (tcp_connection_t * tc, u32 seq, u32 seq_end)
139 {
140  /*
141  * RFC1323: If Last.ACK.sent falls within the range of sequence numbers
142  * of an incoming segment:
143  * SEG.SEQ <= Last.ACK.sent < SEG.SEQ + SEG.LEN
144  * then the TSval from the segment is copied to TS.Recent;
145  * otherwise, the TSval is ignored.
146  */
147  if (tcp_opts_tstamp (&tc->rcv_opts) && seq_leq (seq, tc->rcv_las)
148  && seq_leq (tc->rcv_las, seq_end))
149  {
150  ASSERT (timestamp_leq (tc->tsval_recent, tc->rcv_opts.tsval));
151  tc->tsval_recent = tc->rcv_opts.tsval;
152  tc->tsval_recent_age = tcp_time_now_w_thread (tc->c_thread_index);
153  }
154 }
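/* Editor's note (illustrative example, not part of the original source): the
 * RFC 1323 condition above keeps TS.Recent fresh only for segments that cover
 * the last ACK we sent. For instance, with rcv_las = 2000 a segment spanning
 * [1900, 2100) satisfies seq <= rcv_las <= seq_end and its tsval is recorded,
 * while a purely out-of-order segment at [2500, 2600) leaves tsval_recent
 * untouched. */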
155 
156 static void
157 tcp_handle_rst (tcp_connection_t * tc)
158 {
159  switch (tc->rst_state)
160  {
161  case TCP_STATE_SYN_RCVD:
162  /* Cleanup everything. App wasn't notified yet */
163  session_transport_delete_notify (&tc->connection);
165  break;
166  case TCP_STATE_SYN_SENT:
167  session_stream_connect_notify (&tc->connection, SESSION_E_REFUSED);
169  break;
170  case TCP_STATE_ESTABLISHED:
171  session_transport_reset_notify (&tc->connection);
172  session_transport_closed_notify (&tc->connection);
173  break;
174  case TCP_STATE_CLOSE_WAIT:
175  case TCP_STATE_FIN_WAIT_1:
176  case TCP_STATE_FIN_WAIT_2:
177  case TCP_STATE_CLOSING:
178  case TCP_STATE_LAST_ACK:
179  session_transport_closed_notify (&tc->connection);
180  break;
181  case TCP_STATE_CLOSED:
182  case TCP_STATE_TIME_WAIT:
183  break;
184  default:
185  TCP_DBG ("reset state: %u", tc->state);
186  }
187 }
188 
189 static void
190 tcp_program_reset_ntf (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
191 {
192  if (!tcp_disconnect_pending (tc))
193  {
194  tc->rst_state = tc->state;
195  vec_add1 (wrk->pending_resets, tc->c_c_index);
196  tcp_disconnect_pending_on (tc);
197  }
198 }
199 
200 /**
201  * Handle reset packet
202  *
203  * Programs disconnect/reset notification that should be sent
204  * later by calling @ref tcp_handle_disconnects
205  */
206 static void
207 tcp_rcv_rst (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
208 {
209  TCP_EVT (TCP_EVT_RST_RCVD, tc);
210  switch (tc->state)
211  {
212  case TCP_STATE_SYN_RCVD:
213  tcp_program_reset_ntf (wrk, tc);
214  tcp_connection_set_state (tc, TCP_STATE_CLOSED);
215  break;
216  case TCP_STATE_SYN_SENT:
217  /* Do not program ntf because the connection is half-open */
218  tc->rst_state = tc->state;
219  tcp_handle_rst (tc);
220  break;
221  case TCP_STATE_ESTABLISHED:
224  tcp_program_reset_ntf (wrk, tc);
225  tcp_connection_set_state (tc, TCP_STATE_CLOSED);
226  tcp_program_cleanup (wrk, tc);
227  break;
228  case TCP_STATE_CLOSE_WAIT:
229  case TCP_STATE_FIN_WAIT_1:
230  case TCP_STATE_FIN_WAIT_2:
231  case TCP_STATE_CLOSING:
232  case TCP_STATE_LAST_ACK:
235  tcp_program_reset_ntf (wrk, tc);
236  /* Make sure we mark the session as closed. In some states we may
237  * be still trying to send data */
238  tcp_connection_set_state (tc, TCP_STATE_CLOSED);
239  tcp_program_cleanup (wrk, tc);
240  break;
241  case TCP_STATE_CLOSED:
242  case TCP_STATE_TIME_WAIT:
243  break;
244  default:
245  TCP_DBG ("reset state: %u", tc->state);
246  }
247 }
248 
249 /**
250  * Validate incoming segment as per RFC793 p. 69 and RFC1323 p. 19
251  *
252  * It first verifies if segment has a wrapped sequence number (PAWS) and then
253  * does the processing associated to the first four steps (ignoring security
254  * and precedence): sequence number, rst bit and syn bit checks.
255  *
256  * @return 0 if segments passes validation.
257  */
258 static int
259 tcp_segment_validate (tcp_worker_ctx_t * wrk, tcp_connection_t * tc0,
260  vlib_buffer_t * b0, tcp_header_t * th0, u32 * error0)
261 {
262  /* We could get a burst of RSTs interleaved with acks */
263  if (PREDICT_FALSE (tc0->state == TCP_STATE_CLOSED))
264  {
265  tcp_send_reset (tc0);
266  *error0 = TCP_ERROR_CONNECTION_CLOSED;
267  goto error;
268  }
269 
270  if (PREDICT_FALSE (!tcp_ack (th0) && !tcp_rst (th0) && !tcp_syn (th0)))
271  {
272  *error0 = TCP_ERROR_SEGMENT_INVALID;
273  goto error;
274  }
275 
276  if (PREDICT_FALSE (tcp_options_parse (th0, &tc0->rcv_opts, 0)))
277  {
278  *error0 = TCP_ERROR_OPTIONS;
279  goto error;
280  }
281 
282  if (PREDICT_FALSE (tcp_segment_check_paws (tc0)))
283  {
284  *error0 = TCP_ERROR_PAWS;
285  TCP_EVT (TCP_EVT_PAWS_FAIL, tc0, vnet_buffer (b0)->tcp.seq_number,
286  vnet_buffer (b0)->tcp.seq_end);
287 
288  /* If it just so happens that a segment updates tsval_recent for a
289  * segment over 24 days old, invalidate tsval_recent. */
290  if (timestamp_lt (tc0->tsval_recent_age + TCP_PAWS_IDLE,
291  tcp_time_now_w_thread (tc0->c_thread_index)))
292  {
293  tc0->tsval_recent = tc0->rcv_opts.tsval;
294  clib_warning ("paws failed: 24-day old segment");
295  }
296  /* Drop after ack if not rst. Resets can fail paws check as per
297  * RFC 7323 sec. 5.2: When an <RST> segment is received, it MUST NOT
298  * be subjected to the PAWS check by verifying an acceptable value in
299  * SEG.TSval */
300  else if (!tcp_rst (th0))
301  {
302  tcp_program_ack (tc0);
303  TCP_EVT (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
304  goto error;
305  }
306  }
307 
308  /* 1st: check sequence number */
309  if (!tcp_segment_in_rcv_wnd (tc0, vnet_buffer (b0)->tcp.seq_number,
310  vnet_buffer (b0)->tcp.seq_end))
311  {
312  /* SYN/SYN-ACK retransmit */
313  if (tcp_syn (th0)
314  && vnet_buffer (b0)->tcp.seq_number == tc0->rcv_nxt - 1)
315  {
316  tcp_options_parse (th0, &tc0->rcv_opts, 1);
317  if (tc0->state == TCP_STATE_SYN_RCVD)
318  {
319  tcp_send_synack (tc0);
320  TCP_EVT (TCP_EVT_SYN_RCVD, tc0, 0);
321  *error0 = TCP_ERROR_SYNS_RCVD;
322  }
323  else
324  {
325  tcp_program_ack (tc0);
326  TCP_EVT (TCP_EVT_SYNACK_RCVD, tc0);
327  *error0 = TCP_ERROR_SYN_ACKS_RCVD;
328  }
329  goto error;
330  }
331 
332  /* If our window is 0 and the packet is in sequence, let it pass
333  * through for ack processing. It should be dropped later. */
334  if (tc0->rcv_wnd < tc0->snd_mss
335  && tc0->rcv_nxt == vnet_buffer (b0)->tcp.seq_number)
336  goto check_reset;
337 
338  /* If we entered recovery and peer did so as well, there's a chance that
339  * dup acks won't be acceptable on either end because seq_end may be less
340  * than rcv_las. This can happen if acks are lost in both directions. */
341  if (tcp_in_recovery (tc0)
342  && seq_geq (vnet_buffer (b0)->tcp.seq_number,
343  tc0->rcv_las - tc0->rcv_wnd)
344  && seq_leq (vnet_buffer (b0)->tcp.seq_end,
345  tc0->rcv_nxt + tc0->rcv_wnd))
346  goto check_reset;
347 
348  *error0 = TCP_ERROR_RCV_WND;
349 
350  /* If we advertised a zero rcv_wnd and the segment is in the past or the
351  * next one that we expect, it is probably a window probe */
352  if ((tc0->flags & TCP_CONN_ZERO_RWND_SENT)
353  && seq_lt (vnet_buffer (b0)->tcp.seq_end,
354  tc0->rcv_las + tc0->rcv_opts.mss))
355  *error0 = TCP_ERROR_ZERO_RWND;
356 
357  tc0->errors.below_data_wnd += seq_lt (vnet_buffer (b0)->tcp.seq_end,
358  tc0->rcv_las);
359 
360  /* If not RST, send dup ack */
361  if (!tcp_rst (th0))
362  {
363  tcp_program_dupack (tc0);
364  TCP_EVT (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
365  }
366  goto error;
367 
368  check_reset:
369  ;
370  }
371 
372  /* 2nd: check the RST bit */
373  if (PREDICT_FALSE (tcp_rst (th0)))
374  {
375  tcp_rcv_rst (wrk, tc0);
376  *error0 = TCP_ERROR_RST_RCVD;
377  goto error;
378  }
379 
380  /* 3rd: check security and precedence (skip) */
381 
382  /* 4th: check the SYN bit (in window) */
383  if (PREDICT_FALSE (tcp_syn (th0)))
384  {
385  /* As per RFC5961 send challenge ack instead of reset */
386  tcp_program_ack (tc0);
387  *error0 = TCP_ERROR_SPURIOUS_SYN;
388  goto error;
389  }
390 
391  /* If segment in window, save timestamp */
392  tcp_update_timestamp (tc0, vnet_buffer (b0)->tcp.seq_number,
393  vnet_buffer (b0)->tcp.seq_end);
394  return 0;
395 
396 error:
397  return -1;
398 }
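/* Editor's note (illustrative example, not part of the original source): the
 * SYN/SYN-ACK retransmit branch above relies on the peer's SYN having
 * consumed one sequence number, so a retransmitted SYN arrives with
 * seq == rcv_nxt - 1 and would otherwise fail the window test. Example: after
 * accepting a SYN with ISN 999 we have rcv_nxt = 1000; a duplicate SYN still
 * carries seq 999 and is answered with a (SYN-)ACK instead of being treated
 * as out of window. */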
399 
400 always_inline int
401 tcp_rcv_ack_no_cc (tcp_connection_t * tc, vlib_buffer_t * b, u32 * error)
402 {
403  /* SND.UNA =< SEG.ACK =< SND.NXT */
404  if (!(seq_leq (tc->snd_una, vnet_buffer (b)->tcp.ack_number)
405  && seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
406  {
407  if (seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_una_max)
408  && seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una))
409  {
410  tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
411  goto acceptable;
412  }
413  *error = TCP_ERROR_ACK_INVALID;
414  return -1;
415  }
416 
417 acceptable:
418  tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
419  tc->snd_una = vnet_buffer (b)->tcp.ack_number;
420  *error = TCP_ERROR_ACK_OK;
421  return 0;
422 }
423 
424 /**
425  * Compute smoothed RTT as per VJ's '88 SIGCOMM and RFC6298
426  *
427  * Note that although in the original article srtt and rttvar are scaled
428  * to minimize round-off errors, here we don't. Instead, we rely on
429  * better precision time measurements.
430  *
431  * A known limitation of the algorithm is that a drop in rtt results in a
432  * rttvar increase and bigger RTO.
433  *
434  * mrtt must be provided in @ref TCP_TICK multiples, i.e., in us. Note that
435  * timestamps are measured as ms ticks so they must be converted before
436  * calling this function.
437  */
438 static void
439 tcp_estimate_rtt (tcp_connection_t * tc, u32 mrtt)
440 {
441  int err, diff;
442 
443  err = mrtt - tc->srtt;
444  tc->srtt = clib_max ((int) tc->srtt + (err >> 3), 1);
445  diff = (clib_abs (err) - (int) tc->rttvar) >> 2;
446  tc->rttvar = clib_max ((int) tc->rttvar + diff, 1);
447 }
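/* Editor's note (illustrative example, not part of the original source):
 * numerically, the update above is the RFC 6298 EWMA with gains 1/8 and 1/4.
 * With srtt = 100000 us, rttvar = 20000 us and a new measurement
 * mrtt = 60000 us, err = -40000, so srtt becomes 100000 + (-40000 >> 3) =
 * 95000 and rttvar moves by (|err| - rttvar) >> 2 = 5000 up to 25000,
 * illustrating how a sudden rtt drop temporarily inflates rttvar and hence
 * the RTO. */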
448 
449 static inline void
450 tcp_estimate_rtt_us (tcp_connection_t * tc, f64 mrtt)
451 {
452  tc->mrtt_us = tc->mrtt_us + (mrtt - tc->mrtt_us) * 0.125;
453 }
454 
455 /**
456  * Update rtt estimate
457  *
458  * We have potentially three sources of rtt measurements:
459  *
460  * TSOPT difference between current and echoed timestamp. It has ms
461  * precision and can be computed per ack
462  * ACK timing one sequence number is tracked per rtt with us (micro second)
463  * precision.
464  * rate sample if enabled, all outstanding bytes are tracked with us
465  * precision. Every ack and sack are a rtt sample
466  *
467  * Middle boxes are known to fiddle with TCP options so we give higher
468  * priority to ACK timing.
469  *
470  * For now, rate sample rtts are only used under congestion.
471  */
472 static int
473 tcp_update_rtt (tcp_connection_t * tc, tcp_rate_sample_t * rs, u32 ack)
474 {
475  u32 mrtt = 0;
476 
477  /* Karn's rule, part 1. Don't use retransmitted segments to estimate
478  * RTT because they're ambiguous. */
479  if (tcp_in_cong_recovery (tc))
480  {
481  /* Accept rtt estimates for samples that have not been retransmitted */
482  if (!(tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
483  || (rs->flags & TCP_BTS_IS_RXT))
484  goto done;
485  if (rs->rtt_time)
486  tcp_estimate_rtt_us (tc, rs->rtt_time);
487  mrtt = rs->rtt_time * THZ;
488  goto estimate_rtt;
489  }
490 
491  if (tc->rtt_ts && seq_geq (ack, tc->rtt_seq))
492  {
493  f64 sample = tcp_time_now_us (tc->c_thread_index) - tc->rtt_ts;
494  tcp_estimate_rtt_us (tc, sample);
495  mrtt = clib_max ((u32) (sample * THZ), 1);
496  /* Allow measuring of a new RTT */
497  tc->rtt_ts = 0;
498  }
499  /* As per RFC7323 TSecr can be used for RTTM only if the segment advances
500  * snd_una, i.e., the left side of the send window:
501  * seq_lt (tc->snd_una, ack). This is a condition for calling update_rtt */
502  else if (tcp_opts_tstamp (&tc->rcv_opts) && tc->rcv_opts.tsecr)
503  {
504  mrtt = clib_max (tcp_tstamp (tc) - tc->rcv_opts.tsecr, 1);
505  mrtt *= TCP_TSTP_TO_HZ;
506  }
507 
508 estimate_rtt:
509 
510  /* Ignore dubious measurements */
511  if (mrtt == 0 || mrtt > TCP_RTT_MAX)
512  goto done;
513 
514  tcp_estimate_rtt (tc, mrtt);
515 
516 done:
517 
518  /* If we got here something must've been ACKed so make sure boff is 0,
519  * even if mrtt is not valid since we update the rto lower */
520  tc->rto_boff = 0;
521  tcp_update_rto (tc);
522 
523  return 0;
524 }
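/* Editor's note (illustrative summary, not part of the original source): of
 * the three measurement sources described above, outside of congestion
 * recovery a pending ACK-timed sample (rtt_ts/rtt_seq) wins and is taken with
 * us precision; otherwise the echoed timestamp (tsecr) is used at ms
 * granularity; during recovery only rate samples that were not retransmitted
 * are accepted, per Karn's rule. Whatever the outcome, rto_boff is cleared
 * and the RTO recomputed. */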
525 
526 static void
527 tcp_estimate_initial_rtt (tcp_connection_t * tc)
528 {
529  u8 thread_index = vlib_num_workers ()? 1 : 0;
530  int mrtt;
531 
532  if (tc->rtt_ts)
533  {
534  tc->mrtt_us = tcp_time_now_us (thread_index) - tc->rtt_ts;
535  tc->mrtt_us = clib_max (tc->mrtt_us, 0.0001);
536  mrtt = clib_max ((u32) (tc->mrtt_us * THZ), 1);
537  tc->rtt_ts = 0;
538  }
539  else
540  {
541  mrtt = tcp_tstamp (tc) - tc->rcv_opts.tsecr;
542  mrtt = clib_max (mrtt, 1) * TCP_TSTP_TO_HZ;
543  /* Due to retransmits we don't know the initial mrtt */
544  if (tc->rto_boff && mrtt > 1 * THZ)
545  mrtt = 1 * THZ;
546  tc->mrtt_us = (f64) mrtt *TCP_TICK;
547  }
548 
549  if (mrtt > 0 && mrtt < TCP_RTT_MAX)
550  {
551  /* First measurement as per RFC 6298 */
552  tc->srtt = mrtt;
553  tc->rttvar = mrtt >> 1;
554  }
555  tcp_update_rto (tc);
556 }
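/* Editor's note (illustrative example, not part of the original source): for
 * the first measurement the RFC 6298 initialization above (srtt = mrtt,
 * rttvar = mrtt / 2) yields an rto on the order of srtt + 4 * rttvar, i.e.
 * roughly 3 * mrtt, once tcp_update_rto() runs. A 20 ms syn/syn-ack sample
 * would therefore give an initial rto of about 60 ms, subject to the stack's
 * configured minimum and maximum RTO bounds. */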
557 
558 /**
559  * Dequeue bytes for connections that have received acks in last burst
560  */
561 static void
562 tcp_handle_postponed_dequeues (tcp_worker_ctx_t * wrk)
563 {
564  u32 thread_index = wrk->vm->thread_index;
565  u32 *pending_deq_acked;
566  tcp_connection_t *tc;
567  int i;
568 
569  if (!vec_len (wrk->pending_deq_acked))
570  return;
571 
572  pending_deq_acked = wrk->pending_deq_acked;
573  for (i = 0; i < vec_len (pending_deq_acked); i++)
574  {
575  tc = tcp_connection_get (pending_deq_acked[i], thread_index);
576  tc->flags &= ~TCP_CONN_DEQ_PENDING;
577 
578  if (PREDICT_FALSE (!tc->burst_acked))
579  continue;
580 
581  /* Dequeue the newly ACKed bytes */
582  session_tx_fifo_dequeue_drop (&tc->connection, tc->burst_acked);
583  tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
584 
585  if (PREDICT_FALSE (tc->flags & TCP_CONN_PSH_PENDING))
586  {
587  if (seq_leq (tc->psh_seq, tc->snd_una))
588  tc->flags &= ~TCP_CONN_PSH_PENDING;
589  }
590 
591  if (tcp_is_descheduled (tc))
592  tcp_reschedule (tc);
593 
594  /* If everything has been acked, stop retransmit timer
595  * otherwise update. */
597 
598  /* Update pacer based on our new cwnd estimate */
600 
601  tc->burst_acked = 0;
602  }
603  _vec_len (wrk->pending_deq_acked) = 0;
604 }
605 
606 static void
607 tcp_program_dequeue (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
608 {
609  if (!(tc->flags & TCP_CONN_DEQ_PENDING))
610  {
611  vec_add1 (wrk->pending_deq_acked, tc->c_c_index);
612  tc->flags |= TCP_CONN_DEQ_PENDING;
613  }
614  tc->burst_acked += tc->bytes_acked;
615 }
616 
617 /**
618  * Try to update snd_wnd based on feedback received from peer.
619  *
620  * If successful, and new window is 'effectively' 0, activate persist
621  * timer.
622  */
623 static void
624 tcp_update_snd_wnd (tcp_connection_t * tc, u32 seq, u32 ack, u32 snd_wnd)
625 {
626  /* If (SND.WL1 < SEG.SEQ or (SND.WL1 = SEG.SEQ and SND.WL2 =< SEG.ACK)), set
627  * SND.WND <- SEG.WND, set SND.WL1 <- SEG.SEQ, and set SND.WL2 <- SEG.ACK */
628  if (seq_lt (tc->snd_wl1, seq)
629  || (tc->snd_wl1 == seq && seq_leq (tc->snd_wl2, ack)))
630  {
631  tc->snd_wnd = snd_wnd;
632  tc->snd_wl1 = seq;
633  tc->snd_wl2 = ack;
634  TCP_EVT (TCP_EVT_SND_WND, tc);
635 
636  if (PREDICT_FALSE (tc->snd_wnd < tc->snd_mss))
637  {
638  /* Set persist timer if not set and we just got 0 wnd */
639  if (!tcp_timer_is_active (tc, TCP_TIMER_PERSIST)
640  && !tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT))
641  {
642  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
644  }
645  }
646  else
647  {
648  if (PREDICT_FALSE (tcp_timer_is_active (tc, TCP_TIMER_PERSIST)))
649  {
650  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
652  }
653 
655  tcp_reschedule (tc);
656 
657  if (PREDICT_FALSE (!tcp_in_recovery (tc) && tc->rto_boff > 0))
658  {
659  tc->rto_boff = 0;
660  tcp_update_rto (tc);
661  }
662  }
663  }
664 }
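/* Editor's note (illustrative example, not part of the original source): the
 * snd_wl1/snd_wl2 bookkeeping above is the RFC 793 guard against stale window
 * updates. If the last accepted update came from a segment with seq 3000 and
 * ack 500, an older, reordered segment with seq 2900 that advertises a bigger
 * window is ignored, because seq_lt (3000, 2900) is false and the sequence
 * numbers are not equal. */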
665 
666 /**
667  * Init loss recovery/fast recovery.
668  *
669  * Triggered by dup acks as opposed to timer timeout. Note that cwnd is
670  * updated in @ref tcp_cc_handle_event after fast retransmit
671  */
672 static void
673 tcp_cc_init_congestion (tcp_connection_t * tc)
674 {
675  tcp_fastrecovery_on (tc);
676  tc->snd_congestion = tc->snd_nxt;
677  tc->cwnd_acc_bytes = 0;
678  tc->snd_rxt_bytes = 0;
679  tc->rxt_delivered = 0;
680  tc->prr_delivered = 0;
681  tc->prr_start = tc->snd_una;
682  tc->prev_ssthresh = tc->ssthresh;
683  tc->prev_cwnd = tc->cwnd;
684 
685  tc->snd_rxt_ts = tcp_tstamp (tc);
686  tcp_cc_congestion (tc);
687 
688  /* Post retransmit update cwnd to ssthresh and account for the
689  * three segments that have left the network and should've been
690  * buffered at the receiver XXX */
691  if (!tcp_opts_sack_permitted (&tc->rcv_opts))
692  tc->cwnd += TCP_DUPACK_THRESHOLD * tc->snd_mss;
693 
694  tc->fr_occurences += 1;
695  TCP_EVT (TCP_EVT_CC_EVT, tc, 4);
696 }
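/* Editor's note (illustrative example, not part of the original source): the
 * non-SACK adjustment above mirrors RFC 5681 fast recovery. After the cc
 * algorithm reduces ssthresh (typically to half the flight size) and sets
 * cwnd accordingly, cwnd is inflated by three segments to account for the
 * three duplicate acks that triggered recovery: with snd_mss = 1460 and a
 * post-congestion cwnd of 14600, cwnd becomes 14600 + 3 * 1460 = 18980 until
 * recovery completes. */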
697 
698 static void
699 tcp_cc_congestion_undo (tcp_connection_t * tc)
700 {
701  tc->cwnd = tc->prev_cwnd;
702  tc->ssthresh = tc->prev_ssthresh;
704  ASSERT (tc->rto_boff == 0);
705  TCP_EVT (TCP_EVT_CC_EVT, tc, 5);
706 }
707 
708 static inline u8
709 tcp_cc_is_spurious_timeout_rxt (tcp_connection_t * tc)
710 {
711  return (tcp_in_recovery (tc) && tc->rto_boff == 1
712  && tc->snd_rxt_ts
713  && tcp_opts_tstamp (&tc->rcv_opts)
714  && timestamp_lt (tc->rcv_opts.tsecr, tc->snd_rxt_ts));
715 }
716 
717 static inline u8
718 tcp_cc_is_spurious_retransmit (tcp_connection_t * tc)
719 {
720  return (tcp_cc_is_spurious_timeout_rxt (tc));
721 }
722 
723 static inline u8
724 tcp_should_fastrecover (tcp_connection_t * tc, u8 has_sack)
725 {
726  if (!has_sack)
727  {
728  /* If either of the two conditions below holds, reset dupacks because
729  * we're probably after timeout (RFC6582 heuristics).
730  * If Cumulative ack does not cover more than congestion threshold,
731  * and:
732  * 1) The following doesn't hold: The congestion window is greater
733  * than SMSS bytes and the difference between highest_ack
734  * and prev_highest_ack is at most 4*SMSS bytes
735  * 2) Echoed timestamp in the last non-dup ack does not equal the
736  * stored timestamp
737  */
738  if (seq_leq (tc->snd_una, tc->snd_congestion)
739  && ((!(tc->cwnd > tc->snd_mss
740  && tc->bytes_acked <= 4 * tc->snd_mss))
741  || (tc->rcv_opts.tsecr != tc->tsecr_last_ack)))
742  {
743  tc->rcv_dupacks = 0;
744  return 0;
745  }
746  }
747  return tc->sack_sb.lost_bytes || tc->rcv_dupacks >= tc->sack_sb.reorder;
748 }
749 
750 static int
751 tcp_cc_recover (tcp_connection_t * tc)
752 {
753  sack_scoreboard_hole_t *hole;
754  u8 is_spurious = 0;
755 
756  ASSERT (tcp_in_cong_recovery (tc));
757 
758  if (tcp_cc_is_spurious_retransmit (tc))
759  {
760  tcp_cc_congestion_undo (tc);
761  is_spurious = 1;
762  }
763 
764  tcp_connection_tx_pacer_reset (tc, tc->cwnd, 0 /* start bucket */ );
765  tc->rcv_dupacks = 0;
766 
767  /* Previous recovery left us congested. Continue sending as part
768  * of the current recovery event with an updated snd_congestion */
769  if (tc->sack_sb.sacked_bytes)
770  {
771  tc->snd_congestion = tc->snd_nxt;
773  return is_spurious;
774  }
775 
776  tc->rxt_delivered = 0;
777  tc->snd_rxt_bytes = 0;
778  tc->snd_rxt_ts = 0;
779  tc->prr_delivered = 0;
780  tc->rtt_ts = 0;
781  tc->flags &= ~TCP_CONN_RXT_PENDING;
782 
783  hole = scoreboard_first_hole (&tc->sack_sb);
784  if (hole && hole->start == tc->snd_una && hole->end == tc->snd_nxt)
785  scoreboard_clear (&tc->sack_sb);
786 
787  if (!tcp_in_recovery (tc) && !is_spurious)
788  tcp_cc_recovered (tc);
789 
792  tcp_recovery_off (tc);
793  TCP_EVT (TCP_EVT_CC_EVT, tc, 3);
794 
795  ASSERT (tc->rto_boff == 0);
798 
799  return is_spurious;
800 }
801 
802 static void
803 tcp_cc_update (tcp_connection_t * tc, tcp_rate_sample_t * rs)
804 {
806 
807  /* Congestion avoidance */
808  tcp_cc_rcv_ack (tc, rs);
809 
810  /* If a cumulative ack, make sure dupacks is 0 */
811  tc->rcv_dupacks = 0;
812 
813  /* When dupacks hits the threshold we only enter fast retransmit if
814  * cumulative ack covers more than snd_congestion. Should snd_una
815  * wrap this test may fail under otherwise valid circumstances.
816  * Therefore, proactively update snd_congestion when wrap detected. */
817  if (PREDICT_FALSE
818  (seq_leq (tc->snd_congestion, tc->snd_una - tc->bytes_acked)
819  && seq_gt (tc->snd_congestion, tc->snd_una)))
820  tc->snd_congestion = tc->snd_una - 1;
821 }
822 
823 /**
824  * One function to rule them all ... and in the darkness bind them
825  */
826 static void
827 tcp_cc_handle_event (tcp_connection_t * tc, tcp_rate_sample_t * rs,
828  u32 is_dack)
829 {
830  u8 has_sack = tcp_opts_sack_permitted (&tc->rcv_opts);
831 
832  /* If reneging, wait for timer based retransmits */
833  if (PREDICT_FALSE (tcp_is_lost_fin (tc) || tc->sack_sb.is_reneging))
834  return;
835 
836  /*
837  * If not in recovery, figure out if we should enter
838  */
839  if (!tcp_in_cong_recovery (tc))
840  {
841  ASSERT (is_dack);
842 
843  tc->rcv_dupacks++;
844  TCP_EVT (TCP_EVT_DUPACK_RCVD, tc, 1);
846 
847  if (tcp_should_fastrecover (tc, has_sack))
848  {
850 
851  if (has_sack)
852  scoreboard_init_rxt (&tc->sack_sb, tc->snd_una);
853 
854  tcp_connection_tx_pacer_reset (tc, tc->cwnd, 0 /* start bucket */ );
856  }
857 
858  return;
859  }
860 
861  /*
862  * Already in recovery
863  */
864 
865  /*
866  * Process (re)transmit feedback. Output path uses this to decide how much
867  * more data to release into the network
868  */
869  if (has_sack)
870  {
871  if (!tc->bytes_acked && tc->sack_sb.rxt_sacked)
873 
874  tc->rxt_delivered += tc->sack_sb.rxt_sacked;
875  tc->prr_delivered += tc->bytes_acked + tc->sack_sb.last_sacked_bytes
876  - tc->sack_sb.last_bytes_delivered;
877  }
878  else
879  {
880  if (is_dack)
881  {
882  tc->rcv_dupacks += 1;
883  TCP_EVT (TCP_EVT_DUPACK_RCVD, tc, 1);
884  }
885  tc->rxt_delivered = clib_min (tc->rxt_delivered + tc->bytes_acked,
886  tc->snd_rxt_bytes);
887  if (is_dack)
888  tc->prr_delivered += clib_min (tc->snd_mss,
889  tc->snd_nxt - tc->snd_una);
890  else
891  tc->prr_delivered += tc->bytes_acked - clib_min (tc->bytes_acked,
892  tc->snd_mss *
893  tc->rcv_dupacks);
894 
895  /* If partial ack, assume that the first un-acked segment was lost */
896  if (tc->bytes_acked || tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
898  }
899 
900  /*
901  * See if we can exit and stop retransmitting
902  */
903  if (seq_geq (tc->snd_una, tc->snd_congestion))
904  {
905  /* If spurious return, we've already updated everything */
906  if (tcp_cc_recover (tc))
907  {
908  tc->tsecr_last_ack = tc->rcv_opts.tsecr;
909  return;
910  }
911 
912  /* Treat as congestion avoidance ack */
913  tcp_cc_rcv_ack (tc, rs);
914  return;
915  }
916 
918 
919  /*
920  * Notify cc of the event
921  */
922 
923  if (!tc->bytes_acked)
924  {
926  return;
927  }
928 
929  /* RFC6675: If the incoming ACK is a cumulative acknowledgment,
930  * reset dupacks to 0. Also needed if in congestion recovery */
931  tc->rcv_dupacks = 0;
932 
933  if (tcp_in_recovery (tc))
934  tcp_cc_rcv_ack (tc, rs);
935  else
937 }
938 
939 static void
940 tcp_handle_old_ack (tcp_connection_t * tc, tcp_rate_sample_t * rs)
941 {
942  if (!tcp_in_cong_recovery (tc))
943  return;
944 
945  if (tcp_opts_sack_permitted (&tc->rcv_opts))
946  tcp_rcv_sacks (tc, tc->snd_una);
947 
948  tc->bytes_acked = 0;
949 
950  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
952 
953  tcp_cc_handle_event (tc, rs, 1);
954 }
955 
956 /**
957  * Check if duplicate ack as per RFC5681 Sec. 2
958  */
959 static u8
960 tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 prev_snd_wnd,
961  u32 prev_snd_una)
962 {
963  return ((vnet_buffer (b)->tcp.ack_number == prev_snd_una)
964  && seq_gt (tc->snd_nxt, tc->snd_una)
965  && (vnet_buffer (b)->tcp.seq_end == vnet_buffer (b)->tcp.seq_number)
966  && (prev_snd_wnd == tc->snd_wnd));
967 }
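/* Editor's note (illustrative example, not part of the original source): per
 * RFC 5681 an ACK counts as a duplicate only when all four tests above hold,
 * i.e. it acknowledges nothing new (ack == prev_snd_una), data is still
 * outstanding (snd_nxt > snd_una), it carries no payload (seq_end ==
 * seq_number) and it does not change the advertised window. A pure window
 * update therefore does not bump rcv_dupacks. */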
968 
969 /**
970  * Checks if ack is a congestion control event.
971  */
972 static u8
973 tcp_ack_is_cc_event (tcp_connection_t * tc, vlib_buffer_t * b,
974  u32 prev_snd_wnd, u32 prev_snd_una, u8 * is_dack)
975 {
976  /* Check if ack is duplicate. Per RFC 6675, ACKs that SACK new data are
977  * defined to be 'duplicate' as well */
978  *is_dack = tc->sack_sb.last_sacked_bytes
979  || tcp_ack_is_dupack (tc, b, prev_snd_wnd, prev_snd_una);
980 
981  return (*is_dack || tcp_in_cong_recovery (tc));
982 }
983 
984 /**
985  * Process incoming ACK
986  */
987 static int
988 tcp_rcv_ack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, vlib_buffer_t * b,
989  tcp_header_t * th, u32 * error)
990 {
991  u32 prev_snd_wnd, prev_snd_una;
992  tcp_rate_sample_t rs = { 0 };
993  u8 is_dack;
994 
995  TCP_EVT (TCP_EVT_CC_STAT, tc);
996 
997  /* If the ACK acks something not yet sent (SEG.ACK > SND.NXT) */
998  if (PREDICT_FALSE (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
999  {
1000  /* We've probably entered recovery and the peer still has some
1001  * of the data we've sent. Update snd_nxt and accept the ack */
1002  if (seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_una_max)
1003  && seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una))
1004  {
1005  tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
1006  goto process_ack;
1007  }
1008 
1009  tc->errors.above_ack_wnd += 1;
1010  *error = TCP_ERROR_ACK_FUTURE;
1011  TCP_EVT (TCP_EVT_ACK_RCV_ERR, tc, 0, vnet_buffer (b)->tcp.ack_number);
1012  return -1;
1013  }
1014 
1015  /* If old ACK, probably it's an old dupack */
1016  if (PREDICT_FALSE (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una)))
1017  {
1018  tc->errors.below_ack_wnd += 1;
1019  *error = TCP_ERROR_ACK_OLD;
1020  TCP_EVT (TCP_EVT_ACK_RCV_ERR, tc, 1, vnet_buffer (b)->tcp.ack_number);
1021 
1022  if (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una - tc->rcv_wnd))
1023  return -1;
1024 
1025  tcp_handle_old_ack (tc, &rs);
1026 
1027  /* Don't drop yet */
1028  return 0;
1029  }
1030 
1031 process_ack:
1032 
1033  /*
1034  * Looks okay, process feedback
1035  */
1036 
1037  if (tcp_opts_sack_permitted (&tc->rcv_opts))
1038  tcp_rcv_sacks (tc, vnet_buffer (b)->tcp.ack_number);
1039 
1040  prev_snd_wnd = tc->snd_wnd;
1041  prev_snd_una = tc->snd_una;
1042  tcp_update_snd_wnd (tc, vnet_buffer (b)->tcp.seq_number,
1043  vnet_buffer (b)->tcp.ack_number,
1044  clib_net_to_host_u16 (th->window) << tc->snd_wscale);
1045  tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
1046  tc->snd_una = vnet_buffer (b)->tcp.ack_number;
1047  tcp_validate_txf_size (tc, tc->bytes_acked);
1048 
1049  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1050  tcp_bt_sample_delivery_rate (tc, &rs);
1051 
1052  if (tc->bytes_acked + tc->sack_sb.last_sacked_bytes)
1053  {
1054  tcp_update_rtt (tc, &rs, vnet_buffer (b)->tcp.ack_number);
1055  if (tc->bytes_acked)
1056  tcp_program_dequeue (wrk, tc);
1057  }
1058 
1059  TCP_EVT (TCP_EVT_ACK_RCVD, tc);
1060 
1061  /*
1062  * Check if we have congestion event
1063  */
1064 
1065  if (tcp_ack_is_cc_event (tc, b, prev_snd_wnd, prev_snd_una, &is_dack))
1066  {
1067  tcp_cc_handle_event (tc, &rs, is_dack);
1068  tc->dupacks_in += is_dack;
1069  if (!tcp_in_cong_recovery (tc))
1070  {
1071  *error = TCP_ERROR_ACK_OK;
1072  return 0;
1073  }
1074  *error = TCP_ERROR_ACK_DUP;
1075  if (vnet_buffer (b)->tcp.data_len || tcp_is_fin (th))
1076  return 0;
1077  return -1;
1078  }
1079 
1080  /*
1081  * Update congestion control (slow start/congestion avoidance)
1082  */
1083  tcp_cc_update (tc, &rs);
1084  *error = TCP_ERROR_ACK_OK;
1085  return 0;
1086 }
1087 
1088 static void
1089 tcp_program_disconnect (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
1090 {
1091  if (!tcp_disconnect_pending (tc))
1092  {
1093  vec_add1 (wrk->pending_disconnects, tc->c_c_index);
1094  tcp_disconnect_pending_on (tc);
1095  }
1096 }
1097 
1098 static void
1099 tcp_handle_disconnects (tcp_worker_ctx_t * wrk)
1100 {
1101  u32 thread_index, *pending_disconnects, *pending_resets;
1102  tcp_connection_t *tc;
1103  int i;
1104 
1105  if (vec_len (wrk->pending_disconnects))
1106  {
1107  thread_index = wrk->vm->thread_index;
1108  pending_disconnects = wrk->pending_disconnects;
1109  for (i = 0; i < vec_len (pending_disconnects); i++)
1110  {
1111  tc = tcp_connection_get (pending_disconnects[i], thread_index);
1113  session_transport_closing_notify (&tc->connection);
1114  }
1115  _vec_len (wrk->pending_disconnects) = 0;
1116  }
1117 
1118  if (vec_len (wrk->pending_resets))
1119  {
1120  thread_index = wrk->vm->thread_index;
1121  pending_resets = wrk->pending_resets;
1122  for (i = 0; i < vec_len (pending_resets); i++)
1123  {
1124  tc = tcp_connection_get (pending_resets[i], thread_index);
1126  tcp_handle_rst (tc);
1127  }
1128  _vec_len (wrk->pending_resets) = 0;
1129  }
1130 }
1131 
1132 static void
1133 tcp_rcv_fin (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, vlib_buffer_t * b,
1134  u32 * error)
1135 {
1136  /* Reject out-of-order fins */
1137  if (vnet_buffer (b)->tcp.seq_end != tc->rcv_nxt)
1138  return;
1139 
1140  /* Account for the FIN and send ack */
1141  tc->rcv_nxt += 1;
1142  tc->flags |= TCP_CONN_FINRCVD;
1143  tcp_program_ack (tc);
1144  /* Enter CLOSE-WAIT and notify session. To avoid lingering
1145  * in CLOSE-WAIT, set timer (reuse WAITCLOSE). */
1146  tcp_connection_set_state (tc, TCP_STATE_CLOSE_WAIT);
1147  tcp_program_disconnect (wrk, tc);
1148  tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_WAITCLOSE,
1149  tcp_cfg.closewait_time);
1150  TCP_EVT (TCP_EVT_FIN_RCVD, tc);
1151  *error = TCP_ERROR_FIN_RCVD;
1152 }
1153 
1154 /** Enqueue data for delivery to application */
1155 static int
1156 tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b,
1157  u16 data_len)
1158 {
1159  int written, error = TCP_ERROR_ENQUEUED;
1160 
1161  ASSERT (seq_geq (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));
1162  ASSERT (data_len);
1163  written = session_enqueue_stream_connection (&tc->connection, b, 0,
1164  1 /* queue event */ , 1);
1165  tc->bytes_in += written;
1166 
1167  TCP_EVT (TCP_EVT_INPUT, tc, 0, data_len, written);
1168 
1169  /* Update rcv_nxt */
1170  if (PREDICT_TRUE (written == data_len))
1171  {
1172  tc->rcv_nxt += written;
1173  }
1174  /* If more data written than expected, account for out-of-order bytes. */
1175  else if (written > data_len)
1176  {
1177  tc->rcv_nxt += written;
1178  TCP_EVT (TCP_EVT_CC_INPUT, tc, data_len, written);
1179  }
1180  else if (written > 0)
1181  {
1182  /* We've written something but FIFO is probably full now */
1183  tc->rcv_nxt += written;
1184  error = TCP_ERROR_PARTIALLY_ENQUEUED;
1185  }
1186  else
1187  {
1188  /* Packet made it through for ack processing */
1189  if (tc->rcv_wnd < tc->snd_mss)
1190  return TCP_ERROR_ZERO_RWND;
1191 
1192  return TCP_ERROR_FIFO_FULL;
1193  }
1194 
1195  /* Update SACK list if need be */
1196  if (tcp_opts_sack_permitted (&tc->rcv_opts))
1197  {
1198  /* Remove SACK blocks that have been delivered */
1199  tcp_update_sack_list (tc, tc->rcv_nxt, tc->rcv_nxt);
1200  }
1201 
1202  return error;
1203 }
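/* Editor's note (illustrative example, not part of the original source): the
 * return codes above distinguish how much of the segment the fifo took. For a
 * 1000-byte in-order segment, written == 1000 is the common case
 * (TCP_ERROR_ENQUEUED); written > 1000 means previously buffered out-of-order
 * data became contiguous and advanced rcv_nxt together with this segment;
 * 0 < written < 1000 means the fifo filled up mid-segment
 * (PARTIALLY_ENQUEUED); and written <= 0 is reported as ZERO_RWND or
 * FIFO_FULL depending on the advertised window. */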
1204 
1205 /** Enqueue out-of-order data */
1206 static int
1207 tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b,
1208  u16 data_len)
1209 {
1210  session_t *s0;
1211  int rv, offset;
1212 
1213  ASSERT (seq_gt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));
1214  ASSERT (data_len);
1215 
1216  /* Enqueue out-of-order data with relative offset */
1217  rv = session_enqueue_stream_connection (&tc->connection, b,
1218  vnet_buffer (b)->tcp.seq_number -
1219  tc->rcv_nxt, 0 /* queue event */ ,
1220  0);
1221 
1222  /* Nothing written */
1223  if (rv)
1224  {
1225  TCP_EVT (TCP_EVT_INPUT, tc, 1, data_len, 0);
1226  return TCP_ERROR_FIFO_FULL;
1227  }
1228 
1229  TCP_EVT (TCP_EVT_INPUT, tc, 1, data_len, data_len);
1230  tc->bytes_in += data_len;
1231 
1232  /* Update SACK list if in use */
1233  if (tcp_opts_sack_permitted (&tc->rcv_opts))
1234  {
1235  ooo_segment_t *newest;
1236  u32 start, end;
1237 
1238  s0 = session_get (tc->c_s_index, tc->c_thread_index);
1239 
1240  /* Get the newest segment from the fifo */
1241  newest = svm_fifo_newest_ooo_segment (s0->rx_fifo);
1242  if (newest)
1243  {
1244  offset = ooo_segment_offset_prod (s0->rx_fifo, newest);
1245  ASSERT (offset <= vnet_buffer (b)->tcp.seq_number - tc->rcv_nxt);
1246  start = tc->rcv_nxt + offset;
1247  end = start + ooo_segment_length (s0->rx_fifo, newest);
1248  tcp_update_sack_list (tc, start, end);
1250  TCP_EVT (TCP_EVT_CC_SACKS, tc);
1251  }
1252  }
1253 
1254  return TCP_ERROR_ENQUEUED_OOO;
1255 }
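/* Editor's note (illustrative example, not part of the original source):
 * with rcv_nxt = 1000, an out-of-order segment covering [1500, 2000) is
 * stored in the rx fifo at relative offset 500, and the SACK list is updated
 * with the block [1500, 2000) derived from the fifo's newest ooo segment,
 * which the peer can then use for selective retransmission. */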
1256 
1257 static int
1258 tcp_buffer_discard_bytes (vlib_buffer_t * b, u32 n_bytes_to_drop)
1259 {
1260  u32 discard, first = b->current_length;
1261  vlib_main_t *vm = vlib_get_main ();
1262 
1263  /* Handle multi-buffer segments */
1264  if (n_bytes_to_drop > b->current_length)
1265  {
1266  if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
1267  return -1;
1268  do
1269  {
1270  discard = clib_min (n_bytes_to_drop, b->current_length);
1271  vlib_buffer_advance (b, discard);
1272  b = vlib_get_buffer (vm, b->next_buffer);
1273  n_bytes_to_drop -= discard;
1274  }
1275  while (n_bytes_to_drop);
1276  if (n_bytes_to_drop > first)
1277  b->total_length_not_including_first_buffer -= n_bytes_to_drop - first;
1278  }
1279  else
1280  vlib_buffer_advance (b, n_bytes_to_drop);
1281  vnet_buffer (b)->tcp.data_len -= n_bytes_to_drop;
1282  return 0;
1283 }
1284 
1285 /**
1286  * Receive buffer for connection and handle acks
1287  *
1288  * It handles both in order or out-of-order data.
1289  */
1290 static int
1291 tcp_segment_rcv (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1292  vlib_buffer_t * b)
1293 {
1294  u32 error, n_bytes_to_drop, n_data_bytes;
1295 
1296  vlib_buffer_advance (b, vnet_buffer (b)->tcp.data_offset);
1297  n_data_bytes = vnet_buffer (b)->tcp.data_len;
1298  ASSERT (n_data_bytes);
1299  tc->data_segs_in += 1;
1300 
1301  /* Handle out-of-order data */
1302  if (PREDICT_FALSE (vnet_buffer (b)->tcp.seq_number != tc->rcv_nxt))
1303  {
1304  /* Old sequence numbers allowed through because they overlapped
1305  * the rx window */
1306  if (seq_lt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt))
1307  {
1308  /* Completely in the past (possible retransmit). Ack
1309  * retransmissions since we may not have any data to send */
1310  if (seq_leq (vnet_buffer (b)->tcp.seq_end, tc->rcv_nxt))
1311  {
1312  tcp_program_dupack (tc);
1313  tc->errors.below_data_wnd++;
1314  error = TCP_ERROR_SEGMENT_OLD;
1315  goto done;
1316  }
1317 
1318  /* Chop off the bytes in the past and see if what is left
1319  * can be enqueued in order */
1320  n_bytes_to_drop = tc->rcv_nxt - vnet_buffer (b)->tcp.seq_number;
1321  n_data_bytes -= n_bytes_to_drop;
1322  vnet_buffer (b)->tcp.seq_number = tc->rcv_nxt;
1323  if (tcp_buffer_discard_bytes (b, n_bytes_to_drop))
1324  {
1325  error = TCP_ERROR_SEGMENT_OLD;
1326  goto done;
1327  }
1328  goto in_order;
1329  }
1330 
1331  /* RFC2581: Enqueue and send DUPACK for fast retransmit */
1332  error = tcp_session_enqueue_ooo (tc, b, n_data_bytes);
1333  tcp_program_dupack (tc);
1334  TCP_EVT (TCP_EVT_DUPACK_SENT, tc, vnet_buffer (b)->tcp);
1335  tc->errors.above_data_wnd += seq_gt (vnet_buffer (b)->tcp.seq_end,
1336  tc->rcv_las + tc->rcv_wnd);
1337  goto done;
1338  }
1339 
1340 in_order:
1341 
1342  /* In order data, enqueue. Fifo figures out by itself if any out-of-order
1343  * segments can be enqueued after fifo tail offset changes. */
1344  error = tcp_session_enqueue_data (tc, b, n_data_bytes);
1345  tcp_program_ack (tc);
1346 
1347 done:
1348  return error;
1349 }
1350 
1351 typedef struct
1352 {
1353  tcp_header_t tcp_header;
1354  tcp_connection_t tcp_connection;
1355 } tcp_rx_trace_t;
1356 
1357 static u8 *
1358 format_tcp_rx_trace (u8 * s, va_list * args)
1359 {
1360  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1361  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1362  tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
1363  tcp_connection_t *tc = &t->tcp_connection;
1364  u32 indent = format_get_indent (s);
1365 
1366  s = format (s, "%U state %U\n%U%U", format_tcp_connection_id, tc,
1367  format_tcp_state, tc->state, format_white_space, indent,
1368  format_tcp_header, &t->tcp_header, 128);
1369 
1370  return s;
1371 }
1372 
1373 static u8 *
1374 format_tcp_rx_trace_short (u8 * s, va_list * args)
1375 {
1376  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1377  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1378  tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
1379 
1380  s = format (s, "%d -> %d (%U)",
1381  clib_net_to_host_u16 (t->tcp_header.dst_port),
1382  clib_net_to_host_u16 (t->tcp_header.src_port), format_tcp_state,
1383  t->tcp_connection.state);
1384 
1385  return s;
1386 }
1387 
1388 static void
1389 tcp_set_rx_trace_data (tcp_rx_trace_t * t0, tcp_connection_t * tc0,
1390  tcp_header_t * th0, vlib_buffer_t * b0, u8 is_ip4)
1391 {
1392  if (tc0)
1393  {
1394  clib_memcpy_fast (&t0->tcp_connection, tc0,
1395  sizeof (t0->tcp_connection));
1396  }
1397  else
1398  {
1399  th0 = tcp_buffer_hdr (b0);
1400  }
1401  clib_memcpy_fast (&t0->tcp_header, th0, sizeof (t0->tcp_header));
1402 }
1403 
1404 static void
1405 tcp_established_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
1406  vlib_frame_t * frame, u8 is_ip4)
1407 {
1408  u32 *from, n_left;
1409 
1410  n_left = frame->n_vectors;
1411  from = vlib_frame_vector_args (frame);
1412 
1413  while (n_left >= 1)
1414  {
1415  tcp_connection_t *tc0;
1416  tcp_rx_trace_t *t0;
1417  tcp_header_t *th0;
1418  vlib_buffer_t *b0;
1419  u32 bi0;
1420 
1421  bi0 = from[0];
1422  b0 = vlib_get_buffer (vm, bi0);
1423 
1424  if (b0->flags & VLIB_BUFFER_IS_TRACED)
1425  {
1426  t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
1427  tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
1428  vm->thread_index);
1429  th0 = tcp_buffer_hdr (b0);
1430  tcp_set_rx_trace_data (t0, tc0, th0, b0, is_ip4);
1431  }
1432 
1433  from += 1;
1434  n_left -= 1;
1435  }
1436 }
1437 
1438 always_inline void
1439 tcp_node_inc_counter_i (vlib_main_t * vm, u32 tcp4_node, u32 tcp6_node,
1440  u8 is_ip4, u32 evt, u32 val)
1441 {
1442  if (is_ip4)
1443  vlib_node_increment_counter (vm, tcp4_node, evt, val);
1444  else
1445  vlib_node_increment_counter (vm, tcp6_node, evt, val);
1446 }
1447 
1448 #define tcp_maybe_inc_counter(node_id, err, count) \
1449 { \
1450  if (next0 != tcp_next_drop (is_ip4)) \
1451  tcp_node_inc_counter_i (vm, tcp4_##node_id##_node.index, \
1452  tcp6_##node_id##_node.index, is_ip4, err, \
1453  1); \
1454 }
1455 #define tcp_inc_counter(node_id, err, count) \
1456  tcp_node_inc_counter_i (vm, tcp4_##node_id##_node.index, \
1457  tcp6_##node_id##_node.index, is_ip4, \
1458  err, count)
1459 #define tcp_maybe_inc_err_counter(cnts, err) \
1460 { \
1461  cnts[err] += (next0 != tcp_next_drop (is_ip4)); \
1462 }
1463 #define tcp_inc_err_counter(cnts, err, val) \
1464 { \
1465  cnts[err] += val; \
1466 }
1467 #define tcp_store_err_counters(node_id, cnts) \
1468 { \
1469  int i; \
1470  for (i = 0; i < TCP_N_ERROR; i++) \
1471  if (cnts[i]) \
1472  tcp_inc_counter(node_id, i, cnts[i]); \
1473 }
1474 
1475 
1476 always_inline uword
1477 tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
1478  vlib_frame_t * frame, int is_ip4)
1479 {
1480  u32 thread_index = vm->thread_index, errors = 0;
1481  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
1482  u32 n_left_from, *from, *first_buffer;
1483  u16 err_counters[TCP_N_ERROR] = { 0 };
1484 
1485  if (node->flags & VLIB_NODE_FLAG_TRACE)
1486  tcp_established_trace_frame (vm, node, frame, is_ip4);
1487 
1488  first_buffer = from = vlib_frame_vector_args (frame);
1489  n_left_from = frame->n_vectors;
1490 
1491  while (n_left_from > 0)
1492  {
1493  u32 bi0, error0 = TCP_ERROR_ACK_OK;
1494  vlib_buffer_t *b0;
1495  tcp_header_t *th0;
1496  tcp_connection_t *tc0;
1497 
1498  if (n_left_from > 1)
1499  {
1500  vlib_buffer_t *pb;
1501  pb = vlib_get_buffer (vm, from[1]);
1502  vlib_prefetch_buffer_header (pb, LOAD);
1503  CLIB_PREFETCH (pb->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
1504  }
1505 
1506  bi0 = from[0];
1507  from += 1;
1508  n_left_from -= 1;
1509 
1510  b0 = vlib_get_buffer (vm, bi0);
1511  tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
1512  thread_index);
1513 
1514  if (PREDICT_FALSE (tc0 == 0))
1515  {
1516  error0 = TCP_ERROR_INVALID_CONNECTION;
1517  goto done;
1518  }
1519 
1520  th0 = tcp_buffer_hdr (b0);
1521 
1522  /* TODO header prediction fast path */
1523 
1524  /* 1-4: check SEQ, RST, SYN */
1525  if (PREDICT_FALSE (tcp_segment_validate (wrk, tc0, b0, th0, &error0)))
1526  {
1527  TCP_EVT (TCP_EVT_SEG_INVALID, tc0, vnet_buffer (b0)->tcp);
1528  goto done;
1529  }
1530 
1531  /* 5: check the ACK field */
1532  if (PREDICT_FALSE (tcp_rcv_ack (wrk, tc0, b0, th0, &error0)))
1533  goto done;
1534 
1535  /* 6: check the URG bit TODO */
1536 
1537  /* 7: process the segment text */
1538  if (vnet_buffer (b0)->tcp.data_len)
1539  error0 = tcp_segment_rcv (wrk, tc0, b0);
1540 
1541  /* 8: check the FIN bit */
1542  if (PREDICT_FALSE (tcp_is_fin (th0)))
1543  tcp_rcv_fin (wrk, tc0, b0, &error0);
1544 
1545  done:
1546  tcp_inc_err_counter (err_counters, error0, 1);
1547  }
1548 
1549  errors = session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP,
1550  thread_index);
1551  err_counters[TCP_ERROR_MSG_QUEUE_FULL] = errors;
1552  tcp_store_err_counters (established, err_counters);
1554  tcp_handle_disconnects (wrk);
1555  vlib_buffer_free (vm, first_buffer, frame->n_vectors);
1556 
1557  return frame->n_vectors;
1558 }
1559 
1560 VLIB_NODE_FN (tcp4_established_node) (vlib_main_t * vm,
1561  vlib_node_runtime_t * node,
1562  vlib_frame_t * from_frame)
1563 {
1564  return tcp46_established_inline (vm, node, from_frame, 1 /* is_ip4 */ );
1565 }
1566 
1567 VLIB_NODE_FN (tcp6_established_node) (vlib_main_t * vm,
1568  vlib_node_runtime_t * node,
1569  vlib_frame_t * from_frame)
1570 {
1571  return tcp46_established_inline (vm, node, from_frame, 0 /* is_ip4 */ );
1572 }
1573 
1574 /* *INDENT-OFF* */
1575 VLIB_REGISTER_NODE (tcp4_established_node) =
1576 {
1577  .name = "tcp4-established",
1578  /* Takes a vector of packets. */
1579  .vector_size = sizeof (u32),
1580  .n_errors = TCP_N_ERROR,
1581  .error_strings = tcp_error_strings,
1582  .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
1583  .next_nodes =
1584  {
1585 #define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
1586  foreach_tcp_state_next
1587 #undef _
1588  },
1589  .format_trace = format_tcp_rx_trace_short,
1590 };
1591 /* *INDENT-ON* */
1592 
1593 /* *INDENT-OFF* */
1594 VLIB_REGISTER_NODE (tcp6_established_node) =
1595 {
1596  .name = "tcp6-established",
1597  /* Takes a vector of packets. */
1598  .vector_size = sizeof (u32),
1599  .n_errors = TCP_N_ERROR,
1600  .error_strings = tcp_error_strings,
1601  .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
1602  .next_nodes =
1603  {
1604 #define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
1605  foreach_tcp_state_next
1606 #undef _
1607  },
1608  .format_trace = format_tcp_rx_trace_short,
1609 };
1610 /* *INDENT-ON* */
1611 
1612 
1613 static u8
1614 tcp_lookup_is_valid (tcp_connection_t * tc, vlib_buffer_t * b,
1615  tcp_header_t * hdr)
1616 {
1617  transport_connection_t *tmp = 0;
1618  u64 handle;
1619 
1620  if (!tc)
1621  return 1;
1622 
1623  /* Proxy case */
1624  if (tc->c_lcl_port == 0 && tc->state == TCP_STATE_LISTEN)
1625  return 1;
1626 
1627  u8 is_ip_valid = 0, val_l, val_r;
1628 
1629  if (tc->connection.is_ip4)
1630  {
1631  ip4_header_t *ip4_hdr = vlib_buffer_get_current (b);
1632 
1633  val_l = !ip4_address_compare (&ip4_hdr->dst_address,
1634  &tc->connection.lcl_ip.ip4);
1635  val_l = val_l || ip_is_zero (&tc->connection.lcl_ip, 1);
1636  val_r = !ip4_address_compare (&ip4_hdr->src_address,
1637  &tc->connection.rmt_ip.ip4);
1638  val_r = val_r || tc->state == TCP_STATE_LISTEN;
1639  is_ip_valid = val_l && val_r;
1640  }
1641  else
1642  {
1643  ip6_header_t *ip6_hdr = vlib_buffer_get_current (b);
1644 
1645  val_l = !ip6_address_compare (&ip6_hdr->dst_address,
1646  &tc->connection.lcl_ip.ip6);
1647  val_l = val_l || ip_is_zero (&tc->connection.lcl_ip, 0);
1648  val_r = !ip6_address_compare (&ip6_hdr->src_address,
1649  &tc->connection.rmt_ip.ip6);
1650  val_r = val_r || tc->state == TCP_STATE_LISTEN;
1651  is_ip_valid = val_l && val_r;
1652  }
1653 
1654  u8 is_valid = (tc->c_lcl_port == hdr->dst_port
1655  && (tc->state == TCP_STATE_LISTEN
1656  || tc->c_rmt_port == hdr->src_port) && is_ip_valid);
1657 
1658  if (!is_valid)
1659  {
1660  handle = session_lookup_half_open_handle (&tc->connection);
1661  tmp = session_lookup_half_open_connection (handle & 0xFFFFFFFF,
1662  tc->c_proto, tc->c_is_ip4);
1663 
1664  if (tmp)
1665  {
1666  if (tmp->lcl_port == hdr->dst_port
1667  && tmp->rmt_port == hdr->src_port)
1668  {
1669  TCP_DBG ("half-open is valid!");
1670  is_valid = 1;
1671  }
1672  }
1673  }
1674  return is_valid;
1675 }
1676 
1677 /**
1678  * Lookup transport connection
1679  */
1680 static tcp_connection_t *
1681 tcp_lookup_connection (u32 fib_index, vlib_buffer_t * b, u8 thread_index,
1682  u8 is_ip4)
1683 {
1684  tcp_header_t *tcp;
1685  transport_connection_t *tconn;
1686  tcp_connection_t *tc;
1687  u8 is_filtered = 0;
1688  if (is_ip4)
1689  {
1690  ip4_header_t *ip4;
1691  ip4 = vlib_buffer_get_current (b);
1692  tcp = ip4_next_header (ip4);
1693  tconn = session_lookup_connection_wt4 (fib_index,
1694  &ip4->dst_address,
1695  &ip4->src_address,
1696  tcp->dst_port,
1697  tcp->src_port,
1698  TRANSPORT_PROTO_TCP,
1699  thread_index, &is_filtered);
1700  tc = tcp_get_connection_from_transport (tconn);
1701  ASSERT (tcp_lookup_is_valid (tc, b, tcp));
1702  }
1703  else
1704  {
1705  ip6_header_t *ip6;
1706  ip6 = vlib_buffer_get_current (b);
1707  tcp = ip6_next_header (ip6);
1708  tconn = session_lookup_connection_wt6 (fib_index,
1709  &ip6->dst_address,
1710  &ip6->src_address,
1711  tcp->dst_port,
1712  tcp->src_port,
1713  TRANSPORT_PROTO_TCP,
1714  thread_index, &is_filtered);
1715  tc = tcp_get_connection_from_transport (tconn);
1716  ASSERT (tcp_lookup_is_valid (tc, b, tcp));
1717  }
1718  return tc;
1719 }
1720 
1721 static tcp_connection_t *
1722 tcp_lookup_listener (vlib_buffer_t * b, u32 fib_index, int is_ip4)
1723 {
1724  session_t *s;
1725 
1726  if (is_ip4)
1727  {
1729  tcp_header_t *tcp = tcp_buffer_hdr (b);
1730  s = session_lookup_listener4 (fib_index,
1731  &ip4->dst_address,
1732  tcp->dst_port, TRANSPORT_PROTO_TCP, 1);
1733  }
1734  else
1735  {
1737  tcp_header_t *tcp = tcp_buffer_hdr (b);
1738  s = session_lookup_listener6 (fib_index,
1739  &ip6->dst_address,
1740  tcp->dst_port, TRANSPORT_PROTO_TCP, 1);
1741 
1742  }
1743  if (PREDICT_TRUE (s != 0))
1745  (TRANSPORT_PROTO_TCP,
1746  s->connection_index));
1747  else
1748  return 0;
1749 }
1750 
1751 always_inline void
1752 tcp_check_tx_offload (tcp_connection_t * tc, int is_ipv4)
1753 {
1754  vnet_main_t *vnm = vnet_get_main ();
1755  const dpo_id_t *dpo;
1756  const load_balance_t *lb;
1757  vnet_hw_interface_t *hw_if;
1758  u32 sw_if_idx, lb_idx;
1759 
1760  if (is_ipv4)
1761  {
1762  ip4_address_t *dst_addr = &(tc->c_rmt_ip.ip4);
1763  lb_idx = ip4_fib_forwarding_lookup (tc->c_fib_index, dst_addr);
1764  }
1765  else
1766  {
1767  ip6_address_t *dst_addr = &(tc->c_rmt_ip.ip6);
1768  lb_idx = ip6_fib_table_fwding_lookup (tc->c_fib_index, dst_addr);
1769  }
1770 
1771  lb = load_balance_get (lb_idx);
1772  if (PREDICT_FALSE (lb->lb_n_buckets > 1))
1773  return;
1774  dpo = load_balance_get_bucket_i (lb, 0);
1775 
1776  sw_if_idx = dpo_get_urpf (dpo);
1777  if (PREDICT_FALSE (sw_if_idx == ~0))
1778  return;
1779 
1780  hw_if = vnet_get_sup_hw_interface (vnm, sw_if_idx);
1782  tc->cfg_flags |= TCP_CFG_F_TSO;
1783 }
1784 
1785 always_inline uword
1786 tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
1787  vlib_frame_t * from_frame, int is_ip4)
1788 {
1789  u32 n_left_from, *from, *first_buffer, errors = 0;
1790  u32 my_thread_index = vm->thread_index;
1791  tcp_worker_ctx_t *wrk = tcp_get_worker (my_thread_index);
1792 
1793  from = first_buffer = vlib_frame_vector_args (from_frame);
1794  n_left_from = from_frame->n_vectors;
1795 
1796  while (n_left_from > 0)
1797  {
1798  u32 bi0, ack0, seq0, error0 = TCP_ERROR_NONE;
1799  tcp_connection_t *tc0, *new_tc0;
1800  tcp_header_t *tcp0 = 0;
1801  tcp_rx_trace_t *t0;
1802  vlib_buffer_t *b0;
1803 
1804  bi0 = from[0];
1805  from += 1;
1806  n_left_from -= 1;
1807 
1808  b0 = vlib_get_buffer (vm, bi0);
1809  tc0 =
1810  tcp_half_open_connection_get (vnet_buffer (b0)->tcp.connection_index);
1811  if (PREDICT_FALSE (tc0 == 0))
1812  {
1813  error0 = TCP_ERROR_INVALID_CONNECTION;
1814  goto drop;
1815  }
1816 
1817  /* Half-open completed recently but the connection wasn't removed
1818  * yet by the owning thread */
1819  if (PREDICT_FALSE (tc0->flags & TCP_CONN_HALF_OPEN_DONE))
1820  {
1821  /* Make sure the connection actually exists */
1822  ASSERT (tcp_lookup_connection (tc0->c_fib_index, b0,
1823  my_thread_index, is_ip4));
1824  error0 = TCP_ERROR_SPURIOUS_SYN_ACK;
1825  goto drop;
1826  }
1827 
1828  ack0 = vnet_buffer (b0)->tcp.ack_number;
1829  seq0 = vnet_buffer (b0)->tcp.seq_number;
1830  tcp0 = tcp_buffer_hdr (b0);
1831 
1832  /* Crude check to see if the connection handle does not match
1833  * the packet. Probably connection just switched to established */
1834  if (PREDICT_FALSE (tcp0->dst_port != tc0->c_lcl_port
1835  || tcp0->src_port != tc0->c_rmt_port))
1836  {
1837  error0 = TCP_ERROR_INVALID_CONNECTION;
1838  goto drop;
1839  }
1840 
1841  if (PREDICT_FALSE (!tcp_ack (tcp0) && !tcp_rst (tcp0)
1842  && !tcp_syn (tcp0)))
1843  {
1844  error0 = TCP_ERROR_SEGMENT_INVALID;
1845  goto drop;
1846  }
1847 
1848  /* SYNs consume sequence numbers */
1849  vnet_buffer (b0)->tcp.seq_end += tcp_is_syn (tcp0);
1850 
1851  /*
1852  * 1. check the ACK bit
1853  */
1854 
1855  /*
1856  * If the ACK bit is set
1857  * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send a reset (unless
1858  * the RST bit is set, if so drop the segment and return)
1859  * <SEQ=SEG.ACK><CTL=RST>
1860  * and discard the segment. Return.
1861  * If SND.UNA =< SEG.ACK =< SND.NXT then the ACK is acceptable.
1862  */
1863  if (tcp_ack (tcp0))
1864  {
1865  if (seq_leq (ack0, tc0->iss) || seq_gt (ack0, tc0->snd_nxt))
1866  {
1867  if (!tcp_rst (tcp0))
1868  tcp_send_reset_w_pkt (tc0, b0, my_thread_index, is_ip4);
1869  error0 = TCP_ERROR_RCV_WND;
1870  goto drop;
1871  }
1872 
1873  /* Make sure ACK is valid */
1874  if (seq_gt (tc0->snd_una, ack0))
1875  {
1876  error0 = TCP_ERROR_ACK_INVALID;
1877  goto drop;
1878  }
1879  }
1880 
1881  /*
1882  * 2. check the RST bit
1883  */
1884 
1885  if (tcp_rst (tcp0))
1886  {
1887  /* If ACK is acceptable, signal client that peer is not
1888  * willing to accept connection and drop connection*/
1889  if (tcp_ack (tcp0))
1890  tcp_rcv_rst (wrk, tc0);
1891  error0 = TCP_ERROR_RST_RCVD;
1892  goto drop;
1893  }
1894 
1895  /*
1896  * 3. check the security and precedence (skipped)
1897  */
1898 
1899  /*
1900  * 4. check the SYN bit
1901  */
1902 
1903  /* No SYN flag. Drop. */
1904  if (!tcp_syn (tcp0))
1905  {
1906  error0 = TCP_ERROR_SEGMENT_INVALID;
1907  goto drop;
1908  }
1909 
1910  /* Parse options */
1911  if (tcp_options_parse (tcp0, &tc0->rcv_opts, 1))
1912  {
1913  error0 = TCP_ERROR_OPTIONS;
1914  goto drop;
1915  }
1916 
1917  /* Valid SYN or SYN-ACK. Move connection from half-open pool to
1918  * current thread pool. */
1919  new_tc0 = tcp_connection_alloc_w_base (my_thread_index, tc0);
1920  new_tc0->rcv_nxt = vnet_buffer (b0)->tcp.seq_end;
1921  new_tc0->irs = seq0;
1922  new_tc0->timers[TCP_TIMER_RETRANSMIT_SYN] = TCP_TIMER_HANDLE_INVALID;
1923  new_tc0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1924 
1925  if (tcp_opts_tstamp (&new_tc0->rcv_opts))
1926  {
1927  new_tc0->tsval_recent = new_tc0->rcv_opts.tsval;
1928  new_tc0->tsval_recent_age = tcp_time_now ();
1929  }
1930 
1931  if (tcp_opts_wscale (&new_tc0->rcv_opts))
1932  new_tc0->snd_wscale = new_tc0->rcv_opts.wscale;
1933  else
1934  new_tc0->rcv_wscale = 0;
1935 
1936  new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
1937  << new_tc0->snd_wscale;
1938  new_tc0->snd_wl1 = seq0;
1939  new_tc0->snd_wl2 = ack0;
1940 
1941  tcp_connection_init_vars (new_tc0);
1942 
1943  /* SYN-ACK: See if we can switch to ESTABLISHED state */
1944  if (PREDICT_TRUE (tcp_ack (tcp0)))
1945  {
1946  /* Our SYN is ACKed: we have iss < ack = snd_una */
1947 
1948  /* TODO Dequeue acknowledged segments if we support Fast Open */
1949  new_tc0->snd_una = ack0;
1950  new_tc0->state = TCP_STATE_ESTABLISHED;
1951 
1952  /* Make sure las is initialized for the wnd computation */
1953  new_tc0->rcv_las = new_tc0->rcv_nxt;
1954 
1955  /* Notify app that we have connection. If session layer can't
1956  * allocate session send reset */
1957  if (session_stream_connect_notify (&new_tc0->connection,
1958  SESSION_E_NONE))
1959  {
1960  tcp_send_reset_w_pkt (new_tc0, b0, my_thread_index, is_ip4);
1961  tcp_connection_cleanup (new_tc0);
1962  error0 = TCP_ERROR_CREATE_SESSION_FAIL;
1963  goto cleanup_ho;
1964  }
1965 
1966  new_tc0->tx_fifo_size =
1967  transport_tx_fifo_size (&new_tc0->connection);
1968  /* Update rtt with the syn-ack sample */
1969  tcp_estimate_initial_rtt (new_tc0);
1970  TCP_EVT (TCP_EVT_SYNACK_RCVD, new_tc0);
1971  error0 = TCP_ERROR_SYN_ACKS_RCVD;
1972  }
1973  /* SYN: Simultaneous open. Change state to SYN-RCVD and send SYN-ACK */
1974  else
1975  {
1976  new_tc0->state = TCP_STATE_SYN_RCVD;
1977 
1978  /* Notify app that we have connection */
1979  if (session_stream_connect_notify (&new_tc0->connection,
1980  SESSION_E_NONE))
1981  {
1982  tcp_connection_cleanup (new_tc0);
1983  tcp_send_reset_w_pkt (tc0, b0, my_thread_index, is_ip4);
1984  TCP_EVT (TCP_EVT_RST_SENT, tc0);
1985  error0 = TCP_ERROR_CREATE_SESSION_FAIL;
1986  goto cleanup_ho;
1987  }
1988 
1989  new_tc0->tx_fifo_size =
1990  transport_tx_fifo_size (&new_tc0->connection);
1991  new_tc0->rtt_ts = 0;
1992  tcp_init_snd_vars (new_tc0);
1993  tcp_send_synack (new_tc0);
1994  error0 = TCP_ERROR_SYNS_RCVD;
1995  goto cleanup_ho;
1996  }
1997 
1998  if (!(new_tc0->cfg_flags & TCP_CFG_F_NO_TSO))
1999  tcp_check_tx_offload (new_tc0, is_ip4);
2000 
2001  /* Read data, if any */
2002  if (PREDICT_FALSE (vnet_buffer (b0)->tcp.data_len))
2003  {
2004  clib_warning ("rcvd data in syn-sent");
2005  error0 = tcp_segment_rcv (wrk, new_tc0, b0);
2006  if (error0 == TCP_ERROR_ACK_OK)
2007  error0 = TCP_ERROR_SYN_ACKS_RCVD;
2008  }
2009  else
2010  {
2011  /* Send ack now instead of programming it because connection was
2012  * just established and it's not optional. */
2013  tcp_send_ack (new_tc0);
2014  }
2015 
2016  cleanup_ho:
2017 
2018  /* If this is not the owning thread, wait for syn retransmit to
2019  * expire and cleanup then */
2021  tc0->flags |= TCP_CONN_HALF_OPEN_DONE;
2022 
2023  drop:
2024 
2025  tcp_inc_counter (syn_sent, error0, 1);
2026  if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED) && tcp0 != 0))
2027  {
2028  t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
2029  clib_memcpy_fast (&t0->tcp_header, tcp0, sizeof (t0->tcp_header));
2030  clib_memcpy_fast (&t0->tcp_connection, tc0,
2031  sizeof (t0->tcp_connection));
2032  }
2033  }
2034 
2035  errors = session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP,
2036  my_thread_index);
2037  tcp_inc_counter (syn_sent, TCP_ERROR_MSG_QUEUE_FULL, errors);
2038  vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);
2039  tcp_handle_disconnects (wrk);
2040 
2041  return from_frame->n_vectors;
2042 }
2043 
2044 VLIB_NODE_FN (tcp4_syn_sent_node) (vlib_main_t * vm,
2045  vlib_node_runtime_t * node,
2046  vlib_frame_t * from_frame)
2047 {
2048  return tcp46_syn_sent_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2049 }
2050 
2051 VLIB_NODE_FN (tcp6_syn_sent_node) (vlib_main_t * vm,
2052  vlib_node_runtime_t * node,
2053  vlib_frame_t * from_frame)
2054 {
2055  return tcp46_syn_sent_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2056 }
2057 
2058 /* *INDENT-OFF* */
2059 VLIB_REGISTER_NODE (tcp4_syn_sent_node) =
2060 {
2061  .name = "tcp4-syn-sent",
2062  /* Takes a vector of packets. */
2063  .vector_size = sizeof (u32),
2064  .n_errors = TCP_N_ERROR,
2065  .error_strings = tcp_error_strings,
2066  .n_next_nodes = TCP_SYN_SENT_N_NEXT,
2067  .next_nodes =
2068  {
2069 #define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
2070  foreach_tcp_state_next
2071 #undef _
2072  },
2073  .format_trace = format_tcp_rx_trace_short,
2074 };
2075 /* *INDENT-ON* */
2076 
2077 /* *INDENT-OFF* */
2078 VLIB_REGISTER_NODE (tcp6_syn_sent_node) =
2079 {
2080  .name = "tcp6-syn-sent",
2081  /* Takes a vector of packets. */
2082  .vector_size = sizeof (u32),
2083  .n_errors = TCP_N_ERROR,
2084  .error_strings = tcp_error_strings,
2085  .n_next_nodes = TCP_SYN_SENT_N_NEXT,
2086  .next_nodes =
2087  {
2088 #define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
2089  foreach_tcp_state_next
2090 #undef _
2091  },
2092  .format_trace = format_tcp_rx_trace_short,
2093 };
2094 /* *INDENT-ON* */
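/* The next_nodes tables above are filled in with an X-macro: each _(s, n)
 * pair expands to one designated initializer. A self-contained sketch of the
 * pattern follows; foreach_example_next, EXAMPLE_NEXT_* and
 * example_next_nodes are hypothetical names used only for illustration. */
#define foreach_example_next                    \
  _ (DROP, "error-drop")                        \
  _ (OUTPUT, "example-output")

typedef enum
{
#define _(s,n) EXAMPLE_NEXT_##s,
  foreach_example_next
#undef _
    EXAMPLE_N_NEXT,
} example_next_t;

static char *example_next_nodes[EXAMPLE_N_NEXT] = {
#define _(s,n) [EXAMPLE_NEXT_##s] = n,
  foreach_example_next
#undef _
};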
2095 
2096 /**
2097  * Handles reception for all states except LISTEN, SYN-SENT and ESTABLISHED
2098  * as per RFC793 p. 64
2099  */
2100 always_inline uword
2101 tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2102  vlib_frame_t * from_frame, int is_ip4)
2103 {
2104  u32 thread_index = vm->thread_index, errors = 0, *first_buffer;
2105  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
2106  u32 n_left_from, *from, max_dequeue;
2107 
2108  from = first_buffer = vlib_frame_vector_args (from_frame);
2109  n_left_from = from_frame->n_vectors;
2110 
2111  while (n_left_from > 0)
2112  {
2113  u32 bi0, error0 = TCP_ERROR_NONE;
2114  tcp_header_t *tcp0 = 0;
2115  tcp_connection_t *tc0;
2116  vlib_buffer_t *b0;
2117  u8 is_fin0;
2118 
2119  bi0 = from[0];
2120  from += 1;
2121  n_left_from -= 1;
2122 
2123  b0 = vlib_get_buffer (vm, bi0);
2124  tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
2125  thread_index);
2126  if (PREDICT_FALSE (tc0 == 0))
2127  {
2128  error0 = TCP_ERROR_INVALID_CONNECTION;
2129  goto drop;
2130  }
2131 
2132  tcp0 = tcp_buffer_hdr (b0);
2133  is_fin0 = tcp_is_fin (tcp0);
2134 
2135  if (CLIB_DEBUG)
2136  {
2137  if (!(tc0->connection.flags & TRANSPORT_CONNECTION_F_NO_LOOKUP))
2138  {
2139  tcp_connection_t *tmp;
2140  tmp = tcp_lookup_connection (tc0->c_fib_index, b0, thread_index,
2141  is_ip4);
2142  if (tmp->state != tc0->state)
2143  {
2144  if (tc0->state != TCP_STATE_CLOSED)
2145  clib_warning ("state changed");
2146  goto drop;
2147  }
2148  }
2149  }
2150 
2151  /*
2152  * Special treatment for CLOSED
2153  */
2154  if (PREDICT_FALSE (tc0->state == TCP_STATE_CLOSED))
2155  {
2156  error0 = TCP_ERROR_CONNECTION_CLOSED;
2157  goto drop;
2158  }
2159 
2160  /*
2161  * For all other states (except LISTEN)
2162  */
2163 
2164  /* 1-4: check SEQ, RST, SYN */
2165  if (PREDICT_FALSE (tcp_segment_validate (wrk, tc0, b0, tcp0, &error0)))
2166  goto drop;
2167 
2168  /* 5: check the ACK field */
2169  switch (tc0->state)
2170  {
2171  case TCP_STATE_SYN_RCVD:
2172 
2173  /* Make sure the segment is exactly right */
2174  if (tc0->rcv_nxt != vnet_buffer (b0)->tcp.seq_number || is_fin0)
2175  {
2176  tcp_send_reset_w_pkt (tc0, b0, thread_index, is_ip4);
2177  error0 = TCP_ERROR_SEGMENT_INVALID;
2178  goto drop;
2179  }
2180 
2181  /*
2182  * If the segment acknowledgment is not acceptable, form a
2183  * reset segment,
2184  * <SEQ=SEG.ACK><CTL=RST>
2185  * and send it.
2186  */
2187  if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
2188  {
2189  tcp_send_reset_w_pkt (tc0, b0, thread_index, is_ip4);
2190  error0 = TCP_ERROR_SEGMENT_INVALID;
2191  goto drop;
2192  }
2193 
2194  /* Update rtt and rto */
2197 
2198  /* Switch state to ESTABLISHED */
2199  tc0->state = TCP_STATE_ESTABLISHED;
2200  TCP_EVT (TCP_EVT_STATE_CHANGE, tc0);
2201 
2202  if (!(tc0->cfg_flags & TCP_CFG_F_NO_TSO))
2203  tcp_check_tx_offload (tc0, is_ip4);
2204 
2205  /* Initialize session variables */
2206  tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
2207  tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
2208  << tc0->rcv_opts.wscale;
2209  tc0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
2210  tc0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;
2211 
2212  /* Reset SYN-ACK retransmit and SYN_RCV establish timers */
2214  if (session_stream_accept_notify (&tc0->connection))
2215  {
2216  error0 = TCP_ERROR_MSG_QUEUE_FULL;
2217  tcp_send_reset (tc0);
2218  session_transport_delete_notify (&tc0->connection);
2219  tcp_connection_cleanup (tc0);
2220  goto drop;
2221  }
2222  error0 = TCP_ERROR_ACK_OK;
2223  break;
2224  case TCP_STATE_ESTABLISHED:
2225  /* We can get packets in established state here because they
2226  * were enqueued before state change */
2227  if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
2228  goto drop;
2229 
2230  break;
2231  case TCP_STATE_FIN_WAIT_1:
2232  /* In addition to the processing for the ESTABLISHED state, if
2233  * our FIN is now acknowledged then enter FIN-WAIT-2 and
2234  * continue processing in that state. */
2235  if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
2236  goto drop;
2237 
2238  /* Still have to send the FIN */
2239  if (tc0->flags & TCP_CONN_FINPNDG)
2240  {
2241  /* TX fifo finally drained */
2242  max_dequeue = transport_max_tx_dequeue (&tc0->connection);
2243  if (max_dequeue <= tc0->burst_acked)
2244  tcp_send_fin (tc0);
2245  /* If a FIN was received and data was acked, extend the wait */
2246  else if ((tc0->flags & TCP_CONN_FINRCVD) && tc0->bytes_acked)
2247  tcp_timer_update (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2248  tcp_cfg.closewait_time);
2249  }
2250  /* If FIN is ACKed */
2251  else if (tc0->snd_una == tc0->snd_nxt)
2252  {
2253  /* Stop all retransmit timers because we have nothing more
2254  * to send. */
2256 
2257  /* We already have a FIN but didn't transition to CLOSING
2258  * because of outstanding tx data. Close the connection. */
2259  if (tc0->flags & TCP_CONN_FINRCVD)
2260  {
2261  tcp_connection_set_state (tc0, TCP_STATE_CLOSED);
2262  session_transport_closed_notify (&tc0->connection);
2263  tcp_program_cleanup (wrk, tc0);
2264  goto drop;
2265  }
2266 
2267  tcp_connection_set_state (tc0, TCP_STATE_FIN_WAIT_2);
2268  /* Enable waitclose because we're willing to wait for peer's
2269  * FIN but not indefinitely. */
2270  tcp_timer_set (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2271  tcp_cfg.finwait2_time);
2272 
2273  /* Don't try to deq the FIN acked */
2274  if (tc0->burst_acked > 1)
2275  session_tx_fifo_dequeue_drop (&tc0->connection,
2276  tc0->burst_acked - 1);
2277  tc0->burst_acked = 0;
2278  }
2279  break;
2280  case TCP_STATE_FIN_WAIT_2:
2281  /* In addition to the processing for the ESTABLISHED state, if
2282  * the retransmission queue is empty, the user's CLOSE can be
2283  * acknowledged ("ok") but do not delete the TCB. */
2284  if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
2285  goto drop;
2286  tc0->burst_acked = 0;
2287  break;
2288  case TCP_STATE_CLOSE_WAIT:
2289  /* Do the same processing as for the ESTABLISHED state. */
2290  if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
2291  goto drop;
2292 
2293  if (!(tc0->flags & TCP_CONN_FINPNDG))
2294  break;
2295 
2296  /* Still have outstanding tx data */
2297  max_dequeue = transport_max_tx_dequeue (&tc0->connection);
2298  if (max_dequeue > tc0->burst_acked)
2299  break;
2300 
2301  tcp_send_fin (tc0);
2303  tcp_connection_set_state (tc0, TCP_STATE_LAST_ACK);
2304  tcp_timer_set (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2305  tcp_cfg.lastack_time);
2306  break;
2307  case TCP_STATE_CLOSING:
2308  /* In addition to the processing for the ESTABLISHED state, if
2309  * the ACK acknowledges our FIN then enter the TIME-WAIT state,
2310  * otherwise ignore the segment. */
2311  if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
2312  goto drop;
2313 
2314  if (tc0->snd_una != tc0->snd_nxt)
2315  goto drop;
2316 
2318  tcp_connection_set_state (tc0, TCP_STATE_TIME_WAIT);
2319  tcp_timer_set (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2320  tcp_cfg.timewait_time);
2321  session_transport_closed_notify (&tc0->connection);
2322  goto drop;
2323 
2324  break;
2325  case TCP_STATE_LAST_ACK:
2326  /* The only thing that [should] arrive in this state is an
2327  * acknowledgment of our FIN. If our FIN is now acknowledged,
2328  * delete the TCB, enter the CLOSED state, and return. */
2329 
2330  if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
2331  goto drop;
2332 
2333  /* Apparently our ACK for the peer's FIN was lost */
2334  if (is_fin0 && tc0->snd_una != tc0->snd_nxt)
2335  {
2336  tcp_send_fin (tc0);
2337  goto drop;
2338  }
2339 
2340  tcp_connection_set_state (tc0, TCP_STATE_CLOSED);
2341  session_transport_closed_notify (&tc0->connection);
2342 
2343  /* Don't free the connection from the data path since
2344  * we can't ensure that we have no packets already enqueued
2345  * to output. Rely instead on the waitclose timer */
2347  tcp_program_cleanup (tcp_get_worker (tc0->c_thread_index), tc0);
2348 
2349  goto drop;
2350 
2351  break;
2352  case TCP_STATE_TIME_WAIT:
2353  /* The only thing that can arrive in this state is a
2354  * retransmission of the remote FIN. Acknowledge it, and restart
2355  * the 2 MSL timeout. */
2356 
2357  if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
2358  goto drop;
2359 
2360  if (!is_fin0)
2361  goto drop;
2362 
2363  tcp_program_ack (tc0);
2364  tcp_timer_update (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2365  tcp_cfg.timewait_time);
2366  goto drop;
2367 
2368  break;
2369  default:
2370  ASSERT (0);
2371  }
2372 
2373  /* 6: check the URG bit TODO */
2374 
2375  /* 7: process the segment text */
2376  switch (tc0->state)
2377  {
2378  case TCP_STATE_ESTABLISHED:
2379  case TCP_STATE_FIN_WAIT_1:
2380  case TCP_STATE_FIN_WAIT_2:
2381  if (vnet_buffer (b0)->tcp.data_len)
2382  error0 = tcp_segment_rcv (wrk, tc0, b0);
2383  break;
2384  case TCP_STATE_CLOSE_WAIT:
2385  case TCP_STATE_CLOSING:
2386  case TCP_STATE_LAST_ACK:
2387  case TCP_STATE_TIME_WAIT:
2388  /* This should not occur, since a FIN has been received from the
2389  * remote side. Ignore the segment text. */
2390  break;
2391  }
2392 
2393  /* 8: check the FIN bit */
2394  if (!is_fin0)
2395  goto drop;
2396 
2397  TCP_EVT (TCP_EVT_FIN_RCVD, tc0);
2398 
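 /* A FIN consumes exactly one sequence number, so each case below advances
  * rcv_nxt by 1 even though no payload byte is dequeued; e.g. rcv_nxt 1000
  * becomes 1001 once the peer's FIN is accounted for. */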
2399  switch (tc0->state)
2400  {
2401  case TCP_STATE_ESTABLISHED:
2402  /* Account for the FIN and send ack */
2403  tc0->rcv_nxt += 1;
2404  tcp_program_ack (tc0);
2405  tcp_connection_set_state (tc0, TCP_STATE_CLOSE_WAIT);
2406  tcp_program_disconnect (wrk, tc0);
2407  tcp_timer_update (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2408  tcp_cfg.closewait_time);
2409  break;
2410  case TCP_STATE_SYN_RCVD:
2411  /* Send FIN-ACK, enter LAST-ACK and because the app was not
2412  * notified yet, set a cleanup timer instead of relying on
2413  * disconnect notify and the implicit close call. */
2415  tc0->rcv_nxt += 1;
2416  tcp_send_fin (tc0);
2417  tcp_connection_set_state (tc0, TCP_STATE_LAST_ACK);
2418  tcp_timer_set (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2419  tcp_cfg.lastack_time);
2420  break;
2421  case TCP_STATE_CLOSE_WAIT:
2422  case TCP_STATE_CLOSING:
2423  case TCP_STATE_LAST_ACK:
2424  /* move along .. */
2425  break;
2426  case TCP_STATE_FIN_WAIT_1:
2427  tc0->rcv_nxt += 1;
2428 
2429  if (tc0->flags & TCP_CONN_FINPNDG)
2430  {
2431  /* If data is outstanding, stay in FIN_WAIT_1 and try to finish
2432  * sending it. Since we already received a fin, do not wait
2433  * for too long. */
2434  tc0->flags |= TCP_CONN_FINRCVD;
2435  tcp_timer_update (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2436  tcp_cfg.closewait_time);
2437  }
2438  else
2439  {
2440  tcp_connection_set_state (tc0, TCP_STATE_CLOSING);
2441  tcp_program_ack (tc0);
2442  /* Wait for ACK for our FIN but not forever */
2443  tcp_timer_update (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2444  tcp_cfg.closing_time);
2445  }
2446  break;
2447  case TCP_STATE_FIN_WAIT_2:
2448  /* Got FIN, send ACK! Be more aggressive with resource cleanup */
2449  tc0->rcv_nxt += 1;
2450  tcp_connection_set_state (tc0, TCP_STATE_TIME_WAIT);
2452  tcp_timer_set (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2453  tcp_cfg.timewait_time);
2454  tcp_program_ack (tc0);
2455  session_transport_closed_notify (&tc0->connection);
2456  break;
2457  case TCP_STATE_TIME_WAIT:
2458  /* Remain in the TIME-WAIT state. Restart the time-wait
2459  * timeout.
2460  */
2461  tcp_timer_update (&wrk->timer_wheel, tc0, TCP_TIMER_WAITCLOSE,
2462  tcp_cfg.timewait_time);
2463  break;
2464  }
2465  error0 = TCP_ERROR_FIN_RCVD;
2466 
2467  drop:
2468 
2469  tcp_inc_counter (rcv_process, error0, 1);
2470  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2471  {
2472  tcp_rx_trace_t *t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
2473  tcp_set_rx_trace_data (t0, tc0, tcp0, b0, is_ip4);
2474  }
2475  }
2476 
2477  errors = session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP,
2478  thread_index);
2479  tcp_inc_counter (rcv_process, TCP_ERROR_MSG_QUEUE_FULL, errors);
2481  tcp_handle_disconnects (wrk);
2482  vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);
2483 
2484  return from_frame->n_vectors;
2485 }
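/* Worked example of the send-window computation done when SYN-RCVD moves to
 * ESTABLISHED (lines 2207-2208 above): the peer's 16-bit window field is
 * left-shifted by the window scale it advertised. The helper below is an
 * illustration only; example_scaled_snd_wnd is a hypothetical name and not
 * part of this file. */
static inline u32
example_scaled_snd_wnd (u16 raw_window, u8 wscale)
{
  /* e.g. raw_window = 8192 and wscale = 7 gives 8192 << 7 = 1048576 bytes */
  return (u32) raw_window << wscale;
}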
2486 
2487 VLIB_NODE_FN (tcp4_rcv_process_node) (vlib_main_t * vm,
2488  vlib_node_runtime_t * node,
2489  vlib_frame_t * from_frame)
2490 {
2491  return tcp46_rcv_process_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2492 }
2493 
2494 VLIB_NODE_FN (tcp6_rcv_process_node) (vlib_main_t * vm,
2495  vlib_node_runtime_t * node,
2496  vlib_frame_t * from_frame)
2497 {
2498  return tcp46_rcv_process_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2499 }
2500 
2501 /* *INDENT-OFF* */
2502 VLIB_REGISTER_NODE (tcp4_rcv_process_node) =
2503 {
2504  .name = "tcp4-rcv-process",
2505  /* Takes a vector of packets. */
2506  .vector_size = sizeof (u32),
2507  .n_errors = TCP_N_ERROR,
2508  .error_strings = tcp_error_strings,
2509  .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
2510  .next_nodes =
2511  {
2512 #define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
2513  foreach_tcp_state_next
2514 #undef _
2515  },
2516  .format_trace = format_tcp_rx_trace_short,
2517 };
2518 /* *INDENT-ON* */
2519 
2520 /* *INDENT-OFF* */
2521 VLIB_REGISTER_NODE (tcp6_rcv_process_node) =
2522 {
2523  .name = "tcp6-rcv-process",
2524  /* Takes a vector of packets. */
2525  .vector_size = sizeof (u32),
2526  .n_errors = TCP_N_ERROR,
2527  .error_strings = tcp_error_strings,
2528  .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
2529  .next_nodes =
2530  {
2531 #define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
2532  foreach_tcp_state_next
2533 #undef _
2534  },
2535  .format_trace = format_tcp_rx_trace_short,
2536 };
2537 /* *INDENT-ON* */
2538 
2539 /**
2540  * LISTEN state processing as per RFC 793 p. 65
2541  */
2542 always_inline uword
2543 tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2544  vlib_frame_t * from_frame, int is_ip4)
2545 {
2546  u32 n_left_from, *from, n_syns = 0, *first_buffer;
2547  u32 thread_index = vm->thread_index;
2548 
2549  from = first_buffer = vlib_frame_vector_args (from_frame);
2550  n_left_from = from_frame->n_vectors;
2551 
2552  while (n_left_from > 0)
2553  {
2554  u32 bi, error = TCP_ERROR_NONE;
2555  tcp_connection_t *lc, *child;
2556  vlib_buffer_t *b;
2557 
2558  bi = from[0];
2559  from += 1;
2560  n_left_from -= 1;
2561 
2562  b = vlib_get_buffer (vm, bi);
2563 
2564  lc = tcp_listener_get (vnet_buffer (b)->tcp.connection_index);
2565  if (PREDICT_FALSE (lc == 0))
2566  {
2567  tcp_connection_t *tc;
2568  tc = tcp_connection_get (vnet_buffer (b)->tcp.connection_index,
2569  thread_index);
2570  if (tc->state != TCP_STATE_TIME_WAIT)
2571  {
2572  error = TCP_ERROR_CREATE_EXISTS;
2573  goto done;
2574  }
2575  lc = tcp_lookup_listener (b, tc->c_fib_index, is_ip4);
2576  /* clean up the old session */
2577  tcp_connection_del (tc);
2578  }
2579 
2580  /* Make sure connection wasn't just created */
2581  child = tcp_lookup_connection (lc->c_fib_index, b, thread_index,
2582  is_ip4);
2583  if (PREDICT_FALSE (child->state != TCP_STATE_LISTEN))
2584  {
2585  error = TCP_ERROR_CREATE_EXISTS;
2586  goto done;
2587  }
2588 
2589  /* Create child session. For syn-flood protection use filter */
2590 
2591  /* 1. first check for an RST: handled in dispatch */
2592  /* if (tcp_rst (th0))
2593  goto drop;
2594  */
2595 
2596  /* 2. second check for an ACK: handled in dispatch */
2597  /* if (tcp_ack (th0))
2598  {
2599  tcp_send_reset (b0, is_ip4);
2600  goto drop;
2601  }
2602  */
2603 
2604  /* 3. check for a SYN (did that already) */
2605 
2606  /* Create child session and send SYN-ACK */
2607  child = tcp_connection_alloc (thread_index);
2608 
2609  if (tcp_options_parse (tcp_buffer_hdr (b), &child->rcv_opts, 1))
2610  {
2611  error = TCP_ERROR_OPTIONS;
2612  tcp_connection_free (child);
2613  goto done;
2614  }
2615 
2616  tcp_init_w_buffer (child, b, is_ip4);
2617 
2618  child->state = TCP_STATE_SYN_RCVD;
2619  child->c_fib_index = lc->c_fib_index;
2620  child->cc_algo = lc->cc_algo;
2621  tcp_connection_init_vars (child);
2622  child->rto = TCP_RTO_MIN;
2623 
2624  /*
2625  * This initializes the elog track and must be done before the synack.
2626  * We also do it before a possible tcp_connection_cleanup() since that
2627  * generates a TCP_EVT_DELETE event.
2628  */
2629  TCP_EVT (TCP_EVT_SYN_RCVD, child, 1);
2630 
2631  if (session_stream_accept (&child->connection, lc->c_s_index,
2632  lc->c_thread_index, 0 /* notify */ ))
2633  {
2634  tcp_connection_cleanup (child);
2635  error = TCP_ERROR_CREATE_SESSION_FAIL;
2636  goto done;
2637  }
2638 
2639  child->tx_fifo_size = transport_tx_fifo_size (&child->connection);
2640 
2641  tcp_send_synack (child);
2642 
2643  done:
2644 
2645  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
2646  {
2647  tcp_rx_trace_t *t;
2648  t = vlib_add_trace (vm, node, b, sizeof (*t));
2649  clib_memcpy_fast (&t->tcp_header, tcp_buffer_hdr (b),
2650  sizeof (t->tcp_header));
2651  clib_memcpy_fast (&t->tcp_connection, lc,
2652  sizeof (t->tcp_connection));
2653  }
2654 
2655  n_syns += (error == TCP_ERROR_NONE);
2656  }
2657 
2658  tcp_inc_counter (listen, TCP_ERROR_SYNS_RCVD, n_syns);
2659  vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);
2660 
2661  return from_frame->n_vectors;
2662 }
2663 
2664 VLIB_NODE_FN (tcp4_listen_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2665  vlib_frame_t * from_frame)
2666 {
2667  return tcp46_listen_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2668 }
2669 
2670 VLIB_NODE_FN (tcp6_listen_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2671  vlib_frame_t * from_frame)
2672 {
2673  return tcp46_listen_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2674 }
2675 
2676 /* *INDENT-OFF* */
2677 VLIB_REGISTER_NODE (tcp4_listen_node) =
2678 {
2679  .name = "tcp4-listen",
2680  /* Takes a vector of packets. */
2681  .vector_size = sizeof (u32),
2682  .n_errors = TCP_N_ERROR,
2683  .error_strings = tcp_error_strings,
2684  .n_next_nodes = TCP_LISTEN_N_NEXT,
2685  .next_nodes =
2686  {
2687 #define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
2688  foreach_tcp_state_next
2689 #undef _
2690  },
2691  .format_trace = format_tcp_rx_trace_short,
2692 };
2693 /* *INDENT-ON* */
2694 
2695 /* *INDENT-OFF* */
2696 VLIB_REGISTER_NODE (tcp6_listen_node) =
2697 {
2698  .name = "tcp6-listen",
2699  /* Takes a vector of packets. */
2700  .vector_size = sizeof (u32),
2701  .n_errors = TCP_N_ERROR,
2702  .error_strings = tcp_error_strings,
2703  .n_next_nodes = TCP_LISTEN_N_NEXT,
2704  .next_nodes =
2705  {
2706 #define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
2707  foreach_tcp_state_next
2708 #undef _
2709  },
2710  .format_trace = format_tcp_rx_trace_short,
2711 };
2712 /* *INDENT-ON* */
2713 
2714 typedef enum _tcp_input_next
2715 {
2716  TCP_INPUT_NEXT_DROP,
2717  TCP_INPUT_NEXT_LISTEN,
2718  TCP_INPUT_NEXT_RCV_PROCESS,
2719  TCP_INPUT_NEXT_SYN_SENT,
2720  TCP_INPUT_NEXT_ESTABLISHED,
2721  TCP_INPUT_NEXT_RESET,
2722  TCP_INPUT_NEXT_PUNT,
2723  TCP_INPUT_N_NEXT
2724 } tcp_input_next_t;
2725 
2726 #define foreach_tcp4_input_next \
2727  _ (DROP, "ip4-drop") \
2728  _ (LISTEN, "tcp4-listen") \
2729  _ (RCV_PROCESS, "tcp4-rcv-process") \
2730  _ (SYN_SENT, "tcp4-syn-sent") \
2731  _ (ESTABLISHED, "tcp4-established") \
2732  _ (RESET, "tcp4-reset") \
2733  _ (PUNT, "ip4-punt")
2734 
2735 #define foreach_tcp6_input_next \
2736  _ (DROP, "ip6-drop") \
2737  _ (LISTEN, "tcp6-listen") \
2738  _ (RCV_PROCESS, "tcp6-rcv-process") \
2739  _ (SYN_SENT, "tcp6-syn-sent") \
2740  _ (ESTABLISHED, "tcp6-established") \
2741  _ (RESET, "tcp6-reset") \
2742  _ (PUNT, "ip6-punt")
2743 
2744 #define filter_flags (TCP_FLAG_SYN|TCP_FLAG_ACK|TCP_FLAG_RST|TCP_FLAG_FIN)
2745 
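/* A small sketch (names hypothetical, sizes illustrative) of how
 * filter_flags is used below: dispatch is a direct two-dimensional table
 * lookup keyed by connection state and by the TCP flags byte masked to the
 * four bits above, so the SYN/ACK/RST/FIN combination selects both the next
 * graph node and the error counter to charge. */
#define EXAMPLE_N_TCP_STATES 11	/* one row per TCP state */

typedef struct
{
  u8 next;			/* graph next index to take */
  u8 error;			/* error counter to charge */
} example_dispatch_entry_t;

/* indexed as example_dispatch[tc->state][tcp->flags & filter_flags]; 256
 * slots per row cover any value of the masked flags byte */
static example_dispatch_entry_t example_dispatch[EXAMPLE_N_TCP_STATES][256];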
2746 static void
2747 tcp_input_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
2748  vlib_buffer_t ** bs, u32 n_bufs, u8 is_ip4)
2749 {
2750  tcp_connection_t *tc;
2751  tcp_header_t *tcp;
2752  tcp_rx_trace_t *t;
2753  int i;
2754 
2755  for (i = 0; i < n_bufs; i++)
2756  {
2757  if (bs[i]->flags & VLIB_BUFFER_IS_TRACED)
2758  {
2759  t = vlib_add_trace (vm, node, bs[i], sizeof (*t));
2760  tc = tcp_connection_get (vnet_buffer (bs[i])->tcp.connection_index,
2761  vm->thread_index);
2762  tcp = vlib_buffer_get_current (bs[i]);
2763  tcp_set_rx_trace_data (t, tc, tcp, bs[i], is_ip4);
2764  }
2765  }
2766 }
2767 
2768 static void
2769 tcp_input_set_error_next (tcp_main_t * tm, u16 * next, u32 * error, u8 is_ip4)
2770 {
2771  if (*error == TCP_ERROR_FILTERED || *error == TCP_ERROR_WRONG_THREAD)
2772  {
2773  *next = TCP_INPUT_NEXT_DROP;
2774  }
2775  else if ((is_ip4 && tm->punt_unknown4) || (!is_ip4 && tm->punt_unknown6))
2776  {
2777  *next = TCP_INPUT_NEXT_PUNT;
2778  *error = TCP_ERROR_PUNT;
2779  }
2780  else
2781  {
2782  *next = TCP_INPUT_NEXT_RESET;
2783  *error = TCP_ERROR_NO_LISTENER;
2784  }
2785 }
2786 
2787 static inline void
2788 tcp_input_dispatch_buffer (tcp_main_t * tm, tcp_connection_t * tc,
2789  vlib_buffer_t * b, u16 * next,
2790  vlib_node_runtime_t * error_node)
2791 {
2792  tcp_header_t *tcp;
2793  u32 error;
2794  u8 flags;
2795 
2796  tcp = tcp_buffer_hdr (b);
2797  flags = tcp->flags & filter_flags;
2798  *next = tm->dispatch_table[tc->state][flags].next;
2799  error = tm->dispatch_table[tc->state][flags].error;
2800  tc->segs_in += 1;
2801 
2802  if (PREDICT_FALSE (error != TCP_ERROR_NONE))
2803  {
2804  b->error = error_node->errors[error];
2805  if (error == TCP_ERROR_DISPATCH)
2806  clib_warning ("tcp conn %u disp error state %U flags %U",
2807  tc->c_c_index, format_tcp_state, tc->state,
2808  format_tcp_flags, (int) flags);
2809  }
2810 }
2811 
2812 always_inline uword
2813 tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2814  vlib_frame_t * frame, int is_ip4, u8 is_nolookup)
2815 {
2816  u32 n_left_from, *from, thread_index = vm->thread_index;
2817  tcp_main_t *tm = vnet_get_tcp_main ();
2818  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
2819  u16 nexts[VLIB_FRAME_SIZE], *next;
2820 
2821  tcp_set_time_now (tcp_get_worker (thread_index));
2822 
2823  from = vlib_frame_vector_args (frame);
2824  n_left_from = frame->n_vectors;
2825  vlib_get_buffers (vm, from, bufs, n_left_from);
2826 
2827  b = bufs;
2828  next = nexts;
2829 
2830  while (n_left_from >= 4)
2831  {
2832  u32 error0 = TCP_ERROR_NO_LISTENER, error1 = TCP_ERROR_NO_LISTENER;
2833  tcp_connection_t *tc0, *tc1;
2834 
2835  {
2836  vlib_prefetch_buffer_header (b[2], STORE);
2837  CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
2838 
2839  vlib_prefetch_buffer_header (b[3], STORE);
2840  CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
2841  }
2842 
2843  next[0] = next[1] = TCP_INPUT_NEXT_DROP;
2844 
2845  tc0 = tcp_input_lookup_buffer (b[0], thread_index, &error0, is_ip4,
2846  is_nolookup);
2847  tc1 = tcp_input_lookup_buffer (b[1], thread_index, &error1, is_ip4,
2848  is_nolookup);
2849 
2850  if (PREDICT_TRUE (!tc0 + !tc1 == 0))
2851  {
2852  ASSERT (tcp_lookup_is_valid (tc0, b[0], tcp_buffer_hdr (b[0])));
2853  ASSERT (tcp_lookup_is_valid (tc1, b[1], tcp_buffer_hdr (b[1])));
2854 
2855  vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
2856  vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;
2857 
2858  tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], node);
2859  tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], node);
2860  }
2861  else
2862  {
2863  if (PREDICT_TRUE (tc0 != 0))
2864  {
2865  ASSERT (tcp_lookup_is_valid (tc0, b[0], tcp_buffer_hdr (b[0])));
2866  vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
2867  tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], node);
2868  }
2869  else
2870  {
2871  tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);
2872  b[0]->error = node->errors[error0];
2873  }
2874 
2875  if (PREDICT_TRUE (tc1 != 0))
2876  {
2877  ASSERT (tcp_lookup_is_valid (tc1, b[1], tcp_buffer_hdr (b[1])));
2878  vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;
2879  tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], node);
2880  }
2881  else
2882  {
2883  tcp_input_set_error_next (tm, &next[1], &error1, is_ip4);
2884  b[1]->error = node->errors[error1];
2885  }
2886  }
2887 
2888  b += 2;
2889  next += 2;
2890  n_left_from -= 2;
2891  }
2892  while (n_left_from > 0)
2893  {
2894  tcp_connection_t *tc0;
2895  u32 error0 = TCP_ERROR_NO_LISTENER;
2896 
2897  if (n_left_from > 1)
2898  {
2899  vlib_prefetch_buffer_header (b[1], STORE);
2900  CLIB_PREFETCH (b[1]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
2901  }
2902 
2903  next[0] = TCP_INPUT_NEXT_DROP;
2904  tc0 = tcp_input_lookup_buffer (b[0], thread_index, &error0, is_ip4,
2905  is_nolookup);
2906  if (PREDICT_TRUE (tc0 != 0))
2907  {
2908  ASSERT (tcp_lookup_is_valid (tc0, b[0], tcp_buffer_hdr (b[0])));
2909  vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
2910  tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], node);
2911  }
2912  else
2913  {
2914  tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);
2915  b[0]->error = node->errors[error0];
2916  }
2917 
2918  b += 1;
2919  next += 1;
2920  n_left_from -= 1;
2921  }
2922 
2923  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
2924  tcp_input_trace_frame (vm, node, bufs, frame->n_vectors, is_ip4);
2925 
2926  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
2927  return frame->n_vectors;
2928 }
2929 
2930 VLIB_NODE_FN (tcp4_input_nolookup_node) (vlib_main_t * vm,
2931  vlib_node_runtime_t * node,
2932  vlib_frame_t * from_frame)
2933 {
2934  return tcp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ ,
2935  1 /* is_nolookup */ );
2936 }
2937 
2938 VLIB_NODE_FN (tcp6_input_nolookup_node) (vlib_main_t * vm,
2939  vlib_node_runtime_t * node,
2940  vlib_frame_t * from_frame)
2941 {
2942  return tcp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ ,
2943  1 /* is_nolookup */ );
2944 }
2945 
2946 /* *INDENT-OFF* */
2947 VLIB_REGISTER_NODE (tcp4_input_nolookup_node) =
2948 {
2949  .name = "tcp4-input-nolookup",
2950  /* Takes a vector of packets. */
2951  .vector_size = sizeof (u32),
2952  .n_errors = TCP_N_ERROR,
2953  .error_strings = tcp_error_strings,
2954  .n_next_nodes = TCP_INPUT_N_NEXT,
2955  .next_nodes =
2956  {
2957 #define _(s,n) [TCP_INPUT_NEXT_##s] = n,
2958  foreach_tcp4_input_next
2959 #undef _
2960  },
2961  .format_buffer = format_tcp_header,
2962  .format_trace = format_tcp_rx_trace,
2963 };
2964 /* *INDENT-ON* */
2965 
2966 /* *INDENT-OFF* */
2967 VLIB_REGISTER_NODE (tcp6_input_nolookup_node) =
2968 {
2969  .name = "tcp6-input-nolookup",
2970  /* Takes a vector of packets. */
2971  .vector_size = sizeof (u32),
2972  .n_errors = TCP_N_ERROR,
2973  .error_strings = tcp_error_strings,
2974  .n_next_nodes = TCP_INPUT_N_NEXT,
2975  .next_nodes =
2976  {
2977 #define _(s,n) [TCP_INPUT_NEXT_##s] = n,
2978  foreach_tcp6_input_next
2979 #undef _
2980  },
2981  .format_buffer = format_tcp_header,
2982  .format_trace = format_tcp_rx_trace,
2983 };
2984 /* *INDENT-ON* */
2985 
2986 VLIB_NODE_FN (tcp4_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2987  vlib_frame_t * from_frame)
2988 {
2989  return tcp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ ,
2990  0 /* is_nolookup */ );
2991 }
2992 
2993 VLIB_NODE_FN (tcp6_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2994  vlib_frame_t * from_frame)
2995 {
2996  return tcp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ ,
2997  0 /* is_nolookup */ );
2998 }
2999 
3000 /* *INDENT-OFF* */
3001 VLIB_REGISTER_NODE (tcp4_input_node) =
3002 {
3003  .name = "tcp4-input",
3004  /* Takes a vector of packets. */
3005  .vector_size = sizeof (u32),
3006  .n_errors = TCP_N_ERROR,
3007  .error_strings = tcp_error_strings,
3008  .n_next_nodes = TCP_INPUT_N_NEXT,
3009  .next_nodes =
3010  {
3011 #define _(s,n) [TCP_INPUT_NEXT_##s] = n,
3012  foreach_tcp4_input_next
3013 #undef _
3014  },
3015  .format_buffer = format_tcp_header,
3016  .format_trace = format_tcp_rx_trace,
3017 };
3018 /* *INDENT-ON* */
3019 
3020 /* *INDENT-OFF* */
3021 VLIB_REGISTER_NODE (tcp6_input_node) =
3022 {
3023  .name = "tcp6-input",
3024  /* Takes a vector of packets. */
3025  .vector_size = sizeof (u32),
3026  .n_errors = TCP_N_ERROR,
3027  .error_strings = tcp_error_strings,
3028  .n_next_nodes = TCP_INPUT_N_NEXT,
3029  .next_nodes =
3030  {
3031 #define _(s,n) [TCP_INPUT_NEXT_##s] = n,
3032  foreach_tcp6_input_next
3033 #undef _
3034  },
3035  .format_buffer = format_tcp_header,
3036  .format_trace = format_tcp_rx_trace,
3037 };
3038 /* *INDENT-ON* */
3039 
3040 #ifndef CLIB_MARCH_VARIANT
3041 static void
3042 tcp_dispatch_table_init (tcp_main_t * tm)
3043 {
3044  int i, j;
3045  for (i = 0; i < ARRAY_LEN (tm->dispatch_table); i++)
3046  for (j = 0; j < ARRAY_LEN (tm->dispatch_table[i]); j++)
3047  {
3048  tm->dispatch_table[i][j].next = TCP_INPUT_NEXT_DROP;
3049  tm->dispatch_table[i][j].error = TCP_ERROR_DISPATCH;
3050  }
3051 
3052 #define _(t,f,n,e) \
3053 do { \
3054  tm->dispatch_table[TCP_STATE_##t][f].next = (n); \
3055  tm->dispatch_table[TCP_STATE_##t][f].error = (e); \
3056 } while (0)
3057 
3058  /* RFC 793: In LISTEN if RST drop and if ACK return RST */
3059  _(LISTEN, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3060  _(LISTEN, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_ACK_INVALID);
3061  _(LISTEN, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_INVALID_CONNECTION);
3062  _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE);
3064  TCP_ERROR_ACK_INVALID);
3066  TCP_ERROR_SEGMENT_INVALID);
3068  TCP_ERROR_SEGMENT_INVALID);
3070  TCP_ERROR_INVALID_CONNECTION);
3071  _(LISTEN, TCP_FLAG_FIN, TCP_INPUT_NEXT_RESET, TCP_ERROR_SEGMENT_INVALID);
3073  TCP_ERROR_SEGMENT_INVALID);
3075  TCP_ERROR_SEGMENT_INVALID);
3077  TCP_ERROR_SEGMENT_INVALID);
3079  TCP_ERROR_SEGMENT_INVALID);
3081  TCP_ERROR_SEGMENT_INVALID);
3083  TCP_ERROR_SEGMENT_INVALID);
3085  TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3086  /* ACK for a SYN-ACK -> tcp-rcv-process. */
3087  _(SYN_RCVD, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3088  _(SYN_RCVD, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3090  TCP_ERROR_NONE);
3091  _(SYN_RCVD, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3093  TCP_ERROR_NONE);
3095  TCP_ERROR_NONE);
3096  _(SYN_RCVD, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3097  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3098  _(SYN_RCVD, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3100  TCP_ERROR_NONE);
3102  TCP_ERROR_NONE);
3103  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
3104  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3106  TCP_ERROR_NONE);
3107  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
3108  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3109  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
3110  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3112  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3113  _(SYN_RCVD, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3114  /* SYN-ACK for a SYN */
3116  TCP_ERROR_NONE);
3117  _(SYN_SENT, TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
3118  _(SYN_SENT, TCP_FLAG_RST, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
3120  TCP_ERROR_NONE);
3121  _(SYN_SENT, TCP_FLAG_FIN, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
3123  TCP_ERROR_NONE);
3124  /* ACK for an established connection -> tcp-established. */
3125  _(ESTABLISHED, TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3126  /* FIN for an established connection -> tcp-established. */
3127  _(ESTABLISHED, TCP_FLAG_FIN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3129  TCP_ERROR_NONE);
3131  TCP_ERROR_NONE);
3132  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
3133  TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3135  TCP_ERROR_NONE);
3136  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
3137  TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3138  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
3139  TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3140  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3141  TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3142  _(ESTABLISHED, TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3144  TCP_ERROR_NONE);
3145  _(ESTABLISHED, TCP_FLAG_SYN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3147  TCP_ERROR_NONE);
3149  TCP_ERROR_NONE);
3150  _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3151  TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3152  _(ESTABLISHED, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3153  /* ACK or FIN-ACK to our FIN */
3154  _(FIN_WAIT_1, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3156  TCP_ERROR_NONE);
3157  /* FIN in reply to our FIN from the other side */
3158  _(FIN_WAIT_1, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3159  _(FIN_WAIT_1, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3161  TCP_ERROR_NONE);
3162  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
3163  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3164  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
3165  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3166  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3167  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3169  TCP_ERROR_NONE);
3170  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
3171  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3172  _(FIN_WAIT_1, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3174  TCP_ERROR_NONE);
3176  TCP_ERROR_NONE);
3177  _(FIN_WAIT_1, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3178  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3179  _(FIN_WAIT_1, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3181  TCP_ERROR_NONE);
3182  _(CLOSING, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3183  _(CLOSING, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3184  _(CLOSING, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3186  TCP_ERROR_NONE);
3188  TCP_ERROR_NONE);
3189  _(CLOSING, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3190  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3191  _(CLOSING, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3193  TCP_ERROR_NONE);
3194  _(CLOSING, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3196  TCP_ERROR_NONE);
3198  TCP_ERROR_NONE);
3199  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
3200  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3202  TCP_ERROR_NONE);
3203  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
3204  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3205  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
3206  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3208  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3209  /* FIN confirming that the peer (app) has closed */
3210  _(FIN_WAIT_2, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3211  _(FIN_WAIT_2, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3213  TCP_ERROR_NONE);
3214  _(FIN_WAIT_2, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3216  TCP_ERROR_NONE);
3217  _(FIN_WAIT_2, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3218  _(CLOSE_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3220  TCP_ERROR_NONE);
3221  _(CLOSE_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3223  TCP_ERROR_NONE);
3224  _(CLOSE_WAIT, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3225  _(LAST_ACK, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3226  _(LAST_ACK, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3227  _(LAST_ACK, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3229  TCP_ERROR_NONE);
3231  TCP_ERROR_NONE);
3232  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
3233  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3235  TCP_ERROR_NONE);
3236  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
3237  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3238  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
3239  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3241  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3242  _(LAST_ACK, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3244  TCP_ERROR_NONE);
3245  _(LAST_ACK, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3247  TCP_ERROR_NONE);
3249  TCP_ERROR_NONE);
3250  _(LAST_ACK, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3251  TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3252  _(TIME_WAIT, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE);
3253  _(TIME_WAIT, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3255  TCP_ERROR_NONE);
3256  _(TIME_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3258  TCP_ERROR_NONE);
3259  _(TIME_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3260  /* RFC793 CLOSED: An incoming segment containing a RST is discarded. An
3261  * incoming segment not containing a RST causes a RST to be sent in
3262  * response.*/
3263  _(CLOSED, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED);
3265  TCP_ERROR_CONNECTION_CLOSED);
3266  _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_CONNECTION_CLOSED);
3267  _(CLOSED, TCP_FLAG_SYN, TCP_INPUT_NEXT_RESET, TCP_ERROR_CONNECTION_CLOSED);
3269  TCP_ERROR_CONNECTION_CLOSED);
3270 #undef _
3271 }
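/* For illustration: with the helper macro above, a single table entry such
 * as
 *   _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE);
 * expands (inside the do/while wrapper) to
 *   tm->dispatch_table[TCP_STATE_LISTEN][TCP_FLAG_SYN].next = TCP_INPUT_NEXT_LISTEN;
 *   tm->dispatch_table[TCP_STATE_LISTEN][TCP_FLAG_SYN].error = TCP_ERROR_NONE;
 * i.e. a pure SYN arriving on a listener is handed to the tcp4/6-listen
 * node with no error charged. */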
3272 
3273 static clib_error_t *
3274 tcp_input_init (vlib_main_t * vm)
3275 {
3276  clib_error_t *error = 0;
3277  tcp_main_t *tm = vnet_get_tcp_main ();
3278 
3279  if ((error = vlib_call_init_function (vm, tcp_init)))
3280  return error;
3281 
3282  /* Initialize dispatch table. */
3283  tcp_dispatch_table_init (tm);
3284 
3285  return error;
3286 }
3287 
3288 VLIB_INIT_FUNCTION (tcp_input_init);
3289 
3290 #endif /* CLIB_MARCH_VARIANT */
3291 
3292 /*
3293  * fd.io coding-style-patch-verification: ON
3294  *
3295  * Local Variables:
3296  * eval: (c-set-style "gnu")
3297  * End:
3298  */
static void tcp_program_disconnect(tcp_worker_ctx_t *wrk, tcp_connection_t *tc)
Definition: tcp_input.c:1089
static int tcp_session_enqueue_ooo(tcp_connection_t *tc, vlib_buffer_t *b, u16 data_len)
Enqueue out-of-order data.
Definition: tcp_input.c:1207
static void tcp_update_timestamp(tcp_connection_t *tc, u32 seq, u32 seq_end)
Update tsval recent.
Definition: tcp_input.c:138
u16 lb_n_buckets
number of buckets in the load-balance.
Definition: load_balance.h:116
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:124
u32 connection_index
Index of the transport connection associated to the session.
void tcp_program_retransmit(tcp_connection_t *tc)
Definition: tcp_output.c:1057
#define TCP_TIMER_HANDLE_INVALID
Definition: tcp_types.h:81
#define clib_min(x, y)
Definition: clib.h:327
#define CLIB_UNUSED(x)
Definition: clib.h:87
u32 * pending_disconnects
vector of pending disconnect notifications
Definition: tcp.h:86
vlib_node_registration_t tcp6_rcv_process_node
(constructor) VLIB_REGISTER_NODE (tcp6_rcv_process_node)
Definition: tcp_input.c:2521
static u32 ip6_fib_table_fwding_lookup(u32 fib_index, const ip6_address_t *dst)
Definition: ip6_fib.h:67
static void tcp_persist_timer_set(tcp_timer_wheel_t *tw, tcp_connection_t *tc)
Definition: tcp_timer.h:77
static void tcp_rcv_fin(tcp_worker_ctx_t *wrk, tcp_connection_t *tc, vlib_buffer_t *b, u32 *error)
Definition: tcp_input.c:1133
static u32 tcp_time_now(void)
Definition: tcp_inlines.h:191
vl_api_wireguard_peer_flags_t flags
Definition: wireguard.api:103
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers Frees the entire buffer chain for each buffer.
Definition: buffer_funcs.h:937
static tcp_connection_t * tcp_connection_get(u32 conn_index, u32 thread_index)
Definition: tcp_inlines.h:30
ip4_address_t src_address
Definition: ip4_packet.h:125
static u8 tcp_cc_is_spurious_retransmit(tcp_connection_t *tc)
Definition: tcp_input.c:718
transport_connection_t * session_lookup_connection_wt6(u32 fib_index, ip6_address_t *lcl, ip6_address_t *rmt, u16 lcl_port, u16 rmt_port, u8 proto, u32 thread_index, u8 *result)
Lookup connection with ip6 and transport layer information.
vnet_main_t * vnet_get_main(void)
Definition: misc.c:46
enum _tcp_state_next tcp_state_next_t
static vnet_hw_interface_t * vnet_get_sup_hw_interface(vnet_main_t *vnm, u32 sw_if_index)
#define tcp_rst(_th)
Definition: tcp_packet.h:81
#define TCP_FLAG_SYN
Definition: fa_node.h:13
#define THZ
TCP tick frequency.
Definition: tcp_types.h:26
#define tcp_opts_tstamp(_to)
Definition: tcp_packet.h:156
#define PREDICT_TRUE(x)
Definition: clib.h:121
#define tcp_inc_err_counter(cnts, err, val)
Definition: tcp_input.c:1463
unsigned long u64
Definition: types.h:89
#define tcp_store_err_counters(node_id, cnts)
Definition: tcp_input.c:1467
static void tcp_dispatch_table_init(tcp_main_t *tm)
Definition: tcp_input.c:3042
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
static u8 * format_tcp_rx_trace_short(u8 *s, va_list *args)
Definition: tcp_input.c:1374
static int tcp_segment_rcv(tcp_worker_ctx_t *wrk, tcp_connection_t *tc, vlib_buffer_t *b)
Receive buffer for connection and handle acks.
Definition: tcp_input.c:1291
void session_transport_delete_notify(transport_connection_t *tc)
Notification from transport that connection is being deleted.
Definition: session.c:970
static uword tcp46_established_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, int is_ip4)
Definition: tcp_input.c:1477
svm_fifo_t * rx_fifo
Pointers to rx/tx buffers.
#define tcp_fastrecovery_first_off(tc)
Definition: tcp_types.h:427
static void tcp_input_dispatch_buffer(tcp_main_t *tm, tcp_connection_t *tc, vlib_buffer_t *b, u16 *next, vlib_node_runtime_t *error_node)
Definition: tcp_input.c:2788
struct _tcp_main tcp_main_t
u32 thread_index
Definition: main.h:249
void tcp_connection_timers_reset(tcp_connection_t *tc)
Stop all connection timers.
Definition: tcp.c:493
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:113
int session_main_flush_enqueue_events(u8 transport_proto, u32 thread_index)
Flushes queue of sessions that are to be notified of new data enqueued events.
Definition: session.c:715
struct _tcp_connection tcp_connection_t
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
Definition: vec.h:592
static u32 tcp_set_time_now(tcp_worker_ctx_t *wrk)
Definition: tcp_inlines.h:219
#define clib_abs(x)
Definition: clib.h:334
void session_transport_reset_notify(transport_connection_t *tc)
Notify application that connection has been reset.
Definition: session.c:1071
u32 dpo_get_urpf(const dpo_id_t *dpo)
Get a uRPF interface for the DPO.
Definition: dpo.c:382
u32 * pending_resets
vector of pending reset notifications
Definition: tcp.h:89
#define tcp_disconnect_pending_on(tc)
Definition: tcp_types.h:423
static u32 format_get_indent(u8 *s)
Definition: format.h:72
vlib_node_registration_t tcp4_rcv_process_node
(constructor) VLIB_REGISTER_NODE (tcp4_rcv_process_node)
Definition: tcp_input.c:2502
static void tcp_cc_congestion(tcp_connection_t *tc)
Definition: tcp_cc.h:36
static u32 tcp_time_now_w_thread(u32 thread_index)
Definition: tcp_inlines.h:197
vlib_main_t * vm
Definition: in2out_ed.c:1582
#define timestamp_lt(_t1, _t2)
Definition: tcp_packet.h:184
static session_t * session_get(u32 si, u32 thread_index)
Definition: session.h:301
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:424
#define TCP_TICK
TCP tick period (s)
Definition: tcp_types.h:25
#define tcp_disconnect_pending_off(tc)
Definition: tcp_types.h:424
tcp_connection_t tcp_connection
Definition: tcp_input.c:1354
#define VLIB_NODE_FN(node)
Definition: node.h:202
static void tcp_cc_congestion_undo(tcp_connection_t *tc)
Definition: tcp_input.c:699
int session_enqueue_stream_connection(transport_connection_t *tc, vlib_buffer_t *b, u32 offset, u8 queue_event, u8 is_in_order)
Definition: session.c:460
u64 session_lookup_half_open_handle(transport_connection_t *tc)
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:469
format_function_t format_tcp_flags
Definition: tcp.h:348
static u8 tcp_is_descheduled(tcp_connection_t *tc)
Definition: tcp_inlines.h:380
struct _tcp_header tcp_header_t
int tcp_half_open_connection_cleanup(tcp_connection_t *tc)
Try to cleanup half-open connection.
Definition: tcp.c:209
ip6_address_t src_address
Definition: ip6_packet.h:310
#define tcp_in_cong_recovery(tc)
Definition: tcp_types.h:429
u32 * pending_deq_acked
vector of pending ack dequeues
Definition: tcp.h:83
unsigned char u8
Definition: types.h:56
#define tcp_inc_counter(node_id, err, count)
Definition: tcp_input.c:1455
vlib_node_registration_t tcp6_syn_sent_node
(constructor) VLIB_REGISTER_NODE (tcp6_syn_sent_node)
Definition: tcp_input.c:2078
u8 data[128]
Definition: ipsec_types.api:89
static tcp_connection_t * tcp_lookup_connection(u32 fib_index, vlib_buffer_t *b, u8 thread_index, u8 is_ip4)
Lookup transport connection.
Definition: tcp_input.c:1681
double f64
Definition: types.h:142
void session_transport_closing_notify(transport_connection_t *tc)
Notification from transport that connection is being closed.
Definition: session.c:948
#define tcp_is_fin(_th)
Definition: tcp_packet.h:90
static u8 * format_tcp_rx_trace(u8 *s, va_list *args)
Definition: tcp_input.c:1358
#define timestamp_leq(_t1, _t2)
Definition: tcp_packet.h:185
void tcp_init_snd_vars(tcp_connection_t *tc)
Initialize connection send variables.
Definition: tcp.c:669
#define tcp_cfg
Definition: tcp.h:271
static void tcp_persist_timer_reset(tcp_timer_wheel_t *tw, tcp_connection_t *tc)
Definition: tcp_timer.h:98
#define VLIB_INIT_FUNCTION(x)
Definition: init.h:173
vlib_node_registration_t tcp4_established_node
(constructor) VLIB_REGISTER_NODE (tcp4_established_node)
Definition: tcp_input.c:1575
static int tcp_options_parse(tcp_header_t *th, tcp_options_t *to, u8 is_syn)
Parse TCP header options.
Definition: tcp_packet.h:196
void tcp_bt_sample_delivery_rate(tcp_connection_t *tc, tcp_rate_sample_t *rs)
Generate a delivery rate sample from recently acked bytes.
Definition: tcp_bt.c:592
vl_api_ip6_address_t ip6
Definition: one.api:424
ip4_address_t dst_address
Definition: ip4_packet.h:125
#define seq_leq(_s1, _s2)
Definition: tcp_packet.h:178
#define TCP_FLAG_ACK
Definition: fa_node.h:16
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
transport_connection_t * session_lookup_connection_wt4(u32 fib_index, ip4_address_t *lcl, ip4_address_t *rmt, u16 lcl_port, u16 rmt_port, u8 proto, u32 thread_index, u8 *result)
Lookup connection with ip4 and transport layer information.
static void tcp_handle_rst(tcp_connection_t *tc)
Definition: tcp_input.c:157
vnet_hw_interface_flags_t flags
Definition: interface.h:537
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:203
static int tcp_segment_validate(tcp_worker_ctx_t *wrk, tcp_connection_t *tc0, vlib_buffer_t *b0, tcp_header_t *th0, u32 *error0)
Validate incoming segment as per RFC793 p.
Definition: tcp_input.c:259
#define tcp_fastrecovery_off(tc)
Definition: tcp_types.h:416
vlib_node_registration_t tcp6_input_node
(constructor) VLIB_REGISTER_NODE (tcp6_input_node)
Definition: tcp_input.c:3021
static u8 tcp_ack_is_dupack(tcp_connection_t *tc, vlib_buffer_t *b, u32 prev_snd_wnd, u32 prev_snd_una)
Check if duplicate ack as per RFC5681 Sec.
Definition: tcp_input.c:960
static u32 ooo_segment_length(svm_fifo_t *f, ooo_segment_t *s)
Definition: svm_fifo.h:660
static void * ip4_next_header(ip4_header_t *i)
Definition: ip4_packet.h:196
unsigned int u32
Definition: types.h:88
static sack_scoreboard_hole_t * scoreboard_first_hole(sack_scoreboard_t *sb)
Definition: tcp_sack.h:59
static tcp_header_t * tcp_buffer_hdr(vlib_buffer_t *b)
Definition: tcp_inlines.h:22
#define vlib_call_init_function(vm, x)
Definition: init.h:270
static void tcp_node_inc_counter_i(vlib_main_t *vm, u32 tcp4_node, u32 tcp6_node, u8 is_ip4, u32 evt, u32 val)
Definition: tcp_input.c:1439
#define TCP_TSTP_TO_HZ
Definition: tcp_types.h:31
#define VLIB_FRAME_SIZE
Definition: node.h:377
static void tcp_cc_init_congestion(tcp_connection_t *tc)
Init loss recovery/fast recovery.
Definition: tcp_input.c:673
#define tcp_validate_txf_size(_tc, _a)
Definition: tcp.h:354
#define tcp_fastrecovery_on(tc)
Definition: tcp_types.h:415
static void tcp_retransmit_timer_update(tcp_timer_wheel_t *tw, tcp_connection_t *tc)
Definition: tcp_timer.h:104
static void tcp_timer_set(tcp_timer_wheel_t *tw, tcp_connection_t *tc, u8 timer_id, u32 interval)
Definition: tcp_timer.h:21
static void tcp_cc_recovered(tcp_connection_t *tc)
Definition: tcp_cc.h:48
static void svm_fifo_newest_ooo_segment_reset(svm_fifo_t *f)
Definition: svm_fifo.h:644
static heap_elt_t * first(heap_header_t *h)
Definition: heap.c:59
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:136
static void tcp_retransmit_timer_reset(tcp_timer_wheel_t *tw, tcp_connection_t *tc)
Definition: tcp_timer.h:63
The identity of a DPO is a combination of its type and its instance number/index of objects of that t...
Definition: dpo.h:170
static u8 tcp_should_fastrecover(tcp_connection_t *tc, u8 has_sack)
Definition: tcp_input.c:724
void tcp_update_sack_list(tcp_connection_t *tc, u32 start, u32 end)
Build SACK list as per RFC2018.
Definition: tcp_sack.c:568
vlib_main_t * vm
convenience pointer to this thread&#39;s vlib main
Definition: tcp.h:92
static tcp_connection_t * tcp_half_open_connection_get(u32 conn_index)
Definition: tcp_inlines.h:67
static void tcp_program_dequeue(tcp_worker_ctx_t *wrk, tcp_connection_t *tc)
Definition: tcp_input.c:607
void tcp_send_ack(tcp_connection_t *tc)
Definition: tcp_output.c:1015
static void tcp_handle_disconnects(tcp_worker_ctx_t *wrk)
Definition: tcp_input.c:1099
static uword tcp46_listen_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame, int is_ip4)
LISTEN state processing as per RFC 793 p.
Definition: tcp_input.c:2543
void tcp_connection_tx_pacer_reset(tcp_connection_t *tc, u32 window, u32 start_bucket)
Definition: tcp.c:1206
static void tcp_input_set_error_next(tcp_main_t *tm, u16 *next, u32 *error, u8 is_ip4)
Definition: tcp_input.c:2769
tcp_connection_t * tcp_connection_alloc_w_base(u8 thread_index, tcp_connection_t *base)
Definition: tcp.c:309
static const dpo_id_t * load_balance_get_bucket_i(const load_balance_t *lb, u32 bucket)
Definition: load_balance.h:229
format_function_t format_tcp_connection_id
Definition: tcp.h:352
vlib_node_registration_t tcp4_input_nolookup_node
(constructor) VLIB_REGISTER_NODE (tcp4_input_nolookup_node)
Definition: tcp_input.c:2947
unsigned short u16
Definition: types.h:57
#define TCP_DUPACK_THRESHOLD
Definition: tcp_types.h:39
#define foreach_tcp4_input_next
Definition: tcp_input.c:2726
u8 data_len
Definition: ikev2_types.api:24
tcp_connection_t * tcp_connection_alloc(u8 thread_index)
Definition: tcp.c:296
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:229
#define filter_flags
Definition: tcp_input.c:2744
void tcp_connection_tx_pacer_update(tcp_connection_t *tc)
Definition: tcp.c:1193
static int tcp_buffer_discard_bytes(vlib_buffer_t *b, u32 n_bytes_to_drop)
Definition: tcp_input.c:1258
#define TCP_PAWS_IDLE
24 days
Definition: tcp_types.h:30
static void tcp_check_tx_offload(tcp_connection_t *tc, int is_ipv4)
Definition: tcp_input.c:1752
#define foreach_tcp6_input_next
Definition: tcp_input.c:2735
The FIB DPO provieds;.
Definition: load_balance.h:106
tcp_timer_wheel_t timer_wheel
worker timer wheel
Definition: tcp.h:118
static void tcp_input_trace_frame(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_buffer_t **bs, u32 n_bufs, u8 is_ip4)
Definition: tcp_input.c:2747
int ip6_address_compare(ip6_address_t *a1, ip6_address_t *a2)
Definition: ip46_cli.c:60
#define PREDICT_FALSE(x)
Definition: clib.h:120
#define always_inline
Definition: ipsec.h:28
static int tcp_rcv_ack_no_cc(tcp_connection_t *tc, vlib_buffer_t *b, u32 *error)
Definition: tcp_input.c:401
vl_api_ip4_address_t ip4
Definition: one.api:376
#define TCP_FLAG_FIN
Definition: fa_node.h:12
static u8 tcp_is_lost_fin(tcp_connection_t *tc)
Definition: tcp_inlines.h:183
static void tcp_cc_handle_event(tcp_connection_t *tc, tcp_rate_sample_t *rs, u32 is_dack)
One function to rule them all ...
Definition: tcp_input.c:827
vlib_node_registration_t tcp4_listen_node
(constructor) VLIB_REGISTER_NODE (tcp4_listen_node)
Definition: tcp_input.c:2677
static ooo_segment_t * svm_fifo_newest_ooo_segment(svm_fifo_t *f)
Definition: svm_fifo.h:636
static void tcp_cc_rcv_ack(tcp_connection_t *tc, tcp_rate_sample_t *rs)
Definition: tcp_cc.h:22
vlib_node_registration_t tcp6_established_node
(constructor) VLIB_REGISTER_NODE (tcp6_established_node)
Definition: tcp_input.c:1594
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
Definition: node_funcs.h:1231
static int tcp_cc_recover(tcp_connection_t *tc)
Definition: tcp_input.c:751
#define TCP_FLAG_RST
Definition: fa_node.h:14
#define TCP_DBG(_fmt, _args...)
Definition: tcp_debug.h:146
#define tcp_recovery_off(tc)
Definition: tcp_types.h:418
static int tcp_rcv_ack(tcp_worker_ctx_t *wrk, tcp_connection_t *tc, vlib_buffer_t *b, tcp_header_t *th, u32 *error)
Process incoming ACK.
Definition: tcp_input.c:988
void tcp_program_cleanup(tcp_worker_ctx_t *wrk, tcp_connection_t *tc)
Definition: tcp.c:335
void tcp_connection_free(tcp_connection_t *tc)
Definition: tcp.c:322
static void tcp_program_reset_ntf(tcp_worker_ctx_t *wrk, tcp_connection_t *tc)
Definition: tcp_input.c:190
vl_api_mac_address_t dst_addr
Definition: flow_types.api:65
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
vlib_node_registration_t tcp4_syn_sent_node
(constructor) VLIB_REGISTER_NODE (tcp4_syn_sent_node)
Definition: tcp_input.c:2059
u16 n_vectors
Definition: node.h:396
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:80
int ip4_address_compare(ip4_address_t *a1, ip4_address_t *a2)
Definition: ip46_cli.c:53
static_always_inline void vlib_buffer_enqueue_to_next(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 *nexts, uword count)
Definition: buffer_node.h:339
#define tcp_disconnect_pending(tc)
Definition: tcp_types.h:422
static void tcp_set_rx_trace_data(tcp_rx_trace_t *t0, tcp_connection_t *tc0, tcp_header_t *th0, vlib_buffer_t *b0, u8 is_ip4)
Definition: tcp_input.c:1389
void tcp_program_dupack(tcp_connection_t *tc)
Definition: tcp_output.c:1045
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:317
void tcp_send_reset(tcp_connection_t *tc)
Build and set reset packet for connection.
Definition: tcp_output.c:740
format_function_t format_tcp_state
Definition: tcp.h:347
static void tcp_update_rto(tcp_connection_t *tc)
Definition: tcp_inlines.h:373
#define clib_warning(format, args...)
Definition: error.h:59
u8 data[]
Packet data.
Definition: buffer.h:181
#define TCP_RTO_MIN
Definition: tcp_types.h:89
#define tcp_in_recovery(tc)
Definition: tcp_types.h:420
Don&#39;t register connection in lookup.
tcp_header_t tcp_header
Definition: tcp_input.c:1353
format_function_t format_tcp_header
Definition: format.h:100
struct _transport_connection transport_connection_t
f64 rtt_time
RTT for sample.
Definition: tcp_types.h:230
static void tcp_cc_undo_recovery(tcp_connection_t *tc)
Definition: tcp_cc.h:54
#define TCP_RTT_MAX
Definition: tcp_types.h:90
#define ARRAY_LEN(x)
Definition: clib.h:67
static void * ip6_next_header(ip6_header_t *i)
Definition: ip6_packet.h:371
vlib_main_t vlib_node_runtime_t * node
Definition: in2out_ed.c:1582
static u32 transport_max_tx_dequeue(transport_connection_t *tc)
Definition: session.h:503
void tcp_send_synack(tcp_connection_t *tc)
Definition: tcp_output.c:835
#define seq_geq(_s1, _s2)
Definition: tcp_packet.h:180
#define ASSERT(truth)
#define tcp_syn(_th)
Definition: tcp_packet.h:80
static clib_error_t * tcp_input_init(vlib_main_t *vm)
Definition: tcp_input.c:3274
static void tcp_estimate_rtt(tcp_connection_t *tc, u32 mrtt)
Compute smoothed RTT as per VJ&#39;s &#39;88 SIGCOMM and RFC6298.
Definition: tcp_input.c:439
static int tcp_update_rtt(tcp_connection_t *tc, tcp_rate_sample_t *rs, u32 ack)
Update rtt estimate.
Definition: tcp_input.c:473
enum _tcp_rcv_process_next tcp_rcv_process_next_t
static load_balance_t * load_balance_get(index_t lbi)
Definition: load_balance.h:220
static void tcp_cc_update(tcp_connection_t *tc, tcp_rate_sample_t *rs)
Definition: tcp_input.c:803
static void tcp_handle_postponed_dequeues(tcp_worker_ctx_t *wrk)
Dequeue bytes for connections that have received acks in last burst.
Definition: tcp_input.c:562
static void tcp_cong_recovery_off(tcp_connection_t *tc)
Definition: tcp_types.h:433
static index_t ip4_fib_forwarding_lookup(u32 fib_index, const ip4_address_t *addr)
Definition: ip4_fib.h:160
static void tcp_estimate_initial_rtt(tcp_connection_t *tc)
Definition: tcp_input.c:527
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:248
#define seq_gt(_s1, _s2)
Definition: tcp_packet.h:179
static int tcp_segment_check_paws(tcp_connection_t *tc)
RFC1323: Check against wrapped sequence numbers (PAWS).
Definition: tcp_input.c:128
static u8 tcp_cc_is_spurious_timeout_rxt(tcp_connection_t *tc)
Definition: tcp_input.c:709
static void tcp_established_trace_frame(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, u8 is_ip4)
Definition: tcp_input.c:1405
#define tcp_fastrecovery_first_on(tc)
Definition: tcp_types.h:426
enum _tcp_input_next tcp_input_next_t
int session_stream_accept_notify(transport_connection_t *tc)
Definition: session.c:1086
struct _sack_scoreboard_hole sack_scoreboard_hole_t
static u8 tcp_segment_in_rcv_wnd(tcp_connection_t *tc, u32 seq, u32 end_seq)
Validate segment sequence number.
Definition: tcp_input.c:112
#define clib_max(x, y)
Definition: clib.h:320
static vlib_main_t * vlib_get_main(void)
Definition: global_funcs.h:23
static clib_error_t * tcp_init(vlib_main_t *vm)
Definition: tcp.c:1446
u8 ip_is_zero(ip46_address_t *ip46_address, u8 is_ip4)
Definition: ip.c:20
u8 tcp_scoreboard_is_sane_post_recovery(tcp_connection_t *tc)
Test that scoreboard is sane after recovery.
Definition: tcp_sack.c:317
#define tcp_is_syn(_th)
Definition: tcp_packet.h:89
#define tcp_opts_wscale(_to)
Definition: tcp_packet.h:157
enum _tcp_syn_sent_next tcp_syn_sent_next_t
void tcp_send_reset_w_pkt(tcp_connection_t *tc, vlib_buffer_t *pkt, u32 thread_index, u8 is_ip4)
Send reset without reusing existing buffer.
Definition: tcp_output.c:652
static void tcp_update_snd_wnd(tcp_connection_t *tc, u32 seq, u32 ack, u32 snd_wnd)
Try to update snd_wnd based on feedback received from peer.
Definition: tcp_input.c:624
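The send-window update follows the RFC793 rule: accept the advertised window only when the segment is newer than the one that last updated it, i.e. SND.WL1 < SEG.SEQ, or SND.WL1 = SEG.SEQ and SND.WL2 <= SEG.ACK. Below is a minimal, self-contained sketch of that rule, not VPP's tcp_update_snd_wnd(); the struct and the simplified sequence macros are illustrative.

#include <stdint.h>

#define seq_lt(a, b)  ((int32_t) ((a) - (b)) < 0)
#define seq_leq(a, b) ((int32_t) ((a) - (b)) <= 0)

typedef struct
{
  uint32_t snd_wnd; /* send window advertised by the peer */
  uint32_t snd_wl1; /* segment seq used for last window update */
  uint32_t snd_wl2; /* segment ack used for last window update */
} snd_wnd_state_t;

/* Apply the RFC793 send-window update rule: only take the advertised
 * window from a segment that is "newer" than the last updater. */
static void
maybe_update_snd_wnd (snd_wnd_state_t *s, uint32_t seg_seq,
		      uint32_t seg_ack, uint32_t seg_wnd)
{
  if (seq_lt (s->snd_wl1, seg_seq)
      || (s->snd_wl1 == seg_seq && seq_leq (s->snd_wl2, seg_ack)))
    {
      s->snd_wnd = seg_wnd;
      s->snd_wl1 = seg_seq;
      s->snd_wl2 = seg_ack;
    }
}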
enum _tcp_established_next tcp_established_next_t
static void tcp_timer_update(tcp_timer_wheel_t *tw, tcp_connection_t *tc, u8 timer_id, u32 interval)
Definition: tcp_timer.h:43
vlib_node_registration_t tcp4_input_node
(constructor) VLIB_REGISTER_NODE (tcp4_input_node)
Definition: tcp_input.c:3001
void scoreboard_clear(sack_scoreboard_t *sb)
Definition: tcp_sack.c:277
void tcp_send_fin(tcp_connection_t *tc)
Send FIN.
Definition: tcp_output.c:863
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
enum _tcp_listen_next tcp_listen_next_t
#define foreach_tcp_state_next
Definition: tcp_input.c:31
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:140
static tcp_connection_t * tcp_listener_get(u32 tli)
Definition: tcp_inlines.h:58
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
Definition: in2out_ed.c:1583
static tcp_worker_ctx_t * tcp_get_worker(u32 thread_index)
Definition: tcp.h:282
void session_transport_closed_notify(transport_connection_t *tc)
Notification from transport that it is closed.
Definition: session.c:1036
VLIB buffer representation.
Definition: buffer.h:102
static int tcp_session_enqueue_data(tcp_connection_t *tc, vlib_buffer_t *b, u16 data_len)
Enqueue data for delivery to application.
Definition: tcp_input.c:1156
u64 uword
Definition: types.h:112
int session_stream_connect_notify(transport_connection_t *tc, session_error_t err)
Definition: session.c:757
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:297
void tcp_connection_init_vars(tcp_connection_t *tc)
Initialize tcp connection variables.
Definition: tcp.c:704
static void tcp_init_w_buffer(tcp_connection_t *tc, vlib_buffer_t *b, u8 is_ip4)
Initialize connection by gleaning network and rcv params from buffer.
Definition: tcp_inlines.h:328
session_t * session_lookup_listener6(u32 fib_index, ip6_address_t *lcl, u16 lcl_port, u8 proto, u8 use_wildcard)
static f64 tcp_time_now_us(u32 thread_index)
Definition: tcp_inlines.h:213
void scoreboard_init_rxt(sack_scoreboard_t *sb, u32 snd_una)
Definition: tcp_sack.c:254
static void tcp_connection_set_state(tcp_connection_t *tc, tcp_state_t state)
Definition: tcp_inlines.h:51
static tcp_connection_t * tcp_lookup_listener(vlib_buffer_t *b, u32 fib_index, int is_ip4)
Definition: tcp_input.c:1722
static u32 ooo_segment_offset_prod(svm_fifo_t *f, ooo_segment_t *s)
Definition: svm_fifo.h:650
struct clib_bihash_value offset
template key/value backing page structure
#define vnet_buffer(b)
Definition: buffer.h:417
static u8 tcp_lookup_is_valid(tcp_connection_t *tc, vlib_buffer_t *b, tcp_header_t *hdr)
Definition: tcp_input.c:1614
static u32 vlib_num_workers()
Definition: threads.h:377
void tcp_connection_cleanup(tcp_connection_t *tc)
Cleans up connection state.
Definition: tcp.c:242
void tcp_connection_del(tcp_connection_t *tc)
Connection removal.
Definition: tcp.c:289
f64 end
end of the time range
Definition: mactime.api:44
void tcp_reschedule(tcp_connection_t *tc)
Definition: tcp.c:1217
u16 flags
Copy of main node flags.
Definition: node.h:500
u32 session_tx_fifo_dequeue_drop(transport_connection_t *tc, u32 max_bytes)
Definition: session.c:561
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace.c:577
static u8 tcp_timer_is_active(tcp_connection_t *tc, tcp_timers_e timer)
Definition: tcp_timer.h:118
void tcp_program_ack(tcp_connection_t *tc)
Definition: tcp_output.c:1035
vlib_node_registration_t tcp6_listen_node
(constructor) VLIB_REGISTER_NODE (tcp6_listen_node)
Definition: tcp_input.c:2696
#define tcp_opts_sack_permitted(_to)
Definition: tcp_packet.h:159
static u32 tcp_tstamp(tcp_connection_t *tc)
Generate timestamp for tcp connection.
Definition: tcp_inlines.h:206
static void tcp_cc_rcv_cong_ack(tcp_connection_t *tc, tcp_cc_ack_t ack_type, tcp_rate_sample_t *rs)
Definition: tcp_cc.h:29
int session_stream_accept(transport_connection_t *tc, u32 listener_index, u32 thread_index, u8 notify)
Accept a stream session.
Definition: session.c:1110
static_always_inline void vlib_get_buffers(vlib_main_t *vm, u32 *bi, vlib_buffer_t **b, int count)
Translate array of buffer indices into buffer pointers.
Definition: buffer_funcs.h:280
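A common way this helper is paired with vlib_frame_vector_args() at the top of a node function is sketched below; it assumes the vlib headers from the VPP tree and omits the surrounding node registration and next-node dispatch.

#include <vlib/vlib.h>

/* Sketch: translate a frame's buffer indices into buffer pointers in bulk
 * before entering the per-packet loop. */
static uword
example_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
		 vlib_frame_t *frame)
{
  u32 n_left_from, *from;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

  from = vlib_frame_vector_args (frame); /* buffer indices in this frame */
  n_left_from = frame->n_vectors;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  while (n_left_from > 0)
    {
      /* process b[0] ... */
      b += 1;
      n_left_from -= 1;
    }
  return frame->n_vectors;
}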
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:301
tcp_bts_flags_t flags
Rate sample flags from bt sample.
Definition: tcp_types.h:237
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
static transport_connection_t * transport_get_listener(transport_proto_t tp, u32 conn_index)
Definition: transport.h:156
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
Definition: buffer.h:167
static uword tcp46_input_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, int is_ip4, u8 is_nolookup)
Definition: tcp_input.c:2813
static tcp_connection_t * tcp_get_connection_from_transport(transport_connection_t *tconn)
Definition: tcp_types.h:446
static tcp_main_t * vnet_get_tcp_main()
Definition: tcp.h:276
static uword tcp46_syn_sent_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame, int is_ip4)
Definition: tcp_input.c:1786
static uword tcp46_rcv_process_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame, int is_ip4)
Handles reception for all states except LISTEN, SYN-SENT and ESTABLISHED as per RFC793 p...
Definition: tcp_input.c:2101
session_t * session_lookup_listener4(u32 fib_index, ip4_address_t *lcl, u16 lcl_port, u8 proto, u8 use_wildcard)
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:85
vlib_node_registration_t tcp6_input_nolookup_node
(constructor) VLIB_REGISTER_NODE (tcp6_input_nolookup_node)
Definition: tcp_input.c:2967
static tcp_connection_t * tcp_input_lookup_buffer(vlib_buffer_t *b, u8 thread_index, u32 *error, u8 is_ip4, u8 is_nolookup)
Definition: tcp_inlines.h:225
static void tcp_handle_old_ack(tcp_connection_t *tc, tcp_rate_sample_t *rs)
Definition: tcp_input.c:940
#define tcp_ack(_th)
Definition: tcp_packet.h:83
#define seq_lt(_s1, _s2)
Definition: tcp_packet.h:177
static u32 transport_tx_fifo_size(transport_connection_t *tc)
Definition: session.h:524
transport_connection_t * session_lookup_half_open_connection(u64 handle, u8 proto, u8 is_ip4)
Definition: defs.h:46
ip6_address_t dst_address
Definition: ip6_packet.h:310
static u8 tcp_ack_is_cc_event(tcp_connection_t *tc, vlib_buffer_t *b, u32 prev_snd_wnd, u32 prev_snd_una, u8 *is_dack)
Checks if ack is a congestion control event.
Definition: tcp_input.c:973
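In the spirit of RFC5681, an ACK is treated as a duplicate, and hence a potential congestion signal, when it acknowledges nothing new, carries no payload, leaves the advertised window unchanged, and there is data in flight. The standalone sketch below is a simplified illustration only; the real tcp_ack_is_cc_event() also folds in SACK and recovery state.

#include <stdint.h>

/* Heuristic duplicate-ACK test per RFC5681 (simplified). */
static int
is_dup_ack (uint32_t seg_ack, uint32_t snd_una, uint32_t snd_nxt,
	    uint32_t seg_wnd, uint32_t prev_snd_wnd, uint32_t payload_len)
{
  int has_outstanding = snd_nxt != snd_una;  /* unacked data in flight */
  int acks_nothing_new = seg_ack == snd_una; /* no forward progress */
  int no_payload = payload_len == 0;	     /* pure ACK */
  int same_window = seg_wnd == prev_snd_wnd; /* no window update */

  return has_outstanding && acks_nothing_new && no_payload && same_window;
}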
static void tcp_estimate_rtt_us(tcp_connection_t *tc, f64 mrtt)
Definition: tcp_input.c:450
void tcp_rcv_sacks(tcp_connection_t *tc, u32 ack)
Definition: tcp_sack.c:326
static char * tcp_error_strings[]
Definition: tcp_input.c:24
#define TCP_EVT(_evt, _args...)
Definition: tcp_debug.h:145
static void tcp_rcv_rst(tcp_worker_ctx_t *wrk, tcp_connection_t *tc)
Handle reset packet.
Definition: tcp_input.c:207