#define tcp_error(n,s) s,

#define foreach_tcp_state_next                  \
  _ (DROP4, "ip4-drop")                         \
  _ (DROP6, "ip6-drop")                         \
  _ (TCP4_OUTPUT, "tcp4-output")                \
  _ (TCP6_OUTPUT, "tcp6-output")

/* Per-state next-node enums, all expanded from foreach_tcp_state_next */
typedef enum _tcp_established_next
{
#define _(s,n) TCP_ESTABLISHED_NEXT_##s,
  foreach_tcp_state_next
#undef _
} tcp_established_next_t;

typedef enum _tcp_rcv_process_next
{
#define _(s,n) TCP_RCV_PROCESS_NEXT_##s,
  foreach_tcp_state_next
#undef _
} tcp_rcv_process_next_t;

typedef enum _tcp_syn_sent_next
{
#define _(s,n) TCP_SYN_SENT_NEXT_##s,
  foreach_tcp_state_next
#undef _
} tcp_syn_sent_next_t;

typedef enum _tcp_listen_next
{
#define _(s,n) TCP_LISTEN_NEXT_##s,
  foreach_tcp_state_next
#undef _
} tcp_listen_next_t;

/* Generic, state-independent indices */
typedef enum _tcp_state_next
{
#define _(s,n) TCP_NEXT_##s,
  foreach_tcp_state_next
#undef _
} tcp_state_next_t;

#define tcp_next_output(is_ip4) (is_ip4 ? TCP_NEXT_TCP4_OUTPUT \
                                        : TCP_NEXT_TCP6_OUTPUT)

#define tcp_next_drop(is_ip4) (is_ip4 ? TCP_NEXT_DROP4 \
                                      : TCP_NEXT_DROP6)

/* Segment acceptability check: the segment must overlap the receive window */
  return (seq_geq (end_seq, tc->rcv_las)
          && seq_leq (seq, tc->rcv_nxt + tc->rcv_wnd));
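/* Illustrative sketch, not from this file: the same acceptability rule as a
 * self-contained helper, assuming 32-bit sequence numbers and the usual
 * wraparound-safe comparisons. Struct and function names are made up. */
#include <stdint.h>

#define seq_geq(a, b) ((int32_t) ((a) - (b)) >= 0)
#define seq_leq(a, b) ((int32_t) ((a) - (b)) <= 0)

typedef struct { uint32_t rcv_las, rcv_nxt, rcv_wnd; } rcv_state_t;

/* A segment [seq, end_seq] is acceptable if it ends at or after the last
 * acked sequence (rcv_las) and starts no more than one receive window past
 * the next expected sequence (rcv_nxt). */
static int
segment_in_rcv_wnd (const rcv_state_t *rc, uint32_t seq, uint32_t end_seq)
{
  return seq_geq (end_seq, rc->rcv_las)
         && seq_leq (seq, rc->rcv_nxt + rc->rcv_wnd);
}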
/* Timestamp-to-echo update (RFC 7323): only if the segment also reaches the
 * last acked sequence */
      && seq_leq (tc->rcv_las, seq_end))
    tc->tsval_recent = tc->rcv_opts.tsval;
/* Handle a reset that was flagged earlier, based on the state recorded when
 * the RST was received */
  switch (tc->rst_state)
    {
    case TCP_STATE_SYN_RCVD:
      /* ... */
    case TCP_STATE_SYN_SENT:
      /* ... */
    case TCP_STATE_ESTABLISHED:
      /* ... */
    case TCP_STATE_CLOSE_WAIT:
    case TCP_STATE_FIN_WAIT_1:
    case TCP_STATE_FIN_WAIT_2:
    case TCP_STATE_CLOSING:
    case TCP_STATE_LAST_ACK:
      /* ... */
    case TCP_STATE_CLOSED:
    case TCP_STATE_TIME_WAIT:
      break;
    default:
      TCP_DBG ("reset state: %u", tc->state);
    }

/* Defer the reset notification: remember the state at RST time */
  tc->rst_state = tc->state;
/* RST received: notify the session layer and tear down according to state */
  TCP_EVT (TCP_EVT_RST_RCVD, tc);
  switch (tc->state)
    {
    case TCP_STATE_SYN_RCVD:
      /* ... */
    case TCP_STATE_SYN_SENT:
      tc->rst_state = tc->state;
      /* ... */
    case TCP_STATE_ESTABLISHED:
      /* ... */
    case TCP_STATE_CLOSE_WAIT:
    case TCP_STATE_FIN_WAIT_1:
    case TCP_STATE_FIN_WAIT_2:
    case TCP_STATE_CLOSING:
    case TCP_STATE_LAST_ACK:
      /* ... */
    case TCP_STATE_CLOSED:
    case TCP_STATE_TIME_WAIT:
      break;
    default:
      TCP_DBG ("reset state: %u", tc->state);
    }
/* Segment validation: classify failures before dropping, acking or resetting */
  *error0 = TCP_ERROR_CONNECTION_CLOSED;
  /* ... */
  *error0 = TCP_ERROR_SEGMENT_INVALID;
  /* ... */
  *error0 = TCP_ERROR_OPTIONS;
  /* ... */
  *error0 = TCP_ERROR_PAWS;
  /* ... */
  /* Update the timestamp to be echoed */
  tc0->tsval_recent = tc0->rcv_opts.tsval;
  /* ... */

  /* Out-of-window SYN/SYN-ACK retransmit */
  if (tcp_syn (th0)
      && vnet_buffer (b0)->tcp.seq_number == tc0->rcv_nxt - 1)
    {
      if (tc0->state == TCP_STATE_SYN_RCVD)
        {
          TCP_EVT (TCP_EVT_SYN_RCVD, tc0, 0);
          *error0 = TCP_ERROR_SYNS_RCVD;
        }
      else
        {
          TCP_EVT (TCP_EVT_SYNACK_RCVD, tc0);
          *error0 = TCP_ERROR_SYN_ACKS_RCVD;
        }
    }

  /* Zero receive window but the segment is in sequence: let it through for
   * ack processing */
  if (tc0->rcv_wnd < tc0->snd_mss
      && tc0->rcv_nxt == vnet_buffer (b0)->tcp.seq_number)
    /* ... */

  /* Otherwise the segment is outside the receive window */
  /* ... tc0->rcv_las - tc0->rcv_wnd) */
  /* ... tc0->rcv_nxt + tc0->rcv_wnd)) */
  *error0 = TCP_ERROR_RCV_WND;

  /* A segment just past a zero window we advertised is likely a window probe */
  if ((tc0->flags & TCP_CONN_ZERO_RWND_SENT)
      && seq_lt (vnet_buffer (b0)->tcp.seq_end,
                 tc0->rcv_las + tc0->rcv_opts.mss))
    *error0 = TCP_ERROR_ZERO_RWND;

  /* RST bit set */
  *error0 = TCP_ERROR_RST_RCVD;
  /* ... */
  /* SYN inside the window */
  *error0 = TCP_ERROR_SPURIOUS_SYN;
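/* Illustrative sketch, not from this file: the PAWS branch above rejects
 * segments whose timestamp went backwards. A minimal RFC 7323-style check,
 * with made-up names and an assumed 1 ms timestamp tick. */
#include <stdint.h>

#define timestamp_lt(a, b) ((int32_t) ((a) - (b)) < 0)

/* roughly 24 days in 1 ms ticks, the usual PAWS idle limit */
#define PAWS_IDLE_TICKS (24u * 24u * 60u * 60u * 1000u)

typedef struct
{
  uint32_t tsval_recent;     /* last TSval accepted on this connection */
  uint32_t tsval_recent_age; /* our clock when it was accepted */
} ts_state_t;

/* Reject a segment whose TSval is older than the last accepted one, unless
 * the stored timestamp is itself too old to be trusted. */
static int
paws_reject (const ts_state_t *ts, uint32_t seg_tsval, uint32_t now)
{
  if (!timestamp_lt (seg_tsval, ts->tsval_recent))
    return 0;                           /* timestamp did not go backwards */
  if (timestamp_lt (ts->tsval_recent_age + PAWS_IDLE_TICKS, now))
    return 0;                           /* stored timestamp is stale, waive */
  return 1;
}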
/* Minimal ack processing: reject acks outside [snd_una, snd_nxt], otherwise
 * account the newly acked bytes */
  *error = TCP_ERROR_ACK_INVALID;
  /* ... */
  tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
  *error = TCP_ERROR_ACK_OK;
/* RTT estimation per Jacobson/Karels (RFC 6298), using integer shifts:
 * srtt += (mrtt - srtt)/8, rttvar += (|mrtt - srtt| - rttvar)/4 */
      err = mrtt - tc->srtt;
      tc->srtt = clib_max ((int) tc->srtt + (err >> 3), 1);
      diff = (clib_abs (err) - (int) tc->rttvar) >> 2;
      tc->rttvar = clib_max ((int) tc->rttvar + diff, 1);
      /* ... */
      tc->mrtt_us = tc->mrtt_us + (mrtt - tc->mrtt_us) * 0.125;
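/* Illustrative sketch, not from this file: the shifts above are the usual
 * exponentially weighted moving averages. The same estimator in plain
 * floating point, with made-up names, per RFC 6298. */
#include <math.h>

typedef struct { double srtt, rttvar, rto; int has_sample; } rtt_est_t;

static void
rtt_estimate_update (rtt_est_t *e, double rtt_sample)
{
  if (!e->has_sample)
    {
      e->srtt = rtt_sample;
      e->rttvar = rtt_sample / 2.0;
      e->has_sample = 1;
    }
  else
    {
      /* rttvar = 3/4 rttvar + 1/4 |srtt - rtt|; srtt = 7/8 srtt + 1/8 rtt */
      e->rttvar = 0.75 * e->rttvar + 0.25 * fabs (e->srtt - rtt_sample);
      e->srtt = 0.875 * e->srtt + 0.125 * rtt_sample;
    }
  /* RTO = srtt + max (G, 4 * rttvar); clock granularity G omitted here */
  e->rto = e->srtt + 4.0 * e->rttvar;
}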
/* RTT sample selection: only acks that cover the timed sequence are used */
  if (!(tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
      /* ... */
  if (tc->rtt_ts && seq_geq (ack, tc->rtt_seq))
    /* ... */
  tc->mrtt_us = clib_max (tc->mrtt_us, 0.0001);
  /* ... */
  /* Ignore overly large samples taken while the retransmit timer is backed off */
  if (tc->rto_boff && mrtt > 1 * THZ)
    /* ... */
  /* First sample: seed rttvar with half the measured rtt */
  tc->rttvar = mrtt >> 1;
/* Flush dequeue notifications postponed while processing the frame */
  u32 *pending_deq_acked;
  /* ... */
  for (i = 0; i < vec_len (pending_deq_acked); i++)
    {
      /* ... */
      tc->flags &= ~TCP_CONN_DEQ_PENDING;
      /* ... */
      if (seq_leq (tc->psh_seq, tc->snd_una))
        tc->flags &= ~TCP_CONN_PSH_PENDING;
    }

/* Program a dequeue for the acked bytes, at most once per dispatch run */
  if (!(tc->flags & TCP_CONN_DEQ_PENDING))
    {
      /* ... */
      tc->flags |= TCP_CONN_DEQ_PENDING;
    }
  tc->burst_acked += tc->bytes_acked;
/* RFC 793 send window update check (SND.WL1/SND.WL2) */
  if (seq_lt (tc->snd_wl1, seq)
      || (tc->snd_wl1 == seq && seq_leq (tc->snd_wl2, ack)))
    {
      tc->snd_wnd = snd_wnd;
      /* ... */
    }
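/* Illustrative sketch, not from this file: the same update rule as a
 * standalone helper. The struct and helper macros are assumptions made for
 * the example. */
#include <stdint.h>

#define seq_lt(a, b)  ((int32_t) ((a) - (b)) < 0)
#define seq_leq(a, b) ((int32_t) ((a) - (b)) <= 0)

typedef struct { uint32_t snd_wnd, snd_wl1, snd_wl2; } snd_state_t;

/* Update the send window only if this segment is newer than the one that
 * last updated it: SND.WL1 < SEG.SEQ, or SND.WL1 = SEG.SEQ and
 * SND.WL2 <= SEG.ACK (RFC 793). */
static void
update_snd_wnd (snd_state_t *s, uint32_t seq, uint32_t ack, uint32_t wnd)
{
  if (seq_lt (s->snd_wl1, seq)
      || (s->snd_wl1 == seq && seq_leq (s->snd_wl2, ack)))
    {
      s->snd_wnd = wnd;
      s->snd_wl1 = seq;
      s->snd_wl2 = ack;
    }
}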
/* Enter congestion recovery: snapshot the state needed to exit or undo it */
  tc->snd_congestion = tc->snd_nxt;
  tc->cwnd_acc_bytes = 0;
  tc->snd_rxt_bytes = 0;
  tc->rxt_delivered = 0;
  tc->prr_delivered = 0;
  tc->prr_start = tc->snd_una;
  tc->prev_ssthresh = tc->ssthresh;
  tc->prev_cwnd = tc->cwnd;
  /* ... */
  tc->fr_occurences += 1;
  TCP_EVT (TCP_EVT_CC_EVT, tc, 4);

/* Undo recovery: the loss turned out to be spurious, restore cwnd/ssthresh */
  tc->cwnd = tc->prev_cwnd;
  tc->ssthresh = tc->prev_ssthresh;
  /* ... */
  ASSERT (tc->rto_boff == 0);
  TCP_EVT (TCP_EVT_CC_EVT, tc, 5);
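/* Illustrative sketch, not from this file: the snapshot/restore pair above
 * implements loss-recovery undo. Types, names and the halving policy below
 * are assumptions for the example. */
#include <stdint.h>

typedef struct
{
  uint32_t cwnd, ssthresh;
  uint32_t prev_cwnd, prev_ssthresh;
  uint32_t snd_nxt, snd_congestion;
} cc_state_t;

/* On entering recovery, remember where loss was detected and the window
 * values needed to undo the reduction if it turns out to be spurious. */
static void
cc_enter_recovery (cc_state_t *cc)
{
  cc->snd_congestion = cc->snd_nxt;
  cc->prev_cwnd = cc->cwnd;
  cc->prev_ssthresh = cc->ssthresh;
  cc->ssthresh = cc->cwnd / 2;          /* illustrative reduction */
  cc->cwnd = cc->ssthresh;
}

/* If the retransmit is later judged spurious, restore the saved values. */
static void
cc_undo_recovery (cc_state_t *cc)
{
  cc->cwnd = cc->prev_cwnd;
  cc->ssthresh = cc->prev_ssthresh;
}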
/* Still below the congestion point recorded when recovery started */
  if (seq_leq (tc->snd_una, tc->snd_congestion)
      && ((!(tc->cwnd > tc->snd_mss
             && tc->bytes_acked <= 4 * tc->snd_mss))
          || (tc->rcv_opts.tsecr != tc->tsecr_last_ack)))
    /* ... */

/* Fast recovery triggers on SACK-reported loss or enough duplicate acks */
  return tc->sack_sb.lost_bytes || tc->rcv_dupacks >= tc->sack_sb.reorder;
  if (tc->sack_sb.sacked_bytes)
    /* ... */
  tc->snd_congestion = tc->snd_nxt;
  /* ... */
  tc->rxt_delivered = 0;
  tc->snd_rxt_bytes = 0;
  /* ... */
  tc->prr_delivered = 0;
  /* ... */
  tc->flags &= ~TCP_CONN_RXT_PENDING;
  /* ... */
  if (hole && hole->start == tc->snd_una && hole->end == tc->snd_nxt)
    /* ... */
  TCP_EVT (TCP_EVT_CC_EVT, tc, 3);
  /* ... */
  ASSERT (tc->rto_boff == 0);
  /* ... */
  if (/* ... */
      (seq_leq (tc->snd_congestion, tc->snd_una - tc->bytes_acked)
       && seq_gt (tc->snd_congestion, tc->snd_una)))
    tc->snd_congestion = tc->snd_una - 1;
/* Duplicate-ack and congestion-event accounting while in recovery */
  TCP_EVT (TCP_EVT_DUPACK_RCVD, tc, 1);
  /* ... */
  if (!tc->bytes_acked && tc->sack_sb.rxt_sacked)
    /* ... */
  tc->rxt_delivered += tc->sack_sb.rxt_sacked;
  tc->prr_delivered += tc->bytes_acked + tc->sack_sb.last_sacked_bytes
                       - tc->sack_sb.last_bytes_delivered;
  /* ... */
  tc->rcv_dupacks += 1;
  TCP_EVT (TCP_EVT_DUPACK_RCVD, tc, 1);
  /* ... */
  tc->rxt_delivered = clib_min (tc->rxt_delivered + tc->bytes_acked,
                                /* ... */);
  tc->prr_delivered += clib_min (tc->snd_mss,
                                 tc->snd_nxt - tc->snd_una);
  /* ... */
  tc->prr_delivered += tc->bytes_acked - clib_min (tc->bytes_acked,
                                                   /* ... */);
  /* ... */
  /* Recovery completes once the ack covers snd_congestion */
  if (seq_geq (tc->snd_una, tc->snd_congestion))
    /* ... */
  tc->tsecr_last_ack = tc->rcv_opts.tsecr;
  if (!tc->bytes_acked)
    /* ... */
  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
    /* ... */

/* A pure duplicate ack: same ack number, data still in flight, window
 * unchanged */
  return ((vnet_buffer (b)->tcp.ack_number == prev_snd_una)
          && seq_gt (tc->snd_nxt, tc->snd_una)
          /* ... */
          && (prev_snd_wnd == tc->snd_wnd));

/* Classify the ack before congestion control sees it */
                     u32 prev_snd_wnd, u32 prev_snd_una, u8 * is_dack)
  /* ... */
  *is_dack = tc->sack_sb.last_sacked_bytes
    /* ... */
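/* Illustrative sketch, not from this file: an RFC 5681-style duplicate-ack
 * test with made-up types. The real check also requires that the segment
 * carries no data or SYN/FIN, which is not modeled here. */
#include <stdint.h>

#define seq_gt(a, b) ((int32_t) ((a) - (b)) > 0)

typedef struct { uint32_t snd_una, snd_nxt, snd_wnd; } snd_side_t;

static int
ack_is_dupack (const snd_side_t *s, uint32_t ack_number,
               uint32_t prev_snd_una, uint32_t prev_snd_wnd)
{
  return ack_number == prev_snd_una      /* repeats the previous snd_una   */
         && seq_gt (s->snd_nxt, s->snd_una) /* data is still outstanding   */
         && prev_snd_wnd == s->snd_wnd;  /* advertised window unchanged    */
}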
/* Process an incoming ack */
  u32 prev_snd_wnd, prev_snd_una;

  /* Ack for something not yet sent */
  tc->errors.above_ack_wnd += 1;
  *error = TCP_ERROR_ACK_FUTURE;
  /* ... */
  /* Ack for data already acknowledged */
  tc->errors.below_ack_wnd += 1;
  *error = TCP_ERROR_ACK_OLD;
  /* ... */
  prev_snd_wnd = tc->snd_wnd;
  prev_snd_una = tc->snd_una;
  /* Send window update, scaled by the peer's window scale */
  /* ... */ clib_net_to_host_u16 (th->window) << tc->snd_wscale);
  tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
  /* ... */
  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
    /* ... */
  if (tc->bytes_acked + tc->sack_sb.last_sacked_bytes)
    /* ... */
  if (tc->bytes_acked)
    /* ... */
  TCP_EVT (TCP_EVT_ACK_RCVD, tc);
  /* ... */
  tc->dupacks_in += is_dack;
  *error = TCP_ERROR_ACK_OK;
  /* ... */
  *error = TCP_ERROR_ACK_DUP;
  /* ... */
  *error = TCP_ERROR_ACK_OK;
/* Flush deferred disconnect and reset notifications at end of frame */
  u32 thread_index, *pending_disconnects, *pending_resets;
  /* ... */
  for (i = 0; i < vec_len (pending_disconnects); i++)
    /* ... */
  for (i = 0; i < vec_len (pending_resets); i++)
    /* ... */

/* FIN received: remember it and notify the session layer */
  tc->flags |= TCP_CONN_FINRCVD;
  /* ... */
  TCP_EVT (TCP_EVT_FIN_RCVD, tc);
  *error = TCP_ERROR_FIN_RCVD;
/* Enqueue in-order data and advance rcv_nxt according to what was written */
  int written, error = TCP_ERROR_ENQUEUED;
  /* ... */
  tc->bytes_in += written;

  TCP_EVT (TCP_EVT_INPUT, tc, 0, data_len, written);

  if (written == data_len)
    {
      tc->rcv_nxt += written;
    }
  /* More written than expected: in-order data plus previously buffered
   * out-of-order bytes were delivered */
  else if (written > data_len)
    {
      tc->rcv_nxt += written;
      TCP_EVT (TCP_EVT_CC_INPUT, tc, data_len, written);
    }
  else if (written > 0)
    {
      /* Wrote something, but the fifo is likely full */
      tc->rcv_nxt += written;
      error = TCP_ERROR_PARTIALLY_ENQUEUED;
    }
  else
    {
      /* Nothing written: distinguish a zero receive window from a full fifo */
      if (tc->rcv_wnd < tc->snd_mss)
        return TCP_ERROR_ZERO_RWND;
      return TCP_ERROR_FIFO_FULL;
    }
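/* Illustrative sketch, not from this file: the same outcome classification as
 * a standalone helper, useful for seeing the cases at a glance. The enum and
 * function are made up; the error distinctions follow the fragment above. */
typedef enum
{
  ENQ_OK,            /* everything written, rcv_nxt advances by data_len    */
  ENQ_OK_WITH_OOO,   /* written > data_len: ooo bytes were also delivered   */
  ENQ_PARTIAL,       /* only part of the segment fit in the fifo            */
  ENQ_ZERO_RWND,     /* nothing fit and the advertised window was near zero */
  ENQ_FIFO_FULL,     /* nothing fit although the window was open            */
} enq_result_t;

static enq_result_t
classify_enqueue (int written, int data_len, int rcv_wnd, int snd_mss)
{
  if (written == data_len)
    return ENQ_OK;
  if (written > data_len)
    return ENQ_OK_WITH_OOO;
  if (written > 0)
    return ENQ_PARTIAL;
  return rcv_wnd < snd_mss ? ENQ_ZERO_RWND : ENQ_FIFO_FULL;
}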
  TCP_EVT (TCP_EVT_INPUT, tc, 1, data_len, 0);
  return TCP_ERROR_FIFO_FULL;
  /* ... */
  TCP_EVT (TCP_EVT_INPUT, tc, 1, data_len, data_len);
  /* ... */
  s0 = session_get (tc->c_s_index, tc->c_thread_index);
  /* ... */
  start = tc->rcv_nxt + offset;
  /* ... */
  TCP_EVT (TCP_EVT_CC_SACKS, tc);
  /* ... */
  return TCP_ERROR_ENQUEUED_OOO;

  if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
    /* ... */
  n_bytes_to_drop -= discard;
  /* ... */
  while (n_bytes_to_drop);
  if (n_bytes_to_drop > first)
    /* ... */
/* Receive a data segment: handle complete and partial overlaps with data
 * already received */
  u32 error, n_bytes_to_drop, n_data_bytes;
  /* ... */
  tc->data_segs_in += 1;
  /* ... */
  /* Segment entirely below rcv_nxt: old retransmission */
  tc->errors.below_data_wnd++;
  error = TCP_ERROR_SEGMENT_OLD;
  /* ... */
  /* Partial overlap: drop the bytes we already have */
  n_bytes_to_drop = tc->rcv_nxt - vnet_buffer (b)->tcp.seq_number;
  n_data_bytes -= n_bytes_to_drop;
  /* ... */
  error = TCP_ERROR_SEGMENT_OLD;
  /* ... */
             tc->rcv_las + tc->rcv_wnd);
  s = format (s, "%d -> %d (%U)",
              clib_net_to_host_u16 (t->tcp_header.dst_port),
              /* ... */);
  /* ... */
  if (b0->flags & VLIB_BUFFER_IS_TRACED)
    /* ... */

/* Per-node error counter helpers: accumulate per-error counts locally and
 * flush them to the ip4/ip6 node counters once per frame */
#define tcp_maybe_inc_counter(node_id, err, count)                       \
  if (next0 != tcp_next_drop (is_ip4))                                   \
    tcp_node_inc_counter_i (vm, tcp4_##node_id##_node.index,             \
                            tcp6_##node_id##_node.index, is_ip4, err,    \
                            /* ... */)

#define tcp_inc_counter(node_id, err, count)                             \
  tcp_node_inc_counter_i (vm, tcp4_##node_id##_node.index,               \
                          tcp6_##node_id##_node.index, is_ip4,           \
                          err, count)

#define tcp_maybe_inc_err_counter(cnts, err)                             \
  cnts[err] += (next0 != tcp_next_drop (is_ip4));

#define tcp_inc_err_counter(cnts, err, val)                              \
  cnts[err] += val;

#define tcp_store_err_counters(node_id, cnts)                            \
  for (i = 0; i < TCP_N_ERROR; i++)                                      \
    tcp_inc_counter (node_id, i, cnts[i]);

  u32 n_left_from, *from, *first_buffer;
/* tcp4/6-established main loop */
  while (n_left_from > 0)
    {
      u32 bi0, error0 = TCP_ERROR_ACK_OK;
      /* ... */
      if (n_left_from > 1)
        {
          /* prefetch the next buffer's header */
          /* ... */
        }
      /* ... */
      error0 = TCP_ERROR_INVALID_CONNECTION;
      /* ... */
    }
  /* ... */
  err_counters[TCP_ERROR_MSG_QUEUE_FULL] = errors;
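/* Illustrative sketch, not from this file: the counter macros above follow a
 * common pattern, count errors in a small per-frame array and push the totals
 * out once instead of touching shared counters per packet. The flush callback
 * stands in for the real per-node counter update; all names are made up. */
#include <stdint.h>
#include <string.h>

#define N_ERROR 16   /* illustrative stand-in for TCP_N_ERROR */

typedef void (*flush_fn_t) (uint32_t err, uint64_t count, void *ctx);

typedef struct { uint64_t counts[N_ERROR]; } err_counters_t;

static inline void
err_counters_init (err_counters_t *ec)
{
  memset (ec->counts, 0, sizeof (ec->counts));
}

static inline void
err_counters_inc (err_counters_t *ec, uint32_t err, uint64_t val)
{
  ec->counts[err] += val;
}

/* One pass at the end of the frame flushes only the errors actually seen */
static inline void
err_counters_store (err_counters_t *ec, flush_fn_t flush, void *ctx)
{
  for (uint32_t i = 0; i < N_ERROR; i++)
    if (ec->counts[i])
      flush (i, ec->counts[i], ctx);
}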
  .name = "tcp4-established",
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,

  .name = "tcp6-established",
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,

/* Validate that the connection returned by the lookup matches the packet */
  if (tc->c_lcl_port == 0 && tc->state == TCP_STATE_LISTEN)
    /* ... */
  u8 is_ip_valid = 0, val_l, val_r;

  if (tc->connection.is_ip4)
    {
      val_l = !ip4_address_compare (/* ... */,
                                    &tc->connection.lcl_ip.ip4);
      val_l = val_l || ip_is_zero (&tc->connection.lcl_ip, 1);
      val_r = !ip4_address_compare (/* ... */,
                                    &tc->connection.rmt_ip.ip4);
      val_r = val_r || tc->state == TCP_STATE_LISTEN;
      is_ip_valid = val_l && val_r;
    }
  else
    {
      val_l = !ip6_address_compare (/* ... */,
                                    &tc->connection.lcl_ip.ip6);
      val_l = val_l || ip_is_zero (&tc->connection.lcl_ip, 0);
      val_r = !ip6_address_compare (/* ... */,
                                    &tc->connection.rmt_ip.ip6);
      val_r = val_r || tc->state == TCP_STATE_LISTEN;
      is_ip_valid = val_l && val_r;
    }

  u8 is_valid = (tc->c_lcl_port == hdr->dst_port
                 && (tc->state == TCP_STATE_LISTEN
                     || tc->c_rmt_port == hdr->src_port) && is_ip_valid);

  /* Half-open connections are re-validated through their lookup handle */
  /* ... tc->c_proto, tc->c_is_ip4); */
  if (tmp->lcl_port == hdr->dst_port
      && tmp->rmt_port == hdr->src_port)
    {
      TCP_DBG ("half-open is valid!");
      /* ... */
    }
/* Connection lookup for an input buffer: full 5-tuple lookup for ip4/ip6,
 * with listener and half-open fallbacks */
  /* ... */ TRANSPORT_PROTO_TCP, thread_index, &is_filtered);
  /* ... */
  /* ... */ TRANSPORT_PROTO_TCP, thread_index, &is_filtered);
  /* ... */
  /* ... */ tcp->dst_port, TRANSPORT_PROTO_TCP, 1);
  /* ... */
  /* ... */ tcp->dst_port, TRANSPORT_PROTO_TCP, 1);
  /* ... */
  /* ... */ (TRANSPORT_PROTO_TCP, /* ... */

/* Enable TSO if the output interface supports it */
  u32 sw_if_idx, lb_idx;
  /* ... */
  ip6_address_t *dst_addr = &(tc->c_rmt_ip.ip6);
  /* ... */
  tc->cfg_flags |= TCP_CFG_F_TSO;
/* tcp4/6-syn-sent: process replies to our SYNs */
  u32 n_left_from, *from, *first_buffer, errors = 0;
  /* ... */
  while (n_left_from > 0)
    {
      u32 bi0, ack0, seq0, error0 = TCP_ERROR_NONE;
      /* ... */
      error0 = TCP_ERROR_INVALID_CONNECTION;
      /* ... */
      /* Half-open already completed or cleaned up */
      /* ... */ my_thread_index, is_ip4));
      error0 = TCP_ERROR_SPURIOUS_SYN_ACK;
      /* ... */
      /* Crude check that the connection actually matches the packet */
          || tcp0->src_port != tc0->c_rmt_port))
        error0 = TCP_ERROR_INVALID_CONNECTION;
      /* ... */
      error0 = TCP_ERROR_SEGMENT_INVALID;
      /* ... */

      /* 1. check the ACK bit */
      if (seq_leq (ack0, tc0->iss) || seq_gt (ack0, tc0->snd_nxt))
        {
          /* ... */
          error0 = TCP_ERROR_RCV_WND;
          /* ... */
        }
      if (seq_gt (tc0->snd_una, ack0))
        {
          error0 = TCP_ERROR_ACK_INVALID;
          /* ... */
        }

      /* 2. check the RST bit */
      error0 = TCP_ERROR_RST_RCVD;
      /* ... */

      /* 3. check the SYN bit and parse options */
      error0 = TCP_ERROR_SEGMENT_INVALID;
      /* ... */
      error0 = TCP_ERROR_OPTIONS;
      /* ... */

      /* Valid SYN or SYN-ACK: seed the new connection from the segment */
      new_tc0->irs = seq0;
      /* ... */
      new_tc0->tsval_recent = new_tc0->rcv_opts.tsval;
      /* ... */
      new_tc0->snd_wscale = new_tc0->rcv_opts.wscale;
      /* ... */
      new_tc0->rcv_wscale = 0;
      /* ... */
      new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
        << new_tc0->snd_wscale;
      new_tc0->snd_wl1 = seq0;
      new_tc0->snd_wl2 = ack0;
      /* ... */

      /* SYN-ACK: our SYN is acked, move to established */
      new_tc0->snd_una = ack0;
      new_tc0->state = TCP_STATE_ESTABLISHED;
      /* ... */
      new_tc0->rcv_las = new_tc0->rcv_nxt;
      /* ... */
      error0 = TCP_ERROR_CREATE_SESSION_FAIL;
      /* ... */
      new_tc0->tx_fifo_size = /* ... */
      /* ... */
      TCP_EVT (TCP_EVT_SYNACK_RCVD, new_tc0);
      error0 = TCP_ERROR_SYN_ACKS_RCVD;

      /* SYN without ACK: simultaneous open, move to syn-rcvd */
      new_tc0->state = TCP_STATE_SYN_RCVD;
      /* ... */
      TCP_EVT (TCP_EVT_RST_SENT, tc0);
      error0 = TCP_ERROR_CREATE_SESSION_FAIL;
      /* ... */
      new_tc0->tx_fifo_size = /* ... */
      new_tc0->rtt_ts = 0;
      /* ... */
      error0 = TCP_ERROR_SYNS_RCVD;
      /* ... */
      if (!(new_tc0->cfg_flags & TCP_CFG_F_NO_TSO))
        /* ... */
      if (error0 == TCP_ERROR_ACK_OK)
        error0 = TCP_ERROR_SYN_ACKS_RCVD;
      /* ... */
      tc0->flags |= TCP_CONN_HALF_OPEN_DONE;
    }
  .name = "tcp4-syn-sent",
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,

  .name = "tcp6-syn-sent",
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,

/* tcp4/6-rcv-process: handles all states other than established, syn-sent
 * and listen */
  u32 n_left_from, *from, max_dequeue;
  /* ... */
  while (n_left_from > 0)
    {
      u32 bi0, error0 = TCP_ERROR_NONE;
      /* ... */
      error0 = TCP_ERROR_INVALID_CONNECTION;
      /* ... */
      if (tmp->state != tc0->state)
        {
          if (tc0->state != TCP_STATE_CLOSED)
            error0 = TCP_ERROR_CONNECTION_CLOSED;
          /* ... */
        }

      /* 5: check the ACK field */
      switch (tc0->state)
        {
        case TCP_STATE_SYN_RCVD:
          /* Expect exactly the next in-order segment and no FIN */
          if (tc0->rcv_nxt != vnet_buffer (b0)->tcp.seq_number || is_fin0)
            {
              error0 = TCP_ERROR_SEGMENT_INVALID;
              /* ... */
            }
          /* ... */
          error0 = TCP_ERROR_SEGMENT_INVALID;
          /* ... */
          tc0->state = TCP_STATE_ESTABLISHED;
          TCP_EVT (TCP_EVT_STATE_CHANGE, tc0);
          if (!(tc0->cfg_flags & TCP_CFG_F_NO_TSO))
            /* ... */
          tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
            << tc0->rcv_opts.wscale;
          /* ... */
          error0 = TCP_ERROR_MSG_QUEUE_FULL;
          /* ... */
          error0 = TCP_ERROR_ACK_OK;
          break;
        case TCP_STATE_ESTABLISHED:
          /* ... */
          break;
        case TCP_STATE_FIN_WAIT_1:
          /* App queued a FIN: wait until everything is dequeued and acked
           * before sending it */
          if (tc0->flags & TCP_CONN_FINPNDG)
            {
              /* ... */
              if (max_dequeue <= tc0->burst_acked)
                /* ... */
            }
          else if ((tc0->flags & TCP_CONN_FINRCVD) && tc0->bytes_acked)
            /* ... */
          else if (tc0->snd_una == tc0->snd_nxt)
            {
              /* Our FIN is acked */
              if (tc0->flags & TCP_CONN_FINRCVD)
                /* ... */
            }
          if (tc0->burst_acked > 1)
            /* ... tc0->burst_acked - 1); */
          tc0->burst_acked = 0;
          break;
        case TCP_STATE_FIN_WAIT_2:
          /* ... */
          tc0->burst_acked = 0;
          break;
        case TCP_STATE_CLOSE_WAIT:
          /* ... */
          if (!(tc0->flags & TCP_CONN_FINPNDG))
            /* ... */
          if (max_dequeue > tc0->burst_acked)
            /* ... */
          break;
        case TCP_STATE_CLOSING:
          /* ... */
          if (tc0->snd_una != tc0->snd_nxt)
            /* ... */
          break;
        case TCP_STATE_LAST_ACK:
          /* ... */
          if (is_fin0 && tc0->snd_una != tc0->snd_nxt)
            /* ... */
          break;
        case TCP_STATE_TIME_WAIT:
          /* ... */
          break;
        }
      /* 7: process the segment text */
      switch (tc0->state)
        {
        case TCP_STATE_ESTABLISHED:
        case TCP_STATE_FIN_WAIT_1:
        case TCP_STATE_FIN_WAIT_2:
          /* ... */
          break;
        case TCP_STATE_CLOSE_WAIT:
        case TCP_STATE_CLOSING:
        case TCP_STATE_LAST_ACK:
        case TCP_STATE_TIME_WAIT:
          /* ... */
          break;
        }

      /* 8: check the FIN bit */
      TCP_EVT (TCP_EVT_FIN_RCVD, tc0);
      switch (tc0->state)
        {
        case TCP_STATE_ESTABLISHED:
          /* ... */
        case TCP_STATE_SYN_RCVD:
          /* ... */
        case TCP_STATE_CLOSE_WAIT:
        case TCP_STATE_CLOSING:
        case TCP_STATE_LAST_ACK:
          break;
        case TCP_STATE_FIN_WAIT_1:
          if (tc0->flags & TCP_CONN_FINPNDG)
            /* ... */
          tc0->flags |= TCP_CONN_FINRCVD;
          /* ... */
          break;
        case TCP_STATE_FIN_WAIT_2:
          /* ... */
          break;
        case TCP_STATE_TIME_WAIT:
          /* ... */
          break;
        }
      error0 = TCP_ERROR_FIN_RCVD;
  .name = "tcp4-rcv-process",
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,

  .name = "tcp6-rcv-process",
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,

  u32 n_left_from, *from, n_syns = 0, *first_buffer;
/* tcp4/6-listen: accept new connections on SYN */
  while (n_left_from > 0)
    {
      u32 bi, error = TCP_ERROR_NONE;
      /* ... */
      /* A connection for this tuple already exists */
      if (tc->state != TCP_STATE_TIME_WAIT)
        {
          error = TCP_ERROR_CREATE_EXISTS;
          /* ... */
        }
      /* ... */
      error = TCP_ERROR_CREATE_EXISTS;
      /* ... */
      error = TCP_ERROR_OPTIONS;
      /* ... */
      /* Create the child connection and send a SYN-ACK */
      child->state = TCP_STATE_SYN_RCVD;
      child->c_fib_index = lc->c_fib_index;
      child->cc_algo = lc->cc_algo;
      /* ... */
      TCP_EVT (TCP_EVT_SYN_RCVD, child, 1);
      /* ... lc->c_thread_index, 0)) */
      error = TCP_ERROR_CREATE_SESSION_FAIL;
      /* ... */
      n_syns += (error == TCP_ERROR_NONE);
    }
  .name = "tcp4-listen",
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_LISTEN_NEXT_##s] = n,

  .name = "tcp6-listen",
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_LISTEN_NEXT_##s] = n,

typedef enum _tcp_input_next
{
  TCP_INPUT_NEXT_DROP,
  TCP_INPUT_NEXT_LISTEN,
  TCP_INPUT_NEXT_RCV_PROCESS,
  TCP_INPUT_NEXT_SYN_SENT,
  TCP_INPUT_NEXT_ESTABLISHED,
  TCP_INPUT_NEXT_RESET,
  TCP_INPUT_NEXT_PUNT,
  TCP_INPUT_N_NEXT
} tcp_input_next_t;
#define foreach_tcp4_input_next                 \
  _ (DROP, "ip4-drop")                          \
  _ (LISTEN, "tcp4-listen")                     \
  _ (RCV_PROCESS, "tcp4-rcv-process")           \
  _ (SYN_SENT, "tcp4-syn-sent")                 \
  _ (ESTABLISHED, "tcp4-established")           \
  _ (RESET, "tcp4-reset")                       \
  _ (PUNT, "ip4-punt")

#define foreach_tcp6_input_next                 \
  _ (DROP, "ip6-drop")                          \
  _ (LISTEN, "tcp6-listen")                     \
  _ (RCV_PROCESS, "tcp6-rcv-process")           \
  _ (SYN_SENT, "tcp6-syn-sent")                 \
  _ (ESTABLISHED, "tcp6-established")           \
  _ (RESET, "tcp6-reset")                       \
  _ (PUNT, "ip6-punt")

#define filter_flags (TCP_FLAG_SYN | TCP_FLAG_ACK | TCP_FLAG_RST | TCP_FLAG_FIN)

  for (i = 0; i < n_bufs; i++)
    if (bs[i]->flags & VLIB_BUFFER_IS_TRACED)
      /* ... */

/* No connection found: drop, punt or reset */
  if (*error == TCP_ERROR_FILTERED || *error == TCP_ERROR_WRONG_THREAD)
    /* ... */
  else if ((is_ip4 && tm->punt_unknown4) || (!is_ip4 && tm->punt_unknown6))
    {
      /* ... */
      *error = TCP_ERROR_PUNT;
    }
  else
    {
      /* ... */
      *error = TCP_ERROR_NO_LISTENER;
    }

/* Dispatch on connection state and header flags */
  *next = tm->dispatch_table[tc->state][flags].next;
  error = tm->dispatch_table[tc->state][flags].error;
  /* ... */
  if (error == TCP_ERROR_DISPATCH)
    clib_warning ("tcp conn %u disp error state %U flags %U",
                  /* ... */);
/* tcp4/6-input: look up connections and dispatch, two buffers at a time */
  while (n_left_from >= 4)
    {
      u32 error0 = TCP_ERROR_NO_LISTENER, error1 = TCP_ERROR_NO_LISTENER;
      /* ... */
      /* Stash the connection index for the next node */
      vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
      vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;
      /* ... */
      vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
      /* ... */
      vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;
      /* ... */
    }

  while (n_left_from > 0)
    {
      u32 error0 = TCP_ERROR_NO_LISTENER;

      if (n_left_from > 1)
        {
          /* prefetch the next buffer */
          /* ... */
        }
      /* ... */
      vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
      /* ... */
    }
  .name = "tcp4-input-nolookup",
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,

  .name = "tcp6-input-nolookup",
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,

  .name = "tcp4-input",
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,

  .name = "tcp6-input",
  .vector_size = sizeof (u32),
  /* ... */
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,

#ifndef CLIB_MARCH_VARIANT
/* Initialize the (state, flags) dispatch table: everything defaults to a drop
 * counted as TCP_ERROR_DISPATCH; valid combinations are then filled in */
  for (i = 0; i < ARRAY_LEN (tm->dispatch_table); i++)
    for (j = 0; j < ARRAY_LEN (tm->dispatch_table[i]); j++)
      {
        /* ... */
        tm->dispatch_table[i][j].error = TCP_ERROR_DISPATCH;
      }

#define _(t,f,n,e)                                              \
{                                                               \
  tm->dispatch_table[TCP_STATE_##t][f].next = (n);              \
  tm->dispatch_table[TCP_STATE_##t][f].error = (e);             \
}

  /* A long list of (state, flags) -> (next node, error) entries follows;
   * most invalid combinations map to drops counted as TCP_ERROR_ACK_INVALID,
   * TCP_ERROR_SEGMENT_INVALID, TCP_ERROR_INVALID_CONNECTION or
   * TCP_ERROR_CONNECTION_CLOSED. */
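/* Illustrative sketch, not from this file: the dispatch table maps
 * (connection state, header flags) to a next node and an error counter, so
 * per-packet branching collapses into one table lookup. States, flags and
 * node ids below are simplified placeholders. */
#include <stdint.h>

enum { ST_LISTEN, ST_SYN_SENT, ST_ESTABLISHED, N_STATES };
enum { NEXT_DROP, NEXT_LISTEN, NEXT_SYN_SENT, NEXT_ESTABLISHED, N_NEXT };
enum { ERR_NONE, ERR_DISPATCH, N_ERRORS };

/* Flags index: bit0 = FIN, bit1 = SYN, bit2 = RST, bit3 = ACK (16 combos) */
#define N_FLAG_COMBOS 16

typedef struct { uint8_t next; uint8_t error; } dispatch_entry_t;

static dispatch_entry_t dispatch_table[N_STATES][N_FLAG_COMBOS];

static void
dispatch_table_init (void)
{
  /* default: drop and count a dispatch error */
  for (int s = 0; s < N_STATES; s++)
    for (int f = 0; f < N_FLAG_COMBOS; f++)
      dispatch_table[s][f] = (dispatch_entry_t) { NEXT_DROP, ERR_DISPATCH };

  /* fill in the valid combinations, e.g. a SYN (bit1) while listening */
  dispatch_table[ST_LISTEN][1 << 1] =
    (dispatch_entry_t) { NEXT_LISTEN, ERR_NONE };
  dispatch_table[ST_ESTABLISHED][1 << 3] =
    (dispatch_entry_t) { NEXT_ESTABLISHED, ERR_NONE };
}

/* Per packet: one lookup instead of a chain of state/flag branches */
static inline dispatch_entry_t
dispatch (int state, int flags)
{
  return dispatch_table[state][flags & (N_FLAG_COMBOS - 1)];
}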